ZeroTier standard .clang-format -- switch to spaces, match indentation of Rust, mostly based on LLVM format.

Adam Ierymenko 2021-04-26 08:55:28 -04:00
parent 2698dab696
commit 12e7546ebc
No known key found for this signature in database
GPG key ID: C8877CF2D7A5D7F3
111 changed files with 31629 additions and 26070 deletions


@@ -57,7 +57,7 @@ SpacesInCStyleCastParentheses: 'false'
SpacesInContainerLiterals: 'true'
SpacesInParentheses: 'false'
SpacesInSquareBrackets: 'false'
UseTab: ForIndentation
UseTab: 'false'
---
Language: Cpp

File diff suppressed because it is too large.

File diff suppressed because it is too large.

File diff suppressed because it is too large.


@@ -13,8 +13,8 @@
// AES for ARM crypto extensions and NEON.
#include "Constants.hpp"
#include "AES.hpp"
#include "Constants.hpp"
#ifdef ZT_AES_NEON
@@ -24,366 +24,386 @@ namespace {
ZT_INLINE uint8x16_t s_clmul_armneon_crypto(uint8x16_t h, uint8x16_t y, const uint8_t b[16]) noexcept
{
uint8x16_t r0, r1, t0, t1;
r0 = vld1q_u8(b);
const uint8x16_t z = veorq_u8(h, h);
y = veorq_u8(r0, y);
y = vrbitq_u8(y);
const uint8x16_t p = vreinterpretq_u8_u64(vdupq_n_u64(0x0000000000000087));
t0 = vextq_u8(y, y, 8);
__asm__ __volatile__("pmull %0.1q, %1.1d, %2.1d \n\t" : "=w" (r0) : "w" (h), "w" (y));
__asm__ __volatile__("pmull2 %0.1q, %1.2d, %2.2d \n\t" :"=w" (r1) : "w" (h), "w" (y));
__asm__ __volatile__("pmull %0.1q, %1.1d, %2.1d \n\t" : "=w" (t1) : "w" (h), "w" (t0));
__asm__ __volatile__("pmull2 %0.1q, %1.2d, %2.2d \n\t" :"=w" (t0) : "w" (h), "w" (t0));
t0 = veorq_u8(t0, t1);
t1 = vextq_u8(z, t0, 8);
r0 = veorq_u8(r0, t1);
t1 = vextq_u8(t0, z, 8);
r1 = veorq_u8(r1, t1);
__asm__ __volatile__("pmull2 %0.1q, %1.2d, %2.2d \n\t" :"=w" (t0) : "w" (r1), "w" (p));
t1 = vextq_u8(t0, z, 8);
r1 = veorq_u8(r1, t1);
t1 = vextq_u8(z, t0, 8);
r0 = veorq_u8(r0, t1);
__asm__ __volatile__("pmull %0.1q, %1.1d, %2.1d \n\t" : "=w" (t0) : "w" (r1), "w" (p));
return vrbitq_u8(veorq_u8(r0, t0));
uint8x16_t r0, r1, t0, t1;
r0 = vld1q_u8(b);
const uint8x16_t z = veorq_u8(h, h);
y = veorq_u8(r0, y);
y = vrbitq_u8(y);
const uint8x16_t p = vreinterpretq_u8_u64(vdupq_n_u64(0x0000000000000087));
t0 = vextq_u8(y, y, 8);
__asm__ __volatile__("pmull %0.1q, %1.1d, %2.1d \n\t" : "=w"(r0) : "w"(h), "w"(y));
__asm__ __volatile__("pmull2 %0.1q, %1.2d, %2.2d \n\t" : "=w"(r1) : "w"(h), "w"(y));
__asm__ __volatile__("pmull %0.1q, %1.1d, %2.1d \n\t" : "=w"(t1) : "w"(h), "w"(t0));
__asm__ __volatile__("pmull2 %0.1q, %1.2d, %2.2d \n\t" : "=w"(t0) : "w"(h), "w"(t0));
t0 = veorq_u8(t0, t1);
t1 = vextq_u8(z, t0, 8);
r0 = veorq_u8(r0, t1);
t1 = vextq_u8(t0, z, 8);
r1 = veorq_u8(r1, t1);
__asm__ __volatile__("pmull2 %0.1q, %1.2d, %2.2d \n\t" : "=w"(t0) : "w"(r1), "w"(p));
t1 = vextq_u8(t0, z, 8);
r1 = veorq_u8(r1, t1);
t1 = vextq_u8(z, t0, 8);
r0 = veorq_u8(r0, t1);
__asm__ __volatile__("pmull %0.1q, %1.1d, %2.1d \n\t" : "=w"(t0) : "w"(r1), "w"(p));
return vrbitq_u8(veorq_u8(r0, t0));
}
} // anonymous namespace
} // anonymous namespace
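For reference, the carry-less multiply helper above performs one GHASH iteration: XOR the next 16-byte block into the running hash, then multiply by H in GF(2^128). In GCM notation (a gloss on the code, not part of the commit):

Y_i = (Y_{i-1} \oplus B_i) \cdot H \pmod{x^{128} + x^{7} + x^{2} + x + 1}

The 0x87 constant loaded into p encodes the x^7 + x^2 + x + 1 terms of that reduction polynomial, and the vrbitq_u8 calls convert to and from GCM's reflected bit ordering so the PMULL products reduce correctly.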
void AES::GMAC::p_armUpdate(const uint8_t *in, unsigned int len) noexcept
void AES::GMAC::p_armUpdate(const uint8_t* in, unsigned int len) noexcept
{
uint8x16_t y = vld1q_u8(reinterpret_cast<const uint8_t *>(_y));
const uint8x16_t h = _aes.p_k.neon.h;
uint8x16_t y = vld1q_u8(reinterpret_cast<const uint8_t*>(_y));
const uint8x16_t h = _aes.p_k.neon.h;
if (_rp) {
for(;;) {
if (!len)
return;
--len;
_r[_rp++] = *(in++);
if (_rp == 16) {
y = s_clmul_armneon_crypto(h, y, _r);
break;
}
}
}
if (_rp) {
for (;;) {
if (! len)
return;
--len;
_r[_rp++] = *(in++);
if (_rp == 16) {
y = s_clmul_armneon_crypto(h, y, _r);
break;
}
}
}
while (len >= 16) {
y = s_clmul_armneon_crypto(h, y, in);
in += 16;
len -= 16;
}
while (len >= 16) {
y = s_clmul_armneon_crypto(h, y, in);
in += 16;
len -= 16;
}
vst1q_u8(reinterpret_cast<uint8_t *>(_y), y);
vst1q_u8(reinterpret_cast<uint8_t*>(_y), y);
for (unsigned int i = 0; i < len; ++i)
_r[i] = in[i];
_rp = len; // len is always less than 16 here
for (unsigned int i = 0; i < len; ++i)
_r[i] = in[i];
_rp = len; // len is always less than 16 here
}
void AES::GMAC::p_armFinish(uint8_t tag[16]) noexcept
{
uint64_t tmp[2];
uint8x16_t y = vld1q_u8(reinterpret_cast<const uint8_t *>(_y));
const uint8x16_t h = _aes.p_k.neon.h;
uint64_t tmp[2];
uint8x16_t y = vld1q_u8(reinterpret_cast<const uint8_t*>(_y));
const uint8x16_t h = _aes.p_k.neon.h;
if (_rp) {
while (_rp < 16)
_r[_rp++] = 0;
y = s_clmul_armneon_crypto(h, y, _r);
}
if (_rp) {
while (_rp < 16)
_r[_rp++] = 0;
y = s_clmul_armneon_crypto(h, y, _r);
}
tmp[0] = Utils::hton((uint64_t)_len << 3U);
tmp[1] = 0;
y = s_clmul_armneon_crypto(h, y, reinterpret_cast<const uint8_t *>(tmp));
tmp[0] = Utils::hton((uint64_t)_len << 3U);
tmp[1] = 0;
y = s_clmul_armneon_crypto(h, y, reinterpret_cast<const uint8_t*>(tmp));
Utils::copy< 12 >(tmp, _iv);
Utils::copy<12>(tmp, _iv);
#if __BYTE_ORDER == __BIG_ENDIAN
reinterpret_cast<uint32_t *>(tmp)[3] = 0x00000001;
reinterpret_cast<uint32_t*>(tmp)[3] = 0x00000001;
#else
reinterpret_cast<uint32_t *>(tmp)[3] = 0x01000000;
reinterpret_cast<uint32_t*>(tmp)[3] = 0x01000000;
#endif
_aes.encrypt(tmp, tmp);
_aes.encrypt(tmp, tmp);
uint8x16_t yy = y;
Utils::storeMachineEndian< uint64_t >(tag, tmp[0] ^ reinterpret_cast<const uint64_t *>(&yy)[0]);
Utils::storeMachineEndian< uint64_t >(tag + 8, tmp[1] ^ reinterpret_cast<const uint64_t *>(&yy)[1]);
uint8x16_t yy = y;
Utils::storeMachineEndian<uint64_t>(tag, tmp[0] ^ reinterpret_cast<const uint64_t*>(&yy)[0]);
Utils::storeMachineEndian<uint64_t>(tag + 8, tmp[1] ^ reinterpret_cast<const uint64_t*>(&yy)[1]);
}
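p_armFinish above is the usual GMAC finalization: absorb any buffered partial block, fold in a final block carrying the message bit length (the _len << 3 above), then encrypt the IV with a 32-bit counter of one and XOR that with the GHASH state. As a formula (reference gloss only, not code from the commit):

T = E_K(\mathrm{IV} \,\|\, \texttt{0x00000001}) \oplus \mathrm{GHASH}_H(\mathrm{data} \,\|\, \mathrm{pad} \,\|\, [\mathrm{bitlen}]_{64} \,\|\, [0]_{64})

The __BYTE_ORDER branch exists only so that the 32-bit counter value of one lands in big-endian byte order on either kind of host.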
void AES::CTR::p_armCrypt(const uint8_t *in, uint8_t *out, unsigned int len) noexcept
void AES::CTR::p_armCrypt(const uint8_t* in, uint8_t* out, unsigned int len) noexcept
{
uint8x16_t dd = vrev32q_u8(vld1q_u8(reinterpret_cast<uint8_t *>(_ctr)));
const uint32x4_t one = {0,0,0,1};
uint8x16_t dd = vrev32q_u8(vld1q_u8(reinterpret_cast<uint8_t*>(_ctr)));
const uint32x4_t one = { 0, 0, 0, 1 };
uint8x16_t k0 = _aes.p_k.neon.ek[0];
uint8x16_t k1 = _aes.p_k.neon.ek[1];
uint8x16_t k2 = _aes.p_k.neon.ek[2];
uint8x16_t k3 = _aes.p_k.neon.ek[3];
uint8x16_t k4 = _aes.p_k.neon.ek[4];
uint8x16_t k5 = _aes.p_k.neon.ek[5];
uint8x16_t k6 = _aes.p_k.neon.ek[6];
uint8x16_t k7 = _aes.p_k.neon.ek[7];
uint8x16_t k8 = _aes.p_k.neon.ek[8];
uint8x16_t k9 = _aes.p_k.neon.ek[9];
uint8x16_t k10 = _aes.p_k.neon.ek[10];
uint8x16_t k11 = _aes.p_k.neon.ek[11];
uint8x16_t k12 = _aes.p_k.neon.ek[12];
uint8x16_t k13 = _aes.p_k.neon.ek[13];
uint8x16_t k14 = _aes.p_k.neon.ek[14];
uint8x16_t k0 = _aes.p_k.neon.ek[0];
uint8x16_t k1 = _aes.p_k.neon.ek[1];
uint8x16_t k2 = _aes.p_k.neon.ek[2];
uint8x16_t k3 = _aes.p_k.neon.ek[3];
uint8x16_t k4 = _aes.p_k.neon.ek[4];
uint8x16_t k5 = _aes.p_k.neon.ek[5];
uint8x16_t k6 = _aes.p_k.neon.ek[6];
uint8x16_t k7 = _aes.p_k.neon.ek[7];
uint8x16_t k8 = _aes.p_k.neon.ek[8];
uint8x16_t k9 = _aes.p_k.neon.ek[9];
uint8x16_t k10 = _aes.p_k.neon.ek[10];
uint8x16_t k11 = _aes.p_k.neon.ek[11];
uint8x16_t k12 = _aes.p_k.neon.ek[12];
uint8x16_t k13 = _aes.p_k.neon.ek[13];
uint8x16_t k14 = _aes.p_k.neon.ek[14];
unsigned int totalLen = _len;
if ((totalLen & 15U) != 0) {
for (;;) {
if (unlikely(!len)) {
vst1q_u8(reinterpret_cast<uint8_t *>(_ctr), vrev32q_u8(dd));
_len = totalLen;
return;
}
--len;
out[totalLen++] = *(in++);
if ((totalLen & 15U) == 0) {
uint8_t *const otmp = out + (totalLen - 16);
uint8x16_t d0 = vrev32q_u8(dd);
uint8x16_t pt = vld1q_u8(otmp);
d0 = vaesmcq_u8(vaeseq_u8(d0, k0));
d0 = vaesmcq_u8(vaeseq_u8(d0, k1));
d0 = vaesmcq_u8(vaeseq_u8(d0, k2));
d0 = vaesmcq_u8(vaeseq_u8(d0, k3));
d0 = vaesmcq_u8(vaeseq_u8(d0, k4));
d0 = vaesmcq_u8(vaeseq_u8(d0, k5));
d0 = vaesmcq_u8(vaeseq_u8(d0, k6));
d0 = vaesmcq_u8(vaeseq_u8(d0, k7));
d0 = vaesmcq_u8(vaeseq_u8(d0, k8));
d0 = vaesmcq_u8(vaeseq_u8(d0, k9));
d0 = vaesmcq_u8(vaeseq_u8(d0, k10));
d0 = vaesmcq_u8(vaeseq_u8(d0, k11));
d0 = vaesmcq_u8(vaeseq_u8(d0, k12));
d0 = veorq_u8(vaeseq_u8(d0, k13), k14);
vst1q_u8(otmp, veorq_u8(pt, d0));
dd = (uint8x16_t)vaddq_u32((uint32x4_t)dd, one);
break;
}
}
}
unsigned int totalLen = _len;
if ((totalLen & 15U) != 0) {
for (;;) {
if (unlikely(! len)) {
vst1q_u8(reinterpret_cast<uint8_t*>(_ctr), vrev32q_u8(dd));
_len = totalLen;
return;
}
--len;
out[totalLen++] = *(in++);
if ((totalLen & 15U) == 0) {
uint8_t* const otmp = out + (totalLen - 16);
uint8x16_t d0 = vrev32q_u8(dd);
uint8x16_t pt = vld1q_u8(otmp);
d0 = vaesmcq_u8(vaeseq_u8(d0, k0));
d0 = vaesmcq_u8(vaeseq_u8(d0, k1));
d0 = vaesmcq_u8(vaeseq_u8(d0, k2));
d0 = vaesmcq_u8(vaeseq_u8(d0, k3));
d0 = vaesmcq_u8(vaeseq_u8(d0, k4));
d0 = vaesmcq_u8(vaeseq_u8(d0, k5));
d0 = vaesmcq_u8(vaeseq_u8(d0, k6));
d0 = vaesmcq_u8(vaeseq_u8(d0, k7));
d0 = vaesmcq_u8(vaeseq_u8(d0, k8));
d0 = vaesmcq_u8(vaeseq_u8(d0, k9));
d0 = vaesmcq_u8(vaeseq_u8(d0, k10));
d0 = vaesmcq_u8(vaeseq_u8(d0, k11));
d0 = vaesmcq_u8(vaeseq_u8(d0, k12));
d0 = veorq_u8(vaeseq_u8(d0, k13), k14);
vst1q_u8(otmp, veorq_u8(pt, d0));
dd = (uint8x16_t)vaddq_u32((uint32x4_t)dd, one);
break;
}
}
}
out += totalLen;
_len = totalLen + len;
out += totalLen;
_len = totalLen + len;
if (likely(len >= 64)) {
const uint32x4_t four = vshlq_n_u32(one, 2);
uint8x16_t dd1 = (uint8x16_t)vaddq_u32((uint32x4_t)dd, one);
uint8x16_t dd2 = (uint8x16_t)vaddq_u32((uint32x4_t)dd1, one);
uint8x16_t dd3 = (uint8x16_t)vaddq_u32((uint32x4_t)dd2, one);
for (;;) {
len -= 64;
uint8x16_t d0 = vrev32q_u8(dd);
uint8x16_t d1 = vrev32q_u8(dd1);
uint8x16_t d2 = vrev32q_u8(dd2);
uint8x16_t d3 = vrev32q_u8(dd3);
uint8x16_t pt0 = vld1q_u8(in);
uint8x16_t pt1 = vld1q_u8(in + 16);
uint8x16_t pt2 = vld1q_u8(in + 32);
uint8x16_t pt3 = vld1q_u8(in + 48);
if (likely(len >= 64)) {
const uint32x4_t four = vshlq_n_u32(one, 2);
uint8x16_t dd1 = (uint8x16_t)vaddq_u32((uint32x4_t)dd, one);
uint8x16_t dd2 = (uint8x16_t)vaddq_u32((uint32x4_t)dd1, one);
uint8x16_t dd3 = (uint8x16_t)vaddq_u32((uint32x4_t)dd2, one);
for (;;) {
len -= 64;
uint8x16_t d0 = vrev32q_u8(dd);
uint8x16_t d1 = vrev32q_u8(dd1);
uint8x16_t d2 = vrev32q_u8(dd2);
uint8x16_t d3 = vrev32q_u8(dd3);
uint8x16_t pt0 = vld1q_u8(in);
uint8x16_t pt1 = vld1q_u8(in + 16);
uint8x16_t pt2 = vld1q_u8(in + 32);
uint8x16_t pt3 = vld1q_u8(in + 48);
d0 = vaesmcq_u8(vaeseq_u8(d0, k0));
d1 = vaesmcq_u8(vaeseq_u8(d1, k0));
d2 = vaesmcq_u8(vaeseq_u8(d2, k0));
d3 = vaesmcq_u8(vaeseq_u8(d3, k0));
d0 = vaesmcq_u8(vaeseq_u8(d0, k1));
d1 = vaesmcq_u8(vaeseq_u8(d1, k1));
d2 = vaesmcq_u8(vaeseq_u8(d2, k1));
d3 = vaesmcq_u8(vaeseq_u8(d3, k1));
d0 = vaesmcq_u8(vaeseq_u8(d0, k2));
d1 = vaesmcq_u8(vaeseq_u8(d1, k2));
d2 = vaesmcq_u8(vaeseq_u8(d2, k2));
d3 = vaesmcq_u8(vaeseq_u8(d3, k2));
d0 = vaesmcq_u8(vaeseq_u8(d0, k3));
d1 = vaesmcq_u8(vaeseq_u8(d1, k3));
d2 = vaesmcq_u8(vaeseq_u8(d2, k3));
d3 = vaesmcq_u8(vaeseq_u8(d3, k3));
d0 = vaesmcq_u8(vaeseq_u8(d0, k4));
d1 = vaesmcq_u8(vaeseq_u8(d1, k4));
d2 = vaesmcq_u8(vaeseq_u8(d2, k4));
d3 = vaesmcq_u8(vaeseq_u8(d3, k4));
d0 = vaesmcq_u8(vaeseq_u8(d0, k5));
d1 = vaesmcq_u8(vaeseq_u8(d1, k5));
d2 = vaesmcq_u8(vaeseq_u8(d2, k5));
d3 = vaesmcq_u8(vaeseq_u8(d3, k5));
d0 = vaesmcq_u8(vaeseq_u8(d0, k6));
d1 = vaesmcq_u8(vaeseq_u8(d1, k6));
d2 = vaesmcq_u8(vaeseq_u8(d2, k6));
d3 = vaesmcq_u8(vaeseq_u8(d3, k6));
d0 = vaesmcq_u8(vaeseq_u8(d0, k7));
d1 = vaesmcq_u8(vaeseq_u8(d1, k7));
d2 = vaesmcq_u8(vaeseq_u8(d2, k7));
d3 = vaesmcq_u8(vaeseq_u8(d3, k7));
d0 = vaesmcq_u8(vaeseq_u8(d0, k8));
d1 = vaesmcq_u8(vaeseq_u8(d1, k8));
d2 = vaesmcq_u8(vaeseq_u8(d2, k8));
d3 = vaesmcq_u8(vaeseq_u8(d3, k8));
d0 = vaesmcq_u8(vaeseq_u8(d0, k9));
d1 = vaesmcq_u8(vaeseq_u8(d1, k9));
d2 = vaesmcq_u8(vaeseq_u8(d2, k9));
d3 = vaesmcq_u8(vaeseq_u8(d3, k9));
d0 = vaesmcq_u8(vaeseq_u8(d0, k10));
d1 = vaesmcq_u8(vaeseq_u8(d1, k10));
d2 = vaesmcq_u8(vaeseq_u8(d2, k10));
d3 = vaesmcq_u8(vaeseq_u8(d3, k10));
d0 = vaesmcq_u8(vaeseq_u8(d0, k11));
d1 = vaesmcq_u8(vaeseq_u8(d1, k11));
d2 = vaesmcq_u8(vaeseq_u8(d2, k11));
d3 = vaesmcq_u8(vaeseq_u8(d3, k11));
d0 = vaesmcq_u8(vaeseq_u8(d0, k12));
d1 = vaesmcq_u8(vaeseq_u8(d1, k12));
d2 = vaesmcq_u8(vaeseq_u8(d2, k12));
d3 = vaesmcq_u8(vaeseq_u8(d3, k12));
d0 = veorq_u8(vaeseq_u8(d0, k13), k14);
d1 = veorq_u8(vaeseq_u8(d1, k13), k14);
d2 = veorq_u8(vaeseq_u8(d2, k13), k14);
d3 = veorq_u8(vaeseq_u8(d3, k13), k14);
d0 = vaesmcq_u8(vaeseq_u8(d0, k0));
d1 = vaesmcq_u8(vaeseq_u8(d1, k0));
d2 = vaesmcq_u8(vaeseq_u8(d2, k0));
d3 = vaesmcq_u8(vaeseq_u8(d3, k0));
d0 = vaesmcq_u8(vaeseq_u8(d0, k1));
d1 = vaesmcq_u8(vaeseq_u8(d1, k1));
d2 = vaesmcq_u8(vaeseq_u8(d2, k1));
d3 = vaesmcq_u8(vaeseq_u8(d3, k1));
d0 = vaesmcq_u8(vaeseq_u8(d0, k2));
d1 = vaesmcq_u8(vaeseq_u8(d1, k2));
d2 = vaesmcq_u8(vaeseq_u8(d2, k2));
d3 = vaesmcq_u8(vaeseq_u8(d3, k2));
d0 = vaesmcq_u8(vaeseq_u8(d0, k3));
d1 = vaesmcq_u8(vaeseq_u8(d1, k3));
d2 = vaesmcq_u8(vaeseq_u8(d2, k3));
d3 = vaesmcq_u8(vaeseq_u8(d3, k3));
d0 = vaesmcq_u8(vaeseq_u8(d0, k4));
d1 = vaesmcq_u8(vaeseq_u8(d1, k4));
d2 = vaesmcq_u8(vaeseq_u8(d2, k4));
d3 = vaesmcq_u8(vaeseq_u8(d3, k4));
d0 = vaesmcq_u8(vaeseq_u8(d0, k5));
d1 = vaesmcq_u8(vaeseq_u8(d1, k5));
d2 = vaesmcq_u8(vaeseq_u8(d2, k5));
d3 = vaesmcq_u8(vaeseq_u8(d3, k5));
d0 = vaesmcq_u8(vaeseq_u8(d0, k6));
d1 = vaesmcq_u8(vaeseq_u8(d1, k6));
d2 = vaesmcq_u8(vaeseq_u8(d2, k6));
d3 = vaesmcq_u8(vaeseq_u8(d3, k6));
d0 = vaesmcq_u8(vaeseq_u8(d0, k7));
d1 = vaesmcq_u8(vaeseq_u8(d1, k7));
d2 = vaesmcq_u8(vaeseq_u8(d2, k7));
d3 = vaesmcq_u8(vaeseq_u8(d3, k7));
d0 = vaesmcq_u8(vaeseq_u8(d0, k8));
d1 = vaesmcq_u8(vaeseq_u8(d1, k8));
d2 = vaesmcq_u8(vaeseq_u8(d2, k8));
d3 = vaesmcq_u8(vaeseq_u8(d3, k8));
d0 = vaesmcq_u8(vaeseq_u8(d0, k9));
d1 = vaesmcq_u8(vaeseq_u8(d1, k9));
d2 = vaesmcq_u8(vaeseq_u8(d2, k9));
d3 = vaesmcq_u8(vaeseq_u8(d3, k9));
d0 = vaesmcq_u8(vaeseq_u8(d0, k10));
d1 = vaesmcq_u8(vaeseq_u8(d1, k10));
d2 = vaesmcq_u8(vaeseq_u8(d2, k10));
d3 = vaesmcq_u8(vaeseq_u8(d3, k10));
d0 = vaesmcq_u8(vaeseq_u8(d0, k11));
d1 = vaesmcq_u8(vaeseq_u8(d1, k11));
d2 = vaesmcq_u8(vaeseq_u8(d2, k11));
d3 = vaesmcq_u8(vaeseq_u8(d3, k11));
d0 = vaesmcq_u8(vaeseq_u8(d0, k12));
d1 = vaesmcq_u8(vaeseq_u8(d1, k12));
d2 = vaesmcq_u8(vaeseq_u8(d2, k12));
d3 = vaesmcq_u8(vaeseq_u8(d3, k12));
d0 = veorq_u8(vaeseq_u8(d0, k13), k14);
d1 = veorq_u8(vaeseq_u8(d1, k13), k14);
d2 = veorq_u8(vaeseq_u8(d2, k13), k14);
d3 = veorq_u8(vaeseq_u8(d3, k13), k14);
d0 = veorq_u8(pt0, d0);
d1 = veorq_u8(pt1, d1);
d2 = veorq_u8(pt2, d2);
d3 = veorq_u8(pt3, d3);
d0 = veorq_u8(pt0, d0);
d1 = veorq_u8(pt1, d1);
d2 = veorq_u8(pt2, d2);
d3 = veorq_u8(pt3, d3);
vst1q_u8(out, d0);
vst1q_u8(out + 16, d1);
vst1q_u8(out + 32, d2);
vst1q_u8(out + 48, d3);
vst1q_u8(out, d0);
vst1q_u8(out + 16, d1);
vst1q_u8(out + 32, d2);
vst1q_u8(out + 48, d3);
out += 64;
in += 64;
out += 64;
in += 64;
dd = (uint8x16_t)vaddq_u32((uint32x4_t)dd, four);
if (unlikely(len < 64))
break;
dd1 = (uint8x16_t)vaddq_u32((uint32x4_t)dd1, four);
dd2 = (uint8x16_t)vaddq_u32((uint32x4_t)dd2, four);
dd3 = (uint8x16_t)vaddq_u32((uint32x4_t)dd3, four);
}
}
dd = (uint8x16_t)vaddq_u32((uint32x4_t)dd, four);
if (unlikely(len < 64))
break;
dd1 = (uint8x16_t)vaddq_u32((uint32x4_t)dd1, four);
dd2 = (uint8x16_t)vaddq_u32((uint32x4_t)dd2, four);
dd3 = (uint8x16_t)vaddq_u32((uint32x4_t)dd3, four);
}
}
while (len >= 16) {
len -= 16;
uint8x16_t d0 = vrev32q_u8(dd);
uint8x16_t pt = vld1q_u8(in);
in += 16;
dd = (uint8x16_t)vaddq_u32((uint32x4_t)dd, one);
d0 = vaesmcq_u8(vaeseq_u8(d0, k0));
d0 = vaesmcq_u8(vaeseq_u8(d0, k1));
d0 = vaesmcq_u8(vaeseq_u8(d0, k2));
d0 = vaesmcq_u8(vaeseq_u8(d0, k3));
d0 = vaesmcq_u8(vaeseq_u8(d0, k4));
d0 = vaesmcq_u8(vaeseq_u8(d0, k5));
d0 = vaesmcq_u8(vaeseq_u8(d0, k6));
d0 = vaesmcq_u8(vaeseq_u8(d0, k7));
d0 = vaesmcq_u8(vaeseq_u8(d0, k8));
d0 = vaesmcq_u8(vaeseq_u8(d0, k9));
d0 = vaesmcq_u8(vaeseq_u8(d0, k10));
d0 = vaesmcq_u8(vaeseq_u8(d0, k11));
d0 = vaesmcq_u8(vaeseq_u8(d0, k12));
d0 = veorq_u8(vaeseq_u8(d0, k13), k14);
vst1q_u8(out, veorq_u8(pt, d0));
out += 16;
}
while (len >= 16) {
len -= 16;
uint8x16_t d0 = vrev32q_u8(dd);
uint8x16_t pt = vld1q_u8(in);
in += 16;
dd = (uint8x16_t)vaddq_u32((uint32x4_t)dd, one);
d0 = vaesmcq_u8(vaeseq_u8(d0, k0));
d0 = vaesmcq_u8(vaeseq_u8(d0, k1));
d0 = vaesmcq_u8(vaeseq_u8(d0, k2));
d0 = vaesmcq_u8(vaeseq_u8(d0, k3));
d0 = vaesmcq_u8(vaeseq_u8(d0, k4));
d0 = vaesmcq_u8(vaeseq_u8(d0, k5));
d0 = vaesmcq_u8(vaeseq_u8(d0, k6));
d0 = vaesmcq_u8(vaeseq_u8(d0, k7));
d0 = vaesmcq_u8(vaeseq_u8(d0, k8));
d0 = vaesmcq_u8(vaeseq_u8(d0, k9));
d0 = vaesmcq_u8(vaeseq_u8(d0, k10));
d0 = vaesmcq_u8(vaeseq_u8(d0, k11));
d0 = vaesmcq_u8(vaeseq_u8(d0, k12));
d0 = veorq_u8(vaeseq_u8(d0, k13), k14);
vst1q_u8(out, veorq_u8(pt, d0));
out += 16;
}
// Any remaining input is placed in _out. This will be picked up and crypted
// on subsequent calls to crypt() or finish() as it'll mean _len will not be
// an even multiple of 16.
for (unsigned int i = 0; i < len; ++i)
out[i] = in[i];
// Any remaining input is placed in _out. This will be picked up and crypted
// on subsequent calls to crypt() or finish() as it'll mean _len will not be
// an even multiple of 16.
for (unsigned int i = 0; i < len; ++i)
out[i] = in[i];
vst1q_u8(reinterpret_cast<uint8_t *>(_ctr), vrev32q_u8(dd));
vst1q_u8(reinterpret_cast<uint8_t*>(_ctr), vrev32q_u8(dd));
}
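The unrolled NEON loop above is plain AES-256 CTR: each output block is the input XORed with the encryption of a counter whose last 32 bits increment as a big-endian integer (the vrev32q_u8 / vaddq_u32({0,0,0,1}) pair). A minimal portable sketch of that keystream application, with encryptBlock standing in for a single AES block encryption (illustrative only, not from the commit):

#include <cstddef>
#include <cstdint>
#include <functional>

// Portable sketch of the CTR keystream application the NEON path accelerates.
// "encryptBlock" is a stand-in for one AES block encryption; only the last
// counter word is incremented, big-endian, matching the code above.
static void ctrXor(
    uint8_t ctr[16], const uint8_t* in, uint8_t* out, size_t len,
    const std::function<void(const uint8_t in[16], uint8_t out[16])>& encryptBlock)
{
    uint8_t ks[16];
    while (len > 0) {
        encryptBlock(ctr, ks);                 // keystream block = E_K(counter)
        const size_t n = (len < 16) ? len : 16;
        for (size_t i = 0; i < n; ++i)
            out[i] = in[i] ^ ks[i];            // out = in XOR keystream
        in += n;
        out += n;
        len -= n;
        // Increment the last 32 bits of the counter as a big-endian integer.
        uint32_t c = ((uint32_t)ctr[12] << 24) | ((uint32_t)ctr[13] << 16) | ((uint32_t)ctr[14] << 8) | (uint32_t)ctr[15];
        ++c;
        ctr[12] = (uint8_t)(c >> 24);
        ctr[13] = (uint8_t)(c >> 16);
        ctr[14] = (uint8_t)(c >> 8);
        ctr[15] = (uint8_t)c;
    }
}

The real class also carries partial blocks across calls through _len, which is why p_armCrypt begins and ends with the byte-at-a-time sections.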
#define ZT_INIT_ARMNEON_CRYPTO_SUBWORD(w) ((uint32_t)s_sbox[w & 0xffU] + ((uint32_t)s_sbox[(w >> 8U) & 0xffU] << 8U) + ((uint32_t)s_sbox[(w >> 16U) & 0xffU] << 16U) + ((uint32_t)s_sbox[(w >> 24U) & 0xffU] << 24U))
#define ZT_INIT_ARMNEON_CRYPTO_SUBWORD(w) \
((uint32_t)s_sbox[w & 0xffU] + ((uint32_t)s_sbox[(w >> 8U) & 0xffU] << 8U) \
+ ((uint32_t)s_sbox[(w >> 16U) & 0xffU] << 16U) + ((uint32_t)s_sbox[(w >> 24U) & 0xffU] << 24U))
#define ZT_INIT_ARMNEON_CRYPTO_ROTWORD(w) (((w) << 8U) | ((w) >> 24U))
#define ZT_INIT_ARMNEON_CRYPTO_NK 8
#define ZT_INIT_ARMNEON_CRYPTO_NB 4
#define ZT_INIT_ARMNEON_CRYPTO_NR 14
#define ZT_INIT_ARMNEON_CRYPTO_NK 8
#define ZT_INIT_ARMNEON_CRYPTO_NB 4
#define ZT_INIT_ARMNEON_CRYPTO_NR 14
void AES::p_init_armneon_crypto(const uint8_t *key) noexcept
void AES::p_init_armneon_crypto(const uint8_t* key) noexcept
{
static const uint8_t s_sbox[256] = {0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76, 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0, 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15, 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75, 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84, 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c,
0x58, 0xcf, 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8, 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2, 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73, 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb, 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c, 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79, 0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea,
0x65, 0x7a, 0xae, 0x08, 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a, 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e, 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf, 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16};
static const uint8_t s_sbox[256] = {
0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76, 0xca, 0x82,
0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0, 0xb7, 0xfd, 0x93, 0x26,
0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15, 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96,
0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75, 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0,
0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84, 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb,
0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf, 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f,
0x50, 0x3c, 0x9f, 0xa8, 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff,
0xf3, 0xd2, 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73,
0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb, 0xe0, 0x32,
0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c, 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79, 0xe7, 0xc8, 0x37, 0x6d,
0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08, 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6,
0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a, 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e,
0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e, 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e,
0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf, 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f,
0xb0, 0x54, 0xbb, 0x16
};
uint64_t h[2];
uint32_t *const w = reinterpret_cast<uint32_t *>(p_k.neon.ek);
uint64_t h[2];
uint32_t* const w = reinterpret_cast<uint32_t*>(p_k.neon.ek);
for (unsigned int i=0;i<ZT_INIT_ARMNEON_CRYPTO_NK;++i) {
const unsigned int j = i * 4;
w[i] = ((uint32_t)key[j] << 24U) | ((uint32_t)key[j + 1] << 16U) | ((uint32_t)key[j + 2] << 8U) | (uint32_t)key[j + 3];
}
for (unsigned int i = 0; i < ZT_INIT_ARMNEON_CRYPTO_NK; ++i) {
const unsigned int j = i * 4;
w[i] = ((uint32_t)key[j] << 24U) | ((uint32_t)key[j + 1] << 16U) | ((uint32_t)key[j + 2] << 8U)
| (uint32_t)key[j + 3];
}
for (unsigned int i=ZT_INIT_ARMNEON_CRYPTO_NK;i<(ZT_INIT_ARMNEON_CRYPTO_NB * (ZT_INIT_ARMNEON_CRYPTO_NR + 1));++i) {
uint32_t t = w[i - 1];
const unsigned int imod = i & (ZT_INIT_ARMNEON_CRYPTO_NK - 1);
if (imod == 0) {
t = ZT_INIT_ARMNEON_CRYPTO_SUBWORD(ZT_INIT_ARMNEON_CRYPTO_ROTWORD(t)) ^ rcon[(i - 1) / ZT_INIT_ARMNEON_CRYPTO_NK];
} else if (imod == 4) {
t = ZT_INIT_ARMNEON_CRYPTO_SUBWORD(t);
}
w[i] = w[i - ZT_INIT_ARMNEON_CRYPTO_NK] ^ t;
}
for (unsigned int i = ZT_INIT_ARMNEON_CRYPTO_NK; i < (ZT_INIT_ARMNEON_CRYPTO_NB * (ZT_INIT_ARMNEON_CRYPTO_NR + 1));
++i) {
uint32_t t = w[i - 1];
const unsigned int imod = i & (ZT_INIT_ARMNEON_CRYPTO_NK - 1);
if (imod == 0) {
t = ZT_INIT_ARMNEON_CRYPTO_SUBWORD(ZT_INIT_ARMNEON_CRYPTO_ROTWORD(t))
^ rcon[(i - 1) / ZT_INIT_ARMNEON_CRYPTO_NK];
}
else if (imod == 4) {
t = ZT_INIT_ARMNEON_CRYPTO_SUBWORD(t);
}
w[i] = w[i - ZT_INIT_ARMNEON_CRYPTO_NK] ^ t;
}
for (unsigned int i=0;i<(ZT_INIT_ARMNEON_CRYPTO_NB * (ZT_INIT_ARMNEON_CRYPTO_NR + 1));++i)
w[i] = Utils::hton(w[i]);
for (unsigned int i = 0; i < (ZT_INIT_ARMNEON_CRYPTO_NB * (ZT_INIT_ARMNEON_CRYPTO_NR + 1)); ++i)
w[i] = Utils::hton(w[i]);
p_k.neon.dk[0] = p_k.neon.ek[14];
for (int i=1;i<14;++i)
p_k.neon.dk[i] = vaesimcq_u8(p_k.neon.ek[14 - i]);
p_k.neon.dk[14] = p_k.neon.ek[0];
p_k.neon.dk[0] = p_k.neon.ek[14];
for (int i = 1; i < 14; ++i)
p_k.neon.dk[i] = vaesimcq_u8(p_k.neon.ek[14 - i]);
p_k.neon.dk[14] = p_k.neon.ek[0];
p_encrypt_armneon_crypto(Utils::ZERO256, h);
Utils::copy<16>(&(p_k.neon.h), h);
p_k.neon.h = vrbitq_u8(p_k.neon.h);
p_k.sw.h[0] = Utils::ntoh(h[0]);
p_k.sw.h[1] = Utils::ntoh(h[1]);
p_encrypt_armneon_crypto(Utils::ZERO256, h);
Utils::copy<16>(&(p_k.neon.h), h);
p_k.neon.h = vrbitq_u8(p_k.neon.h);
p_k.sw.h[0] = Utils::ntoh(h[0]);
p_k.sw.h[1] = Utils::ntoh(h[1]);
}
void AES::p_encrypt_armneon_crypto(const void *const in, void *const out) const noexcept
void AES::p_encrypt_armneon_crypto(const void* const in, void* const out) const noexcept
{
uint8x16_t tmp = vld1q_u8(reinterpret_cast<const uint8_t *>(in));
tmp = vaesmcq_u8(vaeseq_u8(tmp, p_k.neon.ek[0]));
tmp = vaesmcq_u8(vaeseq_u8(tmp, p_k.neon.ek[1]));
tmp = vaesmcq_u8(vaeseq_u8(tmp, p_k.neon.ek[2]));
tmp = vaesmcq_u8(vaeseq_u8(tmp, p_k.neon.ek[3]));
tmp = vaesmcq_u8(vaeseq_u8(tmp, p_k.neon.ek[4]));
tmp = vaesmcq_u8(vaeseq_u8(tmp, p_k.neon.ek[5]));
tmp = vaesmcq_u8(vaeseq_u8(tmp, p_k.neon.ek[6]));
tmp = vaesmcq_u8(vaeseq_u8(tmp, p_k.neon.ek[7]));
tmp = vaesmcq_u8(vaeseq_u8(tmp, p_k.neon.ek[8]));
tmp = vaesmcq_u8(vaeseq_u8(tmp, p_k.neon.ek[9]));
tmp = vaesmcq_u8(vaeseq_u8(tmp, p_k.neon.ek[10]));
tmp = vaesmcq_u8(vaeseq_u8(tmp, p_k.neon.ek[11]));
tmp = vaesmcq_u8(vaeseq_u8(tmp, p_k.neon.ek[12]));
tmp = veorq_u8(vaeseq_u8(tmp, p_k.neon.ek[13]), p_k.neon.ek[14]);
vst1q_u8(reinterpret_cast<uint8_t *>(out), tmp);
uint8x16_t tmp = vld1q_u8(reinterpret_cast<const uint8_t*>(in));
tmp = vaesmcq_u8(vaeseq_u8(tmp, p_k.neon.ek[0]));
tmp = vaesmcq_u8(vaeseq_u8(tmp, p_k.neon.ek[1]));
tmp = vaesmcq_u8(vaeseq_u8(tmp, p_k.neon.ek[2]));
tmp = vaesmcq_u8(vaeseq_u8(tmp, p_k.neon.ek[3]));
tmp = vaesmcq_u8(vaeseq_u8(tmp, p_k.neon.ek[4]));
tmp = vaesmcq_u8(vaeseq_u8(tmp, p_k.neon.ek[5]));
tmp = vaesmcq_u8(vaeseq_u8(tmp, p_k.neon.ek[6]));
tmp = vaesmcq_u8(vaeseq_u8(tmp, p_k.neon.ek[7]));
tmp = vaesmcq_u8(vaeseq_u8(tmp, p_k.neon.ek[8]));
tmp = vaesmcq_u8(vaeseq_u8(tmp, p_k.neon.ek[9]));
tmp = vaesmcq_u8(vaeseq_u8(tmp, p_k.neon.ek[10]));
tmp = vaesmcq_u8(vaeseq_u8(tmp, p_k.neon.ek[11]));
tmp = vaesmcq_u8(vaeseq_u8(tmp, p_k.neon.ek[12]));
tmp = veorq_u8(vaeseq_u8(tmp, p_k.neon.ek[13]), p_k.neon.ek[14]);
vst1q_u8(reinterpret_cast<uint8_t*>(out), tmp);
}
void AES::p_decrypt_armneon_crypto(const void *const in, void *const out) const noexcept
void AES::p_decrypt_armneon_crypto(const void* const in, void* const out) const noexcept
{
uint8x16_t tmp = vld1q_u8(reinterpret_cast<const uint8_t *>(in));
tmp = vaesimcq_u8(vaesdq_u8(tmp, p_k.neon.dk[0]));
tmp = vaesimcq_u8(vaesdq_u8(tmp, p_k.neon.dk[1]));
tmp = vaesimcq_u8(vaesdq_u8(tmp, p_k.neon.dk[2]));
tmp = vaesimcq_u8(vaesdq_u8(tmp, p_k.neon.dk[3]));
tmp = vaesimcq_u8(vaesdq_u8(tmp, p_k.neon.dk[4]));
tmp = vaesimcq_u8(vaesdq_u8(tmp, p_k.neon.dk[5]));
tmp = vaesimcq_u8(vaesdq_u8(tmp, p_k.neon.dk[6]));
tmp = vaesimcq_u8(vaesdq_u8(tmp, p_k.neon.dk[7]));
tmp = vaesimcq_u8(vaesdq_u8(tmp, p_k.neon.dk[8]));
tmp = vaesimcq_u8(vaesdq_u8(tmp, p_k.neon.dk[9]));
tmp = vaesimcq_u8(vaesdq_u8(tmp, p_k.neon.dk[10]));
tmp = vaesimcq_u8(vaesdq_u8(tmp, p_k.neon.dk[11]));
tmp = vaesimcq_u8(vaesdq_u8(tmp, p_k.neon.dk[12]));
tmp = veorq_u8(vaesdq_u8(tmp, p_k.neon.dk[13]), p_k.neon.dk[14]);
vst1q_u8(reinterpret_cast<uint8_t *>(out), tmp);
uint8x16_t tmp = vld1q_u8(reinterpret_cast<const uint8_t*>(in));
tmp = vaesimcq_u8(vaesdq_u8(tmp, p_k.neon.dk[0]));
tmp = vaesimcq_u8(vaesdq_u8(tmp, p_k.neon.dk[1]));
tmp = vaesimcq_u8(vaesdq_u8(tmp, p_k.neon.dk[2]));
tmp = vaesimcq_u8(vaesdq_u8(tmp, p_k.neon.dk[3]));
tmp = vaesimcq_u8(vaesdq_u8(tmp, p_k.neon.dk[4]));
tmp = vaesimcq_u8(vaesdq_u8(tmp, p_k.neon.dk[5]));
tmp = vaesimcq_u8(vaesdq_u8(tmp, p_k.neon.dk[6]));
tmp = vaesimcq_u8(vaesdq_u8(tmp, p_k.neon.dk[7]));
tmp = vaesimcq_u8(vaesdq_u8(tmp, p_k.neon.dk[8]));
tmp = vaesimcq_u8(vaesdq_u8(tmp, p_k.neon.dk[9]));
tmp = vaesimcq_u8(vaesdq_u8(tmp, p_k.neon.dk[10]));
tmp = vaesimcq_u8(vaesdq_u8(tmp, p_k.neon.dk[11]));
tmp = vaesimcq_u8(vaesdq_u8(tmp, p_k.neon.dk[12]));
tmp = veorq_u8(vaesdq_u8(tmp, p_k.neon.dk[13]), p_k.neon.dk[14]);
vst1q_u8(reinterpret_cast<uint8_t*>(out), tmp);
}
} // namespace ZeroTier
} // namespace ZeroTier
#endif // ZT_AES_NEON
#endif // ZT_AES_NEON


@@ -15,9 +15,9 @@
#define ZT_ADDRESS_HPP
#include "Constants.hpp"
#include "Utils.hpp"
#include "TriviallyCopyable.hpp"
#include "Containers.hpp"
#include "TriviallyCopyable.hpp"
#include "Utils.hpp"
#define ZT_ADDRESS_STRING_SIZE_MAX (ZT_ADDRESS_LENGTH_HEX + 1)
@@ -28,136 +28,192 @@ namespace ZeroTier {
*
* This is merely a 40-bit short address packed into a uint64_t and wrapped with methods.
*/
class Address : public TriviallyCopyable
{
public:
ZT_INLINE Address() noexcept: _a(0)
{}
class Address : public TriviallyCopyable {
public:
ZT_INLINE Address() noexcept : _a(0)
{
}
ZT_INLINE Address(const uint64_t a) noexcept: _a(a)
{}
ZT_INLINE Address(const uint64_t a) noexcept : _a(a)
{
}
explicit ZT_INLINE Address(const uint8_t b[5]) noexcept:
_a(((uint64_t)b[0] << 32U) | ((uint64_t)b[1] << 24U) | ((uint64_t)b[2] << 16U) | ((uint64_t)b[3] << 8U) | (uint64_t)b[4])
{}
explicit ZT_INLINE Address(const uint8_t b[5]) noexcept
: _a(((uint64_t)b[0] << 32U) | ((uint64_t)b[1] << 24U) | ((uint64_t)b[2] << 16U) | ((uint64_t)b[3] << 8U)
| (uint64_t)b[4])
{
}
ZT_INLINE Address &operator=(const uint64_t a) noexcept
{ _a = a; return *this; }
ZT_INLINE Address& operator=(const uint64_t a) noexcept
{
_a = a;
return *this;
}
/**
* @param bits Raw address -- 5 bytes, big-endian byte order
* @param len Length of array
*/
ZT_INLINE void setTo(const uint8_t b[5]) noexcept
{ _a = ((uint64_t)b[0] << 32U) | ((uint64_t)b[1] << 24U) | ((uint64_t)b[2] << 16U) | ((uint64_t)b[3] << 8U) | (uint64_t)b[4]; }
/**
* @param bits Raw address -- 5 bytes, big-endian byte order
* @param len Length of array
*/
ZT_INLINE void setTo(const uint8_t b[5]) noexcept
{
_a = ((uint64_t)b[0] << 32U) | ((uint64_t)b[1] << 24U) | ((uint64_t)b[2] << 16U) | ((uint64_t)b[3] << 8U)
| (uint64_t)b[4];
}
/**
* @param bits Buffer to hold 5-byte address in big-endian byte order
* @param len Length of array
*/
ZT_INLINE void copyTo(uint8_t b[5]) const noexcept
{
const uint64_t a = _a;
b[0] = (uint8_t)(a >> 32U);
b[1] = (uint8_t)(a >> 24U);
b[2] = (uint8_t)(a >> 16U);
b[3] = (uint8_t)(a >> 8U);
b[4] = (uint8_t)a;
}
/**
* @param bits Buffer to hold 5-byte address in big-endian byte order
* @param len Length of array
*/
ZT_INLINE void copyTo(uint8_t b[5]) const noexcept
{
const uint64_t a = _a;
b[0] = (uint8_t)(a >> 32U);
b[1] = (uint8_t)(a >> 24U);
b[2] = (uint8_t)(a >> 16U);
b[3] = (uint8_t)(a >> 8U);
b[4] = (uint8_t)a;
}
/**
* @return Integer containing address (0 to 2^40)
*/
ZT_INLINE uint64_t toInt() const noexcept
{ return _a; }
/**
* @return Integer containing address (0 to 2^40)
*/
ZT_INLINE uint64_t toInt() const noexcept
{
return _a;
}
/**
* Set address to zero/NIL
*/
ZT_INLINE void zero() noexcept
{ _a = 0; }
/**
* Set address to zero/NIL
*/
ZT_INLINE void zero() noexcept
{
_a = 0;
}
/**
* @param s String with at least 11 characters of space available (10 + terminating NULL)
* @return Hexadecimal string
*/
ZT_INLINE char *toString(char s[ZT_ADDRESS_STRING_SIZE_MAX]) const noexcept
{
const uint64_t a = _a;
const unsigned int m = 0xf;
s[0] = Utils::HEXCHARS[(unsigned int)(a >> 36U) & m];
s[1] = Utils::HEXCHARS[(unsigned int)(a >> 32U) & m];
s[2] = Utils::HEXCHARS[(unsigned int)(a >> 28U) & m];
s[3] = Utils::HEXCHARS[(unsigned int)(a >> 24U) & m];
s[4] = Utils::HEXCHARS[(unsigned int)(a >> 20U) & m];
s[5] = Utils::HEXCHARS[(unsigned int)(a >> 16U) & m];
s[6] = Utils::HEXCHARS[(unsigned int)(a >> 12U) & m];
s[7] = Utils::HEXCHARS[(unsigned int)(a >> 8U) & m];
s[8] = Utils::HEXCHARS[(unsigned int)(a >> 4U) & m];
s[9] = Utils::HEXCHARS[(unsigned int)a & m];
s[10] = 0;
return s;
}
/**
* @param s String with at least 11 characters of space available (10 + terminating NULL)
* @return Hexadecimal string
*/
ZT_INLINE char* toString(char s[ZT_ADDRESS_STRING_SIZE_MAX]) const noexcept
{
const uint64_t a = _a;
const unsigned int m = 0xf;
s[0] = Utils::HEXCHARS[(unsigned int)(a >> 36U) & m];
s[1] = Utils::HEXCHARS[(unsigned int)(a >> 32U) & m];
s[2] = Utils::HEXCHARS[(unsigned int)(a >> 28U) & m];
s[3] = Utils::HEXCHARS[(unsigned int)(a >> 24U) & m];
s[4] = Utils::HEXCHARS[(unsigned int)(a >> 20U) & m];
s[5] = Utils::HEXCHARS[(unsigned int)(a >> 16U) & m];
s[6] = Utils::HEXCHARS[(unsigned int)(a >> 12U) & m];
s[7] = Utils::HEXCHARS[(unsigned int)(a >> 8U) & m];
s[8] = Utils::HEXCHARS[(unsigned int)(a >> 4U) & m];
s[9] = Utils::HEXCHARS[(unsigned int)a & m];
s[10] = 0;
return s;
}
ZT_INLINE String toString() const
{
char s[ZT_ADDRESS_STRING_SIZE_MAX];
toString(s);
return String(s);
}
ZT_INLINE String toString() const
{
char s[ZT_ADDRESS_STRING_SIZE_MAX];
toString(s);
return String(s);
}
/**
* Check if this address is reserved
*
* The all-zero null address and any address beginning with 0xff are
* reserved. (0xff is reserved for future use to designate possibly
* longer addresses, addresses based on IPv6 innards, etc.)
*
* @return True if address is reserved and may not be used
*/
ZT_INLINE bool isReserved() const noexcept
{ return ((!_a) || ((_a >> 32U) == ZT_ADDRESS_RESERVED_PREFIX)); }
/**
* Check if this address is reserved
*
* The all-zero null address and any address beginning with 0xff are
* reserved. (0xff is reserved for future use to designate possibly
* longer addresses, addresses based on IPv6 innards, etc.)
*
* @return True if address is reserved and may not be used
*/
ZT_INLINE bool isReserved() const noexcept
{
return ((! _a) || ((_a >> 32U) == ZT_ADDRESS_RESERVED_PREFIX));
}
ZT_INLINE unsigned long hashCode() const noexcept
{ return (unsigned long)_a; }
ZT_INLINE unsigned long hashCode() const noexcept
{
return (unsigned long)_a;
}
ZT_INLINE operator bool() const noexcept
{ return (_a != 0); }
ZT_INLINE operator bool() const noexcept
{
return (_a != 0);
}
ZT_INLINE operator uint64_t() const noexcept
{ return _a; }
ZT_INLINE operator uint64_t() const noexcept
{
return _a;
}
ZT_INLINE bool operator==(const Address &a) const noexcept
{ return _a == a._a; }
ZT_INLINE bool operator!=(const Address &a) const noexcept
{ return _a != a._a; }
ZT_INLINE bool operator>(const Address &a) const noexcept
{ return _a > a._a; }
ZT_INLINE bool operator<(const Address &a) const noexcept
{ return _a < a._a; }
ZT_INLINE bool operator>=(const Address &a) const noexcept
{ return _a >= a._a; }
ZT_INLINE bool operator<=(const Address &a) const noexcept
{ return _a <= a._a; }
ZT_INLINE bool operator==(const uint64_t a) const noexcept
{ return _a == a; }
ZT_INLINE bool operator!=(const uint64_t a) const noexcept
{ return _a != a; }
ZT_INLINE bool operator>(const uint64_t a) const noexcept
{ return _a > a; }
ZT_INLINE bool operator<(const uint64_t a) const noexcept
{ return _a < a; }
ZT_INLINE bool operator>=(const uint64_t a) const noexcept
{ return _a >= a; }
ZT_INLINE bool operator<=(const uint64_t a) const noexcept
{ return _a <= a; }
ZT_INLINE bool operator==(const Address& a) const noexcept
{
return _a == a._a;
}
private:
uint64_t _a;
ZT_INLINE bool operator!=(const Address& a) const noexcept
{
return _a != a._a;
}
ZT_INLINE bool operator>(const Address& a) const noexcept
{
return _a > a._a;
}
ZT_INLINE bool operator<(const Address& a) const noexcept
{
return _a < a._a;
}
ZT_INLINE bool operator>=(const Address& a) const noexcept
{
return _a >= a._a;
}
ZT_INLINE bool operator<=(const Address& a) const noexcept
{
return _a <= a._a;
}
ZT_INLINE bool operator==(const uint64_t a) const noexcept
{
return _a == a;
}
ZT_INLINE bool operator!=(const uint64_t a) const noexcept
{
return _a != a;
}
ZT_INLINE bool operator>(const uint64_t a) const noexcept
{
return _a > a;
}
ZT_INLINE bool operator<(const uint64_t a) const noexcept
{
return _a < a;
}
ZT_INLINE bool operator>=(const uint64_t a) const noexcept
{
return _a >= a;
}
ZT_INLINE bool operator<=(const uint64_t a) const noexcept
{
return _a <= a;
}
private:
uint64_t _a;
};
static_assert(sizeof(Address) == sizeof(uint64_t),"Address has unnecessary extra padding");
static_assert(sizeof(Address) == sizeof(uint64_t), "Address has unnecessary extra padding");
} // namespace ZeroTier
} // namespace ZeroTier
#endif
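As a quick usage sketch of the Address class above (only the API itself comes from this diff; the calling code is hypothetical):

#include "Address.hpp"

#include <cstdio>

using namespace ZeroTier;

int main()
{
    const uint8_t raw[5] = { 0x01, 0x02, 0x03, 0x04, 0x05 };   // 40-bit address, big-endian byte order
    const Address a(raw);
    char buf[ZT_ADDRESS_STRING_SIZE_MAX];                      // 10 hex digits + terminating NUL
    std::printf("%s reserved=%d\n", a.toString(buf), (int)a.isReserved());
    return a ? 0 : 1;                                          // operator bool(): true for a non-zero address
}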


@@ -12,89 +12,94 @@
/****/
#include "Buf.hpp"
#include "Spinlock.hpp"
namespace ZeroTier {
static std::atomic< uintptr_t > s_pool(0);
static std::atomic< long > s_allocated(0);
static std::atomic<uintptr_t> s_pool(0);
static std::atomic<long> s_allocated(0);
// uintptr_max can never be a valid pointer, so use it to indicate that s_pool is locked (very short duration spinlock)
#define ZT_ATOMIC_PTR_LOCKED (~((uintptr_t)0))
void *Buf::operator new(std::size_t sz)
void* Buf::operator new(std::size_t sz)
{
uintptr_t bb;
for (;;) {
bb = s_pool.exchange(ZT_ATOMIC_PTR_LOCKED, std::memory_order_acquire);
uintptr_t bb;
for (;;) {
bb = s_pool.exchange(ZT_ATOMIC_PTR_LOCKED, std::memory_order_acquire);
if (likely(bb != ZT_ATOMIC_PTR_LOCKED)) {
Buf *b;
if (likely(bb != 0)) {
b = reinterpret_cast<Buf *>(bb);
s_pool.store(b->__nextInPool, std::memory_order_release);
} else {
s_pool.store(0, std::memory_order_release);
b = reinterpret_cast<Buf *>(malloc(sz));
if (!b)
throw Utils::BadAllocException;
s_allocated.fetch_add(1, std::memory_order_relaxed);
}
if (likely(bb != ZT_ATOMIC_PTR_LOCKED)) {
Buf* b;
if (likely(bb != 0)) {
b = reinterpret_cast<Buf*>(bb);
s_pool.store(b->__nextInPool, std::memory_order_release);
}
else {
s_pool.store(0, std::memory_order_release);
b = reinterpret_cast<Buf*>(malloc(sz));
if (! b)
throw Utils::BadAllocException;
s_allocated.fetch_add(1, std::memory_order_relaxed);
}
b->__refCount.store(0, std::memory_order_relaxed);
b->__refCount.store(0, std::memory_order_relaxed);
return reinterpret_cast<void *>(b);
}
return reinterpret_cast<void*>(b);
}
Spinlock::pause();
}
Spinlock::pause();
}
}
void Buf::operator delete(void *ptr)
void Buf::operator delete(void* ptr)
{
if (likely(ptr != nullptr)) {
if (s_allocated.load(std::memory_order_relaxed) > ZT_BUF_MAX_POOL_SIZE) {
s_allocated.fetch_sub(1, std::memory_order_relaxed);
free(ptr);
} else {
uintptr_t bb;
for (;;) {
bb = s_pool.exchange(ZT_ATOMIC_PTR_LOCKED, std::memory_order_acquire);
if (likely(bb != ZT_ATOMIC_PTR_LOCKED)) {
reinterpret_cast<Buf *>(ptr)->__nextInPool = bb;
s_pool.store(reinterpret_cast<uintptr_t>(ptr), std::memory_order_release);
return;
}
Spinlock::pause();
}
}
}
if (likely(ptr != nullptr)) {
if (s_allocated.load(std::memory_order_relaxed) > ZT_BUF_MAX_POOL_SIZE) {
s_allocated.fetch_sub(1, std::memory_order_relaxed);
free(ptr);
}
else {
uintptr_t bb;
for (;;) {
bb = s_pool.exchange(ZT_ATOMIC_PTR_LOCKED, std::memory_order_acquire);
if (likely(bb != ZT_ATOMIC_PTR_LOCKED)) {
reinterpret_cast<Buf*>(ptr)->__nextInPool = bb;
s_pool.store(reinterpret_cast<uintptr_t>(ptr), std::memory_order_release);
return;
}
Spinlock::pause();
}
}
}
}
void Buf::freePool() noexcept
{
uintptr_t bb;
for (;;) {
bb = s_pool.exchange(ZT_ATOMIC_PTR_LOCKED, std::memory_order_acquire);
uintptr_t bb;
for (;;) {
bb = s_pool.exchange(ZT_ATOMIC_PTR_LOCKED, std::memory_order_acquire);
if (likely(bb != ZT_ATOMIC_PTR_LOCKED)) {
s_pool.store(0, std::memory_order_release);
if (likely(bb != ZT_ATOMIC_PTR_LOCKED)) {
s_pool.store(0, std::memory_order_release);
while (bb != 0) {
const uintptr_t next = reinterpret_cast<Buf *>(bb)->__nextInPool;
s_allocated.fetch_sub(1, std::memory_order_relaxed);
free(reinterpret_cast<void *>(bb));
bb = next;
}
while (bb != 0) {
const uintptr_t next = reinterpret_cast<Buf*>(bb)->__nextInPool;
s_allocated.fetch_sub(1, std::memory_order_relaxed);
free(reinterpret_cast<void*>(bb));
bb = next;
}
return;
}
return;
}
Spinlock::pause();
}
Spinlock::pause();
}
}
long Buf::poolAllocated() noexcept
{ return s_allocated.load(std::memory_order_relaxed); }
{
return s_allocated.load(std::memory_order_relaxed);
}
} // namespace ZeroTier
} // namespace ZeroTier
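The comment near the top of this file describes the whole trick: the pool head is a single std::atomic<uintptr_t>, and the all-ones value doubles as a very short-lived spinlock, so push and pop need no separate mutex. A self-contained sketch of that pattern follows; the Node and FreeList names are illustrative, not from the ZeroTier sources:

#include <atomic>
#include <cstdint>

static constexpr uintptr_t kLocked = ~((uintptr_t)0);   // can never be a valid pointer

struct Node {
    uintptr_t next;
};

class FreeList {
  public:
    // Pop one node, or return nullptr if the list is empty.
    Node* tryPop() noexcept
    {
        for (;;) {
            const uintptr_t h = m_head.exchange(kLocked, std::memory_order_acquire);
            if (h != kLocked) {                                     // we now "hold the lock"
                if (h == 0) {
                    m_head.store(0, std::memory_order_release);     // empty: restore and report a miss
                    return nullptr;
                }
                Node* const n = reinterpret_cast<Node*>(h);
                m_head.store(n->next, std::memory_order_release);   // unlock by publishing the new head
                return n;
            }
            // another thread holds it for a few instructions; spin (Spinlock::pause() in the real code)
        }
    }

    // Push a node back onto the list.
    void push(Node* const n) noexcept
    {
        for (;;) {
            const uintptr_t h = m_head.exchange(kLocked, std::memory_order_acquire);
            if (h != kLocked) {
                n->next = h;
                m_head.store(reinterpret_cast<uintptr_t>(n), std::memory_order_release);
                return;
            }
        }
    }

  private:
    std::atomic<uintptr_t> m_head { 0 };
};

Buf::operator new layers a malloc() fallback and the s_allocated counter on top of the same exchange/store sequence.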

File diff suppressed because it is too large.

File diff suppressed because it is too large.


@@ -25,107 +25,125 @@
namespace ZeroTier {
#define ZT_C25519_ECDH_PUBLIC_KEY_SIZE 32
#define ZT_C25519_ECDH_PRIVATE_KEY_SIZE 32
#define ZT_C25519_COMBINED_PUBLIC_KEY_SIZE 64
#define ZT_C25519_ECDH_PUBLIC_KEY_SIZE 32
#define ZT_C25519_ECDH_PRIVATE_KEY_SIZE 32
#define ZT_C25519_COMBINED_PUBLIC_KEY_SIZE 64
#define ZT_C25519_COMBINED_PRIVATE_KEY_SIZE 64
#define ZT_C25519_SIGNATURE_LEN 96
#define ZT_C25519_ECDH_SHARED_SECRET_SIZE 32
#define ZT_C25519_SIGNATURE_LEN 96
#define ZT_C25519_ECDH_SHARED_SECRET_SIZE 32
/**
* A combined Curve25519 ECDH and Ed25519 signature engine
*/
class C25519
{
public:
/**
* Generate a set of two 25519 keys: a C25519 ECDH key pair and an Ed25519 EDDSA key pair.
*/
static void generateCombined(uint8_t pub[ZT_C25519_COMBINED_PUBLIC_KEY_SIZE],uint8_t priv[ZT_C25519_COMBINED_PRIVATE_KEY_SIZE]);
class C25519 {
public:
/**
* Generate a set of two 25519 keys: a C25519 ECDH key pair and an Ed25519 EDDSA key pair.
*/
static void generateCombined(
uint8_t pub[ZT_C25519_COMBINED_PUBLIC_KEY_SIZE],
uint8_t priv[ZT_C25519_COMBINED_PRIVATE_KEY_SIZE]);
/**
* Generate a C25519 ECDH key pair only.
*/
static void generateC25519(uint8_t pub[ZT_C25519_ECDH_PUBLIC_KEY_SIZE],uint8_t priv[ZT_C25519_ECDH_PRIVATE_KEY_SIZE]);
/**
* Generate a C25519 ECDH key pair only.
*/
static void
generateC25519(uint8_t pub[ZT_C25519_ECDH_PUBLIC_KEY_SIZE], uint8_t priv[ZT_C25519_ECDH_PRIVATE_KEY_SIZE]);
/**
* Generate a key pair satisfying a condition
*
* This begins with a random keypair from a random secret key and then
* iteratively increments the random secret until cond(kp) returns true.
* This is used to compute key pairs in which the public key, its hash
* or some other aspect of it satisfies some condition, such as for a
* hashcash criteria.
*
* @param cond Condition function or function object
* @return Key pair where cond(kp) returns true
* @tparam F Type of 'cond'
*/
template<typename F>
static ZT_INLINE void generateSatisfying(F cond,uint8_t pub[ZT_C25519_COMBINED_PUBLIC_KEY_SIZE],uint8_t priv[ZT_C25519_COMBINED_PRIVATE_KEY_SIZE])
{
Utils::getSecureRandom(priv,ZT_C25519_COMBINED_PRIVATE_KEY_SIZE);
s_calcPubED(pub, priv); // do Ed25519 key -- bytes 32-63 of pub and priv
do {
++(((uint64_t *)priv)[1]);
--(((uint64_t *)priv)[2]);
s_calcPubDH(pub, priv); // keep regenerating bytes 0-31 until satisfied
} while (!cond(pub));
}
/**
* Generate a key pair satisfying a condition
*
* This begins with a random keypair from a random secret key and then
* iteratively increments the random secret until cond(kp) returns true.
* This is used to compute key pairs in which the public key, its hash
* or some other aspect of it satisfies some condition, such as for a
* hashcash criteria.
*
* @param cond Condition function or function object
* @return Key pair where cond(kp) returns true
* @tparam F Type of 'cond'
*/
template <typename F>
static ZT_INLINE void generateSatisfying(
F cond,
uint8_t pub[ZT_C25519_COMBINED_PUBLIC_KEY_SIZE],
uint8_t priv[ZT_C25519_COMBINED_PRIVATE_KEY_SIZE])
{
Utils::getSecureRandom(priv, ZT_C25519_COMBINED_PRIVATE_KEY_SIZE);
s_calcPubED(pub, priv); // do Ed25519 key -- bytes 32-63 of pub and priv
do {
++(((uint64_t*)priv)[1]);
--(((uint64_t*)priv)[2]);
s_calcPubDH(pub, priv); // keep regenerating bytes 0-31 until satisfied
} while (! cond(pub));
}
/**
* Perform C25519 ECC key agreement
*
* Actual key bytes are generated from one or more SHA-512 digests of
* the raw result of key agreement.
*
* @param mine My private key
* @param their Their public key
* @param rawkey Buffer to receive raw (not hashed) agreed upon key
*/
static void agree(const uint8_t mine[ZT_C25519_ECDH_PRIVATE_KEY_SIZE],const uint8_t their[ZT_C25519_ECDH_PUBLIC_KEY_SIZE],uint8_t rawkey[ZT_C25519_ECDH_SHARED_SECRET_SIZE]);
/**
* Perform C25519 ECC key agreement
*
* Actual key bytes are generated from one or more SHA-512 digests of
* the raw result of key agreement.
*
* @param mine My private key
* @param their Their public key
* @param rawkey Buffer to receive raw (not hashed) agreed upon key
*/
static void agree(
const uint8_t mine[ZT_C25519_ECDH_PRIVATE_KEY_SIZE],
const uint8_t their[ZT_C25519_ECDH_PUBLIC_KEY_SIZE],
uint8_t rawkey[ZT_C25519_ECDH_SHARED_SECRET_SIZE]);
/**
* Sign a message with a sender's key pair
*
* LEGACY: ZeroTier's ed25519 signatures contain an extra 32 bytes which are the first
* 32 bytes of SHA512(msg). These exist because an early version of the ZeroTier multicast
* algorithm did a lot of signature verification and we wanted a way to skip the more
* expensive ed25519 verification if the signature was obviously wrong.
*
* This verify() function will accept a 64 or 96 bit signature, checking the last 32
* bytes only if present.
*
* @param myPrivate My private key
* @param myPublic My public key
* @param msg Message to sign
* @param len Length of message in bytes
* @param signature Buffer to fill with signature -- MUST be 96 bytes in length
*/
static void sign(const uint8_t myPrivate[ZT_C25519_COMBINED_PRIVATE_KEY_SIZE],const uint8_t myPublic[ZT_C25519_COMBINED_PUBLIC_KEY_SIZE],const void *msg,unsigned int len,void *signature);
/**
* Sign a message with a sender's key pair
*
* LEGACY: ZeroTier's ed25519 signatures contain an extra 32 bytes which are the first
* 32 bytes of SHA512(msg). These exist because an early version of the ZeroTier multicast
* algorithm did a lot of signature verification and we wanted a way to skip the more
* expensive ed25519 verification if the signature was obviously wrong.
*
* This verify() function will accept a 64 or 96 bit signature, checking the last 32
* bytes only if present.
*
* @param myPrivate My private key
* @param myPublic My public key
* @param msg Message to sign
* @param len Length of message in bytes
* @param signature Buffer to fill with signature -- MUST be 96 bytes in length
*/
static void sign(
const uint8_t myPrivate[ZT_C25519_COMBINED_PRIVATE_KEY_SIZE],
const uint8_t myPublic[ZT_C25519_COMBINED_PUBLIC_KEY_SIZE],
const void* msg,
unsigned int len,
void* signature);
/**
* Verify a message's signature
*
* @param their Public key to verify against
* @param msg Message to verify signature integrity against
* @param len Length of message in bytes
* @param signature Signature bytes
* @param siglen Length of signature in bytes
* @return True if signature is valid and the message is authentic and unmodified
*/
static bool verify(const uint8_t their[ZT_C25519_COMBINED_PUBLIC_KEY_SIZE],const void *msg,unsigned int len,const void *signature,unsigned int siglen);
/**
* Verify a message's signature
*
* @param their Public key to verify against
* @param msg Message to verify signature integrity against
* @param len Length of message in bytes
* @param signature Signature bytes
* @param siglen Length of signature in bytes
* @return True if signature is valid and the message is authentic and unmodified
*/
static bool verify(
const uint8_t their[ZT_C25519_COMBINED_PUBLIC_KEY_SIZE],
const void* msg,
unsigned int len,
const void* signature,
unsigned int siglen);
private:
// derive first 32 bytes of kp.pub from first 32 bytes of kp.priv
// this is the ECDH key
static void s_calcPubDH(uint8_t *pub, const uint8_t *priv);
private:
// derive first 32 bytes of kp.pub from first 32 bytes of kp.priv
// this is the ECDH key
static void s_calcPubDH(uint8_t* pub, const uint8_t* priv);
// derive 2nd 32 bytes of kp.pub from 2nd 32 bytes of kp.priv
// this is the Ed25519 sign/verify key
static void s_calcPubED(uint8_t *pub, const uint8_t *priv);
// derive 2nd 32 bytes of kp.pub from 2nd 32 bytes of kp.priv
// this is the Ed25519 sign/verify key
static void s_calcPubED(uint8_t* pub, const uint8_t* priv);
};
} // namespace ZeroTier
} // namespace ZeroTier
#endif
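A short usage sketch for the interface above (the constants and static methods come from this header; the calling code is hypothetical):

#include "C25519.hpp"

#include <cstdint>

using namespace ZeroTier;

bool signAndVerifyDemo()
{
    uint8_t pub[ZT_C25519_COMBINED_PUBLIC_KEY_SIZE];
    uint8_t priv[ZT_C25519_COMBINED_PRIVATE_KEY_SIZE];
    C25519::generateCombined(pub, priv);                  // C25519 ECDH half + Ed25519 half

    const char msg[] = "hello";
    uint8_t sig[ZT_C25519_SIGNATURE_LEN];                 // 96 bytes: 64-byte signature + 32-byte digest prefix
    C25519::sign(priv, pub, msg, sizeof(msg), sig);

    // verify() accepts either the full 96-byte signature or just its first 64 bytes.
    return C25519::verify(pub, msg, sizeof(msg), sig, ZT_C25519_SIGNATURE_LEN);
}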

File diff suppressed because it is too large.


@@ -1,4 +1,4 @@
cmake_minimum_required (VERSION 3.0)
cmake_minimum_required(VERSION 3.0)
project(zt_core)
configure_file(
@@ -61,7 +61,7 @@ set(core_headers
Utils.hpp
VL1.hpp
VL2.hpp
)
)
set(core_src
AES.cpp
@@ -102,22 +102,22 @@
Utils.cpp
VL1.cpp
VL2.cpp
)
)
add_library(${PROJECT_NAME} STATIC ${core_src} ${core_headers})
target_include_directories(${PROJECT_NAME} PRIVATE ${CMAKE_BINARY_DIR})
if(WIN32)
if (WIN32)
set(libs ${libs} wsock32 ws2_32 rpcrt4 iphlpapi)
target_compile_features(${PROJECT_NAME} PUBLIC cxx_std_17)
else(WIN32)
else (WIN32)
set(libs ${libs} pthread)
if (APPLE)
target_compile_features(${PROJECT_NAME} PUBLIC cxx_std_17)
else(APPLE)
else (APPLE)
target_compile_features(${PROJECT_NAME} PUBLIC cxx_std_11)
endif(APPLE)
endif(WIN32)
endif (APPLE)
endif (WIN32)
add_executable(zt_core_tests Tests.h Tests.cpp)
target_compile_definitions(zt_core_tests PRIVATE ZT_ENABLE_TESTS=1 ZT_STANDALONE_TESTS=1)


@@ -25,40 +25,37 @@ namespace ZeroTier {
* graph around from function to function as needed. It's cleaner and probably
* faster than passing clock, ticks, and tPtr around everywhere.
*/
class CallContext
{
public:
ZT_INLINE CallContext(const int64_t c, const int64_t t, void *const p) :
clock(c),
ticks(t),
tPtr(p)
{}
class CallContext {
public:
ZT_INLINE CallContext(const int64_t c, const int64_t t, void* const p) : clock(c), ticks(t), tPtr(p)
{
}
/**
* Real world time in milliseconds since Unix epoch or -1 if unknown.
*
* This is used for things like checking certificate expiration. If it's
* not known then the value may be inferred from peers/roots or some
* features may be disabled.
*/
const int64_t clock;
/**
* Real world time in milliseconds since Unix epoch or -1 if unknown.
*
* This is used for things like checking certificate expiration. If it's
* not known then the value may be inferred from peers/roots or some
* features may be disabled.
*/
const int64_t clock;
/**
* Monotonic process or system clock in milliseconds since an arbitrary point.
*
* This is never -1 or undefined and is used for most timings.
*/
const int64_t ticks;
/**
* Monotonic process or system clock in milliseconds since an arbitrary point.
*
* This is never -1 or undefined and is used for most timings.
*/
const int64_t ticks;
/**
* An arbitrary pointer users pass into calls that follows the call chain
*
* By passing this back to callbacks state can be kept by the caller using
* a mechanism that is faster (on most platforms) than thread-local storage.
*/
void *const tPtr;
/**
* An arbitrary pointer users pass into calls that follows the call chain
*
* By passing this back to callbacks state can be kept by the caller using
* a mechanism that is faster (on most platforms) than thread-local storage.
*/
void* const tPtr;
};
} // namespace ZeroTier
} // namespace ZeroTier
#endif
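A small sketch of how a caller might assemble a CallContext at an API boundary, assuming std::chrono supplies the two time sources (the helper below is hypothetical, not from the commit):

#include "CallContext.hpp"

#include <chrono>
#include <cstdint>

using namespace ZeroTier;

static CallContext makeContext(void* const userPtr)
{
    // Wall clock in ms since the Unix epoch; pass -1 instead if it is not known.
    const int64_t nowClock = std::chrono::duration_cast<std::chrono::milliseconds>(
        std::chrono::system_clock::now().time_since_epoch()).count();
    // Monotonic ticks in ms; this one must always be valid.
    const int64_t nowTicks = std::chrono::duration_cast<std::chrono::milliseconds>(
        std::chrono::steady_clock::now().time_since_epoch()).count();
    return CallContext(nowClock, nowTicks, userPtr);
}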


@@ -12,421 +12,451 @@
/****/
#include "CapabilityCredential.hpp"
#include "Utils.hpp"
#include "Constants.hpp"
#include "MAC.hpp"
#include "Utils.hpp"
namespace ZeroTier {
CapabilityCredential::CapabilityCredential(
const uint32_t id,
const uint64_t nwid,
const int64_t timestamp,
const ZT_VirtualNetworkRule *const rules,
const unsigned int ruleCount) noexcept:
m_nwid(nwid),
m_timestamp(timestamp),
m_id(id),
m_ruleCount((ruleCount < ZT_MAX_CAPABILITY_RULES) ? ruleCount : ZT_MAX_CAPABILITY_RULES),
m_signatureLength(0)
const uint32_t id,
const uint64_t nwid,
const int64_t timestamp,
const ZT_VirtualNetworkRule* const rules,
const unsigned int ruleCount) noexcept
: m_nwid(nwid)
, m_timestamp(timestamp)
, m_id(id)
, m_ruleCount((ruleCount < ZT_MAX_CAPABILITY_RULES) ? ruleCount : ZT_MAX_CAPABILITY_RULES)
, m_signatureLength(0)
{
Utils::zero< sizeof(m_rules) >(m_rules);
if (m_ruleCount > 0)
Utils::copy(m_rules, rules, sizeof(ZT_VirtualNetworkRule) * m_ruleCount);
Utils::zero< sizeof(m_signature) >(m_signature);
Utils::zero<sizeof(m_rules)>(m_rules);
if (m_ruleCount > 0)
Utils::copy(m_rules, rules, sizeof(ZT_VirtualNetworkRule) * m_ruleCount);
Utils::zero<sizeof(m_signature)>(m_signature);
}
bool CapabilityCredential::sign(const Identity &from, const Address &to) noexcept
bool CapabilityCredential::sign(const Identity& from, const Address& to) noexcept
{
uint8_t buf[ZT_CAPABILITY_MARSHAL_SIZE_MAX + 16];
m_issuedTo = to;
m_signedBy = from.address();
m_signatureLength = from.sign(buf, (unsigned int) marshal(buf, true), m_signature, sizeof(m_signature));
return m_signatureLength > 0;
uint8_t buf[ZT_CAPABILITY_MARSHAL_SIZE_MAX + 16];
m_issuedTo = to;
m_signedBy = from.address();
m_signatureLength = from.sign(buf, (unsigned int)marshal(buf, true), m_signature, sizeof(m_signature));
return m_signatureLength > 0;
}
int CapabilityCredential::marshal(uint8_t data[ZT_CAPABILITY_MARSHAL_SIZE_MAX], const bool forSign) const noexcept
{
int p = 0;
int p = 0;
if (forSign) {
for (int k = 0;k < 8;++k)
data[p++] = 0x7f;
}
if (forSign) {
for (int k = 0; k < 8; ++k)
data[p++] = 0x7f;
}
Utils::storeBigEndian<uint64_t>(data + p, m_nwid);
Utils::storeBigEndian<uint64_t>(data + p + 8, (uint64_t) m_timestamp);
Utils::storeBigEndian<uint32_t>(data + p + 16, m_id);
p += 20;
Utils::storeBigEndian<uint64_t>(data + p, m_nwid);
Utils::storeBigEndian<uint64_t>(data + p + 8, (uint64_t)m_timestamp);
Utils::storeBigEndian<uint32_t>(data + p + 16, m_id);
p += 20;
Utils::storeBigEndian<uint16_t>(data + p, (uint16_t) m_ruleCount);
p += 2;
p += CapabilityCredential::marshalVirtualNetworkRules(data + p, m_rules, m_ruleCount);
Utils::storeBigEndian<uint16_t>(data + p, (uint16_t)m_ruleCount);
p += 2;
p += CapabilityCredential::marshalVirtualNetworkRules(data + p, m_rules, m_ruleCount);
// LEGACY: older versions supported multiple records with this being a maximum custody
// chain length. This is deprecated so set the max chain length to one.
data[p++] = (uint8_t)1;
// LEGACY: older versions supported multiple records with this being a maximum custody
// chain length. This is deprecated so set the max chain length to one.
data[p++] = (uint8_t)1;
if (!forSign) {
m_issuedTo.copyTo(data + p);
m_signedBy.copyTo(data + p + ZT_ADDRESS_LENGTH);
p += ZT_ADDRESS_LENGTH + ZT_ADDRESS_LENGTH;
data[p++] = 1; // LEGACY: old versions require a reserved byte here
Utils::storeBigEndian<uint16_t>(data + p, (uint16_t) m_signatureLength);
p += 2;
Utils::copy(data + p, m_signature, m_signatureLength);
p += (int)m_signatureLength;
if (! forSign) {
m_issuedTo.copyTo(data + p);
m_signedBy.copyTo(data + p + ZT_ADDRESS_LENGTH);
p += ZT_ADDRESS_LENGTH + ZT_ADDRESS_LENGTH;
data[p++] = 1; // LEGACY: old versions require a reserved byte here
Utils::storeBigEndian<uint16_t>(data + p, (uint16_t)m_signatureLength);
p += 2;
Utils::copy(data + p, m_signature, m_signatureLength);
p += (int)m_signatureLength;
// LEGACY: older versions supported more than one record terminated by a zero address.
for (int k = 0;k < ZT_ADDRESS_LENGTH;++k)
data[p++] = 0;
}
// LEGACY: older versions supported more than one record terminated by a zero address.
for (int k = 0; k < ZT_ADDRESS_LENGTH; ++k)
data[p++] = 0;
}
data[p++] = 0;
data[p++] = 0; // uint16_t size of additional fields, currently 0
data[p++] = 0;
data[p++] = 0; // uint16_t size of additional fields, currently 0
if (forSign) {
for (int k = 0;k < 8;++k)
data[p++] = 0x7f;
}
if (forSign) {
for (int k = 0; k < 8; ++k)
data[p++] = 0x7f;
}
return p;
return p;
}
int CapabilityCredential::unmarshal(const uint8_t *data, int len) noexcept
int CapabilityCredential::unmarshal(const uint8_t* data, int len) noexcept
{
if (len < 22)
return -1;
if (len < 22)
return -1;
m_nwid = Utils::loadBigEndian<uint64_t>(data);
m_timestamp = (int64_t) Utils::loadBigEndian<uint64_t>(data + 8);
m_id = Utils::loadBigEndian<uint32_t>(data + 16);
m_nwid = Utils::loadBigEndian<uint64_t>(data);
m_timestamp = (int64_t)Utils::loadBigEndian<uint64_t>(data + 8);
m_id = Utils::loadBigEndian<uint32_t>(data + 16);
const unsigned int rc = Utils::loadBigEndian<uint16_t>(data + 20);
if (rc > ZT_MAX_CAPABILITY_RULES)
return -1;
const int rulesLen = unmarshalVirtualNetworkRules(data + 22, len - 22, m_rules, m_ruleCount, rc);
if (rulesLen < 0)
return rulesLen;
int p = 22 + rulesLen;
const unsigned int rc = Utils::loadBigEndian<uint16_t>(data + 20);
if (rc > ZT_MAX_CAPABILITY_RULES)
return -1;
const int rulesLen = unmarshalVirtualNetworkRules(data + 22, len - 22, m_rules, m_ruleCount, rc);
if (rulesLen < 0)
return rulesLen;
int p = 22 + rulesLen;
if (p >= len)
return -1;
++p; // LEGACY: skip old max record count
if (p >= len)
return -1;
++p; // LEGACY: skip old max record count
// LEGACY: since it was once supported to have multiple records, scan them all. Since
// this feature was never used, just set the signature and issued to and other related
// fields each time and we should only ever see one. If there's more than one and the
// last is not the controller, this credential will just fail validity check.
for (unsigned int i = 0;;++i) {
if ((p + ZT_ADDRESS_LENGTH) > len)
return -1;
const Address to(data + p);
p += ZT_ADDRESS_LENGTH;
// LEGACY: since it was once supported to have multiple records, scan them all. Since
// this feature was never used, just set the signature and issued to and other related
// fields each time and we should only ever see one. If there's more than one and the
// last is not the controller, this credential will just fail validity check.
for (unsigned int i = 0;; ++i) {
if ((p + ZT_ADDRESS_LENGTH) > len)
return -1;
const Address to(data + p);
p += ZT_ADDRESS_LENGTH;
if (!to)
break;
if (! to)
break;
m_issuedTo = to;
if ((p + ZT_ADDRESS_LENGTH) > len)
return -1;
m_signedBy.setTo(data + p);
p += ZT_ADDRESS_LENGTH + 1; // LEGACY: +1 to skip reserved field
m_issuedTo = to;
if ((p + ZT_ADDRESS_LENGTH) > len)
return -1;
m_signedBy.setTo(data + p);
p += ZT_ADDRESS_LENGTH + 1; // LEGACY: +1 to skip reserved field
if ((p + 2) > len)
return -1;
m_signatureLength = Utils::loadBigEndian<uint16_t>(data + p);
p += 2;
if ((m_signatureLength > sizeof(m_signature)) || ((p + (int) m_signatureLength) > len))
return -1;
Utils::copy(m_signature, data + p, m_signatureLength);
p += (int) m_signatureLength;
}
if ((p + 2) > len)
return -1;
m_signatureLength = Utils::loadBigEndian<uint16_t>(data + p);
p += 2;
if ((m_signatureLength > sizeof(m_signature)) || ((p + (int)m_signatureLength) > len))
return -1;
Utils::copy(m_signature, data + p, m_signatureLength);
p += (int)m_signatureLength;
}
if ((p + 2) > len)
return -1;
p += 2 + Utils::loadBigEndian<uint16_t>(data + p);
if ((p + 2) > len)
return -1;
p += 2 + Utils::loadBigEndian<uint16_t>(data + p);
if (p > len)
return -1;
if (p > len)
return -1;
return p;
return p;
}
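Editor's note: the marshal()/unmarshal() pair above is easiest to sanity-check with a round trip. The sketch below is a minimal illustration, not part of the commit; it assumes a capability that has already been signed by the controller (so the issued-to record is present) and that carries no flow rules, and it uses only the marshal(), unmarshal() and accessor signatures visible in this diff.

// Hedged sketch: round-trip a signed, rule-less CapabilityCredential through
// its wire encoding using only the API shown in this commit.
#include "CapabilityCredential.hpp"
#include <cassert>

using namespace ZeroTier;

static void roundTripCapability(const CapabilityCredential& cap)
{
    uint8_t buf[ZT_CAPABILITY_MARSHAL_SIZE_MAX];
    const int written = cap.marshal(buf);   // forSign defaults to false
    assert(written > 0);

    CapabilityCredential decoded;
    const int consumed = decoded.unmarshal(buf, written);
    assert(consumed > 0);                   // -1 would indicate a malformed buffer

    // The fixed header fields are encoded unconditionally at the front.
    assert(decoded.id() == cap.id());
    assert(decoded.networkId() == cap.networkId());
    assert(decoded.timestamp() == cap.timestamp());
}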
int CapabilityCredential::marshalVirtualNetworkRules(uint8_t *data, const ZT_VirtualNetworkRule *const rules, const unsigned int ruleCount) noexcept
int CapabilityCredential::marshalVirtualNetworkRules(
uint8_t* data,
const ZT_VirtualNetworkRule* const rules,
const unsigned int ruleCount) noexcept
{
int p = 0;
for (unsigned int i = 0;i < ruleCount;++i) {
data[p++] = rules[i].t;
switch ((ZT_VirtualNetworkRuleType) (rules[i].t & 0x3fU)) {
default:
data[p++] = 0;
break;
case ZT_NETWORK_RULE_ACTION_TEE:
case ZT_NETWORK_RULE_ACTION_WATCH:
case ZT_NETWORK_RULE_ACTION_REDIRECT:
data[p++] = 14;
Utils::storeBigEndian<uint64_t>(data + p, rules[i].v.fwd.address);
p += 8;
Utils::storeBigEndian<uint32_t>(data + p, rules[i].v.fwd.flags);
p += 4;
Utils::storeBigEndian<uint16_t>(data + p, rules[i].v.fwd.length);
p += 2;
break;
case ZT_NETWORK_RULE_MATCH_SOURCE_ZEROTIER_ADDRESS:
case ZT_NETWORK_RULE_MATCH_DEST_ZEROTIER_ADDRESS:
data[p++] = 5;
Address(rules[i].v.zt).copyTo(data + p);
p += ZT_ADDRESS_LENGTH;
break;
case ZT_NETWORK_RULE_MATCH_VLAN_ID:
data[p++] = 2;
Utils::storeBigEndian<uint16_t>(data + p, rules[i].v.vlanId);
p += 2;
break;
case ZT_NETWORK_RULE_MATCH_VLAN_PCP:
data[p++] = 1;
data[p++] = rules[i].v.vlanPcp;
break;
case ZT_NETWORK_RULE_MATCH_VLAN_DEI:
data[p++] = 1;
data[p++] = rules[i].v.vlanDei;
break;
case ZT_NETWORK_RULE_MATCH_MAC_SOURCE:
case ZT_NETWORK_RULE_MATCH_MAC_DEST:
data[p++] = 6;
MAC(rules[i].v.mac).copyTo(data + p);
p += 6;
break;
case ZT_NETWORK_RULE_MATCH_IPV4_SOURCE:
case ZT_NETWORK_RULE_MATCH_IPV4_DEST:
data[p++] = 5;
data[p++] = reinterpret_cast<const uint8_t *>(&(rules[i].v.ipv4.ip))[0];
data[p++] = reinterpret_cast<const uint8_t *>(&(rules[i].v.ipv4.ip))[1];
data[p++] = reinterpret_cast<const uint8_t *>(&(rules[i].v.ipv4.ip))[2];
data[p++] = reinterpret_cast<const uint8_t *>(&(rules[i].v.ipv4.ip))[3];
data[p++] = rules[i].v.ipv4.mask;
break;
case ZT_NETWORK_RULE_MATCH_IPV6_SOURCE:
case ZT_NETWORK_RULE_MATCH_IPV6_DEST:
data[p++] = 17;
Utils::copy<16>(data + p, rules[i].v.ipv6.ip);
p += 16;
data[p++] = rules[i].v.ipv6.mask;
break;
case ZT_NETWORK_RULE_MATCH_IP_TOS:
data[p++] = 3;
data[p++] = rules[i].v.ipTos.mask;
data[p++] = rules[i].v.ipTos.value[0];
data[p++] = rules[i].v.ipTos.value[1];
break;
case ZT_NETWORK_RULE_MATCH_IP_PROTOCOL:
data[p++] = 1;
data[p++] = rules[i].v.ipProtocol;
break;
case ZT_NETWORK_RULE_MATCH_ETHERTYPE:
data[p++] = 2;
Utils::storeBigEndian<uint16_t>(data + p, rules[i].v.etherType);
p += 2;
break;
case ZT_NETWORK_RULE_MATCH_ICMP:
data[p++] = 3;
data[p++] = rules[i].v.icmp.type;
data[p++] = rules[i].v.icmp.code;
data[p++] = rules[i].v.icmp.flags;
break;
case ZT_NETWORK_RULE_MATCH_IP_SOURCE_PORT_RANGE:
case ZT_NETWORK_RULE_MATCH_IP_DEST_PORT_RANGE:
data[p++] = 4;
Utils::storeBigEndian<uint16_t>(data + p, rules[i].v.port[0]);
p += 2;
Utils::storeBigEndian<uint16_t>(data + p, rules[i].v.port[1]);
p += 2;
break;
case ZT_NETWORK_RULE_MATCH_CHARACTERISTICS:
data[p++] = 8;
Utils::storeBigEndian<uint64_t>(data + p, rules[i].v.characteristics);
p += 8;
break;
case ZT_NETWORK_RULE_MATCH_FRAME_SIZE_RANGE:
data[p++] = 4;
Utils::storeBigEndian<uint16_t>(data + p, rules[i].v.frameSize[0]);
p += 2;
Utils::storeBigEndian<uint16_t>(data + p, rules[i].v.frameSize[1]);
p += 2;
break;
case ZT_NETWORK_RULE_MATCH_RANDOM:
data[p++] = 4;
Utils::storeBigEndian<uint32_t>(data + p, rules[i].v.randomProbability);
p += 4;
break;
case ZT_NETWORK_RULE_MATCH_TAGS_DIFFERENCE:
case ZT_NETWORK_RULE_MATCH_TAGS_BITWISE_AND:
case ZT_NETWORK_RULE_MATCH_TAGS_BITWISE_OR:
case ZT_NETWORK_RULE_MATCH_TAGS_BITWISE_XOR:
case ZT_NETWORK_RULE_MATCH_TAGS_EQUAL:
case ZT_NETWORK_RULE_MATCH_TAG_SENDER:
case ZT_NETWORK_RULE_MATCH_TAG_RECEIVER:
data[p++] = 8;
Utils::storeBigEndian<uint32_t>(data + p, rules[i].v.tag.id);
p += 4;
Utils::storeBigEndian<uint32_t>(data + p, rules[i].v.tag.value);
p += 4;
break;
case ZT_NETWORK_RULE_MATCH_INTEGER_RANGE:
data[p++] = 19;
Utils::storeBigEndian<uint64_t>(data + p, rules[i].v.intRange.start);
p += 8;
Utils::storeBigEndian<uint64_t>(data + p, rules[i].v.intRange.start + (uint64_t) rules[i].v.intRange.end);
p += 8;
Utils::storeBigEndian<uint16_t>(data + p, rules[i].v.intRange.idx);
p += 2;
data[p++] = rules[i].v.intRange.format;
break;
}
}
return p;
int p = 0;
for (unsigned int i = 0; i < ruleCount; ++i) {
data[p++] = rules[i].t;
switch ((ZT_VirtualNetworkRuleType)(rules[i].t & 0x3fU)) {
default:
data[p++] = 0;
break;
case ZT_NETWORK_RULE_ACTION_TEE:
case ZT_NETWORK_RULE_ACTION_WATCH:
case ZT_NETWORK_RULE_ACTION_REDIRECT:
data[p++] = 14;
Utils::storeBigEndian<uint64_t>(data + p, rules[i].v.fwd.address);
p += 8;
Utils::storeBigEndian<uint32_t>(data + p, rules[i].v.fwd.flags);
p += 4;
Utils::storeBigEndian<uint16_t>(data + p, rules[i].v.fwd.length);
p += 2;
break;
case ZT_NETWORK_RULE_MATCH_SOURCE_ZEROTIER_ADDRESS:
case ZT_NETWORK_RULE_MATCH_DEST_ZEROTIER_ADDRESS:
data[p++] = 5;
Address(rules[i].v.zt).copyTo(data + p);
p += ZT_ADDRESS_LENGTH;
break;
case ZT_NETWORK_RULE_MATCH_VLAN_ID:
data[p++] = 2;
Utils::storeBigEndian<uint16_t>(data + p, rules[i].v.vlanId);
p += 2;
break;
case ZT_NETWORK_RULE_MATCH_VLAN_PCP:
data[p++] = 1;
data[p++] = rules[i].v.vlanPcp;
break;
case ZT_NETWORK_RULE_MATCH_VLAN_DEI:
data[p++] = 1;
data[p++] = rules[i].v.vlanDei;
break;
case ZT_NETWORK_RULE_MATCH_MAC_SOURCE:
case ZT_NETWORK_RULE_MATCH_MAC_DEST:
data[p++] = 6;
MAC(rules[i].v.mac).copyTo(data + p);
p += 6;
break;
case ZT_NETWORK_RULE_MATCH_IPV4_SOURCE:
case ZT_NETWORK_RULE_MATCH_IPV4_DEST:
data[p++] = 5;
data[p++] = reinterpret_cast<const uint8_t*>(&(rules[i].v.ipv4.ip))[0];
data[p++] = reinterpret_cast<const uint8_t*>(&(rules[i].v.ipv4.ip))[1];
data[p++] = reinterpret_cast<const uint8_t*>(&(rules[i].v.ipv4.ip))[2];
data[p++] = reinterpret_cast<const uint8_t*>(&(rules[i].v.ipv4.ip))[3];
data[p++] = rules[i].v.ipv4.mask;
break;
case ZT_NETWORK_RULE_MATCH_IPV6_SOURCE:
case ZT_NETWORK_RULE_MATCH_IPV6_DEST:
data[p++] = 17;
Utils::copy<16>(data + p, rules[i].v.ipv6.ip);
p += 16;
data[p++] = rules[i].v.ipv6.mask;
break;
case ZT_NETWORK_RULE_MATCH_IP_TOS:
data[p++] = 3;
data[p++] = rules[i].v.ipTos.mask;
data[p++] = rules[i].v.ipTos.value[0];
data[p++] = rules[i].v.ipTos.value[1];
break;
case ZT_NETWORK_RULE_MATCH_IP_PROTOCOL:
data[p++] = 1;
data[p++] = rules[i].v.ipProtocol;
break;
case ZT_NETWORK_RULE_MATCH_ETHERTYPE:
data[p++] = 2;
Utils::storeBigEndian<uint16_t>(data + p, rules[i].v.etherType);
p += 2;
break;
case ZT_NETWORK_RULE_MATCH_ICMP:
data[p++] = 3;
data[p++] = rules[i].v.icmp.type;
data[p++] = rules[i].v.icmp.code;
data[p++] = rules[i].v.icmp.flags;
break;
case ZT_NETWORK_RULE_MATCH_IP_SOURCE_PORT_RANGE:
case ZT_NETWORK_RULE_MATCH_IP_DEST_PORT_RANGE:
data[p++] = 4;
Utils::storeBigEndian<uint16_t>(data + p, rules[i].v.port[0]);
p += 2;
Utils::storeBigEndian<uint16_t>(data + p, rules[i].v.port[1]);
p += 2;
break;
case ZT_NETWORK_RULE_MATCH_CHARACTERISTICS:
data[p++] = 8;
Utils::storeBigEndian<uint64_t>(data + p, rules[i].v.characteristics);
p += 8;
break;
case ZT_NETWORK_RULE_MATCH_FRAME_SIZE_RANGE:
data[p++] = 4;
Utils::storeBigEndian<uint16_t>(data + p, rules[i].v.frameSize[0]);
p += 2;
Utils::storeBigEndian<uint16_t>(data + p, rules[i].v.frameSize[1]);
p += 2;
break;
case ZT_NETWORK_RULE_MATCH_RANDOM:
data[p++] = 4;
Utils::storeBigEndian<uint32_t>(data + p, rules[i].v.randomProbability);
p += 4;
break;
case ZT_NETWORK_RULE_MATCH_TAGS_DIFFERENCE:
case ZT_NETWORK_RULE_MATCH_TAGS_BITWISE_AND:
case ZT_NETWORK_RULE_MATCH_TAGS_BITWISE_OR:
case ZT_NETWORK_RULE_MATCH_TAGS_BITWISE_XOR:
case ZT_NETWORK_RULE_MATCH_TAGS_EQUAL:
case ZT_NETWORK_RULE_MATCH_TAG_SENDER:
case ZT_NETWORK_RULE_MATCH_TAG_RECEIVER:
data[p++] = 8;
Utils::storeBigEndian<uint32_t>(data + p, rules[i].v.tag.id);
p += 4;
Utils::storeBigEndian<uint32_t>(data + p, rules[i].v.tag.value);
p += 4;
break;
case ZT_NETWORK_RULE_MATCH_INTEGER_RANGE:
data[p++] = 19;
Utils::storeBigEndian<uint64_t>(data + p, rules[i].v.intRange.start);
p += 8;
Utils::storeBigEndian<uint64_t>(
data + p,
rules[i].v.intRange.start + (uint64_t)rules[i].v.intRange.end);
p += 8;
Utils::storeBigEndian<uint16_t>(data + p, rules[i].v.intRange.idx);
p += 2;
data[p++] = rules[i].v.intRange.format;
break;
}
}
return p;
}
int CapabilityCredential::unmarshalVirtualNetworkRules(const uint8_t *const data, const int len, ZT_VirtualNetworkRule *const rules, unsigned int &ruleCount, const unsigned int maxRuleCount) noexcept
int CapabilityCredential::unmarshalVirtualNetworkRules(
const uint8_t* const data,
const int len,
ZT_VirtualNetworkRule* const rules,
unsigned int& ruleCount,
const unsigned int maxRuleCount) noexcept
{
int p = 0;
unsigned int rc = 0;
while (rc < maxRuleCount) {
if (p >= len)
return -1;
rules[ruleCount].t = data[p++];
const int fieldLen = (int) data[p++];
if ((p + fieldLen) > len)
return -1;
switch ((ZT_VirtualNetworkRuleType) (rules[ruleCount].t & 0x3fU)) {
default:
break;
case ZT_NETWORK_RULE_ACTION_TEE:
case ZT_NETWORK_RULE_ACTION_WATCH:
case ZT_NETWORK_RULE_ACTION_REDIRECT:
if ((p + 14) > len) return -1;
rules[ruleCount].v.fwd.address = Utils::loadBigEndian<uint64_t>(data + p);
p += 8;
rules[ruleCount].v.fwd.flags = Utils::loadBigEndian<uint32_t>(data + p);
p += 4;
rules[ruleCount].v.fwd.length = Utils::loadBigEndian<uint16_t>(data + p);
p += 2;
break;
case ZT_NETWORK_RULE_MATCH_SOURCE_ZEROTIER_ADDRESS:
case ZT_NETWORK_RULE_MATCH_DEST_ZEROTIER_ADDRESS:
if ((p + ZT_ADDRESS_LENGTH) > len) return -1;
rules[ruleCount].v.zt = Address(data + p).toInt();
p += ZT_ADDRESS_LENGTH;
break;
case ZT_NETWORK_RULE_MATCH_VLAN_ID:
if ((p + 2) > len) return -1;
rules[ruleCount].v.vlanId = Utils::loadBigEndian<uint16_t>(data + p);
p += 2;
break;
case ZT_NETWORK_RULE_MATCH_VLAN_PCP:
if ((p + 1) > len) return -1;
rules[ruleCount].v.vlanPcp = data[p++];
break;
case ZT_NETWORK_RULE_MATCH_VLAN_DEI:
if ((p + 1) > len) return -1;
rules[ruleCount].v.vlanDei = data[p++];
break;
case ZT_NETWORK_RULE_MATCH_MAC_SOURCE:
case ZT_NETWORK_RULE_MATCH_MAC_DEST:
if ((p + 6) > len) return -1;
Utils::copy<6>(rules[ruleCount].v.mac, data + p);
p += 6;
break;
case ZT_NETWORK_RULE_MATCH_IPV4_SOURCE:
case ZT_NETWORK_RULE_MATCH_IPV4_DEST:
if ((p + 5) > len) return -1;
Utils::copy<4>(&(rules[ruleCount].v.ipv4.ip), data + p);
p += 4;
rules[ruleCount].v.ipv4.mask = data[p++];
break;
case ZT_NETWORK_RULE_MATCH_IPV6_SOURCE:
case ZT_NETWORK_RULE_MATCH_IPV6_DEST:
if ((p + 17) > len) return -1;
Utils::copy<16>(rules[ruleCount].v.ipv6.ip, data + p);
p += 16;
rules[ruleCount].v.ipv6.mask = data[p++];
break;
case ZT_NETWORK_RULE_MATCH_IP_TOS:
if ((p + 3) > len) return -1;
rules[ruleCount].v.ipTos.mask = data[p++];
rules[ruleCount].v.ipTos.value[0] = data[p++];
rules[ruleCount].v.ipTos.value[1] = data[p++];
break;
case ZT_NETWORK_RULE_MATCH_IP_PROTOCOL:
if ((p + 1) > len) return -1;
rules[ruleCount].v.ipProtocol = data[p++];
break;
case ZT_NETWORK_RULE_MATCH_ETHERTYPE:
if ((p + 2) > len) return -1;
rules[ruleCount].v.etherType = Utils::loadBigEndian<uint16_t>(data + p);
p += 2;
break;
case ZT_NETWORK_RULE_MATCH_ICMP:
if ((p + 3) > len) return -1;
rules[ruleCount].v.icmp.type = data[p++];
rules[ruleCount].v.icmp.code = data[p++];
rules[ruleCount].v.icmp.flags = data[p++];
break;
case ZT_NETWORK_RULE_MATCH_IP_SOURCE_PORT_RANGE:
case ZT_NETWORK_RULE_MATCH_IP_DEST_PORT_RANGE:
if ((p + 4) > len) return -1;
rules[ruleCount].v.port[0] = Utils::loadBigEndian<uint16_t>(data + p);
p += 2;
rules[ruleCount].v.port[1] = Utils::loadBigEndian<uint16_t>(data + p);
p += 2;
break;
case ZT_NETWORK_RULE_MATCH_CHARACTERISTICS:
if ((p + 8) > len) return -1;
rules[ruleCount].v.characteristics = Utils::loadBigEndian<uint64_t>(data + p);
p += 8;
break;
case ZT_NETWORK_RULE_MATCH_FRAME_SIZE_RANGE:
if ((p + 4) > len) return -1;
rules[ruleCount].v.frameSize[0] = Utils::loadBigEndian<uint16_t>(data + p);
p += 2;
rules[ruleCount].v.frameSize[1] = Utils::loadBigEndian<uint16_t>(data + p);
p += 2;
break;
case ZT_NETWORK_RULE_MATCH_RANDOM:
if ((p + 4) > len) return -1;
rules[ruleCount].v.randomProbability = Utils::loadBigEndian<uint32_t>(data + p);
p += 4;
break;
case ZT_NETWORK_RULE_MATCH_TAGS_DIFFERENCE:
case ZT_NETWORK_RULE_MATCH_TAGS_BITWISE_AND:
case ZT_NETWORK_RULE_MATCH_TAGS_BITWISE_OR:
case ZT_NETWORK_RULE_MATCH_TAGS_BITWISE_XOR:
case ZT_NETWORK_RULE_MATCH_TAGS_EQUAL:
case ZT_NETWORK_RULE_MATCH_TAG_SENDER:
case ZT_NETWORK_RULE_MATCH_TAG_RECEIVER:
if ((p + 4) > len) return -1;
rules[ruleCount].v.tag.id = Utils::loadBigEndian<uint32_t>(data + p);
p += 4;
rules[ruleCount].v.tag.value = Utils::loadBigEndian<uint32_t>(data + p);
p += 4;
break;
case ZT_NETWORK_RULE_MATCH_INTEGER_RANGE:
if ((p + 19) > len) return -1;
rules[ruleCount].v.intRange.start = Utils::loadBigEndian<uint64_t>(data + p);
p += 8;
rules[ruleCount].v.intRange.end = (uint32_t) (Utils::loadBigEndian<uint64_t>(data + p) - rules[ruleCount].v.intRange.start);
p += 8;
rules[ruleCount].v.intRange.idx = Utils::loadBigEndian<uint16_t>(data + p);
p += 2;
rules[ruleCount].v.intRange.format = data[p++];
break;
}
p += fieldLen;
++rc;
}
ruleCount = rc;
return p;
int p = 0;
unsigned int rc = 0;
while (rc < maxRuleCount) {
if (p >= len)
return -1;
rules[ruleCount].t = data[p++];
const int fieldLen = (int)data[p++];
if ((p + fieldLen) > len)
return -1;
switch ((ZT_VirtualNetworkRuleType)(rules[ruleCount].t & 0x3fU)) {
default:
break;
case ZT_NETWORK_RULE_ACTION_TEE:
case ZT_NETWORK_RULE_ACTION_WATCH:
case ZT_NETWORK_RULE_ACTION_REDIRECT:
if ((p + 14) > len)
return -1;
rules[ruleCount].v.fwd.address = Utils::loadBigEndian<uint64_t>(data + p);
p += 8;
rules[ruleCount].v.fwd.flags = Utils::loadBigEndian<uint32_t>(data + p);
p += 4;
rules[ruleCount].v.fwd.length = Utils::loadBigEndian<uint16_t>(data + p);
p += 2;
break;
case ZT_NETWORK_RULE_MATCH_SOURCE_ZEROTIER_ADDRESS:
case ZT_NETWORK_RULE_MATCH_DEST_ZEROTIER_ADDRESS:
if ((p + ZT_ADDRESS_LENGTH) > len)
return -1;
rules[ruleCount].v.zt = Address(data + p).toInt();
p += ZT_ADDRESS_LENGTH;
break;
case ZT_NETWORK_RULE_MATCH_VLAN_ID:
if ((p + 2) > len)
return -1;
rules[ruleCount].v.vlanId = Utils::loadBigEndian<uint16_t>(data + p);
p += 2;
break;
case ZT_NETWORK_RULE_MATCH_VLAN_PCP:
if ((p + 1) > len)
return -1;
rules[ruleCount].v.vlanPcp = data[p++];
break;
case ZT_NETWORK_RULE_MATCH_VLAN_DEI:
if ((p + 1) > len)
return -1;
rules[ruleCount].v.vlanDei = data[p++];
break;
case ZT_NETWORK_RULE_MATCH_MAC_SOURCE:
case ZT_NETWORK_RULE_MATCH_MAC_DEST:
if ((p + 6) > len)
return -1;
Utils::copy<6>(rules[ruleCount].v.mac, data + p);
p += 6;
break;
case ZT_NETWORK_RULE_MATCH_IPV4_SOURCE:
case ZT_NETWORK_RULE_MATCH_IPV4_DEST:
if ((p + 5) > len)
return -1;
Utils::copy<4>(&(rules[ruleCount].v.ipv4.ip), data + p);
p += 4;
rules[ruleCount].v.ipv4.mask = data[p++];
break;
case ZT_NETWORK_RULE_MATCH_IPV6_SOURCE:
case ZT_NETWORK_RULE_MATCH_IPV6_DEST:
if ((p + 17) > len)
return -1;
Utils::copy<16>(rules[ruleCount].v.ipv6.ip, data + p);
p += 16;
rules[ruleCount].v.ipv6.mask = data[p++];
break;
case ZT_NETWORK_RULE_MATCH_IP_TOS:
if ((p + 3) > len)
return -1;
rules[ruleCount].v.ipTos.mask = data[p++];
rules[ruleCount].v.ipTos.value[0] = data[p++];
rules[ruleCount].v.ipTos.value[1] = data[p++];
break;
case ZT_NETWORK_RULE_MATCH_IP_PROTOCOL:
if ((p + 1) > len)
return -1;
rules[ruleCount].v.ipProtocol = data[p++];
break;
case ZT_NETWORK_RULE_MATCH_ETHERTYPE:
if ((p + 2) > len)
return -1;
rules[ruleCount].v.etherType = Utils::loadBigEndian<uint16_t>(data + p);
p += 2;
break;
case ZT_NETWORK_RULE_MATCH_ICMP:
if ((p + 3) > len)
return -1;
rules[ruleCount].v.icmp.type = data[p++];
rules[ruleCount].v.icmp.code = data[p++];
rules[ruleCount].v.icmp.flags = data[p++];
break;
case ZT_NETWORK_RULE_MATCH_IP_SOURCE_PORT_RANGE:
case ZT_NETWORK_RULE_MATCH_IP_DEST_PORT_RANGE:
if ((p + 4) > len)
return -1;
rules[ruleCount].v.port[0] = Utils::loadBigEndian<uint16_t>(data + p);
p += 2;
rules[ruleCount].v.port[1] = Utils::loadBigEndian<uint16_t>(data + p);
p += 2;
break;
case ZT_NETWORK_RULE_MATCH_CHARACTERISTICS:
if ((p + 8) > len)
return -1;
rules[ruleCount].v.characteristics = Utils::loadBigEndian<uint64_t>(data + p);
p += 8;
break;
case ZT_NETWORK_RULE_MATCH_FRAME_SIZE_RANGE:
if ((p + 4) > len)
return -1;
rules[ruleCount].v.frameSize[0] = Utils::loadBigEndian<uint16_t>(data + p);
p += 2;
rules[ruleCount].v.frameSize[1] = Utils::loadBigEndian<uint16_t>(data + p);
p += 2;
break;
case ZT_NETWORK_RULE_MATCH_RANDOM:
if ((p + 4) > len)
return -1;
rules[ruleCount].v.randomProbability = Utils::loadBigEndian<uint32_t>(data + p);
p += 4;
break;
case ZT_NETWORK_RULE_MATCH_TAGS_DIFFERENCE:
case ZT_NETWORK_RULE_MATCH_TAGS_BITWISE_AND:
case ZT_NETWORK_RULE_MATCH_TAGS_BITWISE_OR:
case ZT_NETWORK_RULE_MATCH_TAGS_BITWISE_XOR:
case ZT_NETWORK_RULE_MATCH_TAGS_EQUAL:
case ZT_NETWORK_RULE_MATCH_TAG_SENDER:
case ZT_NETWORK_RULE_MATCH_TAG_RECEIVER:
if ((p + 4) > len)
return -1;
rules[ruleCount].v.tag.id = Utils::loadBigEndian<uint32_t>(data + p);
p += 4;
rules[ruleCount].v.tag.value = Utils::loadBigEndian<uint32_t>(data + p);
p += 4;
break;
case ZT_NETWORK_RULE_MATCH_INTEGER_RANGE:
if ((p + 19) > len)
return -1;
rules[ruleCount].v.intRange.start = Utils::loadBigEndian<uint64_t>(data + p);
p += 8;
rules[ruleCount].v.intRange.end =
(uint32_t)(Utils::loadBigEndian<uint64_t>(data + p) - rules[ruleCount].v.intRange.start);
p += 8;
rules[ruleCount].v.intRange.idx = Utils::loadBigEndian<uint16_t>(data + p);
p += 2;
rules[ruleCount].v.intRange.format = data[p++];
break;
}
p += fieldLen;
++rc;
}
ruleCount = rc;
return p;
}
} // namespace ZeroTier
} // namespace ZeroTier
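Each rule in the encoding above is a one-byte type followed by a one-byte field length, so a decoder can skip rule types it does not understand by honoring the length byte. A minimal sketch of the encoder side, not from the commit: the rule-type constants and the ZT_VirtualNetworkRule layout are assumed to come from the public zerotier.h API, and the byte-level assertions simply restate what marshalVirtualNetworkRules() writes for these two cases.

// Hedged sketch: encode two rules and inspect the type/length framing.
#include "CapabilityCredential.hpp"
#include <cassert>
#include <cstring>

using namespace ZeroTier;

static void encodeTwoRules()
{
    ZT_VirtualNetworkRule rules[2];
    std::memset(rules, 0, sizeof(rules));
    rules[0].t = ZT_NETWORK_RULE_MATCH_ETHERTYPE;   // 2-byte big-endian payload
    rules[0].v.etherType = 0x0800;                  // IPv4
    rules[1].t = ZT_NETWORK_RULE_ACTION_ACCEPT;     // no payload, encoded with length 0

    uint8_t buf[2 * ZT_VIRTUALNETWORKRULE_MARSHAL_SIZE_MAX];
    const int written = CapabilityCredential::marshalVirtualNetworkRules(buf, rules, 2);
    assert(written == 6);

    assert(buf[0] == ZT_NETWORK_RULE_MATCH_ETHERTYPE);
    assert(buf[1] == 2);                            // field length lets unknown types be skipped
    assert(buf[2] == 0x08 && buf[3] == 0x00);       // big-endian ethertype
    assert(buf[4] == ZT_NETWORK_RULE_ACTION_ACCEPT);
    assert(buf[5] == 0);                            // zero-length field for plain actions
}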

View file

@@ -14,15 +14,17 @@
#ifndef ZT_CAPABILITY_HPP
#define ZT_CAPABILITY_HPP
#include "Constants.hpp"
#include "Credential.hpp"
#include "Address.hpp"
#include "C25519.hpp"
#include "Utils.hpp"
#include "Constants.hpp"
#include "Credential.hpp"
#include "Identity.hpp"
#include "Utils.hpp"
#define ZT_VIRTUALNETWORKRULE_MARSHAL_SIZE_MAX 21
#define ZT_CAPABILITY_MARSHAL_SIZE_MAX (8 + 8 + 4 + 1 + 2 + (ZT_VIRTUALNETWORKRULE_MARSHAL_SIZE_MAX * ZT_MAX_CAPABILITY_RULES) + 2 + (5 + 5 + 1 + 2 + ZT_SIGNATURE_BUFFER_SIZE))
#define ZT_CAPABILITY_MARSHAL_SIZE_MAX \
(8 + 8 + 4 + 1 + 2 + (ZT_VIRTUALNETWORKRULE_MARSHAL_SIZE_MAX * ZT_MAX_CAPABILITY_RULES) + 2 \
+ (5 + 5 + 1 + 2 + ZT_SIGNATURE_BUFFER_SIZE))
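The worst-case encoded size is a compile-time constant, so callers can preflight a fixed stack buffer instead of allocating. A small illustrative sketch, not from the commit, using only marshalSizeMax() and marshal() as declared below.

// Hedged sketch: preflight a caller-supplied buffer against the compile-time bound.
#include "CapabilityCredential.hpp"

using namespace ZeroTier;

static int encodeCapability(const CapabilityCredential& cap, uint8_t* out, const unsigned int outSize)
{
    // marshalSizeMax() simply returns ZT_CAPABILITY_MARSHAL_SIZE_MAX, so any
    // destination at least that large can never overflow during marshal().
    if (outSize < (unsigned int)CapabilityCredential::marshalSizeMax())
        return -1;
    return cap.marshal(out);   // bytes written, always <= marshalSizeMax()
}

// Typical call site with a fixed stack buffer:
//   uint8_t buf[ZT_CAPABILITY_MARSHAL_SIZE_MAX];
//   const int n = encodeCapability(cap, buf, sizeof(buf));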
namespace ZeroTier {
@@ -46,141 +48,180 @@ class Context;
* Note that this is after evaluation of network scope rules and only if
* network scope rules do not deliver an explicit match.
*/
class CapabilityCredential : public Credential
{
friend class Credential;
class CapabilityCredential : public Credential {
friend class Credential;
public:
static constexpr ZT_CredentialType credentialType() noexcept
{ return ZT_CREDENTIAL_TYPE_CAPABILITY; }
public:
static constexpr ZT_CredentialType credentialType() noexcept
{
return ZT_CREDENTIAL_TYPE_CAPABILITY;
}
ZT_INLINE CapabilityCredential() noexcept
{ memoryZero(this); }
ZT_INLINE CapabilityCredential() noexcept
{
memoryZero(this);
}
/**
* @param id Capability ID
* @param nwid Network ID
* @param timestamp Timestamp (at controller)
* @param mccl Maximum custody chain length (1 to create non-transferable capability)
* @param rules Network flow rules for this capability
* @param ruleCount Number of flow rules
*/
CapabilityCredential(
const uint32_t id,
const uint64_t nwid,
const int64_t timestamp,
const ZT_VirtualNetworkRule *const rules,
const unsigned int ruleCount) noexcept;
/**
* @param id Capability ID
* @param nwid Network ID
* @param timestamp Timestamp (at controller)
* @param mccl Maximum custody chain length (1 to create non-transferable capability)
* @param rules Network flow rules for this capability
* @param ruleCount Number of flow rules
*/
CapabilityCredential(
const uint32_t id,
const uint64_t nwid,
const int64_t timestamp,
const ZT_VirtualNetworkRule* const rules,
const unsigned int ruleCount) noexcept;
/**
* @return Rules -- see ruleCount() for size of array
*/
ZT_INLINE const ZT_VirtualNetworkRule *rules() const noexcept
{ return m_rules; }
/**
* @return Rules -- see ruleCount() for size of array
*/
ZT_INLINE const ZT_VirtualNetworkRule* rules() const noexcept
{
return m_rules;
}
/**
* @return Number of rules in rules()
*/
ZT_INLINE unsigned int ruleCount() const noexcept
{ return m_ruleCount; }
/**
* @return Number of rules in rules()
*/
ZT_INLINE unsigned int ruleCount() const noexcept
{
return m_ruleCount;
}
ZT_INLINE uint32_t id() const noexcept
{ return m_id; }
ZT_INLINE uint32_t id() const noexcept
{
return m_id;
}
ZT_INLINE uint64_t networkId() const noexcept
{ return m_nwid; }
ZT_INLINE uint64_t networkId() const noexcept
{
return m_nwid;
}
ZT_INLINE int64_t timestamp() const noexcept
{ return m_timestamp; }
ZT_INLINE int64_t timestamp() const noexcept
{
return m_timestamp;
}
ZT_INLINE int64_t revision() const noexcept
{ return m_timestamp; }
ZT_INLINE int64_t revision() const noexcept
{
return m_timestamp;
}
ZT_INLINE const Address &issuedTo() const noexcept
{ return m_issuedTo; }
ZT_INLINE const Address& issuedTo() const noexcept
{
return m_issuedTo;
}
ZT_INLINE const Address &signer() const noexcept
{ return m_signedBy; }
ZT_INLINE const Address& signer() const noexcept
{
return m_signedBy;
}
ZT_INLINE const uint8_t *signature() const noexcept
{ return m_signature; }
ZT_INLINE const uint8_t* signature() const noexcept
{
return m_signature;
}
ZT_INLINE unsigned int signatureLength() const noexcept
{ return m_signatureLength; }
ZT_INLINE unsigned int signatureLength() const noexcept
{
return m_signatureLength;
}
/**
* Sign this capability and add signature to its chain of custody
*
* If this returns false, this object should be considered to be
* in an undefined state and should be discarded. False can be returned
* if there is no more room for signatures (max chain length reached)
* or if the 'from' identity does not include a secret key to allow
* it to sign anything.
*
* @param from Signing identity (must have secret)
* @param to Recipient of this signature
* @return True if signature successful and chain of custody appended
*/
bool sign(const Identity &from, const Address &to) noexcept;
/**
* Sign this capability and add signature to its chain of custody
*
* If this returns false, this object should be considered to be
* in an undefined state and should be discarded. False can be returned
* if there is no more room for signatures (max chain length reached)
* or if the 'from' identity does not include a secret key to allow
* it to sign anything.
*
* @param from Signing identity (must have secret)
* @param to Recipient of this signature
* @return True if signature successful and chain of custody appended
*/
bool sign(const Identity& from, const Address& to) noexcept;
/**
* Verify this capability's chain of custody and signatures
*
* @param RR Runtime environment to provide for peer lookup, etc.
*/
ZT_INLINE Credential::VerifyResult verify(const Context &ctx, const CallContext &cc) const noexcept
{ return s_verify(ctx, cc, *this); }
/**
* Verify this capability's chain of custody and signatures
*
* @param RR Runtime environment to provide for peer lookup, etc.
*/
ZT_INLINE Credential::VerifyResult verify(const Context& ctx, const CallContext& cc) const noexcept
{
return s_verify(ctx, cc, *this);
}
static constexpr int marshalSizeMax() noexcept
{ return ZT_CAPABILITY_MARSHAL_SIZE_MAX; }
static constexpr int marshalSizeMax() noexcept
{
return ZT_CAPABILITY_MARSHAL_SIZE_MAX;
}
int marshal(uint8_t data[ZT_CAPABILITY_MARSHAL_SIZE_MAX], bool forSign = false) const noexcept;
int unmarshal(const uint8_t *data, int len) noexcept;
int marshal(uint8_t data[ZT_CAPABILITY_MARSHAL_SIZE_MAX], bool forSign = false) const noexcept;
int unmarshal(const uint8_t* data, int len) noexcept;
/**
* Marshal a set of virtual network rules
*
* @param data Buffer to store rules (must be at least ruleCount * ZT_VIRTUALNETWORKRULE_MARSHAL_SIZE_MAX)
* @param rules Network rules
* @param ruleCount Number of rules
* @return Number of bytes written or -1 on error
*/
static int marshalVirtualNetworkRules(uint8_t *data, const ZT_VirtualNetworkRule *rules, unsigned int ruleCount) noexcept;
/**
* Marshal a set of virtual network rules
*
* @param data Buffer to store rules (must be at least ruleCount * ZT_VIRTUALNETWORKRULE_MARSHAL_SIZE_MAX)
* @param rules Network rules
* @param ruleCount Number of rules
* @return Number of bytes written or -1 on error
*/
static int
marshalVirtualNetworkRules(uint8_t* data, const ZT_VirtualNetworkRule* rules, unsigned int ruleCount) noexcept;
/**
* Unmarshal a set of virtual network rules
*
* @param data Rule set to unmarshal
* @param len Length of data
* @param rules Buffer to store rules
* @param ruleCount Result parameter to set to the number of rules decoded
* @param maxRuleCount Capacity of rules buffer
* @return Number of bytes unmarshaled or -1 on error
*/
static int unmarshalVirtualNetworkRules(const uint8_t *data, int len, ZT_VirtualNetworkRule *rules, unsigned int &ruleCount, unsigned int maxRuleCount) noexcept;
/**
* Unmarshal a set of virtual network rules
*
* @param data Rule set to unmarshal
* @param len Length of data
* @param rules Buffer to store rules
* @param ruleCount Result parameter to set to the number of rules decoded
* @param maxRuleCount Capacity of rules buffer
* @return Number of bytes unmarshaled or -1 on error
*/
static int unmarshalVirtualNetworkRules(
const uint8_t* data,
int len,
ZT_VirtualNetworkRule* rules,
unsigned int& ruleCount,
unsigned int maxRuleCount) noexcept;
// Provides natural sort order by ID
ZT_INLINE bool operator<(const CapabilityCredential &c) const noexcept
{ return (m_id < c.m_id); }
// Provides natural sort order by ID
ZT_INLINE bool operator<(const CapabilityCredential& c) const noexcept
{
return (m_id < c.m_id);
}
ZT_INLINE bool operator==(const CapabilityCredential &c) const noexcept
{ return (memcmp(this, &c, sizeof(CapabilityCredential)) == 0); }
ZT_INLINE bool operator==(const CapabilityCredential& c) const noexcept
{
return (memcmp(this, &c, sizeof(CapabilityCredential)) == 0);
}
ZT_INLINE bool operator!=(const CapabilityCredential &c) const noexcept
{ return (memcmp(this, &c, sizeof(CapabilityCredential)) != 0); }
ZT_INLINE bool operator!=(const CapabilityCredential& c) const noexcept
{
return (memcmp(this, &c, sizeof(CapabilityCredential)) != 0);
}
private:
uint64_t m_nwid;
int64_t m_timestamp;
uint32_t m_id;
unsigned int m_ruleCount;
ZT_VirtualNetworkRule m_rules[ZT_MAX_CAPABILITY_RULES];
Address m_issuedTo;
Address m_signedBy;
unsigned int m_signatureLength;
uint8_t m_signature[ZT_SIGNATURE_BUFFER_SIZE];
private:
uint64_t m_nwid;
int64_t m_timestamp;
uint32_t m_id;
unsigned int m_ruleCount;
ZT_VirtualNetworkRule m_rules[ZT_MAX_CAPABILITY_RULES];
Address m_issuedTo;
Address m_signedBy;
unsigned int m_signatureLength;
uint8_t m_signature[ZT_SIGNATURE_BUFFER_SIZE];
};
} // namespace ZeroTier
} // namespace ZeroTier
#endif
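A minimal sketch of the controller-side issuance flow implied by the declarations above; it is not from the commit. The capability id of 1 is arbitrary, and controllerId is assumed to be an Identity that carries its secret key (otherwise sign() returns false).

// Hedged sketch: build and sign a capability for a member node.
#include "CapabilityCredential.hpp"
#include "Identity.hpp"

using namespace ZeroTier;

static bool issueCapability(
    const Identity& controllerId,               // assumed: controller identity holding its secret
    const Address& member,                      // recipient node address
    const uint64_t nwid,
    const int64_t nowMs,
    const ZT_VirtualNetworkRule* const rules,
    const unsigned int ruleCount,
    CapabilityCredential& out)
{
    // The constructor captures the rule set; issuedTo/signedBy stay empty
    // until sign() fills them in along with the signature.
    out = CapabilityCredential(1, nwid, nowMs, rules, ruleCount);
    return out.sign(controllerId, member);
}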

File diff suppressed because it is too large

View file

@@ -14,15 +14,15 @@
#ifndef ZT_CERTIFICATE_HPP
#define ZT_CERTIFICATE_HPP
#include "Constants.hpp"
#include "SHA512.hpp"
#include "C25519.hpp"
#include "Constants.hpp"
#include "Containers.hpp"
#include "Dictionary.hpp"
#include "ECC384.hpp"
#include "Identity.hpp"
#include "Locator.hpp"
#include "Dictionary.hpp"
#include "SHA512.hpp"
#include "Utils.hpp"
#include "Containers.hpp"
namespace ZeroTier {
@@ -43,188 +43,222 @@ namespace ZeroTier {
* field, so these will not work correctly before sign() or decode() is
* called.
*/
class Certificate : public ZT_Certificate
{
public:
Certificate() noexcept;
explicit Certificate(const ZT_Certificate &apiCert);
Certificate(const Certificate &cert);
~Certificate();
class Certificate : public ZT_Certificate {
public:
Certificate() noexcept;
explicit Certificate(const ZT_Certificate& apiCert);
Certificate(const Certificate& cert);
~Certificate();
Certificate &operator=(const ZT_Certificate &cert);
Certificate& operator=(const ZT_Certificate& cert);
ZT_INLINE Certificate &operator=(const Certificate &cert) noexcept
{
if (likely(&cert != this)) {
const ZT_Certificate *const sup = &cert;
*this = *sup;
}
return *this;
}
ZT_INLINE Certificate& operator=(const Certificate& cert) noexcept
{
if (likely(&cert != this)) {
const ZT_Certificate* const sup = &cert;
*this = *sup;
}
return *this;
}
ZT_INLINE H384 getSerialNo() const noexcept
{ return H384(this->serialNo); }
ZT_INLINE H384 getSerialNo() const noexcept
{
return H384(this->serialNo);
}
/**
* Add a subject node/identity without a locator
*
* @param id Identity
* @return Pointer to C struct
*/
ZT_Certificate_Identity *addSubjectIdentity(const Identity &id);
/**
* Add a subject node/identity without a locator
*
* @param id Identity
* @return Pointer to C struct
*/
ZT_Certificate_Identity* addSubjectIdentity(const Identity& id);
/**
* Add a subject node/identity with a locator
*
* @param id Identity
* @param loc Locator signed by identity (signature is NOT checked here)
* @return Pointer to C struct
*/
ZT_Certificate_Identity *addSubjectIdentity(const Identity &id, const Locator &loc);
/**
* Add a subject node/identity with a locator
*
* @param id Identity
* @param loc Locator signed by identity (signature is NOT checked here)
* @return Pointer to C struct
*/
ZT_Certificate_Identity* addSubjectIdentity(const Identity& id, const Locator& loc);
/**
* Add a subject network
*
* @param id Network ID
* @param controller Network controller's full fingerprint
* @return Pointer to C struct
*/
ZT_Certificate_Network *addSubjectNetwork(uint64_t id, const ZT_Fingerprint &controller);
/**
* Add a subject network
*
* @param id Network ID
* @param controller Network controller's full fingerprint
* @return Pointer to C struct
*/
ZT_Certificate_Network* addSubjectNetwork(uint64_t id, const ZT_Fingerprint& controller);
/**
* Add an update URL to the updateUrls list
*
* @param url Update URL
*/
void addSubjectUpdateUrl(const char *url);
/**
* Add an update URL to the updateUrls list
*
* @param url Update URL
*/
void addSubjectUpdateUrl(const char* url);
/**
* Sign subject with unique ID private key and set.
*
* This is done when you createCSR but can also be done explicitly here. This
* is mostly for testing purposes.
*
* @param uniqueIdPrivate Unique ID private key (includes public)
* @param uniqueIdPrivateSize Size of private key
* @return True on success
*/
ZT_INLINE bool setSubjectUniqueId(const void *uniqueIdPrivate, unsigned int uniqueIdPrivateSize)
{ return m_setSubjectUniqueId(this->subject, uniqueIdPrivate, uniqueIdPrivateSize); }
/**
* Sign subject with unique ID private key and set.
*
* This is done when you createCSR but can also be done explicitly here. This
* is mostly for testing purposes.
*
* @param uniqueIdPrivate Unique ID private key (includes public)
* @param uniqueIdPrivateSize Size of private key
* @return True on success
*/
ZT_INLINE bool setSubjectUniqueId(const void* uniqueIdPrivate, unsigned int uniqueIdPrivateSize)
{
return m_setSubjectUniqueId(this->subject, uniqueIdPrivate, uniqueIdPrivateSize);
}
/**
* Marshal this certificate in binary form
*
* The internal encoding used here is Dictionary to permit easy
* extensibility.
*
* @param omitSignature If true omit the signature field (for signing and verification, default is false)
* @return Marshaled certificate
*/
Vector< uint8_t > encode(bool omitSignature = false) const;
/**
* Marshal this certificate in binary form
*
* The internal encoding used here is Dictionary to permit easy
* extensibility.
*
* @param omitSignature If true omit the signature field (for signing and verification, default is false)
* @return Marshaled certificate
*/
Vector<uint8_t> encode(bool omitSignature = false) const;
/**
* Decode this certificate from marshaled bytes.
*
* @param data Marshalled certificate
* @param len Length of marshalled certificate
* @return True if input is valid and was unmarshalled (signature is NOT checked)
*/
bool decode(const void *data, unsigned int len);
/**
* Decode this certificate from marshaled bytes.
*
* @param data Marshalled certificate
* @param len Length of marshalled certificate
* @return True if input is valid and was unmarshalled (signature is NOT checked)
*/
bool decode(const void* data, unsigned int len);
/**
* Sign this certificate.
*
* This sets serialNo, issuer, issuerPublicKey, and signature.
*
* @return True on success
*/
bool sign(const uint8_t issuer[ZT_CERTIFICATE_HASH_SIZE], const void *issuerPrivateKey, unsigned int issuerPrivateKeySize);
/**
* Sign this certificate.
*
* This sets serialNo, issuer, issuerPublicKey, and signature.
*
* @return True on success
*/
bool sign(
const uint8_t issuer[ZT_CERTIFICATE_HASH_SIZE],
const void* issuerPrivateKey,
unsigned int issuerPrivateKeySize);
/**
* Verify self-contained signatures and validity of certificate structure
*
* This cannot check the chain of trust back to a CA, only the internal validity
* of this certificate.
*
* @param clock If non-negative, also do verifyTimeWindow()
* @param checkSignatures If true, perform full signature check (which is more expensive than other checks)
* @return OK (0) or error code indicating why certificate failed verification.
*/
ZT_CertificateError verify(int64_t clock, bool checkSignatures) const;
/**
* Verify self-contained signatures and validity of certificate structure
*
* This cannot check the chain of trust back to a CA, only the internal validity
* of this certificate.
*
* @param clock If non-negative, also do verifyTimeWindow()
* @param checkSignatures If true, perform full signature check (which is more expensive than other checks)
* @return OK (0) or error code indicating why certificate failed verification.
*/
ZT_CertificateError verify(int64_t clock, bool checkSignatures) const;
/**
* Check this certificate's expiration status
*
* @param clock Current real world time in milliseconds since epoch
* @return True if certificate is not expired or outside window
*/
ZT_INLINE bool verifyTimeWindow(int64_t clock) const noexcept
{ return ((clock >= this->validity[0]) && (clock <= this->validity[1]) && (this->validity[0] <= this->validity[1])); }
/**
* Check this certificate's expiration status
*
* @param clock Current real world time in milliseconds since epoch
* @return True if certificate is not expired or outside window
*/
ZT_INLINE bool verifyTimeWindow(int64_t clock) const noexcept
{
return (
(clock >= this->validity[0]) && (clock <= this->validity[1]) && (this->validity[0] <= this->validity[1]));
}
/**
* Create a new certificate public/private key pair
*
* @param type Key pair type to create
* @param publicKey Buffer to fill with public key
* @param publicKeySize Result parameter: set to size of public key
* @param privateKey Buffer to fill with private key
* @param privateKeySize Result parameter: set to size of private key
* @return True on success
*/
static bool newKeyPair(const ZT_CertificatePublicKeyAlgorithm type, uint8_t publicKey[ZT_CERTIFICATE_MAX_PUBLIC_KEY_SIZE], int *const publicKeySize, uint8_t privateKey[ZT_CERTIFICATE_MAX_PRIVATE_KEY_SIZE], int *const privateKeySize);
/**
* Create a new certificate public/private key pair
*
* @param type Key pair type to create
* @param publicKey Buffer to fill with public key
* @param publicKeySize Result parameter: set to size of public key
* @param privateKey Buffer to fill with private key
* @param privateKeySize Result parameter: set to size of private key
* @return True on success
*/
static bool newKeyPair(
const ZT_CertificatePublicKeyAlgorithm type,
uint8_t publicKey[ZT_CERTIFICATE_MAX_PUBLIC_KEY_SIZE],
int* const publicKeySize,
uint8_t privateKey[ZT_CERTIFICATE_MAX_PRIVATE_KEY_SIZE],
int* const privateKeySize);
/**
* Create a CSR that encodes the subject of this certificate
*
* @param s Subject to encode
* @param certificatePublicKey Public key for certificate
* @param certificatePublicKeySize Size of public key
* @param uniqueIdPrivate Unique ID private key for proof signature or NULL if none
* @param uniqueIdPrivateSize Size of unique ID private key
* @return Encoded subject (without any unique ID fields) or empty vector on error
*/
static Vector< uint8_t > createCSR(const ZT_Certificate_Subject &s, const void *certificatePublicKey, unsigned int certificatePublicKeySize, const void *uniqueIdPrivate, unsigned int uniqueIdPrivateSize);
/**
* Create a CSR that encodes the subject of this certificate
*
* @param s Subject to encode
* @param certificatePublicKey Public key for certificate
* @param certificatePublicKeySize Size of public key
* @param uniqueIdPrivate Unique ID private key for proof signature or NULL if none
* @param uniqueIdPrivateSize Size of unique ID private key
* @return Encoded subject (without any unique ID fields) or empty vector on error
*/
static Vector<uint8_t> createCSR(
const ZT_Certificate_Subject& s,
const void* certificatePublicKey,
unsigned int certificatePublicKeySize,
const void* uniqueIdPrivate,
unsigned int uniqueIdPrivateSize);
ZT_INLINE unsigned long hashCode() const noexcept
{ return (unsigned long)Utils::loadMachineEndian< uint32_t >(this->serialNo); }
ZT_INLINE unsigned long hashCode() const noexcept
{
return (unsigned long)Utils::loadMachineEndian<uint32_t>(this->serialNo);
}
ZT_INLINE bool operator==(const ZT_Certificate &c) const noexcept
{ return memcmp(this->serialNo, c.serialNo, ZT_SHA384_DIGEST_SIZE) == 0; }
ZT_INLINE bool operator==(const ZT_Certificate& c) const noexcept
{
return memcmp(this->serialNo, c.serialNo, ZT_SHA384_DIGEST_SIZE) == 0;
}
ZT_INLINE bool operator!=(const ZT_Certificate &c) const noexcept
{ return memcmp(this->serialNo, c.serialNo, ZT_SHA384_DIGEST_SIZE) != 0; }
ZT_INLINE bool operator!=(const ZT_Certificate& c) const noexcept
{
return memcmp(this->serialNo, c.serialNo, ZT_SHA384_DIGEST_SIZE) != 0;
}
ZT_INLINE bool operator<(const ZT_Certificate &c) const noexcept
{ return memcmp(this->serialNo, c.serialNo, ZT_SHA384_DIGEST_SIZE) < 0; }
ZT_INLINE bool operator<(const ZT_Certificate& c) const noexcept
{
return memcmp(this->serialNo, c.serialNo, ZT_SHA384_DIGEST_SIZE) < 0;
}
ZT_INLINE bool operator<=(const ZT_Certificate &c) const noexcept
{ return memcmp(this->serialNo, c.serialNo, ZT_SHA384_DIGEST_SIZE) <= 0; }
ZT_INLINE bool operator<=(const ZT_Certificate& c) const noexcept
{
return memcmp(this->serialNo, c.serialNo, ZT_SHA384_DIGEST_SIZE) <= 0;
}
ZT_INLINE bool operator>(const ZT_Certificate &c) const noexcept
{ return memcmp(this->serialNo, c.serialNo, ZT_SHA384_DIGEST_SIZE) > 0; }
ZT_INLINE bool operator>(const ZT_Certificate& c) const noexcept
{
return memcmp(this->serialNo, c.serialNo, ZT_SHA384_DIGEST_SIZE) > 0;
}
ZT_INLINE bool operator>=(const ZT_Certificate &c) const noexcept
{ return memcmp(this->serialNo, c.serialNo, ZT_SHA384_DIGEST_SIZE) >= 0; }
ZT_INLINE bool operator>=(const ZT_Certificate& c) const noexcept
{
return memcmp(this->serialNo, c.serialNo, ZT_SHA384_DIGEST_SIZE) >= 0;
}
private:
void m_clear();
static bool m_setSubjectUniqueId(ZT_Certificate_Subject &s, const void *uniqueIdPrivate, unsigned int uniqueIdPrivateSize);
static void m_encodeSubject(const ZT_Certificate_Subject &s, Dictionary &d, bool omitUniqueIdProofSignature);
private:
void m_clear();
static bool
m_setSubjectUniqueId(ZT_Certificate_Subject& s, const void* uniqueIdPrivate, unsigned int uniqueIdPrivateSize);
static void m_encodeSubject(const ZT_Certificate_Subject& s, Dictionary& d, bool omitUniqueIdProofSignature);
// These hold any identity or locator objects that are owned by and should
// be deleted with this certificate. Lists are used so the pointers never
// change.
ForwardList< Identity > m_identities;
ForwardList< Locator > m_locators;
ForwardList< String > m_strings;
// These hold any identity or locator objects that are owned by and should
// be deleted with this certificate. Lists are used so the pointers never
// change.
ForwardList<Identity> m_identities;
ForwardList<Locator> m_locators;
ForwardList<String> m_strings;
// These are stored in a vector because the memory needs to be contiguous.
Vector< ZT_Certificate_Identity > m_subjectIdentities;
Vector< ZT_Certificate_Network > m_subjectNetworks;
Vector< const char * > m_updateUrls;
Vector< uint8_t > m_extendedAttributes;
// These are stored in a vector because the memory needs to be contiguous.
Vector<ZT_Certificate_Identity> m_subjectIdentities;
Vector<ZT_Certificate_Network> m_subjectNetworks;
Vector<const char*> m_updateUrls;
Vector<uint8_t> m_extendedAttributes;
};
} // namespace ZeroTier
} // namespace ZeroTier
#endif
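A minimal usage sketch for the validity-window check and the Dictionary-based encode/decode pair declared above; it is not from the commit, and neither call validates signatures (verify() does that).

// Hedged sketch: gate on the validity window, then round-trip the encoding.
#include "Certificate.hpp"

using namespace ZeroTier;

static bool storeIfCurrent(const Certificate& cert, const int64_t nowMs, Vector<uint8_t>& out)
{
    // verifyTimeWindow() only checks validity[0] <= nowMs <= validity[1];
    // signature checking is a separate, more expensive verify() call.
    if (!cert.verifyTimeWindow(nowMs))
        return false;

    out = cert.encode();   // full encoding, signature field included
    if (out.empty())
        return false;

    Certificate check;
    // decode() parses the Dictionary form; it does not check signatures either.
    return check.decode(out.data(), (unsigned int)out.size()) && (check == cert);
}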

View file

@@ -20,8 +20,8 @@
#define ZT_CORE 1
#include "OS.hpp"
#include "zerotier.h"
#include "version.h"
#include "zerotier.h"
/**
* Length of a ZeroTier address in bytes
@@ -190,7 +190,8 @@
#define ZT_PEER_WHOIS_RATE_LIMIT 100
/**
* General rate limit for other kinds of rate-limited packets (HELLO, credential request, etc.) both inbound and outbound
* General rate limit for other kinds of rate-limited packets (HELLO, credential request, etc.) both inbound and
* outbound
*/
#define ZT_PEER_GENERAL_RATE_LIMIT 500
@@ -209,7 +210,7 @@
/* Ethernet frame types that might be relevant to us */
#define ZT_ETHERTYPE_IPV4 0x0800
#define ZT_ETHERTYPE_ARP 0x0806
#define ZT_ETHERTYPE_ARP 0x0806
#define ZT_ETHERTYPE_IPV6 0x86dd
#endif

View file

@@ -19,111 +19,104 @@
#include "Constants.hpp"
#include "Utils.hpp"
#include <map>
#include <vector>
#include <algorithm>
#include <list>
#include <map>
#include <set>
#include <string>
#include <algorithm>
#include <vector>
#ifdef __CPP11__
#include <atomic>
#include <unordered_map>
#include <forward_list>
#include <unordered_map>
#endif
namespace ZeroTier {
template< typename V >
class Vector : public std::vector< V >
{
public:
ZT_INLINE Vector() :
std::vector< V >()
{}
template <typename V> class Vector : public std::vector<V> {
public:
ZT_INLINE Vector() : std::vector<V>()
{
}
template< typename I >
ZT_INLINE Vector(I begin, I end) :
std::vector< V >(begin, end)
{}
template <typename I> ZT_INLINE Vector(I begin, I end) : std::vector<V>(begin, end)
{
}
};
template< typename V >
class List : public std::list< V >
{
template <typename V> class List : public std::list<V> {
};
#ifdef __CPP11__
struct intl_MapHasher
{
template< typename O >
std::size_t operator()(const O &obj) const noexcept
{ return (std::size_t)obj.hashCode(); }
struct intl_MapHasher {
template <typename O> std::size_t operator()(const O& obj) const noexcept
{
return (std::size_t)obj.hashCode();
}
std::size_t operator()(const Vector< uint8_t > &bytes) const noexcept
{ return (std::size_t)Utils::fnv1a32(bytes.data(), (unsigned int)bytes.size()); }
std::size_t operator()(const Vector<uint8_t>& bytes) const noexcept
{
return (std::size_t)Utils::fnv1a32(bytes.data(), (unsigned int)bytes.size());
}
std::size_t operator()(const uint64_t i) const noexcept
{ return (std::size_t)Utils::hash64(i ^ Utils::s_mapNonce); }
std::size_t operator()(const uint64_t i) const noexcept
{
return (std::size_t)Utils::hash64(i ^ Utils::s_mapNonce);
}
std::size_t operator()(const int64_t i) const noexcept
{ return (std::size_t)Utils::hash64((uint64_t)i ^ Utils::s_mapNonce); }
std::size_t operator()(const int64_t i) const noexcept
{
return (std::size_t)Utils::hash64((uint64_t)i ^ Utils::s_mapNonce);
}
std::size_t operator()(const uint32_t i) const noexcept
{ return (std::size_t)Utils::hash32(i ^ (uint32_t)Utils::s_mapNonce); }
std::size_t operator()(const uint32_t i) const noexcept
{
return (std::size_t)Utils::hash32(i ^ (uint32_t)Utils::s_mapNonce);
}
std::size_t operator()(const int32_t i) const noexcept
{ return (std::size_t)Utils::hash32((uint32_t)i ^ (uint32_t)Utils::s_mapNonce); }
std::size_t operator()(const int32_t i) const noexcept
{
return (std::size_t)Utils::hash32((uint32_t)i ^ (uint32_t)Utils::s_mapNonce);
}
};
template< typename K, typename V >
class Map : public std::unordered_map< K, V, intl_MapHasher >
{
template <typename K, typename V> class Map : public std::unordered_map<K, V, intl_MapHasher> {
};
template< typename K, typename V >
class MultiMap : public std::unordered_multimap< K, V, intl_MapHasher, std::equal_to< K > >
{
template <typename K, typename V>
class MultiMap : public std::unordered_multimap<K, V, intl_MapHasher, std::equal_to<K> > {
};
#else
template<typename K, typename V>
class Map : public std::map< K, V >
{};
template <typename K, typename V> class Map : public std::map<K, V> {
};
template<typename K, typename V>
class MultiMap : public std::multimap< K, V >
{};
template <typename K, typename V> class MultiMap : public std::multimap<K, V> {
};
#endif
template< typename K, typename V >
class SortedMap : public std::map< K, V >
{
template <typename K, typename V> class SortedMap : public std::map<K, V> {
};
#ifdef __CPP11__
template< typename V >
class ForwardList : public std::forward_list< V >
{
template <typename V> class ForwardList : public std::forward_list<V> {
};
#else
template< typename V >
class ForwardList : public std::list< V >
{};
template <typename V> class ForwardList : public std::list<V> {
};
#endif
template< typename V >
class Set : public std::set< V, std::less< V > >
{
template <typename V> class Set : public std::set<V, std::less<V> > {
};
typedef std::string String;
@@ -131,48 +124,72 @@ typedef std::string String;
/**
* A 384-bit hash
*/
struct H384
{
uint64_t data[6];
struct H384 {
uint64_t data[6];
ZT_INLINE H384() noexcept
{ Utils::zero< sizeof(data) >(data); }
ZT_INLINE H384() noexcept
{
Utils::zero<sizeof(data)>(data);
}
ZT_INLINE H384(const H384 &b) noexcept
{ Utils::copy< 48 >(data, b.data); }
ZT_INLINE H384(const H384& b) noexcept
{
Utils::copy<48>(data, b.data);
}
explicit ZT_INLINE H384(const void *const d) noexcept
{ Utils::copy< 48 >(data, d); }
explicit ZT_INLINE H384(const void* const d) noexcept
{
Utils::copy<48>(data, d);
}
ZT_INLINE H384 &operator=(const H384 &b) noexcept
{
Utils::copy< 48 >(data, b.data);
return *this;
}
ZT_INLINE H384& operator=(const H384& b) noexcept
{
Utils::copy<48>(data, b.data);
return *this;
}
ZT_INLINE unsigned long hashCode() const noexcept
{ return (unsigned long)data[0]; }
ZT_INLINE unsigned long hashCode() const noexcept
{
return (unsigned long)data[0];
}
ZT_INLINE operator bool() const noexcept
{ return ((data[0] != 0) && (data[1] != 0) && (data[2] != 0) && (data[3] != 0) && (data[4] != 0) && (data[5] != 0)); }
ZT_INLINE operator bool() const noexcept
{
return (
(data[0] != 0) && (data[1] != 0) && (data[2] != 0) && (data[3] != 0) && (data[4] != 0) && (data[5] != 0));
}
ZT_INLINE bool operator==(const H384 &b) const noexcept
{ return ((data[0] == b.data[0]) && (data[1] == b.data[1]) && (data[2] == b.data[2]) && (data[3] == b.data[3]) && (data[4] == b.data[4]) && (data[5] == b.data[5])); }
ZT_INLINE bool operator==(const H384& b) const noexcept
{
return (
(data[0] == b.data[0]) && (data[1] == b.data[1]) && (data[2] == b.data[2]) && (data[3] == b.data[3])
&& (data[4] == b.data[4]) && (data[5] == b.data[5]));
}
ZT_INLINE bool operator!=(const H384 &b) const noexcept
{ return !(*this == b); }
ZT_INLINE bool operator!=(const H384& b) const noexcept
{
return ! (*this == b);
}
ZT_INLINE bool operator<(const H384 &b) const noexcept
{ return std::lexicographical_compare(data, data + 6, b.data, b.data + 6); }
ZT_INLINE bool operator<(const H384& b) const noexcept
{
return std::lexicographical_compare(data, data + 6, b.data, b.data + 6);
}
ZT_INLINE bool operator<=(const H384 &b) const noexcept
{ return !(b < *this); }
ZT_INLINE bool operator<=(const H384& b) const noexcept
{
return ! (b < *this);
}
ZT_INLINE bool operator>(const H384 &b) const noexcept
{ return (b < *this); }
ZT_INLINE bool operator>(const H384& b) const noexcept
{
return (b < *this);
}
ZT_INLINE bool operator>=(const H384 &b) const noexcept
{ return !(*this < b); }
ZT_INLINE bool operator>=(const H384& b) const noexcept
{
return ! (*this < b);
}
};
static_assert(sizeof(H384) == 48, "H384 contains unnecessary padding");
@@ -182,59 +199,79 @@ static_assert(sizeof(H384) == 48, "H384 contains unnecessary padding");
*
* @tparam S Size in bytes
*/
template< unsigned long S >
struct Blob
{
uint8_t data[S];
template <unsigned long S> struct Blob {
uint8_t data[S];
ZT_INLINE Blob() noexcept
{ Utils::zero< S >(data); }
ZT_INLINE Blob() noexcept
{
Utils::zero<S>(data);
}
ZT_INLINE Blob(const Blob &b) noexcept
{ Utils::copy< S >(data, b.data); }
ZT_INLINE Blob(const Blob& b) noexcept
{
Utils::copy<S>(data, b.data);
}
explicit ZT_INLINE Blob(const void *const d) noexcept
{ Utils::copy< S >(data, d); }
explicit ZT_INLINE Blob(const void* const d) noexcept
{
Utils::copy<S>(data, d);
}
explicit ZT_INLINE Blob(const void *const d, const unsigned int l) noexcept
{
Utils::copy(data, d, (l > (unsigned int)S) ? (unsigned int)S : l);
if (l < S) {
Utils::zero(data + l, S - l);
}
}
explicit ZT_INLINE Blob(const void* const d, const unsigned int l) noexcept
{
Utils::copy(data, d, (l > (unsigned int)S) ? (unsigned int)S : l);
if (l < S) {
Utils::zero(data + l, S - l);
}
}
ZT_INLINE Blob &operator=(const Blob &b) noexcept
{
Utils::copy< S >(data, b.data);
return *this;
}
ZT_INLINE Blob& operator=(const Blob& b) noexcept
{
Utils::copy<S>(data, b.data);
return *this;
}
ZT_INLINE unsigned long hashCode() const noexcept
{ return Utils::fnv1a32(data, (unsigned int)S); }
ZT_INLINE unsigned long hashCode() const noexcept
{
return Utils::fnv1a32(data, (unsigned int)S);
}
ZT_INLINE operator bool() const noexcept
{ return Utils::allZero(data, (unsigned int)S); }
ZT_INLINE operator bool() const noexcept
{
return Utils::allZero(data, (unsigned int)S);
}
ZT_INLINE bool operator==(const Blob &b) const noexcept
{ return (memcmp(data, b.data, S) == 0); }
ZT_INLINE bool operator==(const Blob& b) const noexcept
{
return (memcmp(data, b.data, S) == 0);
}
ZT_INLINE bool operator!=(const Blob &b) const noexcept
{ return (memcmp(data, b.data, S) != 0); }
ZT_INLINE bool operator!=(const Blob& b) const noexcept
{
return (memcmp(data, b.data, S) != 0);
}
ZT_INLINE bool operator<(const Blob &b) const noexcept
{ return (memcmp(data, b.data, S) < 0); }
ZT_INLINE bool operator<(const Blob& b) const noexcept
{
return (memcmp(data, b.data, S) < 0);
}
ZT_INLINE bool operator<=(const Blob &b) const noexcept
{ return (memcmp(data, b.data, S) <= 0); }
ZT_INLINE bool operator<=(const Blob& b) const noexcept
{
return (memcmp(data, b.data, S) <= 0);
}
ZT_INLINE bool operator>(const Blob &b) const noexcept
{ return (memcmp(data, b.data, S) > 0); }
ZT_INLINE bool operator>(const Blob& b) const noexcept
{
return (memcmp(data, b.data, S) > 0);
}
ZT_INLINE bool operator>=(const Blob &b) const noexcept
{ return (memcmp(data, b.data, S) >= 0); }
ZT_INLINE bool operator>=(const Blob& b) const noexcept
{
return (memcmp(data, b.data, S) >= 0);
}
};
} // ZeroTier
} // namespace ZeroTier
#endif
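The hasher above forwards to a hashCode() method on the key (or hashes plain integers with the per-process nonce), so Map and the fixed-size value types below compose without any std::hash specializations. A brief usage sketch, not from the commit; the literal bytes are illustrative only.

// Hedged sketch: a digest-keyed Map plus the zero-padding Blob constructor.
#include "Containers.hpp"

using namespace ZeroTier;

static void containerUsage()
{
    Map<H384, String> bySerial;                 // unordered_map with intl_MapHasher under C++11

    uint8_t digest[48] = { 0x01, 0x02 };        // illustrative 384-bit value, remaining bytes zero
    const H384 key(digest);                     // copies exactly 48 bytes

    bySerial[key] = "certificate #1";           // hashing goes through H384::hashCode()

    // hashCode() is just the first 64-bit word, which is adequate for
    // uniformly distributed inputs such as SHA-384 serial numbers.
    const bool present = (bySerial.find(key) != bySerial.end());
    (void)present;

    // Blob<S> truncates or zero-pads when built from a sized buffer.
    const uint8_t three[3] = { 1, 2, 3 };
    const Blob<16> b(three, 3);                 // bytes 3..15 are zeroed
    (void)b;
}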

View file

@@ -14,12 +14,12 @@
#ifndef ZT_RUNTIMEENVIRONMENT_HPP
#define ZT_RUNTIMEENVIRONMENT_HPP
#include "Constants.hpp"
#include "Utils.hpp"
#include "Identity.hpp"
#include "AES.hpp"
#include "TinyMap.hpp"
#include "Constants.hpp"
#include "Identity.hpp"
#include "SharedPtr.hpp"
#include "TinyMap.hpp"
#include "Utils.hpp"
namespace ZeroTier {
@@ -38,68 +38,69 @@ class Network;
/**
* Node instance context
*/
class Context
{
public:
ZT_INLINE Context(Node *const n) noexcept:
instanceId(Utils::getSecureRandomU64()),
node(n),
uPtr(nullptr),
localNetworkController(nullptr),
store(nullptr),
networks(nullptr),
t(nullptr),
expect(nullptr),
vl2(nullptr),
vl1(nullptr),
topology(nullptr),
sa(nullptr),
ts(nullptr)
{
publicIdentityStr[0] = 0;
secretIdentityStr[0] = 0;
}
class Context {
public:
ZT_INLINE Context(Node* const n) noexcept
: instanceId(Utils::getSecureRandomU64())
, node(n)
, uPtr(nullptr)
, localNetworkController(nullptr)
, store(nullptr)
, networks(nullptr)
, t(nullptr)
, expect(nullptr)
, vl2(nullptr)
, vl1(nullptr)
, topology(nullptr)
, sa(nullptr)
, ts(nullptr)
{
publicIdentityStr[0] = 0;
secretIdentityStr[0] = 0;
}
ZT_INLINE ~Context() noexcept
{ Utils::burn(secretIdentityStr, sizeof(secretIdentityStr)); }
ZT_INLINE ~Context() noexcept
{
Utils::burn(secretIdentityStr, sizeof(secretIdentityStr));
}
// Unique ID generated on startup
const uint64_t instanceId;
// Unique ID generated on startup
const uint64_t instanceId;
// Node instance that owns this RuntimeEnvironment
Node *const restrict node;
// Node instance that owns this RuntimeEnvironment
Node* const restrict node;
// Callbacks specified by caller who created node
ZT_Node_Callbacks cb;
// Callbacks specified by caller who created node
ZT_Node_Callbacks cb;
// User pointer specified by external code via API
void *restrict uPtr;
// User pointer specified by external code via API
void* restrict uPtr;
// This is set externally to an instance of this base class
NetworkController *restrict localNetworkController;
// This is set externally to an instance of this base class
NetworkController* restrict localNetworkController;
Store *restrict store;
TinyMap< SharedPtr< Network > > *restrict networks;
Trace *restrict t;
Expect *restrict expect;
VL2 *restrict vl2;
VL1 *restrict vl1;
Topology *restrict topology;
SelfAwareness *restrict sa;
TrustStore *restrict ts;
Store* restrict store;
TinyMap<SharedPtr<Network> >* restrict networks;
Trace* restrict t;
Expect* restrict expect;
VL2* restrict vl2;
VL1* restrict vl1;
Topology* restrict topology;
SelfAwareness* restrict sa;
TrustStore* restrict ts;
// This node's identity and string representations thereof
Identity identity;
char publicIdentityStr[ZT_IDENTITY_STRING_BUFFER_LENGTH];
char secretIdentityStr[ZT_IDENTITY_STRING_BUFFER_LENGTH];
// This node's identity and string representations thereof
Identity identity;
char publicIdentityStr[ZT_IDENTITY_STRING_BUFFER_LENGTH];
char secretIdentityStr[ZT_IDENTITY_STRING_BUFFER_LENGTH];
// Symmetric key for encrypting secrets at rest on this system.
AES localSecretCipher;
// Symmetric key for encrypting secrets at rest on this system.
AES localSecretCipher;
// Privileged ports from 1 to 1023 in a random order (for IPv4 NAT traversal)
uint16_t randomPrivilegedPortOrder[1023];
// Privileged ports from 1 to 1023 in a random order (for IPv4 NAT traversal)
uint16_t randomPrivilegedPortOrder[1023];
};
} // namespace ZeroTier
} // namespace ZeroTier
#endif

View file

@@ -11,15 +11,16 @@
*/
/****/
#include "Credential.hpp"
#include "CapabilityCredential.hpp"
#include "Constants.hpp"
#include "Context.hpp"
#include "Credential.hpp"
#include "CapabilityCredential.hpp"
#include "TagCredential.hpp"
#include "MembershipCredential.hpp"
#include "Network.hpp"
#include "OwnershipCredential.hpp"
#include "RevocationCredential.hpp"
#include "Network.hpp"
#include "TagCredential.hpp"
#include "Topology.hpp"
// These are compile-time asserts to make sure temporary marshal buffers here and
@@ -42,57 +43,77 @@
namespace ZeroTier {
template< typename CRED >
static ZT_INLINE Credential::VerifyResult p_credVerify(const Context &ctx, const CallContext &cc, CRED credential)
template <typename CRED>
static ZT_INLINE Credential::VerifyResult p_credVerify(const Context& ctx, const CallContext& cc, CRED credential)
{
uint8_t tmp[ZT_BUF_MEM_SIZE + 16];
uint8_t tmp[ZT_BUF_MEM_SIZE + 16];
const Address signedBy(credential.signer());
const uint64_t networkId = credential.networkId();
if ((!signedBy) || (signedBy != Network::controllerFor(networkId)))
return Credential::VERIFY_BAD_SIGNATURE;
const Address signedBy(credential.signer());
const uint64_t networkId = credential.networkId();
if ((! signedBy) || (signedBy != Network::controllerFor(networkId)))
return Credential::VERIFY_BAD_SIGNATURE;
const SharedPtr< Peer > peer(ctx.topology->peer(cc, signedBy));
if (!peer)
return Credential::VERIFY_NEED_IDENTITY;
const SharedPtr<Peer> peer(ctx.topology->peer(cc, signedBy));
if (! peer)
return Credential::VERIFY_NEED_IDENTITY;
try {
int l = credential.marshal(tmp, true);
if (l <= 0)
return Credential::VERIFY_BAD_SIGNATURE;
return (peer->identity().verify(tmp, (unsigned int)l, credential.signature(), credential.signatureLength()) ? Credential::VERIFY_OK : Credential::VERIFY_BAD_SIGNATURE);
} catch (...) {}
try {
int l = credential.marshal(tmp, true);
if (l <= 0)
return Credential::VERIFY_BAD_SIGNATURE;
return (
peer->identity().verify(tmp, (unsigned int)l, credential.signature(), credential.signatureLength())
? Credential::VERIFY_OK
: Credential::VERIFY_BAD_SIGNATURE);
}
catch (...) {
}
return Credential::VERIFY_BAD_SIGNATURE;
return Credential::VERIFY_BAD_SIGNATURE;
}
Credential::VerifyResult Credential::s_verify(const Context &ctx, const CallContext &cc, const RevocationCredential &credential)
{ return p_credVerify(ctx, cc, credential); }
Credential::VerifyResult Credential::s_verify(const Context &ctx, const CallContext &cc, const TagCredential &credential)
{ return p_credVerify(ctx, cc, credential); }
Credential::VerifyResult Credential::s_verify(const Context &ctx, const CallContext &cc, const CapabilityCredential &credential)
{ return p_credVerify(ctx, cc, credential); }
Credential::VerifyResult Credential::s_verify(const Context &ctx, const CallContext &cc, const OwnershipCredential &credential)
{ return p_credVerify(ctx, cc, credential); }
Credential::VerifyResult Credential::s_verify(const Context &ctx, const CallContext &cc, const MembershipCredential &credential)
Credential::VerifyResult
Credential::s_verify(const Context& ctx, const CallContext& cc, const RevocationCredential& credential)
{
// Sanity check network ID.
if ((!credential.m_signedBy) || (credential.m_signedBy != Network::controllerFor(credential.m_networkId)))
return Credential::VERIFY_BAD_SIGNATURE;
// If we don't know the peer, get its identity. This shouldn't happen here but should be handled.
const SharedPtr< Peer > peer(ctx.topology->peer(cc, credential.m_signedBy));
if (!peer)
return Credential::VERIFY_NEED_IDENTITY;
// Now verify the controller's signature.
uint64_t buf[ZT_MEMBERSHIP_CREDENTIAL_MARSHAL_SIZE_MAX / 8];
const unsigned int bufSize = credential.m_fillSigningBuf(buf);
return peer->identity().verify(buf, bufSize, credential.m_signature, credential.m_signatureLength) ? Credential::VERIFY_OK : Credential::VERIFY_BAD_SIGNATURE;
return p_credVerify(ctx, cc, credential);
}
} // namespace ZeroTier
Credential::VerifyResult
Credential::s_verify(const Context& ctx, const CallContext& cc, const TagCredential& credential)
{
return p_credVerify(ctx, cc, credential);
}
Credential::VerifyResult
Credential::s_verify(const Context& ctx, const CallContext& cc, const CapabilityCredential& credential)
{
return p_credVerify(ctx, cc, credential);
}
Credential::VerifyResult
Credential::s_verify(const Context& ctx, const CallContext& cc, const OwnershipCredential& credential)
{
return p_credVerify(ctx, cc, credential);
}
Credential::VerifyResult
Credential::s_verify(const Context& ctx, const CallContext& cc, const MembershipCredential& credential)
{
// Sanity check network ID.
if ((! credential.m_signedBy) || (credential.m_signedBy != Network::controllerFor(credential.m_networkId)))
return Credential::VERIFY_BAD_SIGNATURE;
// If we don't know the peer, get its identity. This shouldn't happen here but should be handled.
const SharedPtr<Peer> peer(ctx.topology->peer(cc, credential.m_signedBy));
if (! peer)
return Credential::VERIFY_NEED_IDENTITY;
// Now verify the controller's signature.
uint64_t buf[ZT_MEMBERSHIP_CREDENTIAL_MARSHAL_SIZE_MAX / 8];
const unsigned int bufSize = credential.m_fillSigningBuf(buf);
return peer->identity().verify(buf, bufSize, credential.m_signature, credential.m_signatureLength)
? Credential::VERIFY_OK
: Credential::VERIFY_BAD_SIGNATURE;
}
} // namespace ZeroTier
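For orientation, a hedged sketch of how a caller typically drives the verification flow above; ctx, cc and tag are hypothetical placeholders for a Context, a CallContext and a received TagCredential, so this is illustrative rather than an actual call site from this tree:

// Hedged sketch only; the real call sites live elsewhere in this tree.
Credential::VerifyResult r = Credential::s_verify(ctx, cc, tag);
if (r == Credential::VERIFY_NEED_IDENTITY) {
    // The controller's identity is not yet known locally; the caller is expected to
    // fetch it (e.g. via WHOIS) and retry verification once it arrives.
}
else if (r == Credential::VERIFY_BAD_SIGNATURE) {
    // Wrong signer (not the network's controller) or an invalid signature over the
    // re-marshaled credential bytes.
}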

@ -14,9 +14,9 @@
#ifndef ZT_CREDENTIAL_HPP
#define ZT_CREDENTIAL_HPP
#include "CallContext.hpp"
#include "Constants.hpp"
#include "TriviallyCopyable.hpp"
#include "CallContext.hpp"
namespace ZeroTier {
@ -35,27 +35,21 @@ class Context;
* All credential verification methods are implemented in Credential.cpp as they share a lot
* of common code and logic and grouping them makes auditing easier.
*/
class Credential : public TriviallyCopyable
{
public:
/**
* Result of verify() operations
*/
enum VerifyResult
{
VERIFY_OK = 0,
VERIFY_BAD_SIGNATURE = 1,
VERIFY_NEED_IDENTITY = 2
};
class Credential : public TriviallyCopyable {
public:
/**
* Result of verify() operations
*/
enum VerifyResult { VERIFY_OK = 0, VERIFY_BAD_SIGNATURE = 1, VERIFY_NEED_IDENTITY = 2 };
protected:
static VerifyResult s_verify(const Context &ctx, const CallContext &cc, const MembershipCredential &credential);
static VerifyResult s_verify(const Context &ctx, const CallContext &cc, const RevocationCredential &credential);
static VerifyResult s_verify(const Context &ctx, const CallContext &cc, const TagCredential &credential);
static VerifyResult s_verify(const Context &ctx, const CallContext &cc, const OwnershipCredential &credential);
static VerifyResult s_verify(const Context &ctx, const CallContext &cc, const CapabilityCredential &credential);
protected:
static VerifyResult s_verify(const Context& ctx, const CallContext& cc, const MembershipCredential& credential);
static VerifyResult s_verify(const Context& ctx, const CallContext& cc, const RevocationCredential& credential);
static VerifyResult s_verify(const Context& ctx, const CallContext& cc, const TagCredential& credential);
static VerifyResult s_verify(const Context& ctx, const CallContext& cc, const OwnershipCredential& credential);
static VerifyResult s_verify(const Context& ctx, const CallContext& cc, const CapabilityCredential& credential);
};
} // namespace ZeroTier
} // namespace ZeroTier
#endif

@ -16,9 +16,9 @@
namespace ZeroTier {
namespace Defaults {
const uint8_t *CERTIFICATE[DEFAULT_CERTIFICATE_COUNT] = {};
const uint8_t* CERTIFICATE[DEFAULT_CERTIFICATE_COUNT] = {};
unsigned int CERTIFICATE_SIZE[DEFAULT_CERTIFICATE_COUNT] = {};
} // namespace Defaults
} // namespace ZeroTier
} // namespace Defaults
} // namespace ZeroTier

@ -21,10 +21,10 @@ namespace Defaults {
#define DEFAULT_CERTIFICATE_COUNT 0
extern const uint8_t *CERTIFICATE[DEFAULT_CERTIFICATE_COUNT];
extern const uint8_t* CERTIFICATE[DEFAULT_CERTIFICATE_COUNT];
extern unsigned int CERTIFICATE_SIZE[DEFAULT_CERTIFICATE_COUNT];
} // namespace Defaults
} // namespace ZeroTier
} // namespace Defaults
} // namespace ZeroTier
#endif

@ -14,13 +14,13 @@
#ifndef ZT_DEFRAGMENTER_HPP
#define ZT_DEFRAGMENTER_HPP
#include "Constants.hpp"
#include "Buf.hpp"
#include "SharedPtr.hpp"
#include "Constants.hpp"
#include "Containers.hpp"
#include "FCV.hpp"
#include "Mutex.hpp"
#include "Path.hpp"
#include "FCV.hpp"
#include "Containers.hpp"
#include "SharedPtr.hpp"
namespace ZeroTier {
@ -40,318 +40,324 @@ namespace ZeroTier {
* the ones used throughout the ZeroTier core.
*
* @tparam MF Maximum number of fragments that each message can possess (default: ZT_MAX_PACKET_FRAGMENTS)
* @tparam MFP Maximum number of incoming fragments per path (if paths are specified) (default: ZT_MAX_INCOMING_FRAGMENTS_PER_PATH)
* @tparam MFP Maximum number of incoming fragments per path (if paths are specified) (default:
* ZT_MAX_INCOMING_FRAGMENTS_PER_PATH)
* @tparam GCS Garbage collection target size for the incoming message queue (default: ZT_MAX_PACKET_FRAGMENTS * 2)
* @tparam GCT Garbage collection trigger threshold, usually 2X GCS (default: ZT_MAX_PACKET_FRAGMENTS * 4)
* @tparam P Type for pointer to a path object (default: SharedPtr<Path>)
*/
template<
unsigned int MF = ZT_MAX_PACKET_FRAGMENTS,
unsigned int MFP = ZT_MAX_INCOMING_FRAGMENTS_PER_PATH,
unsigned int GCS = (ZT_MAX_PACKET_FRAGMENTS * 2),
unsigned int GCT = (ZT_MAX_PACKET_FRAGMENTS * 4),
typename P = SharedPtr< Path > >
class Defragmenter
{
public:
/**
* Return values from assemble()
*/
enum ResultCode
{
/**
* No error occurred, fragment accepted
*/
OK,
template <
unsigned int MF = ZT_MAX_PACKET_FRAGMENTS,
unsigned int MFP = ZT_MAX_INCOMING_FRAGMENTS_PER_PATH,
unsigned int GCS = (ZT_MAX_PACKET_FRAGMENTS * 2),
unsigned int GCT = (ZT_MAX_PACKET_FRAGMENTS * 4),
typename P = SharedPtr<Path> >
class Defragmenter {
public:
/**
* Return values from assemble()
*/
enum ResultCode {
/**
* No error occurred, fragment accepted
*/
OK,
/**
* Message fully assembled and placed in message vector
*/
COMPLETE,
/**
* Message fully assembled and placed in message vector
*/
COMPLETE,
/**
* We already have this fragment number or the message is complete
*/
ERR_DUPLICATE_FRAGMENT,
/**
* We already have this fragment number or the message is complete
*/
ERR_DUPLICATE_FRAGMENT,
/**
* The fragment is invalid, e.g. it has a fragment number beyond the expected count.
*/
ERR_INVALID_FRAGMENT,
/**
* The fragment is invalid, e.g. it has a fragment number beyond the expected count.
*/
ERR_INVALID_FRAGMENT,
/**
* Too many fragments are in flight for this path
*
* The message will be marked as if it's done (all fragments received) but will
* be abandoned. Subsequent fragments will generate a DUPLICATE_FRAGMENT error.
*
* This is an anti-denial-of-service feature to limit the number of inbound
* fragments that can be in flight over a given physical network path.
*/
ERR_TOO_MANY_FRAGMENTS_FOR_PATH,
/**
* Too many fragments are in flight for this path
*
* The message will be marked as if it's done (all fragments received) but will
* be abandoned. Subsequent fragments will generate a DUPLICATE_FRAGMENT error.
*
* This is an anti-denial-of-service feature to limit the number of inbound
* fragments that can be in flight over a given physical network path.
*/
ERR_TOO_MANY_FRAGMENTS_FOR_PATH,
/**
* Memory (or some other limit) exhausted
*/
ERR_OUT_OF_MEMORY
};
/**
* Memory (or some other limit) exhausted
*/
ERR_OUT_OF_MEMORY
};
ZT_INLINE Defragmenter()
{}
ZT_INLINE Defragmenter()
{
}
/**
* Process a fragment of a multi-part message
*
* The message ID is arbitrary but must be something that can uniquely
* group fragments for a given final message. The total fragments
* value is expected to be the same for all fragments in a message. Results
* are undefined and probably wrong if this value changes across a message.
* Fragment numbers must be sequential starting with 0 and going up to
* one minus total fragments expected (non-inclusive range).
*
* Fragments can arrive in any order. Duplicates are dropped and ignored.
*
* It's the responsibility of the caller to do whatever validation needs to
* be done before considering a fragment valid and to make sure the fragment
* data index and size parameters are valid.
*
* The fragment supplied to this function is kept and held under the supplied
* message ID until or unless (1) the message is fully assembled, (2) the
* message is orphaned and its entry is taken by a new message, or (3) the
* clear() function is called to forget all incoming messages. The pointer
* at the 'fragment' reference will be zeroed since this pointer is handed
* off, so the SharedPtr<> passed in as 'fragment' will be NULL after this
* function is called.
*
* The 'via' parameter causes this fragment to be registered with a path and
* unregistered when done or abandoned. It's only used the first time it's
* supplied (the first non-NULL) for a given message ID. This is a mitigation
* against memory-exhausting DoS attacks.
*
* @tparam X Template parameter type for Buf<> containing fragment (inferred)
* @param messageId Message ID (a unique ID identifying this message)
* @param message Fixed capacity vector that will be filled with the result if result code is COMPLETE
* @param fragment Buffer containing fragment that will be filed under this message's ID
* @param fragmentDataIndex Index of data in fragment's data.bytes (fragment's data.fields type is ignored)
* @param fragmentDataSize Length of data in fragment's data.bytes (fragment's data.fields type is ignored)
* @param fragmentNo Number of fragment (0..totalFragmentsExpected, non-inclusive)
* @param totalFragmentsExpected Total number of expected fragments in this message or 0 to use cached value
* @param ts Current time
* @param via If non-NULL this is the path on which this message fragment was received
* @return Result code
*/
ZT_INLINE ResultCode assemble(
const uint64_t messageId,
FCV< Buf::Slice, MF > &message,
SharedPtr< Buf > &fragment,
const unsigned int fragmentDataIndex,
const unsigned int fragmentDataSize,
const unsigned int fragmentNo,
const unsigned int totalFragmentsExpected,
const int64_t ts,
const P &via)
{
// Sanity checks for malformed fragments or invalid input parameters.
if ((fragmentNo >= totalFragmentsExpected) || (totalFragmentsExpected > MF) || (totalFragmentsExpected == 0))
return ERR_INVALID_FRAGMENT;
/**
* Process a fragment of a multi-part message
*
* The message ID is arbitrary but must be something that can uniquely
* group fragments for a given final message. The total fragments
* value is expected to be the same for all fragments in a message. Results
* are undefined and probably wrong if this value changes across a message.
* Fragment numbers must be sequential starting with 0 and going up to
* one minus total fragments expected (non-inclusive range).
*
* Fragments can arrive in any order. Duplicates are dropped and ignored.
*
* It's the responsibility of the caller to do whatever validation needs to
* be done before considering a fragment valid and to make sure the fragment
* data index and size parameters are valid.
*
* The fragment supplied to this function is kept and held under the supplied
* message ID until or unless (1) the message is fully assembled, (2) the
* message is orphaned and its entry is taken by a new message, or (3) the
* clear() function is called to forget all incoming messages. The pointer
* at the 'fragment' reference will be zeroed since this pointer is handed
* off, so the SharedPtr<> passed in as 'fragment' will be NULL after this
* function is called.
*
* The 'via' parameter causes this fragment to be registered with a path and
* unregistered when done or abandoned. It's only used the first time it's
* supplied (the first non-NULL) for a given message ID. This is a mitigation
* against memory-exhausting DoS attacks.
*
* @tparam X Template parameter type for Buf<> containing fragment (inferred)
* @param messageId Message ID (a unique ID identifying this message)
* @param message Fixed capacity vector that will be filled with the result if result code is COMPLETE
* @param fragment Buffer containing fragment that will be filed under this message's ID
* @param fragmentDataIndex Index of data in fragment's data.bytes (fragment's data.fields type is ignored)
* @param fragmentDataSize Length of data in fragment's data.bytes (fragment's data.fields type is ignored)
* @param fragmentNo Number of fragment (0..totalFragmentsExpected, non-inclusive)
* @param totalFragmentsExpected Total number of expected fragments in this message or 0 to use cached value
* @param ts Current time
* @param via If non-NULL this is the path on which this message fragment was received
* @return Result code
*/
ZT_INLINE ResultCode assemble(
const uint64_t messageId,
FCV<Buf::Slice, MF>& message,
SharedPtr<Buf>& fragment,
const unsigned int fragmentDataIndex,
const unsigned int fragmentDataSize,
const unsigned int fragmentNo,
const unsigned int totalFragmentsExpected,
const int64_t ts,
const P& via)
{
// Sanity checks for malformed fragments or invalid input parameters.
if ((fragmentNo >= totalFragmentsExpected) || (totalFragmentsExpected > MF) || (totalFragmentsExpected == 0))
return ERR_INVALID_FRAGMENT;
// We hold the read lock on _messages unless we need to add a new entry or do GC.
RWMutex::RMaybeWLock ml(m_messages_l);
// We hold the read lock on _messages unless we need to add a new entry or do GC.
RWMutex::RMaybeWLock ml(m_messages_l);
// Check message hash table size and perform GC if necessary.
if (m_messages.size() >= GCT) {
try {
// Scan messages with read lock still locked first and make a sorted list of
// message entries by last modified time. Then lock for writing and delete
// the oldest entries to bring the size of the messages hash table down to
// under the target size. This tries to minimize the amount of time the write
// lock is held since many threads can hold the read lock but all threads must
// wait if someone holds the write lock.
std::vector< std::pair< int64_t, uint64_t > > messagesByLastUsedTime;
messagesByLastUsedTime.reserve(m_messages.size());
// Check message hash table size and perform GC if necessary.
if (m_messages.size() >= GCT) {
try {
// Scan messages with read lock still locked first and make a sorted list of
// message entries by last modified time. Then lock for writing and delete
// the oldest entries to bring the size of the messages hash table down to
// under the target size. This tries to minimize the amount of time the write
// lock is held since many threads can hold the read lock but all threads must
// wait if someone holds the write lock.
std::vector<std::pair<int64_t, uint64_t> > messagesByLastUsedTime;
messagesByLastUsedTime.reserve(m_messages.size());
for (typename Map< uint64_t, p_E >::const_iterator i(m_messages.begin()); i != m_messages.end(); ++i)
messagesByLastUsedTime.push_back(std::pair< int64_t, uint64_t >(i->second.lastUsed, i->first));
std::sort(messagesByLastUsedTime.begin(), messagesByLastUsedTime.end());
for (typename Map<uint64_t, p_E>::const_iterator i(m_messages.begin()); i != m_messages.end(); ++i)
messagesByLastUsedTime.push_back(std::pair<int64_t, uint64_t>(i->second.lastUsed, i->first));
std::sort(messagesByLastUsedTime.begin(), messagesByLastUsedTime.end());
ml.writing(); // acquire write lock on _messages
for (unsigned long x = 0, y = (messagesByLastUsedTime.size() - GCS); x <= y; ++x)
m_messages.erase(messagesByLastUsedTime[x].second);
} catch (...) {
return ERR_OUT_OF_MEMORY;
}
}
ml.writing(); // acquire write lock on _messages
for (unsigned long x = 0, y = (messagesByLastUsedTime.size() - GCS); x <= y; ++x)
m_messages.erase(messagesByLastUsedTime[x].second);
}
catch (...) {
return ERR_OUT_OF_MEMORY;
}
}
// Get or create message fragment.
Defragmenter< MF, MFP, GCS, GCT, P >::p_E *e;
{
typename Map< uint64_t, Defragmenter< MF, MFP, GCS, GCT, P >::p_E >::iterator ee(m_messages.find(messageId));
if (ee == m_messages.end()) {
ml.writing(); // acquire write lock on _messages if not already
try {
e = &(m_messages[messageId]);
} catch (...) {
return ERR_OUT_OF_MEMORY;
}
e->id = messageId;
} else {
e = &(ee->second);
}
}
// Get or create message fragment.
Defragmenter<MF, MFP, GCS, GCT, P>::p_E* e;
{
typename Map<uint64_t, Defragmenter<MF, MFP, GCS, GCT, P>::p_E>::iterator ee(m_messages.find(messageId));
if (ee == m_messages.end()) {
ml.writing(); // acquire write lock on _messages if not already
try {
e = &(m_messages[messageId]);
}
catch (...) {
return ERR_OUT_OF_MEMORY;
}
e->id = messageId;
}
else {
e = &(ee->second);
}
}
// Switch back to holding only the read lock on _messages if we have locked for write
ml.reading();
// Switch back to holding only the read lock on _messages if we have locked for write
ml.reading();
// Acquire lock on entry itself
Mutex::Lock el(e->lock);
// Acquire lock on entry itself
Mutex::Lock el(e->lock);
// This magic value means this message has already been assembled and is done.
if (e->lastUsed < 0)
return ERR_DUPLICATE_FRAGMENT;
// This magic value means this message has already been assembled and is done.
if (e->lastUsed < 0)
return ERR_DUPLICATE_FRAGMENT;
// Update last-activity timestamp for this entry, delaying GC.
e->lastUsed = ts;
// Update last-activity timestamp for this entry, delaying GC.
e->lastUsed = ts;
// Learn total fragments expected if a value is given. Otherwise the cached
// value gets used. This is to support the implementation of fragmentation
// in the ZT protocol where only fragments carry the total.
if (totalFragmentsExpected > 0)
e->totalFragmentsExpected = totalFragmentsExpected;
// Learn total fragments expected if a value is given. Otherwise the cached
// value gets used. This is to support the implementation of fragmentation
// in the ZT protocol where only fragments carry the total.
if (totalFragmentsExpected > 0)
e->totalFragmentsExpected = totalFragmentsExpected;
// If there is a path associated with this fragment make sure we've registered
// ourselves as in flight, check the limit, and abort if exceeded.
if ((via) && (!e->via)) {
e->via = via;
bool tooManyPerPath = false;
via->m_inboundFragmentedMessages_l.lock();
try {
if (via->m_inboundFragmentedMessages.size() < MFP) {
via->m_inboundFragmentedMessages.insert(messageId);
} else {
tooManyPerPath = true;
}
} catch (...) {
// This would indicate something like bad_alloc thrown by the set. Treat
// it as limit exceeded.
tooManyPerPath = true;
}
via->m_inboundFragmentedMessages_l.unlock();
if (tooManyPerPath)
return ERR_TOO_MANY_FRAGMENTS_FOR_PATH;
}
// If there is a path associated with this fragment make sure we've registered
// ourselves as in flight, check the limit, and abort if exceeded.
if ((via) && (! e->via)) {
e->via = via;
bool tooManyPerPath = false;
via->m_inboundFragmentedMessages_l.lock();
try {
if (via->m_inboundFragmentedMessages.size() < MFP) {
via->m_inboundFragmentedMessages.insert(messageId);
}
else {
tooManyPerPath = true;
}
}
catch (...) {
// This would indicate something like bad_alloc thrown by the set. Treat
// it as limit exceeded.
tooManyPerPath = true;
}
via->m_inboundFragmentedMessages_l.unlock();
if (tooManyPerPath)
return ERR_TOO_MANY_FRAGMENTS_FOR_PATH;
}
// If we already have fragment number X, abort. Note that we do not
// actually compare data here. Two same-numbered fragments with different
// data would just mean the transfer is corrupt and would be detected
// later e.g. by packet MAC check. Other use cases of this code like
// network configs check each fragment so this basically can't happen.
Buf::Slice &s = e->message.at(fragmentNo);
if (s.b)
return ERR_DUPLICATE_FRAGMENT;
// If we already have fragment number X, abort. Note that we do not
// actually compare data here. Two same-numbered fragments with different
// data would just mean the transfer is corrupt and would be detected
// later e.g. by packet MAC check. Other use cases of this code like
// network configs check each fragment so this basically can't happen.
Buf::Slice& s = e->message.at(fragmentNo);
if (s.b)
return ERR_DUPLICATE_FRAGMENT;
// Take ownership of fragment, setting 'fragment' pointer to NULL. The simple
// transfer of the pointer avoids a synchronized increment/decrement of the object's
// reference count.
s.b.move(fragment);
s.s = fragmentDataIndex;
s.e = fragmentDataIndex + fragmentDataSize;
++e->fragmentsReceived;
// Take ownership of fragment, setting 'fragment' pointer to NULL. The simple
// transfer of the pointer avoids a synchronized increment/decrement of the object's
// reference count.
s.b.move(fragment);
s.s = fragmentDataIndex;
s.e = fragmentDataIndex + fragmentDataSize;
++e->fragmentsReceived;
// If we now have all fragments then assemble them.
if ((e->fragmentsReceived >= e->totalFragmentsExpected) && (e->totalFragmentsExpected > 0)) {
// This message is done so de-register it with its path if one is associated.
if (e->via) {
e->via->m_inboundFragmentedMessages_l.lock();
e->via->m_inboundFragmentedMessages.erase(messageId);
e->via->m_inboundFragmentedMessages_l.unlock();
e->via.zero();
}
// If we now have all fragments then assemble them.
if ((e->fragmentsReceived >= e->totalFragmentsExpected) && (e->totalFragmentsExpected > 0)) {
// This message is done so de-register it with its path if one is associated.
if (e->via) {
e->via->m_inboundFragmentedMessages_l.lock();
e->via->m_inboundFragmentedMessages.erase(messageId);
e->via->m_inboundFragmentedMessages_l.unlock();
e->via.zero();
}
// Slices are TriviallyCopyable and so may be raw copied from e->message to
// the result parameter. This is fast.
e->message.unsafeMoveTo(message);
e->lastUsed = -1; // mark as "done" and force GC to collect
// Slices are TriviallyCopyable and so may be raw copied from e->message to
// the result parameter. This is fast.
e->message.unsafeMoveTo(message);
e->lastUsed = -1; // mark as "done" and force GC to collect
return COMPLETE;
}
return COMPLETE;
}
return OK;
}
return OK;
}
/**
* Erase all message entries in the internal queue
*/
ZT_INLINE void clear()
{
RWMutex::Lock ml(m_messages_l);
m_messages.clear();
}
/**
* Erase all message entries in the internal queue
*/
ZT_INLINE void clear()
{
RWMutex::Lock ml(m_messages_l);
m_messages.clear();
}
/**
* @return Number of entries currently in message defragmentation cache
*/
ZT_INLINE unsigned int cacheSize() noexcept
{
RWMutex::RLock ml(m_messages_l);
return m_messages.size();
}
/**
* @return Number of entries currently in message defragmentation cache
*/
ZT_INLINE unsigned int cacheSize() noexcept
{
RWMutex::RLock ml(m_messages_l);
return m_messages.size();
}
private:
// p_E is an entry in the message queue.
struct p_E
{
ZT_INLINE p_E() noexcept:
id(0),
lastUsed(0),
totalFragmentsExpected(0),
fragmentsReceived(0)
{}
private:
// p_E is an entry in the message queue.
struct p_E {
ZT_INLINE p_E() noexcept
: id(0)
, lastUsed(0)
, totalFragmentsExpected(0)
, fragmentsReceived(0)
{
}
ZT_INLINE p_E(const p_E &e) noexcept:
id(e.id),
lastUsed(e.lastUsed),
totalFragmentsExpected(e.totalFragmentsExpected),
fragmentsReceived(e.fragmentsReceived),
via(e.via),
message(e.message),
lock()
{}
ZT_INLINE p_E(const p_E& e) noexcept
: id(e.id)
, lastUsed(e.lastUsed)
, totalFragmentsExpected(e.totalFragmentsExpected)
, fragmentsReceived(e.fragmentsReceived)
, via(e.via)
, message(e.message)
, lock()
{
}
ZT_INLINE ~p_E()
{
if (via) {
via->m_inboundFragmentedMessages_l.lock();
via->m_inboundFragmentedMessages.erase(id);
via->m_inboundFragmentedMessages_l.unlock();
}
}
ZT_INLINE ~p_E()
{
if (via) {
via->m_inboundFragmentedMessages_l.lock();
via->m_inboundFragmentedMessages.erase(id);
via->m_inboundFragmentedMessages_l.unlock();
}
}
ZT_INLINE p_E &operator=(const p_E &e)
{
if (this != &e) {
id = e.id;
lastUsed = e.lastUsed;
totalFragmentsExpected = e.totalFragmentsExpected;
fragmentsReceived = e.fragmentsReceived;
via = e.via;
message = e.message;
}
return *this;
}
ZT_INLINE p_E& operator=(const p_E& e)
{
if (this != &e) {
id = e.id;
lastUsed = e.lastUsed;
totalFragmentsExpected = e.totalFragmentsExpected;
fragmentsReceived = e.fragmentsReceived;
via = e.via;
message = e.message;
}
return *this;
}
uint64_t id;
int64_t lastUsed;
unsigned int totalFragmentsExpected;
unsigned int fragmentsReceived;
P via;
FCV< Buf::Slice, MF > message;
Mutex lock;
};
uint64_t id;
int64_t lastUsed;
unsigned int totalFragmentsExpected;
unsigned int fragmentsReceived;
P via;
FCV<Buf::Slice, MF> message;
Mutex lock;
};
Map< uint64_t, Defragmenter< MF, MFP, GCS, GCT, P >::p_E > m_messages;
RWMutex m_messages_l;
Map<uint64_t, Defragmenter<MF, MFP, GCS, GCT, P>::p_E> m_messages;
RWMutex m_messages_l;
};
} // namespace ZeroTier
} // namespace ZeroTier
#endif
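To make the bookkeeping above concrete, here is a small self-contained sketch of the same idea using only the standard library: track fragments per message ID, drop out-of-range and duplicate fragment numbers, and hand back the reassembled message once every slot is filled. Names and types are illustrative only; this is not ZeroTier's API and it omits the per-path limits and garbage collection the real class adds.

#include <cstdint>
#include <map>
#include <string>
#include <vector>

struct PendingMessage {
    std::vector<std::string> slots;   // one slot per expected fragment
    std::vector<bool> have;           // which slots have been filled
    unsigned int received = 0;
};

// Returns true (and fills 'out') once every fragment of 'messageId' has arrived.
static bool addFragment(
    std::map<uint64_t, PendingMessage>& pending,
    const uint64_t messageId,
    const unsigned int fragmentNo,
    const unsigned int totalExpected,
    const std::string& data,
    std::string& out)
{
    PendingMessage& m = pending[messageId];
    if (m.slots.empty()) {
        m.slots.resize(totalExpected);
        m.have.assign(totalExpected, false);
    }
    if ((fragmentNo >= m.slots.size()) || (m.have[fragmentNo]))
        return false;   // like ERR_INVALID_FRAGMENT / ERR_DUPLICATE_FRAGMENT above: drop it
    m.slots[fragmentNo] = data;
    m.have[fragmentNo] = true;
    if (++m.received == (unsigned int)m.slots.size()) {
        for (const std::string& s : m.slots)
            out += s;
        pending.erase(messageId);   // the real class instead marks the entry done and lets GC reap it
        return true;
    }
    return false;
}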

@ -15,157 +15,168 @@
namespace ZeroTier {
Vector< uint8_t > &Dictionary::operator[](const char *const k)
{ return m_entries[k]; }
const Vector< uint8_t > &Dictionary::operator[](const char *const k) const
Vector<uint8_t>& Dictionary::operator[](const char* const k)
{
static const Vector< uint8_t > s_emptyEntry;
const SortedMap< String, Vector< uint8_t > >::const_iterator e(m_entries.find(k));
return (e == m_entries.end()) ? s_emptyEntry : e->second;
return m_entries[k];
}
void Dictionary::add(const char *k, const Address &v)
const Vector<uint8_t>& Dictionary::operator[](const char* const k) const
{
char tmp[ZT_ADDRESS_STRING_SIZE_MAX];
v.toString(tmp);
add(k, tmp);
static const Vector<uint8_t> s_emptyEntry;
const SortedMap<String, Vector<uint8_t> >::const_iterator e(m_entries.find(k));
return (e == m_entries.end()) ? s_emptyEntry : e->second;
}
void Dictionary::add(const char *k, const char *v)
void Dictionary::add(const char* k, const Address& v)
{
Vector< uint8_t > &e = (*this)[k];
e.clear();
if (v) {
while (*v)
e.push_back((uint8_t)*(v++));
}
char tmp[ZT_ADDRESS_STRING_SIZE_MAX];
v.toString(tmp);
add(k, tmp);
}
void Dictionary::add(const char *k, const void *data, unsigned int len)
void Dictionary::add(const char* k, const char* v)
{
Vector< uint8_t > &e = (*this)[k];
if (likely(len != 0)) {
e.assign((const uint8_t *)data, (const uint8_t *)data + len);
} else {
e.clear();
}
Vector<uint8_t>& e = (*this)[k];
e.clear();
if (v) {
while (*v)
e.push_back((uint8_t) * (v++));
}
}
uint64_t Dictionary::getUI(const char *k, uint64_t dfl) const
void Dictionary::add(const char* k, const void* data, unsigned int len)
{
char tmp[32];
getS(k, tmp, sizeof(tmp));
if (tmp[0])
return Utils::unhex(tmp);
return dfl;
Vector<uint8_t>& e = (*this)[k];
if (likely(len != 0)) {
e.assign((const uint8_t*)data, (const uint8_t*)data + len);
}
else {
e.clear();
}
}
char *Dictionary::getS(const char *k, char *v, const unsigned int cap) const
uint64_t Dictionary::getUI(const char* k, uint64_t dfl) const
{
if (cap == 0) // sanity check
return v;
char tmp[32];
getS(k, tmp, sizeof(tmp));
if (tmp[0])
return Utils::unhex(tmp);
return dfl;
}
const Vector< uint8_t > &e = (*this)[k];
if (e.empty()) {
v[0] = 0;
return v;
}
char* Dictionary::getS(const char* k, char* v, const unsigned int cap) const
{
if (cap == 0) // sanity check
return v;
for (unsigned int i = 0, last = (cap - 1);; ++i) {
if ((i >= last) || (i >= (unsigned int)e.size())) {
v[i] = 0;
break;
}
if ((v[i] = (char)e[i]) == 0)
break;
}
const Vector<uint8_t>& e = (*this)[k];
if (e.empty()) {
v[0] = 0;
return v;
}
return v;
for (unsigned int i = 0, last = (cap - 1);; ++i) {
if ((i >= last) || (i >= (unsigned int)e.size())) {
v[i] = 0;
break;
}
if ((v[i] = (char)e[i]) == 0)
break;
}
return v;
}
void Dictionary::clear()
{ m_entries.clear(); }
void Dictionary::encode(Vector< uint8_t > &out) const
{
out.clear();
for (SortedMap< String, Vector< uint8_t > >::const_iterator ti(m_entries.begin()); ti != m_entries.end(); ++ti) {
s_appendKey(out, ti->first.data());
for (Vector< uint8_t >::const_iterator i(ti->second.begin()); i != ti->second.end(); ++i)
s_appendValueByte(out, *i);
out.push_back((uint8_t)'\n');
}
m_entries.clear();
}
bool Dictionary::decode(const void *data, unsigned int len)
void Dictionary::encode(Vector<uint8_t>& out) const
{
clear();
String k;
Vector< uint8_t > *v = nullptr;
bool escape = false;
for (unsigned int di = 0; di < len; ++di) {
const uint8_t c = reinterpret_cast<const uint8_t *>(data)[di];
if (c) {
if (v) {
if (escape) {
escape = false;
switch (c) {
case 48:
v->push_back(0);
break;
case 101:
v->push_back(61);
break;
case 110:
v->push_back(10);
break;
case 114:
v->push_back(13);
break;
default:
v->push_back(c);
break;
}
} else {
if (c == (uint8_t)'\n') {
k.clear();
v = nullptr;
} else if (c == 92) { // backslash
escape = true;
} else {
v->push_back(c);
}
}
} else {
if (c == (uint8_t)'=') {
v = &m_entries[k];
} else {
k.push_back(c);
}
}
} else {
break;
}
}
return true;
out.clear();
for (SortedMap<String, Vector<uint8_t> >::const_iterator ti(m_entries.begin()); ti != m_entries.end(); ++ti) {
s_appendKey(out, ti->first.data());
for (Vector<uint8_t>::const_iterator i(ti->second.begin()); i != ti->second.end(); ++i)
s_appendValueByte(out, *i);
out.push_back((uint8_t)'\n');
}
}
char *Dictionary::arraySubscript(char *buf, unsigned int bufSize, const char *name, const unsigned long sub) noexcept
bool Dictionary::decode(const void* data, unsigned int len)
{
if (bufSize < 17) { // sanity check
buf[0] = 0;
return buf;
}
for (unsigned int i = 0; i < (bufSize - 17); ++i) {
if ((buf[i] = name[i]) == 0) {
buf[i++] = '#';
Utils::hex(sub, buf + i);
return buf;
}
}
buf[0] = 0;
return buf;
clear();
String k;
Vector<uint8_t>* v = nullptr;
bool escape = false;
for (unsigned int di = 0; di < len; ++di) {
const uint8_t c = reinterpret_cast<const uint8_t*>(data)[di];
if (c) {
if (v) {
if (escape) {
escape = false;
switch (c) {
case 48:
v->push_back(0);
break;
case 101:
v->push_back(61);
break;
case 110:
v->push_back(10);
break;
case 114:
v->push_back(13);
break;
default:
v->push_back(c);
break;
}
}
else {
if (c == (uint8_t)'\n') {
k.clear();
v = nullptr;
}
else if (c == 92) { // backslash
escape = true;
}
else {
v->push_back(c);
}
}
}
else {
if (c == (uint8_t)'=') {
v = &m_entries[k];
}
else {
k.push_back(c);
}
}
}
else {
break;
}
}
return true;
}
} // namespace ZeroTier
char* Dictionary::arraySubscript(char* buf, unsigned int bufSize, const char* name, const unsigned long sub) noexcept
{
if (bufSize < 17) { // sanity check
buf[0] = 0;
return buf;
}
for (unsigned int i = 0; i < (bufSize - 17); ++i) {
if ((buf[i] = name[i]) == 0) {
buf[i++] = '#';
Utils::hex(sub, buf + i);
return buf;
}
}
buf[0] = 0;
return buf;
}
} // namespace ZeroTier
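The byte-level escaping that s_appendValueByte() and decode() implement above can be summarized with a short standalone helper. This is a sketch of the same scheme, not code from this tree: NUL, newline, carriage return, '=' and backslash become two-byte backslash sequences so that '=' can separate keys from values and a newline can terminate each entry.

#include <cstdint>
#include <string>

// Escape one value byte the way the dictionary encoder does.
static void appendEscapedValueByte(std::string& out, const uint8_t c)
{
    switch (c) {
        case 0: out += "\\0"; break;      // NUL             -> backslash '0'
        case 10: out += "\\n"; break;     // newline         -> backslash 'n'
        case 13: out += "\\r"; break;     // carriage return -> backslash 'r'
        case 61: out += "\\e"; break;     // '='             -> backslash 'e'
        case 92: out += "\\\\"; break;    // backslash       -> doubled backslash
        default: out += (char)c; break;
    }
}

// Encode a single key=value entry; keys are written verbatim and each entry ends with a newline.
static std::string encodeEntry(const std::string& key, const std::string& value)
{
    std::string out(key);
    out += '=';
    for (std::string::size_type i = 0; i < value.size(); ++i)
        appendEscapedValueByte(out, (uint8_t)value[i]);
    out += '\n';
    return out;
}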

@ -14,11 +14,11 @@
#ifndef ZT_DICTIONARY_HPP
#define ZT_DICTIONARY_HPP
#include "Constants.hpp"
#include "Utils.hpp"
#include "Address.hpp"
#include "Buf.hpp"
#include "Constants.hpp"
#include "Containers.hpp"
#include "Utils.hpp"
namespace ZeroTier {
@ -37,420 +37,428 @@ class Identity;
* The fastest way to build a dictionary to send is to use the append
* static functions, not to populate and then encode a Dictionary.
*/
class Dictionary
{
public:
typedef SortedMap< String, Vector< uint8_t > >::const_iterator const_iterator;
class Dictionary {
public:
typedef SortedMap<String, Vector<uint8_t> >::const_iterator const_iterator;
ZT_INLINE Dictionary()
{}
ZT_INLINE Dictionary()
{
}
ZT_INLINE ~Dictionary()
{}
ZT_INLINE ~Dictionary()
{
}
/*
ZT_INLINE void dump() const
{
printf("\n--\n");
for (const_iterator e(begin()); e != end(); ++e) {
printf("%.8x %s=", Utils::fnv1a32(e->second.data(), (unsigned int)e->second.size()), e->first.c_str());
bool binary = false;
for (Vector< uint8_t >::const_iterator c(e->second.begin()); c != e->second.end(); ++c) {
if ((*c < 33) || (*c > 126)) {
binary = true;
break;
}
}
if (binary) {
for (Vector< uint8_t >::const_iterator c(e->second.begin()); c != e->second.end(); ++c)
printf("%.2x", (unsigned int)*c);
} else {
Vector< uint8_t > s(e->second);
s.push_back(0);
printf("%s", s.data());
}
printf("\n");
}
printf("--\n");
}
*/
/*
ZT_INLINE void dump() const
{
printf("\n--\n");
for (const_iterator e(begin()); e != end(); ++e) {
printf("%.8x %s=", Utils::fnv1a32(e->second.data(), (unsigned int)e->second.size()), e->first.c_str());
bool binary = false;
for (Vector< uint8_t >::const_iterator c(e->second.begin()); c != e->second.end(); ++c) {
if ((*c < 33) || (*c > 126)) {
binary = true;
break;
}
}
if (binary) {
for (Vector< uint8_t >::const_iterator c(e->second.begin()); c != e->second.end(); ++c)
printf("%.2x", (unsigned int)*c);
} else {
Vector< uint8_t > s(e->second);
s.push_back(0);
printf("%s", s.data());
}
printf("\n");
}
printf("--\n");
}
*/
/**
* Get a reference to a value
*
* @param k Key to look up
* @return Reference to value
*/
Vector< uint8_t > &operator[](const char *k);
/**
* Get a reference to a value
*
* @param k Key to look up
* @return Reference to value
*/
Vector<uint8_t>& operator[](const char* k);
/**
* Get a const reference to a value
*
* @param k Key to look up
* @return Reference to value or to empty vector if not found
*/
const Vector< uint8_t > &operator[](const char *k) const;
/**
* Get a const reference to a value
*
* @param k Key to look up
* @return Reference to value or to empty vector if not found
*/
const Vector<uint8_t>& operator[](const char* k) const;
/**
* @return Start of key->value pairs
*/
ZT_INLINE const_iterator begin() const noexcept
{ return m_entries.begin(); }
/**
* @return Start of key->value pairs
*/
ZT_INLINE const_iterator begin() const noexcept
{
return m_entries.begin();
}
/**
* @return End of key->value pairs
*/
ZT_INLINE const_iterator end() const noexcept
{ return m_entries.end(); }
/**
* @return End of key->value pairs
*/
ZT_INLINE const_iterator end() const noexcept
{
return m_entries.end();
}
/**
* Add an integer as a hexadecimal string value
*
* @param k Key to set
* @param v Integer to set, will be cast to uint64_t and stored as hex
*/
ZT_INLINE void add(const char *const k, const uint64_t v)
{
char buf[24];
add(k, Utils::hex((uint64_t)(v), buf));
}
/**
* Add an integer as a hexadecimal string value
*
* @param k Key to set
* @param v Integer to set, will be cast to uint64_t and stored as hex
*/
ZT_INLINE void add(const char* const k, const uint64_t v)
{
char buf[24];
add(k, Utils::hex((uint64_t)(v), buf));
}
/**
* Add an integer as a hexadecimal string value
*
* @param k Key to set
* @param v Integer to set, will be cast to uint64_t and stored as hex
*/
ZT_INLINE void add(const char *const k, const int64_t v)
{
char buf[24];
add(k, Utils::hex((uint64_t)(v), buf));
}
/**
* Add an integer as a hexadecimal string value
*
* @param k Key to set
* @param v Integer to set, will be cast to uint64_t and stored as hex
*/
ZT_INLINE void add(const char* const k, const int64_t v)
{
char buf[24];
add(k, Utils::hex((uint64_t)(v), buf));
}
/**
* Add an address in 10-digit hex string format
*/
void add(const char *k, const Address &v);
/**
* Add an address in 10-digit hex string format
*/
void add(const char* k, const Address& v);
/**
* Add a C string as a value
*/
void add(const char *k, const char *v);
/**
* Add a C string as a value
*/
void add(const char* k, const char* v);
/**
* Add a binary blob as a value
*/
void add(const char *k, const void *data, unsigned int len);
/**
* Add a binary blob as a value
*/
void add(const char* k, const void* data, unsigned int len);
/**
* Get an integer
*
* @param k Key to look up
* @param dfl Default value (default: 0)
* @return Value of key or default if not found
*/
uint64_t getUI(const char *k, uint64_t dfl = 0) const;
/**
* Get an integer
*
* @param k Key to look up
* @param dfl Default value (default: 0)
* @return Value of key or default if not found
*/
uint64_t getUI(const char* k, uint64_t dfl = 0) const;
/**
* Get a C string
*
* If the buffer is too small the string will be truncated, but the
* buffer will always end in a terminating null no matter what.
*
* @param k Key to look up
* @param v Buffer to hold string
* @param cap Maximum size of string (including terminating null)
*/
char *getS(const char *k, char *v, unsigned int cap) const;
/**
* Get a C string
*
* If the buffer is too small the string will be truncated, but the
* buffer will always end in a terminating null no matter what.
*
* @param k Key to look up
* @param v Buffer to hold string
* @param cap Maximum size of string (including terminating null)
*/
char* getS(const char* k, char* v, unsigned int cap) const;
/**
* Get an object supporting the marshal/unmarshal interface pattern
*
* @tparam T Object type (inferred)
* @param k Key to look up
* @param obj Object to unmarshal() into
* @return True if unmarshal was successful
*/
template< typename T >
ZT_INLINE bool getO(const char *k, T &obj) const
{
const Vector< uint8_t > &d = (*this)[k];
if (d.empty())
return false;
return (obj.unmarshal(d.data(), (unsigned int)d.size()) > 0);
}
/**
* Get an object supporting the marshal/unmarshal interface pattern
*
* @tparam T Object type (inferred)
* @param k Key to look up
* @param obj Object to unmarshal() into
* @return True if unmarshal was successful
*/
template <typename T> ZT_INLINE bool getO(const char* k, T& obj) const
{
const Vector<uint8_t>& d = (*this)[k];
if (d.empty())
return false;
return (obj.unmarshal(d.data(), (unsigned int)d.size()) > 0);
}
/**
* Add an object supporting the marshal/unmarshal interface pattern
*
* @tparam T Object type (inferred)
* @param k Key to add
* @param obj Object to marshal() into vector
* @return True if successful
*/
template< typename T >
ZT_INLINE bool addO(const char *k, T &obj)
{
Vector< uint8_t > &d = (*this)[k];
d.resize(T::marshalSizeMax());
const int l = obj.marshal(d.data());
if (l > 0) {
d.resize(l);
return true;
}
d.clear();
return false;
}
/**
* Add an object supporting the marshal/unmarshal interface pattern
*
* @tparam T Object type (inferred)
* @param k Key to add
* @param obj Object to marshal() into vector
* @return True if successful
*/
template <typename T> ZT_INLINE bool addO(const char* k, T& obj)
{
Vector<uint8_t>& d = (*this)[k];
d.resize(T::marshalSizeMax());
const int l = obj.marshal(d.data());
if (l > 0) {
d.resize(l);
return true;
}
d.clear();
return false;
}
/**
* Erase all entries in dictionary
*/
void clear();
/**
* Erase all entries in dictionary
*/
void clear();
/**
* @return Number of entries
*/
ZT_INLINE unsigned int size() const noexcept
{ return (unsigned int)m_entries.size(); }
/**
* @return Number of entries
*/
ZT_INLINE unsigned int size() const noexcept
{
return (unsigned int)m_entries.size();
}
/**
* @return True if dictionary is empty
*/
ZT_INLINE bool empty() const noexcept
{ return m_entries.empty(); }
/**
* @return True if dictionary is empty
*/
ZT_INLINE bool empty() const noexcept
{
return m_entries.empty();
}
/**
* Encode to a string in the supplied vector
*
* @param out String encoded dictionary
*/
void encode(Vector< uint8_t > &out) const;
/**
* Encode to a string in the supplied vector
*
* @param out String encoded dictionary
*/
void encode(Vector<uint8_t>& out) const;
/**
* Decode a string encoded dictionary
*
* This will decode up to 'len' but will also abort if it finds a
* null/zero as this could be a C string.
*
* @param data Data to decode
* @param len Length of data
* @return True if dictionary was formatted correctly and valid, false on error
*/
bool decode(const void *data, unsigned int len);
/**
* Decode a string encoded dictionary
*
* This will decode up to 'len' but will also abort if it finds a
* null/zero as this could be a C string.
*
* @param data Data to decode
* @param len Length of data
* @return True if dictionary was formatted correctly and valid, false on error
*/
bool decode(const void* data, unsigned int len);
/**
* Append a key=value pair to a buffer (vector or FCV)
*
* @param out Buffer
* @param k Key (must be <= 8 characters)
* @param v Value
*/
template< typename V >
ZT_INLINE static void append(V &out, const char *const k, const bool v)
{
s_appendKey(out, k);
out.push_back((uint8_t)(v ? '1' : '0'));
out.push_back((uint8_t)'\n');
}
/**
* Append a key=value pair to a buffer (vector or FCV)
*
* @param out Buffer
* @param k Key (must be <= 8 characters)
* @param v Value
*/
template <typename V> ZT_INLINE static void append(V& out, const char* const k, const bool v)
{
s_appendKey(out, k);
out.push_back((uint8_t)(v ? '1' : '0'));
out.push_back((uint8_t)'\n');
}
/**
* Append a key=value pair to a buffer (vector or FCV)
*
* @param out Buffer
* @param k Key (must be <= 8 characters)
* @param v Value
*/
template< typename V >
ZT_INLINE static void append(V &out, const char *const k, const Address v)
{
s_appendKey(out, k);
const uint64_t a = v.toInt();
static_assert(ZT_ADDRESS_LENGTH_HEX == 10, "this must be rewritten for any change in address length");
out.push_back((uint8_t)Utils::HEXCHARS[(a >> 36U) & 0xfU]);
out.push_back((uint8_t)Utils::HEXCHARS[(a >> 32U) & 0xfU]);
out.push_back((uint8_t)Utils::HEXCHARS[(a >> 28U) & 0xfU]);
out.push_back((uint8_t)Utils::HEXCHARS[(a >> 24U) & 0xfU]);
out.push_back((uint8_t)Utils::HEXCHARS[(a >> 20U) & 0xfU]);
out.push_back((uint8_t)Utils::HEXCHARS[(a >> 16U) & 0xfU]);
out.push_back((uint8_t)Utils::HEXCHARS[(a >> 12U) & 0xfU]);
out.push_back((uint8_t)Utils::HEXCHARS[(a >> 8U) & 0xfU]);
out.push_back((uint8_t)Utils::HEXCHARS[(a >> 4U) & 0xfU]);
out.push_back((uint8_t)Utils::HEXCHARS[a & 0xfU]);
out.push_back((uint8_t)'\n');
}
/**
* Append a key=value pair to a buffer (vector or FCV)
*
* @param out Buffer
* @param k Key (must be <= 8 characters)
* @param v Value
*/
template <typename V> ZT_INLINE static void append(V& out, const char* const k, const Address v)
{
s_appendKey(out, k);
const uint64_t a = v.toInt();
static_assert(ZT_ADDRESS_LENGTH_HEX == 10, "this must be rewritten for any change in address length");
out.push_back((uint8_t)Utils::HEXCHARS[(a >> 36U) & 0xfU]);
out.push_back((uint8_t)Utils::HEXCHARS[(a >> 32U) & 0xfU]);
out.push_back((uint8_t)Utils::HEXCHARS[(a >> 28U) & 0xfU]);
out.push_back((uint8_t)Utils::HEXCHARS[(a >> 24U) & 0xfU]);
out.push_back((uint8_t)Utils::HEXCHARS[(a >> 20U) & 0xfU]);
out.push_back((uint8_t)Utils::HEXCHARS[(a >> 16U) & 0xfU]);
out.push_back((uint8_t)Utils::HEXCHARS[(a >> 12U) & 0xfU]);
out.push_back((uint8_t)Utils::HEXCHARS[(a >> 8U) & 0xfU]);
out.push_back((uint8_t)Utils::HEXCHARS[(a >> 4U) & 0xfU]);
out.push_back((uint8_t)Utils::HEXCHARS[a & 0xfU]);
out.push_back((uint8_t)'\n');
}
/**
* Append a key=value pair to a buffer (vector or FCV)
*
* @param out Buffer
* @param k Key (must be <= 8 characters)
* @param v Value
*/
template< typename V >
ZT_INLINE static void append(V &out, const char *const k, const uint64_t v)
{
s_appendKey(out, k);
char buf[17];
Utils::hex(v, buf);
unsigned int i = 0;
while (buf[i])
out.push_back((uint8_t)buf[i++]);
out.push_back((uint8_t)'\n');
}
/**
* Append a key=value pair to a buffer (vector or FCV)
*
* @param out Buffer
* @param k Key (must be <= 8 characters)
* @param v Value
*/
template <typename V> ZT_INLINE static void append(V& out, const char* const k, const uint64_t v)
{
s_appendKey(out, k);
char buf[17];
Utils::hex(v, buf);
unsigned int i = 0;
while (buf[i])
out.push_back((uint8_t)buf[i++]);
out.push_back((uint8_t)'\n');
}
template< typename V >
ZT_INLINE static void append(V &out, const char *const k, const int64_t v)
{ append(out, k, (uint64_t)v); }
template <typename V> ZT_INLINE static void append(V& out, const char* const k, const int64_t v)
{
append(out, k, (uint64_t)v);
}
template< typename V >
ZT_INLINE static void append(V &out, const char *const k, const uint32_t v)
{ append(out, k, (uint64_t)v); }
template <typename V> ZT_INLINE static void append(V& out, const char* const k, const uint32_t v)
{
append(out, k, (uint64_t)v);
}
template< typename V >
ZT_INLINE static void append(V &out, const char *const k, const int32_t v)
{ append(out, k, (uint64_t)v); }
template <typename V> ZT_INLINE static void append(V& out, const char* const k, const int32_t v)
{
append(out, k, (uint64_t)v);
}
template< typename V >
ZT_INLINE static void append(V &out, const char *const k, const uint16_t v)
{ append(out, k, (uint64_t)v); }
template <typename V> ZT_INLINE static void append(V& out, const char* const k, const uint16_t v)
{
append(out, k, (uint64_t)v);
}
template< typename V >
ZT_INLINE static void append(V &out, const char *const k, const int16_t v)
{ append(out, k, (uint64_t)v); }
template <typename V> ZT_INLINE static void append(V& out, const char* const k, const int16_t v)
{
append(out, k, (uint64_t)v);
}
template< typename V >
ZT_INLINE static void append(V &out, const char *const k, const uint8_t v)
{ append(out, k, (uint64_t)v); }
template <typename V> ZT_INLINE static void append(V& out, const char* const k, const uint8_t v)
{
append(out, k, (uint64_t)v);
}
template< typename V >
ZT_INLINE static void append(V &out, const char *const k, const int8_t v)
{ append(out, k, (uint64_t)v); }
template <typename V> ZT_INLINE static void append(V& out, const char* const k, const int8_t v)
{
append(out, k, (uint64_t)v);
}
/**
* Append a key=value pair to a buffer (vector or FCV)
*
* @param out Buffer
* @param k Key (must be <= 8 characters)
* @param v Value
*/
template< typename V >
ZT_INLINE static void append(V &out, const char *const k, const char *v)
{
if ((v) && (*v)) {
s_appendKey(out, k);
while (*v)
s_appendValueByte(out, (uint8_t)*(v++));
out.push_back((uint8_t)'\n');
}
}
/**
* Append a key=value pair to a buffer (vector or FCV)
*
* @param out Buffer
* @param k Key (must be <= 8 characters)
* @param v Value
*/
template <typename V> ZT_INLINE static void append(V& out, const char* const k, const char* v)
{
if ((v) && (*v)) {
s_appendKey(out, k);
while (*v)
s_appendValueByte(out, (uint8_t) * (v++));
out.push_back((uint8_t)'\n');
}
}
/**
* Append a key=value pair to a buffer (vector or FCV)
*
* @param out Buffer
* @param k Key (must be <= 8 characters)
* @param v Value
* @param vlen Value length in bytes
*/
template< typename V >
ZT_INLINE static void append(V &out, const char *const k, const void *const v, const unsigned int vlen)
{
s_appendKey(out, k);
for (unsigned int i = 0; i < vlen; ++i)
s_appendValueByte(out, reinterpret_cast<const uint8_t *>(v)[i]);
out.push_back((uint8_t)'\n');
}
/**
* Append a key=value pair to a buffer (vector or FCV)
*
* @param out Buffer
* @param k Key (must be <= 8 characters)
* @param v Value
* @param vlen Value length in bytes
*/
template <typename V>
ZT_INLINE static void append(V& out, const char* const k, const void* const v, const unsigned int vlen)
{
s_appendKey(out, k);
for (unsigned int i = 0; i < vlen; ++i)
s_appendValueByte(out, reinterpret_cast<const uint8_t*>(v)[i]);
out.push_back((uint8_t)'\n');
}
/**
* Append a packet ID as raw bytes in the provided byte order
*
* @param out Buffer
* @param k Key (must be <= 8 characters)
* @param pid Packet ID
*/
template< typename V >
static ZT_INLINE void appendPacketId(V &out, const char *const k, const uint64_t pid)
{ append(out, k, &pid, 8); }
/**
* Append a packet ID as raw bytes in the provided byte order
*
* @param out Buffer
* @param k Key (must be <= 8 characters)
* @param pid Packet ID
*/
template <typename V> static ZT_INLINE void appendPacketId(V& out, const char* const k, const uint64_t pid)
{
append(out, k, &pid, 8);
}
/**
* Append key=value with any object implementing the correct marshal interface
*
* @param out Buffer
* @param k Key (must be <= 8 characters)
* @param v Marshal-able object
* @return Bytes appended or negative on error (return value of marshal())
*/
template< typename V, typename T >
static ZT_INLINE int appendObject(V &out, const char *const k, const T &v)
{
uint8_t tmp[2048]; // large enough for any current object
if (T::marshalSizeMax() > sizeof(tmp))
return -1;
const int mlen = v.marshal(tmp);
if (mlen > 0)
append(out, k, tmp, (unsigned int)mlen);
return mlen;
}
/**
* Append key=value with any object implementing the correct marshal interface
*
* @param out Buffer
* @param k Key (must be <= 8 characters)
* @param v Marshal-able object
* @return Bytes appended or negative on error (return value of marshal())
*/
template <typename V, typename T> static ZT_INLINE int appendObject(V& out, const char* const k, const T& v)
{
uint8_t tmp[2048]; // large enough for any current object
if (T::marshalSizeMax() > sizeof(tmp))
return -1;
const int mlen = v.marshal(tmp);
if (mlen > 0)
append(out, k, tmp, (unsigned int)mlen);
return mlen;
}
/**
* Append #sub where sub is a hexadecimal string to 'name' and store in 'buf'
*
* @param buf Buffer to store subscript key
* @param name Root name
* @param sub Subscript index
* @return Pointer to 'buf'
*/
static char *arraySubscript(char *buf, unsigned int bufSize, const char *name, const unsigned long sub) noexcept;
/**
* Append #sub where sub is a hexadecimal string to 'name' and store in 'buf'
*
* @param buf Buffer to store subscript key
* @param name Root name
* @param sub Subscript index
* @return Pointer to 'buf'
*/
static char* arraySubscript(char* buf, unsigned int bufSize, const char* name, const unsigned long sub) noexcept;
private:
template< typename V >
ZT_INLINE static void s_appendValueByte(V &out, const uint8_t c)
{
switch (c) {
case 0:
out.push_back(92);
out.push_back(48);
break;
case 10:
out.push_back(92);
out.push_back(110);
break;
case 13:
out.push_back(92);
out.push_back(114);
break;
case 61:
out.push_back(92);
out.push_back(101);
break;
case 92:
out.push_back(92);
out.push_back(92);
break;
default:
out.push_back(c);
break;
}
}
private:
template <typename V> ZT_INLINE static void s_appendValueByte(V& out, const uint8_t c)
{
switch (c) {
case 0:
out.push_back(92);
out.push_back(48);
break;
case 10:
out.push_back(92);
out.push_back(110);
break;
case 13:
out.push_back(92);
out.push_back(114);
break;
case 61:
out.push_back(92);
out.push_back(101);
break;
case 92:
out.push_back(92);
out.push_back(92);
break;
default:
out.push_back(c);
break;
}
}
template< typename V >
ZT_INLINE static void s_appendKey(V &out, const char *k)
{
for (;;) {
const char c = *(k++);
if (c == 0)
break;
out.push_back((uint8_t)c);
}
out.push_back((uint8_t)'=');
}
template <typename V> ZT_INLINE static void s_appendKey(V& out, const char* k)
{
for (;;) {
const char c = *(k++);
if (c == 0)
break;
out.push_back((uint8_t)c);
}
out.push_back((uint8_t)'=');
}
// Dictionary maps need to be sorted so that they always encode in the same order
// to yield blobs that can be hashed and signed reproducibly. Other than for areas
// where dictionaries are signed and verified the order doesn't matter.
SortedMap< String, Vector< uint8_t > > m_entries;
// Dictionary maps need to be sorted so that they always encode in the same order
// to yield blobs that can be hashed and signed reproducibly. Other than for areas
// where dictionaries are signed and verified the order doesn't matter.
SortedMap<String, Vector<uint8_t> > m_entries;
};
} // namespace ZeroTier
} // namespace ZeroTier
#endif
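A hedged usage sketch of the interface above, wrapped in a throwaway function; it assumes Dictionary.hpp and the Vector<> alias from Containers.hpp in this tree, and the key names are made up for illustration:

#include "Dictionary.hpp"   // assumed include path within this tree

static void dictionaryExample()
{
    ZeroTier::Dictionary d;
    d.add("name", "my-network");
    d.add("mtu", (uint64_t)2800);   // integers are stored as hexadecimal strings

    ZeroTier::Vector<uint8_t> wire;
    d.encode(wire);                 // sorted key order keeps the encoding reproducible

    ZeroTier::Dictionary d2;
    if (d2.decode(wire.data(), (unsigned int)wire.size())) {
        char name[64];
        d2.getS("name", name, sizeof(name));
        const uint64_t mtu = d2.getUI("mtu", 0);   // parsed back out of the hex string
        (void)name;
        (void)mtu;
    }

    // arraySubscript() builds keys like "rule#<hex index>" for array-style entries.
    char subKey[64];
    ZeroTier::Dictionary::arraySubscript(subKey, sizeof(subKey), "rule", 3);
    (void)subKey;
}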

File diff suppressed because it is too large
@ -65,7 +65,7 @@ namespace ZeroTier {
* @param pub Buffer to receive point compressed public key
* @param priv Buffer to receive private key
*/
void ECC384GenerateKey(uint8_t pub[ZT_ECC384_PUBLIC_KEY_SIZE],uint8_t priv[ZT_ECC384_PRIVATE_KEY_SIZE]);
void ECC384GenerateKey(uint8_t pub[ZT_ECC384_PUBLIC_KEY_SIZE], uint8_t priv[ZT_ECC384_PRIVATE_KEY_SIZE]);
/**
* Sign a hash with a NIST P-384 private key
@ -77,7 +77,10 @@ void ECC384GenerateKey(uint8_t pub[ZT_ECC384_PUBLIC_KEY_SIZE],uint8_t priv[ZT_EC
* @param hash 48-byte hash
* @param sig Buffer to receive signature
*/
void ECC384ECDSASign(const uint8_t priv[ZT_ECC384_PRIVATE_KEY_SIZE],const uint8_t hash[ZT_ECC384_SIGNATURE_HASH_SIZE],uint8_t sig[ZT_ECC384_SIGNATURE_SIZE]);
void ECC384ECDSASign(
const uint8_t priv[ZT_ECC384_PRIVATE_KEY_SIZE],
const uint8_t hash[ZT_ECC384_SIGNATURE_HASH_SIZE],
uint8_t sig[ZT_ECC384_SIGNATURE_SIZE]);
/**
* Verify a signature
@ -87,7 +90,10 @@ void ECC384ECDSASign(const uint8_t priv[ZT_ECC384_PRIVATE_KEY_SIZE],const uint8_
* @param sig Signature to check
* @return True if signature is valid
*/
bool ECC384ECDSAVerify(const uint8_t pub[ZT_ECC384_PUBLIC_KEY_SIZE],const uint8_t hash[ZT_ECC384_SIGNATURE_HASH_SIZE],const uint8_t sig[ZT_ECC384_SIGNATURE_SIZE]);
bool ECC384ECDSAVerify(
const uint8_t pub[ZT_ECC384_PUBLIC_KEY_SIZE],
const uint8_t hash[ZT_ECC384_SIGNATURE_HASH_SIZE],
const uint8_t sig[ZT_ECC384_SIGNATURE_SIZE]);
/**
* Perform ECDH key agreement
@ -99,8 +105,11 @@ bool ECC384ECDSAVerify(const uint8_t pub[ZT_ECC384_PUBLIC_KEY_SIZE],const uint8_
* @param ourPriv Local private key
* @param secret Buffer to receive 48-byte secret
*/
bool ECC384ECDH(const uint8_t theirPub[ZT_ECC384_PUBLIC_KEY_SIZE],const uint8_t ourPriv[ZT_ECC384_PRIVATE_KEY_SIZE],uint8_t secret[ZT_ECC384_SHARED_SECRET_SIZE]);
bool ECC384ECDH(
const uint8_t theirPub[ZT_ECC384_PUBLIC_KEY_SIZE],
const uint8_t ourPriv[ZT_ECC384_PRIVATE_KEY_SIZE],
uint8_t secret[ZT_ECC384_SHARED_SECRET_SIZE]);
} // namespace ZeroTier
} // namespace ZeroTier
#endif
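A hedged round-trip sketch using the four declarations above; it assumes this header is available as ECC384.hpp in this tree and uses an all-zero stand-in where a real SHA-384 message digest would go:

#include "ECC384.hpp"   // assumed header name for the declarations above

#include <cstdint>
#include <cstring>

static bool ecc384RoundTrip()
{
    uint8_t pub[ZT_ECC384_PUBLIC_KEY_SIZE], priv[ZT_ECC384_PRIVATE_KEY_SIZE];
    ZeroTier::ECC384GenerateKey(pub, priv);

    uint8_t hash[ZT_ECC384_SIGNATURE_HASH_SIZE];
    std::memset(hash, 0, sizeof(hash));   // stand-in for a real SHA-384 digest of the message

    uint8_t sig[ZT_ECC384_SIGNATURE_SIZE];
    ZeroTier::ECC384ECDSASign(priv, hash, sig);
    if (! ZeroTier::ECC384ECDSAVerify(pub, hash, sig))
        return false;

    // ECDH: each side combines its private key with the other's public key and
    // should arrive at the same 48-byte shared secret.
    uint8_t pub2[ZT_ECC384_PUBLIC_KEY_SIZE], priv2[ZT_ECC384_PRIVATE_KEY_SIZE];
    ZeroTier::ECC384GenerateKey(pub2, priv2);
    uint8_t s1[ZT_ECC384_SHARED_SECRET_SIZE], s2[ZT_ECC384_SHARED_SECRET_SIZE];
    if (! ZeroTier::ECC384ECDH(pub2, priv, s1))
        return false;
    if (! ZeroTier::ECC384ECDH(pub, priv2, s2))
        return false;
    return std::memcmp(s1, s2, sizeof(s1)) == 0;
}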

@ -12,257 +12,278 @@
/****/
#include "Endpoint.hpp"
#include "Utils.hpp"
namespace ZeroTier {
static ZT_INLINE char s_endpointTypeChar(const ZT_EndpointType t)
{
switch(t) {
default: return '0';
case ZT_ENDPOINT_TYPE_ZEROTIER: return 'z';
case ZT_ENDPOINT_TYPE_ETHERNET: return 'e';
case ZT_ENDPOINT_TYPE_WIFI_DIRECT: return 'd';
case ZT_ENDPOINT_TYPE_BLUETOOTH: return 'b';
case ZT_ENDPOINT_TYPE_IP: return 'i';
case ZT_ENDPOINT_TYPE_IP_UDP: return 'u';
case ZT_ENDPOINT_TYPE_IP_TCP: return 't';
case ZT_ENDPOINT_TYPE_IP_TCP_WS: return 'w';
}
switch (t) {
default:
return '0';
case ZT_ENDPOINT_TYPE_ZEROTIER:
return 'z';
case ZT_ENDPOINT_TYPE_ETHERNET:
return 'e';
case ZT_ENDPOINT_TYPE_WIFI_DIRECT:
return 'd';
case ZT_ENDPOINT_TYPE_BLUETOOTH:
return 'b';
case ZT_ENDPOINT_TYPE_IP:
return 'i';
case ZT_ENDPOINT_TYPE_IP_UDP:
return 'u';
case ZT_ENDPOINT_TYPE_IP_TCP:
return 't';
case ZT_ENDPOINT_TYPE_IP_TCP_WS:
return 'w';
}
}
static ZT_INLINE ZT_EndpointType s_charEndpointType(const char c)
{
switch(c) {
default: return ZT_ENDPOINT_TYPE_NIL;
case 'z': return ZT_ENDPOINT_TYPE_ZEROTIER;
case 'e': return ZT_ENDPOINT_TYPE_ETHERNET;
case 'd': return ZT_ENDPOINT_TYPE_WIFI_DIRECT;
case 'b': return ZT_ENDPOINT_TYPE_BLUETOOTH;
case 'i': return ZT_ENDPOINT_TYPE_IP;
case 'u': return ZT_ENDPOINT_TYPE_IP_UDP;
case 't': return ZT_ENDPOINT_TYPE_IP_TCP;
case 'w': return ZT_ENDPOINT_TYPE_IP_TCP_WS;
}
switch (c) {
default:
return ZT_ENDPOINT_TYPE_NIL;
case 'z':
return ZT_ENDPOINT_TYPE_ZEROTIER;
case 'e':
return ZT_ENDPOINT_TYPE_ETHERNET;
case 'd':
return ZT_ENDPOINT_TYPE_WIFI_DIRECT;
case 'b':
return ZT_ENDPOINT_TYPE_BLUETOOTH;
case 'i':
return ZT_ENDPOINT_TYPE_IP;
case 'u':
return ZT_ENDPOINT_TYPE_IP_UDP;
case 't':
return ZT_ENDPOINT_TYPE_IP_TCP;
case 'w':
return ZT_ENDPOINT_TYPE_IP_TCP_WS;
}
}
char *Endpoint::toString(char s[ZT_ENDPOINT_STRING_SIZE_MAX]) const noexcept
char* Endpoint::toString(char s[ZT_ENDPOINT_STRING_SIZE_MAX]) const noexcept
{
static_assert(ZT_ENDPOINT_STRING_SIZE_MAX > (ZT_INETADDRESS_STRING_SIZE_MAX + 4), "overflow");
static_assert(ZT_ENDPOINT_STRING_SIZE_MAX > (ZT_FINGERPRINT_STRING_SIZE_MAX + 4), "overflow");
static_assert(ZT_ENDPOINT_STRING_SIZE_MAX > (ZT_INETADDRESS_STRING_SIZE_MAX + 4), "overflow");
static_assert(ZT_ENDPOINT_STRING_SIZE_MAX > (ZT_FINGERPRINT_STRING_SIZE_MAX + 4), "overflow");
s[0] = s_endpointTypeChar(this->type);
switch (this->type) {
default: // ZT_ENDPOINT_TYPE_NIL
s[1] = 0;
break;
case ZT_ENDPOINT_TYPE_ZEROTIER:
s[1] = '/';
zt().toString(s + 2);
break;
case ZT_ENDPOINT_TYPE_ETHERNET:
case ZT_ENDPOINT_TYPE_WIFI_DIRECT:
case ZT_ENDPOINT_TYPE_BLUETOOTH:
s[1] = '/';
eth().toString(s + 2);
break;
case ZT_ENDPOINT_TYPE_IP:
case ZT_ENDPOINT_TYPE_IP_UDP:
case ZT_ENDPOINT_TYPE_IP_TCP:
case ZT_ENDPOINT_TYPE_IP_TCP_WS:
s[1] = '/';
ip().toString(s + 2);
break;
}
s[0] = s_endpointTypeChar(this->type);
switch (this->type) {
default: // ZT_ENDPOINT_TYPE_NIL
s[1] = 0;
break;
case ZT_ENDPOINT_TYPE_ZEROTIER:
s[1] = '/';
zt().toString(s + 2);
break;
case ZT_ENDPOINT_TYPE_ETHERNET:
case ZT_ENDPOINT_TYPE_WIFI_DIRECT:
case ZT_ENDPOINT_TYPE_BLUETOOTH:
s[1] = '/';
eth().toString(s + 2);
break;
case ZT_ENDPOINT_TYPE_IP:
case ZT_ENDPOINT_TYPE_IP_UDP:
case ZT_ENDPOINT_TYPE_IP_TCP:
case ZT_ENDPOINT_TYPE_IP_TCP_WS:
s[1] = '/';
ip().toString(s + 2);
break;
}
return s;
return s;
}
bool Endpoint::fromString(const char *s) noexcept
bool Endpoint::fromString(const char* s) noexcept
{
memoryZero(this);
if ((!s) || (!*s)) {
// Empty strings are considered NIL endpoints.
return true;
} else if (s[1] == '/') {
// type/ADDRESS is a fully qualified endpoint.
this->type = s_charEndpointType(s[0]);
switch(this->type) {
case ZT_ENDPOINT_TYPE_NIL:
break;
case ZT_ENDPOINT_TYPE_ZEROTIER:
if (!s[2])
return false;
break;
case ZT_ENDPOINT_TYPE_ETHERNET:
case ZT_ENDPOINT_TYPE_WIFI_DIRECT:
case ZT_ENDPOINT_TYPE_BLUETOOTH: {
if (!s[2])
return false;
MAC tmpmac;
tmpmac.fromString(s + 2);
this->value.mac = tmpmac.toInt();
} break;
case ZT_ENDPOINT_TYPE_IP:
case ZT_ENDPOINT_TYPE_IP_UDP:
case ZT_ENDPOINT_TYPE_IP_TCP:
case ZT_ENDPOINT_TYPE_IP_TCP_WS: {
if (!s[2])
return false;
if (!asInetAddress(this->value.ss).fromString(s + 2))
return false;
} break;
}
} else if (strchr(s, '/') != nullptr) {
// IP/port is parsed as an IP_UDP endpoint for backward compatibility.
this->type = ZT_ENDPOINT_TYPE_IP_UDP;
return asInetAddress(this->value.ss).fromString(s);
}
return false;
memoryZero(this);
if ((! s) || (! *s)) {
// Empty strings are considered NIL endpoints.
return true;
}
else if (s[1] == '/') {
// type/ADDRESS is a fully qualified endpoint.
this->type = s_charEndpointType(s[0]);
switch (this->type) {
case ZT_ENDPOINT_TYPE_NIL:
break;
case ZT_ENDPOINT_TYPE_ZEROTIER:
if (! s[2])
return false;
break;
case ZT_ENDPOINT_TYPE_ETHERNET:
case ZT_ENDPOINT_TYPE_WIFI_DIRECT:
case ZT_ENDPOINT_TYPE_BLUETOOTH: {
if (! s[2])
return false;
MAC tmpmac;
tmpmac.fromString(s + 2);
this->value.mac = tmpmac.toInt();
} break;
case ZT_ENDPOINT_TYPE_IP:
case ZT_ENDPOINT_TYPE_IP_UDP:
case ZT_ENDPOINT_TYPE_IP_TCP:
case ZT_ENDPOINT_TYPE_IP_TCP_WS: {
if (! s[2])
return false;
if (! asInetAddress(this->value.ss).fromString(s + 2))
return false;
} break;
}
}
else if (strchr(s, '/') != nullptr) {
// IP/port is parsed as an IP_UDP endpoint for backward compatibility.
this->type = ZT_ENDPOINT_TYPE_IP_UDP;
return asInetAddress(this->value.ss).fromString(s);
}
return false;
}
int Endpoint::marshal(uint8_t data[ZT_ENDPOINT_MARSHAL_SIZE_MAX]) const noexcept
{
switch (this->type) {
default: // ZT_ENDPOINT_TYPE_NIL
// NIL endpoints get serialized like NIL InetAddress instances.
data[0] = ZT_ENDPOINT_TYPE_NIL;
return 1;
switch (this->type) {
default: // ZT_ENDPOINT_TYPE_NIL
// NIL endpoints get serialized like NIL InetAddress instances.
data[0] = ZT_ENDPOINT_TYPE_NIL;
return 1;
case ZT_ENDPOINT_TYPE_ZEROTIER:
data[0] = 16 + ZT_ENDPOINT_TYPE_ZEROTIER;
Address(this->value.fp.address).copyTo(data + 1);
Utils::copy<ZT_FINGERPRINT_HASH_SIZE>(data + 1 + ZT_ADDRESS_LENGTH, this->value.fp.hash);
return 1 + ZT_ADDRESS_LENGTH + ZT_FINGERPRINT_HASH_SIZE;
case ZT_ENDPOINT_TYPE_ZEROTIER:
data[0] = 16 + ZT_ENDPOINT_TYPE_ZEROTIER;
Address(this->value.fp.address).copyTo(data + 1);
Utils::copy<ZT_FINGERPRINT_HASH_SIZE>(data + 1 + ZT_ADDRESS_LENGTH, this->value.fp.hash);
return 1 + ZT_ADDRESS_LENGTH + ZT_FINGERPRINT_HASH_SIZE;
case ZT_ENDPOINT_TYPE_ETHERNET:
case ZT_ENDPOINT_TYPE_WIFI_DIRECT:
case ZT_ENDPOINT_TYPE_BLUETOOTH:
data[0] = 16 + (uint8_t)this->type;
MAC(this->value.mac).copyTo(data + 1);
return 7;
case ZT_ENDPOINT_TYPE_ETHERNET:
case ZT_ENDPOINT_TYPE_WIFI_DIRECT:
case ZT_ENDPOINT_TYPE_BLUETOOTH:
data[0] = 16 + (uint8_t)this->type;
MAC(this->value.mac).copyTo(data + 1);
return 7;
case ZT_ENDPOINT_TYPE_IP_UDP:
// Default UDP mode gets serialized to look exactly like an InetAddress.
return asInetAddress(this->value.ss).marshal(data);
case ZT_ENDPOINT_TYPE_IP_UDP:
// Default UDP mode gets serialized to look exactly like an InetAddress.
return asInetAddress(this->value.ss).marshal(data);
case ZT_ENDPOINT_TYPE_IP:
case ZT_ENDPOINT_TYPE_IP_TCP:
case ZT_ENDPOINT_TYPE_IP_TCP_WS:
// Other IP types get serialized as new version Endpoint instances with type.
data[0] = 16 + (uint8_t)this->type;
return 1 + asInetAddress(this->value.ss).marshal(data + 1);
}
case ZT_ENDPOINT_TYPE_IP:
case ZT_ENDPOINT_TYPE_IP_TCP:
case ZT_ENDPOINT_TYPE_IP_TCP_WS:
// Other IP types get serialized as new version Endpoint instances with type.
data[0] = 16 + (uint8_t)this->type;
return 1 + asInetAddress(this->value.ss).marshal(data + 1);
}
}
int Endpoint::unmarshal(const uint8_t *restrict data, int len) noexcept
int Endpoint::unmarshal(const uint8_t* restrict data, int len) noexcept
{
memoryZero(this);
if (unlikely(len <= 0))
return -1;
memoryZero(this);
if (unlikely(len <= 0))
return -1;
// Serialized endpoints with type bytes less than 16 are passed through
// to the unmarshal method of InetAddress and considered UDP endpoints.
// This allows backward compatibility with old endpoint fields in the
// protocol that were serialized InetAddress instances.
// Serialized endpoints with type bytes less than 16 are passed through
// to the unmarshal method of InetAddress and considered UDP endpoints.
// This allows backward compatibility with old endpoint fields in the
// protocol that were serialized InetAddress instances.
if (data[0] < 16) {
switch (data[0]) {
case 0:
return 1;
case 4:
case 6:
this->type = ZT_ENDPOINT_TYPE_IP_UDP;
return asInetAddress(this->value.ss).unmarshal(data, len);
}
return -1;
}
if (data[0] < 16) {
switch (data[0]) {
case 0:
return 1;
case 4:
case 6:
this->type = ZT_ENDPOINT_TYPE_IP_UDP;
return asInetAddress(this->value.ss).unmarshal(data, len);
}
return -1;
}
switch ((this->type = (ZT_EndpointType)(data[0] - 16))) {
case ZT_ENDPOINT_TYPE_NIL:
return 1;
switch ((this->type = (ZT_EndpointType)(data[0] - 16))) {
case ZT_ENDPOINT_TYPE_NIL:
return 1;
case ZT_ENDPOINT_TYPE_ZEROTIER:
if (len >= (1 + ZT_ADDRESS_LENGTH + ZT_FINGERPRINT_HASH_SIZE)) {
this->value.fp.address = Address(data + 1).toInt();
Utils::copy<ZT_FINGERPRINT_HASH_SIZE>(this->value.fp.hash, data + 1 + ZT_ADDRESS_LENGTH);
return 1 + ZT_ADDRESS_LENGTH + ZT_FINGERPRINT_HASH_SIZE;
}
return -1;
case ZT_ENDPOINT_TYPE_ZEROTIER:
if (len >= (1 + ZT_ADDRESS_LENGTH + ZT_FINGERPRINT_HASH_SIZE)) {
this->value.fp.address = Address(data + 1).toInt();
Utils::copy<ZT_FINGERPRINT_HASH_SIZE>(this->value.fp.hash, data + 1 + ZT_ADDRESS_LENGTH);
return 1 + ZT_ADDRESS_LENGTH + ZT_FINGERPRINT_HASH_SIZE;
}
return -1;
case ZT_ENDPOINT_TYPE_ETHERNET:
case ZT_ENDPOINT_TYPE_WIFI_DIRECT:
case ZT_ENDPOINT_TYPE_BLUETOOTH:
if (len >= 7) {
MAC tmp;
tmp.setTo(data + 1);
this->value.mac = tmp.toInt();
return 7;
}
return -1;
case ZT_ENDPOINT_TYPE_ETHERNET:
case ZT_ENDPOINT_TYPE_WIFI_DIRECT:
case ZT_ENDPOINT_TYPE_BLUETOOTH:
if (len >= 7) {
MAC tmp;
tmp.setTo(data + 1);
this->value.mac = tmp.toInt();
return 7;
}
return -1;
case ZT_ENDPOINT_TYPE_IP:
case ZT_ENDPOINT_TYPE_IP_UDP:
case ZT_ENDPOINT_TYPE_IP_TCP:
case ZT_ENDPOINT_TYPE_IP_TCP_WS:
return asInetAddress(this->value.ss).unmarshal(data + 1, len - 1);
case ZT_ENDPOINT_TYPE_IP:
case ZT_ENDPOINT_TYPE_IP_UDP:
case ZT_ENDPOINT_TYPE_IP_TCP:
case ZT_ENDPOINT_TYPE_IP_TCP_WS:
return asInetAddress(this->value.ss).unmarshal(data + 1, len - 1);
default:
break;
}
default:
break;
}
// Unrecognized types can still be passed over in a valid stream if they are
// prefixed by a 16-bit size. This allows forward compatibility with future
// endpoint types.
this->type = ZT_ENDPOINT_TYPE_NIL;
if (len < 3)
return -1;
const int unrecLen = 1 + (int) Utils::loadBigEndian<uint16_t>(data + 1);
return (unrecLen > len) ? -1 : unrecLen;
// Unrecognized types can still be passed over in a valid stream if they are
// prefixed by a 16-bit size. This allows forward compatibility with future
// endpoint types.
this->type = ZT_ENDPOINT_TYPE_NIL;
if (len < 3)
return -1;
const int unrecLen = 1 + (int)Utils::loadBigEndian<uint16_t>(data + 1);
return (unrecLen > len) ? -1 : unrecLen;
}
bool Endpoint::operator==(const Endpoint &ep) const noexcept
bool Endpoint::operator==(const Endpoint& ep) const noexcept
{
if (this->type == ep.type) {
switch(this->type) {
case ZT_ENDPOINT_TYPE_ZEROTIER:
return zt() == ep.zt();
case ZT_ENDPOINT_TYPE_ETHERNET:
case ZT_ENDPOINT_TYPE_WIFI_DIRECT:
case ZT_ENDPOINT_TYPE_BLUETOOTH:
return this->value.mac == ep.value.mac;
case ZT_ENDPOINT_TYPE_IP:
case ZT_ENDPOINT_TYPE_IP_UDP:
case ZT_ENDPOINT_TYPE_IP_TCP:
case ZT_ENDPOINT_TYPE_IP_TCP_WS:
return ip() == ep.ip();
default:
return true;
}
}
return false;
if (this->type == ep.type) {
switch (this->type) {
case ZT_ENDPOINT_TYPE_ZEROTIER:
return zt() == ep.zt();
case ZT_ENDPOINT_TYPE_ETHERNET:
case ZT_ENDPOINT_TYPE_WIFI_DIRECT:
case ZT_ENDPOINT_TYPE_BLUETOOTH:
return this->value.mac == ep.value.mac;
case ZT_ENDPOINT_TYPE_IP:
case ZT_ENDPOINT_TYPE_IP_UDP:
case ZT_ENDPOINT_TYPE_IP_TCP:
case ZT_ENDPOINT_TYPE_IP_TCP_WS:
return ip() == ep.ip();
default:
return true;
}
}
return false;
}
bool Endpoint::operator<(const Endpoint &ep) const noexcept
bool Endpoint::operator<(const Endpoint& ep) const noexcept
{
if (this->type == ep.type) {
switch(this->type) {
case ZT_ENDPOINT_TYPE_ZEROTIER:
return zt() < ep.zt();
case ZT_ENDPOINT_TYPE_ETHERNET:
case ZT_ENDPOINT_TYPE_WIFI_DIRECT:
case ZT_ENDPOINT_TYPE_BLUETOOTH:
return this->value.mac < ep.value.mac;
case ZT_ENDPOINT_TYPE_IP:
case ZT_ENDPOINT_TYPE_IP_UDP:
case ZT_ENDPOINT_TYPE_IP_TCP:
case ZT_ENDPOINT_TYPE_IP_TCP_WS:
return ip() < ep.ip();
default:
return true;
}
}
return (int)this->type < (int)ep.type;
if (this->type == ep.type) {
switch (this->type) {
case ZT_ENDPOINT_TYPE_ZEROTIER:
return zt() < ep.zt();
case ZT_ENDPOINT_TYPE_ETHERNET:
case ZT_ENDPOINT_TYPE_WIFI_DIRECT:
case ZT_ENDPOINT_TYPE_BLUETOOTH:
return this->value.mac < ep.value.mac;
case ZT_ENDPOINT_TYPE_IP:
case ZT_ENDPOINT_TYPE_IP_UDP:
case ZT_ENDPOINT_TYPE_IP_TCP:
case ZT_ENDPOINT_TYPE_IP_TCP_WS:
return ip() < ep.ip();
default:
return true;
}
}
return (int)this->type < (int)ep.type;
}
} // namespace ZeroTier
} // namespace ZeroTier
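A hedged round-trip sketch for the marshal/unmarshal pair above (illustrative only, not part of this commit; it assumes Endpoint.hpp and InetAddress as they appear elsewhere in this diff).

// Serialize an IP/UDP endpoint and parse it back; equality should hold.
#include "Endpoint.hpp"

using namespace ZeroTier;

bool endpointMarshalRoundTrip(const InetAddress& addr)
{
    const Endpoint a(addr);                       // defaults to ZT_ENDPOINT_TYPE_IP_UDP
    uint8_t buf[ZT_ENDPOINT_MARSHAL_SIZE_MAX];
    const int w = a.marshal(buf);
    if (w <= 0)
        return false;

    Endpoint b;
    const int r = b.unmarshal(buf, w);
    return (r == w) && (a == b);                  // same byte count consumed, equal endpoints
}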


@@ -14,226 +14,260 @@
#ifndef ZT_ENDPOINT_HPP
#define ZT_ENDPOINT_HPP
#include "Constants.hpp"
#include "InetAddress.hpp"
#include "Address.hpp"
#include "Utils.hpp"
#include "TriviallyCopyable.hpp"
#include "Constants.hpp"
#include "Fingerprint.hpp"
#include "InetAddress.hpp"
#include "MAC.hpp"
#include "TriviallyCopyable.hpp"
#include "Utils.hpp"
#define ZT_ENDPOINT_STRING_SIZE_MAX 256
#define ZT_ENDPOINT_STRING_SIZE_MAX 256
#define ZT_ENDPOINT_MARSHAL_SIZE_MAX 192
namespace ZeroTier {
static_assert((ZT_ENDPOINT_MARSHAL_SIZE_MAX - 1) > ZT_INETADDRESS_MARSHAL_SIZE_MAX, "ZT_ENDPOINT_MARSHAL_SIZE_MAX not large enough");
static_assert((ZT_ENDPOINT_MARSHAL_SIZE_MAX - 1) > sizeof(ZT_Fingerprint), "ZT_ENDPOINT_MARSHAL_SIZE_MAX not large enough");
static_assert((ZT_ENDPOINT_MARSHAL_SIZE_MAX - 1) > sizeof(InetAddress), "ZT_ENDPOINT_MARSHAL_SIZE_MAX not large enough");
static_assert(
(ZT_ENDPOINT_MARSHAL_SIZE_MAX - 1) > ZT_INETADDRESS_MARSHAL_SIZE_MAX,
"ZT_ENDPOINT_MARSHAL_SIZE_MAX not large enough");
static_assert(
(ZT_ENDPOINT_MARSHAL_SIZE_MAX - 1) > sizeof(ZT_Fingerprint),
"ZT_ENDPOINT_MARSHAL_SIZE_MAX not large enough");
static_assert(
(ZT_ENDPOINT_MARSHAL_SIZE_MAX - 1) > sizeof(InetAddress),
"ZT_ENDPOINT_MARSHAL_SIZE_MAX not large enough");
static_assert((ZT_ENDPOINT_MARSHAL_SIZE_MAX - 1) > sizeof(MAC), "ZT_ENDPOINT_MARSHAL_SIZE_MAX not large enough");
static_assert((ZT_ENDPOINT_MARSHAL_SIZE_MAX - 1) > sizeof(Fingerprint), "ZT_ENDPOINT_MARSHAL_SIZE_MAX not large enough");
static_assert(
(ZT_ENDPOINT_MARSHAL_SIZE_MAX - 1) > sizeof(Fingerprint),
"ZT_ENDPOINT_MARSHAL_SIZE_MAX not large enough");
/**
* Endpoint variant specifying some form of network endpoint.
*
*
* This is sort of a superset of InetAddress and for the standard UDP
* protocol marshals and unmarshals to a compatible format. This makes
* it backward compatible with older node versions' protocol fields
* where InetAddress was used as long as only the UDP type is exchanged
* with those nodes.
*/
class Endpoint : public ZT_Endpoint, public TriviallyCopyable
{
public:
/**
* Create a NIL/empty endpoint
*/
ZT_INLINE Endpoint() noexcept
{ memoryZero(this); }
class Endpoint
: public ZT_Endpoint
, public TriviallyCopyable {
public:
/**
* Create a NIL/empty endpoint
*/
ZT_INLINE Endpoint() noexcept
{
memoryZero(this);
}
ZT_INLINE Endpoint(const ZT_Endpoint &ep) noexcept
{ Utils::copy< sizeof(ZT_Endpoint) >((ZT_Endpoint *)this, &ep); }
ZT_INLINE Endpoint(const ZT_Endpoint& ep) noexcept
{
Utils::copy<sizeof(ZT_Endpoint)>((ZT_Endpoint*)this, &ep);
}
/**
* Create an endpoint for a type that uses an IP
*
* @param inaddr IP/port
* @param et Endpoint type (default: IP_UDP)
*/
ZT_INLINE Endpoint(const InetAddress &inaddr, const ZT_EndpointType et = ZT_ENDPOINT_TYPE_IP_UDP) noexcept
{
if (inaddr) {
this->type = et;
Utils::copy< sizeof(struct sockaddr_storage) >(&(this->value.ss), &(inaddr.as.ss));
} else {
memoryZero(this);
}
}
/**
* Create an endpoint for a type that uses an IP
*
* @param inaddr IP/port
* @param et Endpoint type (default: IP_UDP)
*/
ZT_INLINE Endpoint(const InetAddress& inaddr, const ZT_EndpointType et = ZT_ENDPOINT_TYPE_IP_UDP) noexcept
{
if (inaddr) {
this->type = et;
Utils::copy<sizeof(struct sockaddr_storage)>(&(this->value.ss), &(inaddr.as.ss));
}
else {
memoryZero(this);
}
}
/**
* Create an endpoint for ZeroTier relaying (ZEROTIER type)
*
* @param zt_ ZeroTier identity fingerprint
*/
ZT_INLINE Endpoint(const Fingerprint &zt_) noexcept
{
if (zt_) {
this->type = ZT_ENDPOINT_TYPE_ZEROTIER;
this->value.fp = zt_;
} else {
memoryZero(this);
}
}
/**
* Create an endpoint for ZeroTier relaying (ZEROTIER type)
*
* @param zt_ ZeroTier identity fingerprint
*/
ZT_INLINE Endpoint(const Fingerprint& zt_) noexcept
{
if (zt_) {
this->type = ZT_ENDPOINT_TYPE_ZEROTIER;
this->value.fp = zt_;
}
else {
memoryZero(this);
}
}
/**
* Create an endpoint for a type that uses a MAC address
*
* @param eth_ Ethernet address
* @param et Endpoint type (default: ETHERNET)
*/
ZT_INLINE Endpoint(const MAC &eth_, const ZT_EndpointType et = ZT_ENDPOINT_TYPE_ETHERNET) noexcept
{
if (eth_) {
this->type = et;
this->value.mac = eth_.toInt();
} else {
memoryZero(this);
}
}
/**
* Create an endpoint for a type that uses a MAC address
*
* @param eth_ Ethernet address
* @param et Endpoint type (default: ETHERNET)
*/
ZT_INLINE Endpoint(const MAC& eth_, const ZT_EndpointType et = ZT_ENDPOINT_TYPE_ETHERNET) noexcept
{
if (eth_) {
this->type = et;
this->value.mac = eth_.toInt();
}
else {
memoryZero(this);
}
}
/**
* @return True if endpoint type isn't NIL
*/
ZT_INLINE operator bool() const noexcept
{ return this->type != ZT_ENDPOINT_TYPE_NIL; }
/**
* @return True if endpoint type isn't NIL
*/
ZT_INLINE operator bool() const noexcept
{
return this->type != ZT_ENDPOINT_TYPE_NIL;
}
/**
* @return True if this endpoint type has an InetAddress address type and thus ip() is valid
*/
ZT_INLINE bool isInetAddr() const noexcept
{
switch (this->type) {
case ZT_ENDPOINT_TYPE_IP:
case ZT_ENDPOINT_TYPE_IP_UDP:
case ZT_ENDPOINT_TYPE_IP_TCP:
case ZT_ENDPOINT_TYPE_IP_TCP_WS:
return true;
default:
return false;
}
}
/**
* @return True if this endpoint type has an InetAddress address type and thus ip() is valid
*/
ZT_INLINE bool isInetAddr() const noexcept
{
switch (this->type) {
case ZT_ENDPOINT_TYPE_IP:
case ZT_ENDPOINT_TYPE_IP_UDP:
case ZT_ENDPOINT_TYPE_IP_TCP:
case ZT_ENDPOINT_TYPE_IP_TCP_WS:
return true;
default:
return false;
}
}
/**
* Check whether this endpoint's address is the same as another.
*
* Right now this checks whether IPs are equal if both are IP based endpoints.
* Otherwise it checks for simple equality.
*
* @param ep Endpoint to check
* @return True if endpoints seem to refer to the same address/host
*/
ZT_INLINE bool isSameAddress(const Endpoint &ep) const noexcept
{
switch (this->type) {
case ZT_ENDPOINT_TYPE_IP:
case ZT_ENDPOINT_TYPE_IP_UDP:
case ZT_ENDPOINT_TYPE_IP_TCP:
case ZT_ENDPOINT_TYPE_IP_TCP_WS:
switch (ep.type) {
case ZT_ENDPOINT_TYPE_IP:
case ZT_ENDPOINT_TYPE_IP_UDP:
case ZT_ENDPOINT_TYPE_IP_TCP:
case ZT_ENDPOINT_TYPE_IP_TCP_WS:
return ip().ipsEqual(ep.ip());
default:
break;
}
break;
default:
break;
}
return (*this) == ep;
}
/**
* Check whether this endpoint's address is the same as another.
*
* Right now this checks whether IPs are equal if both are IP based endpoints.
* Otherwise it checks for simple equality.
*
* @param ep Endpoint to check
* @return True if endpoints seem to refer to the same address/host
*/
ZT_INLINE bool isSameAddress(const Endpoint& ep) const noexcept
{
switch (this->type) {
case ZT_ENDPOINT_TYPE_IP:
case ZT_ENDPOINT_TYPE_IP_UDP:
case ZT_ENDPOINT_TYPE_IP_TCP:
case ZT_ENDPOINT_TYPE_IP_TCP_WS:
switch (ep.type) {
case ZT_ENDPOINT_TYPE_IP:
case ZT_ENDPOINT_TYPE_IP_UDP:
case ZT_ENDPOINT_TYPE_IP_TCP:
case ZT_ENDPOINT_TYPE_IP_TCP_WS:
return ip().ipsEqual(ep.ip());
default:
break;
}
break;
default:
break;
}
return (*this) == ep;
}
/**
* Get InetAddress if this type uses IPv4 or IPv6 addresses (undefined otherwise)
*
* @return InetAddress instance
*/
ZT_INLINE const InetAddress &ip() const noexcept
{ return asInetAddress(this->value.ss); }
/**
* Get InetAddress if this type uses IPv4 or IPv6 addresses (undefined otherwise)
*
* @return InetAddress instance
*/
ZT_INLINE const InetAddress& ip() const noexcept
{
return asInetAddress(this->value.ss);
}
/**
* Get MAC if this is an Ethernet, WiFi direct, or Bluetooth type (undefined otherwise)
*
* @return Ethernet MAC
*/
ZT_INLINE MAC eth() const noexcept
{ return MAC(this->value.mac); }
/**
* Get MAC if this is an Ethernet, WiFi direct, or Bluetooth type (undefined otherwise)
*
* @return Ethernet MAC
*/
ZT_INLINE MAC eth() const noexcept
{
return MAC(this->value.mac);
}
/**
* Get fingerprint if this is a ZeroTier endpoint type (undefined otherwise)
*
* @return ZeroTier fingerprint
*/
ZT_INLINE Fingerprint zt() const noexcept
{ return Fingerprint(this->value.fp); }
/**
* Get fingerprint if this is a ZeroTier endpoint type (undefined otherwise)
*
* @return ZeroTier fingerprint
*/
ZT_INLINE Fingerprint zt() const noexcept
{
return Fingerprint(this->value.fp);
}
ZT_INLINE unsigned long hashCode() const noexcept
{
switch (this->type) {
default:
return 1;
case ZT_ENDPOINT_TYPE_ZEROTIER:
return (unsigned long)this->value.fp.address;
case ZT_ENDPOINT_TYPE_ETHERNET:
case ZT_ENDPOINT_TYPE_WIFI_DIRECT:
case ZT_ENDPOINT_TYPE_BLUETOOTH:
return (unsigned long)Utils::hash64(this->value.mac);
case ZT_ENDPOINT_TYPE_IP:
case ZT_ENDPOINT_TYPE_IP_UDP:
case ZT_ENDPOINT_TYPE_IP_TCP:
case ZT_ENDPOINT_TYPE_IP_TCP_WS:
return ip().hashCode();
}
}
ZT_INLINE unsigned long hashCode() const noexcept
{
switch (this->type) {
default:
return 1;
case ZT_ENDPOINT_TYPE_ZEROTIER:
return (unsigned long)this->value.fp.address;
case ZT_ENDPOINT_TYPE_ETHERNET:
case ZT_ENDPOINT_TYPE_WIFI_DIRECT:
case ZT_ENDPOINT_TYPE_BLUETOOTH:
return (unsigned long)Utils::hash64(this->value.mac);
case ZT_ENDPOINT_TYPE_IP:
case ZT_ENDPOINT_TYPE_IP_UDP:
case ZT_ENDPOINT_TYPE_IP_TCP:
case ZT_ENDPOINT_TYPE_IP_TCP_WS:
return ip().hashCode();
}
}
char *toString(char s[ZT_ENDPOINT_STRING_SIZE_MAX]) const noexcept;
char* toString(char s[ZT_ENDPOINT_STRING_SIZE_MAX]) const noexcept;
ZT_INLINE String toString() const
{
char tmp[ZT_ENDPOINT_STRING_SIZE_MAX];
return String(toString(tmp));
}
ZT_INLINE String toString() const
{
char tmp[ZT_ENDPOINT_STRING_SIZE_MAX];
return String(toString(tmp));
}
bool fromString(const char *s) noexcept;
bool fromString(const char* s) noexcept;
static constexpr int marshalSizeMax() noexcept
{ return ZT_ENDPOINT_MARSHAL_SIZE_MAX; }
static constexpr int marshalSizeMax() noexcept
{
return ZT_ENDPOINT_MARSHAL_SIZE_MAX;
}
int marshal(uint8_t data[ZT_ENDPOINT_MARSHAL_SIZE_MAX]) const noexcept;
int marshal(uint8_t data[ZT_ENDPOINT_MARSHAL_SIZE_MAX]) const noexcept;
int unmarshal(const uint8_t *restrict data, int len) noexcept;
int unmarshal(const uint8_t* restrict data, int len) noexcept;
bool operator==(const Endpoint &ep) const noexcept;
bool operator==(const Endpoint& ep) const noexcept;
ZT_INLINE bool operator!=(const Endpoint &ep) const noexcept
{ return !((*this) == ep); }
ZT_INLINE bool operator!=(const Endpoint& ep) const noexcept
{
return ! ((*this) == ep);
}
bool operator<(const Endpoint &ep) const noexcept;
bool operator<(const Endpoint& ep) const noexcept;
ZT_INLINE bool operator>(const Endpoint &ep) const noexcept
{ return (ep < *this); }
ZT_INLINE bool operator>(const Endpoint& ep) const noexcept
{
return (ep < *this);
}
ZT_INLINE bool operator<=(const Endpoint &ep) const noexcept
{ return !(ep < *this); }
ZT_INLINE bool operator<=(const Endpoint& ep) const noexcept
{
return ! (ep < *this);
}
ZT_INLINE bool operator>=(const Endpoint &ep) const noexcept
{ return !(*this < ep); }
ZT_INLINE bool operator>=(const Endpoint& ep) const noexcept
{
return ! (*this < ep);
}
};
static_assert(sizeof(Endpoint) == sizeof(ZT_Endpoint), "size mismatch");
} // namespace ZeroTier
} // namespace ZeroTier
#endif
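A short usage sketch for the string forms described above (illustrative only, not part of this commit). The exact text produced by InetAddress::toString() is assumed to be the familiar "ip/port" form, and the bare "IP/port" input relies on the backward-compatibility path in Endpoint::fromString(); the address and port used are hypothetical.

// Parse the legacy "IP/port" form, which is treated as an IP_UDP endpoint.
#include "Endpoint.hpp"
#include <cstdio>

using namespace ZeroTier;

void endpointStringSketch()
{
    Endpoint ep;
    if (ep.fromString("10.0.0.1/9993")) {          // hypothetical address and port
        char tmp[ZT_ENDPOINT_STRING_SIZE_MAX];
        // toString() prefixes the type character, so this prints roughly "u/10.0.0.1/9993".
        std::printf("%s\n", ep.toString(tmp));
    }
}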


@@ -36,41 +36,50 @@ namespace ZeroTier {
/**
* Tracker for expected OK replies to packet IDs of sent packets
*/
class Expect
{
public:
ZT_INLINE Expect() :
m_packetIdSent()
{}
class Expect {
public:
ZT_INLINE Expect() : m_packetIdSent()
{
}
/**
* Called by other code when something is sending a packet that could potentially receive an OK response
*
* @param packetId Packet ID of packet being sent (be sure it's post-armor())
* @param now Current time
*/
ZT_INLINE void sending(const uint64_t packetId, const int64_t now) noexcept
{ m_packetIdSent[Utils::hash64(packetId ^ Utils::s_mapNonce) % ZT_EXPECT_BUCKETS] = (uint32_t)(now / ZT_EXPECT_TTL); }
/**
* Called by other code when something is sending a packet that could potentially receive an OK response
*
* @param packetId Packet ID of packet being sent (be sure it's post-armor())
* @param now Current time
*/
ZT_INLINE void sending(const uint64_t packetId, const int64_t now) noexcept
{
m_packetIdSent[Utils::hash64(packetId ^ Utils::s_mapNonce) % ZT_EXPECT_BUCKETS] =
(uint32_t)(now / ZT_EXPECT_TTL);
}
/**
* Check if an OK is expected and if so reset the corresponding bucket.
*
* This means this call mutates the state. If it returns true, it will
* subsequently return false. This is to filter OKs against replays or
* responses to queries we did not send.
*
* @param inRePacketId In-re packet ID we're expecting
* @param now Current time
* @return True if we're expecting a reply (and a reset occurred)
*/
ZT_INLINE bool expecting(const uint64_t inRePacketId, const int64_t now) noexcept
{ return (((now / ZT_EXPECT_TTL) - (int64_t)m_packetIdSent[(unsigned long)Utils::hash64(inRePacketId ^ Utils::s_mapNonce) % ZT_EXPECT_BUCKETS].exchange(0)) <= 1); }
/**
* Check if an OK is expected and if so reset the corresponding bucket.
*
* This means this call mutates the state. If it returns true, it will
* subsequently return false. This is to filter OKs against replays or
* responses to queries we did not send.
*
* @param inRePacketId In-re packet ID we're expecting
* @param now Current time
* @return True if we're expecting a reply (and a reset occurred)
*/
ZT_INLINE bool expecting(const uint64_t inRePacketId, const int64_t now) noexcept
{
return (
((now / ZT_EXPECT_TTL)
- (int64_t)
m_packetIdSent[(unsigned long)Utils::hash64(inRePacketId ^ Utils::s_mapNonce) % ZT_EXPECT_BUCKETS]
.exchange(0))
<= 1);
}
private:
// Each bucket contains a timestamp in units of the max expect duration.
std::atomic< uint32_t > m_packetIdSent[ZT_EXPECT_BUCKETS];
private:
// Each bucket contains a timestamp in units of the max expect duration.
std::atomic<uint32_t> m_packetIdSent[ZT_EXPECT_BUCKETS];
};
} // namespace ZeroTier
} // namespace ZeroTier
#endif
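The intended call pattern for Expect, sketched below (illustrative only, not part of this commit): record the packet ID when a query is sent, then consume the expectation when the matching OK arrives.

// Hypothetical caller of Expect; packetId and now come from the sending path.
#include "Expect.hpp"

using namespace ZeroTier;

void expectSketch(Expect& expect, const uint64_t packetId, const int64_t now)
{
    // When sending a packet that may be answered with an OK:
    expect.sending(packetId, now);

    // Later, when an OK arrives referencing this packet ID:
    if (expect.expecting(packetId, now)) {
        // The reply was expected and fresh. The bucket was reset by this call,
        // so a replayed OK carrying the same ID will now be rejected.
    }
}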


@@ -16,8 +16,8 @@
#include "Constants.hpp"
#include <iterator>
#include <algorithm>
#include <iterator>
#include <memory>
#include <stdexcept>
@@ -32,272 +32,305 @@ namespace ZeroTier {
* @tparam T Type to contain
* @tparam C Maximum capacity of vector
*/
template< typename T, unsigned int C >
class FCV
{
public:
typedef T *iterator;
typedef const T *const_iterator;
template <typename T, unsigned int C> class FCV {
public:
typedef T* iterator;
typedef const T* const_iterator;
ZT_INLINE FCV() noexcept: _s(0)
{}
ZT_INLINE FCV() noexcept : _s(0)
{
}
ZT_INLINE FCV(const FCV &v) : _s(0)
{ *this = v; }
ZT_INLINE FCV(const FCV& v) : _s(0)
{
*this = v;
}
ZT_INLINE FCV(const T *const contents, const unsigned int len) :
_s(len)
{
const unsigned int l = std::min(len, C);
for (unsigned int i = 0; i < l; ++i)
new(reinterpret_cast<T *>(_m) + i) T(contents[i]);
}
ZT_INLINE FCV(const T* const contents, const unsigned int len) : _s(len)
{
const unsigned int l = std::min(len, C);
for (unsigned int i = 0; i < l; ++i)
new (reinterpret_cast<T*>(_m) + i) T(contents[i]);
}
template< typename I >
ZT_INLINE FCV(I i, I end) :
_s(0)
{
while (i != end) {
push_back(*i);
++i;
}
}
template <typename I> ZT_INLINE FCV(I i, I end) : _s(0)
{
while (i != end) {
push_back(*i);
++i;
}
}
ZT_INLINE ~FCV()
{ this->clear(); }
ZT_INLINE ~FCV()
{
this->clear();
}
ZT_INLINE FCV &operator=(const FCV &v)
{
if (likely(&v != this)) {
this->clear();
const unsigned int s = v._s;
_s = s;
for (unsigned int i = 0; i < s; ++i)
new(reinterpret_cast<T *>(_m) + i) T(*(reinterpret_cast<const T *>(v._m) + i));
}
return *this;
}
ZT_INLINE FCV& operator=(const FCV& v)
{
if (likely(&v != this)) {
this->clear();
const unsigned int s = v._s;
_s = s;
for (unsigned int i = 0; i < s; ++i)
new (reinterpret_cast<T*>(_m) + i) T(*(reinterpret_cast<const T*>(v._m) + i));
}
return *this;
}
/**
* Clear this vector, destroying all content objects
*/
ZT_INLINE void clear()
{
const unsigned int s = _s;
_s = 0;
for (unsigned int i = 0; i < s; ++i)
(reinterpret_cast<T *>(_m) + i)->~T();
}
/**
* Clear this vector, destroying all content objects
*/
ZT_INLINE void clear()
{
const unsigned int s = _s;
_s = 0;
for (unsigned int i = 0; i < s; ++i)
(reinterpret_cast<T*>(_m) + i)->~T();
}
/**
* Move contents from this vector to another and clear this vector.
*
* @param v Target vector
*/
ZT_INLINE void unsafeMoveTo(FCV &v) noexcept
{
Utils::copy(v._m, _m, (v._s = _s) * sizeof(T));
_s = 0;
}
/**
* Move contents from this vector to another and clear this vector.
*
* @param v Target vector
*/
ZT_INLINE void unsafeMoveTo(FCV& v) noexcept
{
Utils::copy(v._m, _m, (v._s = _s) * sizeof(T));
_s = 0;
}
ZT_INLINE iterator begin() noexcept
{ return reinterpret_cast<T *>(_m); }
ZT_INLINE iterator begin() noexcept
{
return reinterpret_cast<T*>(_m);
}
ZT_INLINE iterator end() noexcept
{ return reinterpret_cast<T *>(_m) + _s; }
ZT_INLINE iterator end() noexcept
{
return reinterpret_cast<T*>(_m) + _s;
}
ZT_INLINE const_iterator begin() const noexcept
{ return reinterpret_cast<const T *>(_m); }
ZT_INLINE const_iterator begin() const noexcept
{
return reinterpret_cast<const T*>(_m);
}
ZT_INLINE const_iterator end() const noexcept
{ return reinterpret_cast<const T *>(_m) + _s; }
ZT_INLINE const_iterator end() const noexcept
{
return reinterpret_cast<const T*>(_m) + _s;
}
ZT_INLINE T &operator[](const unsigned int i)
{
if (likely(i < _s))
return reinterpret_cast<T *>(_m)[i];
throw Utils::OutOfRangeException;
}
ZT_INLINE T& operator[](const unsigned int i)
{
if (likely(i < _s))
return reinterpret_cast<T*>(_m)[i];
throw Utils::OutOfRangeException;
}
ZT_INLINE const T &operator[](const unsigned int i) const
{
if (likely(i < _s))
return reinterpret_cast<const T *>(_m)[i];
throw Utils::OutOfRangeException;
}
ZT_INLINE const T& operator[](const unsigned int i) const
{
if (likely(i < _s))
return reinterpret_cast<const T*>(_m)[i];
throw Utils::OutOfRangeException;
}
static constexpr unsigned int capacity() noexcept
{ return C; }
static constexpr unsigned int capacity() noexcept
{
return C;
}
ZT_INLINE unsigned int size() const noexcept
{ return _s; }
ZT_INLINE unsigned int size() const noexcept
{
return _s;
}
ZT_INLINE bool empty() const noexcept
{ return (_s == 0); }
ZT_INLINE bool empty() const noexcept
{
return (_s == 0);
}
ZT_INLINE T *data() noexcept
{ return reinterpret_cast<T *>(_m); }
ZT_INLINE T* data() noexcept
{
return reinterpret_cast<T*>(_m);
}
ZT_INLINE const T *data() const noexcept
{ return reinterpret_cast<const T *>(_m); }
ZT_INLINE const T* data() const noexcept
{
return reinterpret_cast<const T*>(_m);
}
/**
* Push a value onto the back of this vector
*
* If the vector is at capacity this throws an out-of-range exception.
*
* @param v Value to push
*/
ZT_INLINE void push_back(const T &v)
{
if (likely(_s < C))
new(reinterpret_cast<T *>(_m) + _s++) T(v);
else throw Utils::OutOfRangeException;
}
/**
* Push a value onto the back of this vector
*
* If the vector is at capacity this throws an out-of-range exception.
*
* @param v Value to push
*/
ZT_INLINE void push_back(const T& v)
{
if (likely(_s < C))
new (reinterpret_cast<T*>(_m) + _s++) T(v);
else
throw Utils::OutOfRangeException;
}
/**
* Push new default value or return last in vector if full.
*
* @return Reference to new item
*/
ZT_INLINE T &push()
{
if (likely(_s < C)) {
return *(new(reinterpret_cast<T *>(_m) + _s++) T());
} else {
return *(reinterpret_cast<T *>(_m) + (C - 1));
}
}
/**
* Push new default value or return last in vector if full.
*
* @return Reference to new item
*/
ZT_INLINE T& push()
{
if (likely(_s < C)) {
return *(new (reinterpret_cast<T*>(_m) + _s++) T());
}
else {
return *(reinterpret_cast<T*>(_m) + (C - 1));
}
}
/**
* Push new default value or replace and return last in vector if full.
*
* @return Reference to new item
*/
ZT_INLINE T &push(const T &v)
{
if (likely(_s < C)) {
return *(new(reinterpret_cast<T *>(_m) + _s++) T(v));
} else {
T &tmp = *(reinterpret_cast<T *>(_m) + (C - 1));
tmp = v;
return tmp;
}
}
/**
* Push new default value or replace and return last in vector if full.
*
* @return Reference to new item
*/
ZT_INLINE T& push(const T& v)
{
if (likely(_s < C)) {
return *(new (reinterpret_cast<T*>(_m) + _s++) T(v));
}
else {
T& tmp = *(reinterpret_cast<T*>(_m) + (C - 1));
tmp = v;
return tmp;
}
}
/**
* Remove the last element if this vector is not empty
*/
ZT_INLINE void pop_back()
{
if (likely(_s != 0))
(reinterpret_cast<T *>(_m) + --_s)->~T();
}
/**
* Remove the last element if this vector is not empty
*/
ZT_INLINE void pop_back()
{
if (likely(_s != 0))
(reinterpret_cast<T*>(_m) + --_s)->~T();
}
/**
* Resize vector
*
* @param ns New size (throws if larger than capacity C)
*/
ZT_INLINE void resize(unsigned int ns)
{
if (unlikely(ns > C))
throw Utils::OutOfRangeException;
unsigned int s = _s;
while (s < ns)
new(reinterpret_cast<T *>(_m) + s++) T();
while (s > ns)
(reinterpret_cast<T *>(_m) + --s)->~T();
_s = s;
}
/**
* Resize vector
*
* @param ns New size (throws if larger than capacity C)
*/
ZT_INLINE void resize(unsigned int ns)
{
if (unlikely(ns > C))
throw Utils::OutOfRangeException;
unsigned int s = _s;
while (s < ns)
new (reinterpret_cast<T*>(_m) + s++) T();
while (s > ns)
(reinterpret_cast<T*>(_m) + --s)->~T();
_s = s;
}
/**
* Set the size of this vector without otherwise changing anything
*
* @param ns New size
*/
ZT_INLINE void unsafeSetSize(unsigned int ns)
{ _s = ns; }
/**
* Set the size of this vector without otherwise changing anything
*
* @param ns New size
*/
ZT_INLINE void unsafeSetSize(unsigned int ns)
{
_s = ns;
}
/**
* This is a bounds checked auto-resizing variant of the [] operator
*
* If 'i' is out of bounds vs the current size of the vector, the vector is
* resized. If that size would exceed C (capacity), 'i' is clipped to C-1.
*
* @param i Index to obtain as a reference, resizing if needed
* @return Reference to value at this index
*/
ZT_INLINE T &at(unsigned int i)
{
if (i >= _s) {
if (unlikely(i >= C))
i = C - 1;
do {
new(reinterpret_cast<T *>(_m) + _s++) T();
} while (i >= _s);
}
return *(reinterpret_cast<T *>(_m) + i);
}
/**
* This is a bounds checked auto-resizing variant of the [] operator
*
* If 'i' is out of bounds vs the current size of the vector, the vector is
* resized. If that size would exceed C (capacity), 'i' is clipped to C-1.
*
* @param i Index to obtain as a reference, resizing if needed
* @return Reference to value at this index
*/
ZT_INLINE T& at(unsigned int i)
{
if (i >= _s) {
if (unlikely(i >= C))
i = C - 1;
do {
new (reinterpret_cast<T*>(_m) + _s++) T();
} while (i >= _s);
}
return *(reinterpret_cast<T*>(_m) + i);
}
/**
* Assign this vector's contents from a range of pointers or iterators
*
* If the range is larger than C it is truncated at C.
*
* @tparam X Inferred type of iterators or pointers
* @param start Starting iterator
* @param end Ending iterator (must be greater than start)
*/
template< typename X >
ZT_INLINE void assign(X start, const X &end)
{
const int l = std::min((int)std::distance(start, end), (int)C);
if (l > 0) {
this->resize((unsigned int)l);
for (int i = 0; i < l; ++i)
reinterpret_cast<T *>(_m)[i] = *(start++);
} else {
this->clear();
}
}
/**
* Assign this vector's contents from a range of pointers or iterators
*
* If the range is larger than C it is truncated at C.
*
* @tparam X Inferred type of iterators or pointers
* @param start Starting iterator
* @param end Ending iterator (must be greater than start)
*/
template <typename X> ZT_INLINE void assign(X start, const X& end)
{
const int l = std::min((int)std::distance(start, end), (int)C);
if (l > 0) {
this->resize((unsigned int)l);
for (int i = 0; i < l; ++i)
reinterpret_cast<T*>(_m)[i] = *(start++);
}
else {
this->clear();
}
}
ZT_INLINE bool operator==(const FCV &v) const noexcept
{
if (_s == v._s) {
for (unsigned int i = 0; i < _s; ++i) {
if (!(*(reinterpret_cast<const T *>(_m) + i) == *(reinterpret_cast<const T *>(v._m) + i)))
return false;
}
return true;
}
return false;
}
ZT_INLINE bool operator==(const FCV& v) const noexcept
{
if (_s == v._s) {
for (unsigned int i = 0; i < _s; ++i) {
if (! (*(reinterpret_cast<const T*>(_m) + i) == *(reinterpret_cast<const T*>(v._m) + i)))
return false;
}
return true;
}
return false;
}
ZT_INLINE bool operator!=(const FCV &v) const noexcept
{ return !(*this == v); }
ZT_INLINE bool operator!=(const FCV& v) const noexcept
{
return ! (*this == v);
}
ZT_INLINE bool operator<(const FCV &v) const noexcept
{ return std::lexicographical_compare(begin(), end(), v.begin(), v.end()); }
ZT_INLINE bool operator<(const FCV& v) const noexcept
{
return std::lexicographical_compare(begin(), end(), v.begin(), v.end());
}
ZT_INLINE bool operator>(const FCV &v) const noexcept
{ return (v < *this); }
ZT_INLINE bool operator>(const FCV& v) const noexcept
{
return (v < *this);
}
ZT_INLINE bool operator<=(const FCV &v) const noexcept
{ return v >= *this; }
ZT_INLINE bool operator<=(const FCV& v) const noexcept
{
return v >= *this;
}
ZT_INLINE bool operator>=(const FCV &v) const noexcept
{ return !(*this < v); }
ZT_INLINE bool operator>=(const FCV& v) const noexcept
{
return ! (*this < v);
}
private:
private:
#ifdef _MSC_VER
uint8_t _m[sizeof(T) * C];
uint8_t _m[sizeof(T) * C];
#else
__attribute__((aligned(16))) uint8_t _m[sizeof(T) * C];
__attribute__((aligned(16))) uint8_t _m[sizeof(T) * C];
#endif
unsigned int _s;
unsigned int _s;
};
} // namespace ZeroTier
} // namespace ZeroTier
#endif
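A small usage sketch for FCV (illustrative only, not part of this commit), showing the fixed-capacity semantics described above.

// FCV holds its elements inline; exceeding the capacity C throws.
#include "FCV.hpp"

using namespace ZeroTier;

int fcvSketch()
{
    FCV<int, 8> v;                    // stack-allocated, at most 8 ints
    for (int i = 0; i < 8; ++i)
        v.push_back(i);               // a 9th push_back() would throw Utils::OutOfRangeException

    int sum = 0;
    for (FCV<int, 8>::const_iterator it = v.begin(); it != v.end(); ++it)
        sum += *it;

    v.pop_back();                     // destroys the last element and shrinks the size
    return sum;                       // 0 + 1 + ... + 7 == 28
}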


@@ -14,13 +14,13 @@
#ifndef ZT_FINGERPRINT_HPP
#define ZT_FINGERPRINT_HPP
#include "Address.hpp"
#include "Constants.hpp"
#include "TriviallyCopyable.hpp"
#include "Address.hpp"
#include "Utils.hpp"
#define ZT_FINGERPRINT_STRING_SIZE_MAX 128
#define ZT_FINGERPRINT_MARSHAL_SIZE 53
#define ZT_FINGERPRINT_MARSHAL_SIZE 53
namespace ZeroTier {
@@ -30,117 +30,152 @@ namespace ZeroTier {
* This is the same size as ZT_Fingerprint and should be cast-able back and forth.
* This is checked in Tests.cpp.
*/
class Fingerprint : public ZT_Fingerprint, public TriviallyCopyable
{
public:
ZT_INLINE Fingerprint() noexcept
{ memoryZero(this); }
class Fingerprint
: public ZT_Fingerprint
, public TriviallyCopyable {
public:
ZT_INLINE Fingerprint() noexcept
{
memoryZero(this);
}
ZT_INLINE Fingerprint(const ZT_Fingerprint &fp) noexcept
{ Utils::copy< sizeof(ZT_Fingerprint) >(this, &fp); }
ZT_INLINE Fingerprint(const ZT_Fingerprint& fp) noexcept
{
Utils::copy<sizeof(ZT_Fingerprint)>(this, &fp);
}
/**
* @return True if hash is not all zero (missing/unspecified)
*/
ZT_INLINE bool haveHash() const noexcept
{ return (!Utils::allZero(this->hash, ZT_FINGERPRINT_HASH_SIZE)); }
/**
* @return True if hash is not all zero (missing/unspecified)
*/
ZT_INLINE bool haveHash() const noexcept
{
return (! Utils::allZero(this->hash, ZT_FINGERPRINT_HASH_SIZE));
}
/**
* Get a base32-encoded representation of this fingerprint
*
* @param s Base32 string
*/
ZT_INLINE char *toString(char s[ZT_FINGERPRINT_STRING_SIZE_MAX]) const noexcept
{
Address(this->address).toString(s);
if (haveHash()) {
s[ZT_ADDRESS_LENGTH_HEX] = '-';
Utils::b32e(this->hash, ZT_FINGERPRINT_HASH_SIZE, s + (ZT_ADDRESS_LENGTH_HEX + 1), ZT_FINGERPRINT_STRING_SIZE_MAX - (ZT_ADDRESS_LENGTH_HEX + 1));
}
return s;
}
/**
* Get a base32-encoded representation of this fingerprint
*
* @param s Base32 string
*/
ZT_INLINE char* toString(char s[ZT_FINGERPRINT_STRING_SIZE_MAX]) const noexcept
{
Address(this->address).toString(s);
if (haveHash()) {
s[ZT_ADDRESS_LENGTH_HEX] = '-';
Utils::b32e(
this->hash,
ZT_FINGERPRINT_HASH_SIZE,
s + (ZT_ADDRESS_LENGTH_HEX + 1),
ZT_FINGERPRINT_STRING_SIZE_MAX - (ZT_ADDRESS_LENGTH_HEX + 1));
}
return s;
}
ZT_INLINE String toString() const
{
char tmp[ZT_FINGERPRINT_STRING_SIZE_MAX];
return String(toString(tmp));
}
ZT_INLINE String toString() const
{
char tmp[ZT_FINGERPRINT_STRING_SIZE_MAX];
return String(toString(tmp));
}
/**
* Set this fingerprint to a base32-encoded string
*
* @param s String to decode
* @return True if string appears to be valid and of the proper length (no other checking is done)
*/
ZT_INLINE bool fromString(const char *const s) noexcept
{
if (!s)
return false;
const int l = (int)strlen(s);
if (l < ZT_ADDRESS_LENGTH_HEX)
return false;
char a[ZT_ADDRESS_LENGTH_HEX + 1];
Utils::copy< ZT_ADDRESS_LENGTH_HEX >(a, s);
a[ZT_ADDRESS_LENGTH_HEX] = 0;
this->address = Utils::hexStrToU64(a) & ZT_ADDRESS_MASK;
if (l > (ZT_ADDRESS_LENGTH_HEX + 1)) {
if (Utils::b32d(s + (ZT_ADDRESS_LENGTH_HEX + 1), this->hash, ZT_FINGERPRINT_HASH_SIZE) != ZT_FINGERPRINT_HASH_SIZE)
return false;
} else {
Utils::zero< ZT_FINGERPRINT_HASH_SIZE >(this->hash);
}
return true;
}
/**
* Set this fingerprint to a base32-encoded string
*
* @param s String to decode
* @return True if string appears to be valid and of the proper length (no other checking is done)
*/
ZT_INLINE bool fromString(const char* const s) noexcept
{
if (! s)
return false;
const int l = (int)strlen(s);
if (l < ZT_ADDRESS_LENGTH_HEX)
return false;
char a[ZT_ADDRESS_LENGTH_HEX + 1];
Utils::copy<ZT_ADDRESS_LENGTH_HEX>(a, s);
a[ZT_ADDRESS_LENGTH_HEX] = 0;
this->address = Utils::hexStrToU64(a) & ZT_ADDRESS_MASK;
if (l > (ZT_ADDRESS_LENGTH_HEX + 1)) {
if (Utils::b32d(s + (ZT_ADDRESS_LENGTH_HEX + 1), this->hash, ZT_FINGERPRINT_HASH_SIZE)
!= ZT_FINGERPRINT_HASH_SIZE)
return false;
}
else {
Utils::zero<ZT_FINGERPRINT_HASH_SIZE>(this->hash);
}
return true;
}
ZT_INLINE void zero() noexcept
{ memoryZero(this); }
ZT_INLINE void zero() noexcept
{
memoryZero(this);
}
ZT_INLINE unsigned long hashCode() const noexcept
{ return (unsigned long)this->address; }
ZT_INLINE unsigned long hashCode() const noexcept
{
return (unsigned long)this->address;
}
ZT_INLINE operator bool() const noexcept
{ return this->address != 0; }
ZT_INLINE operator bool() const noexcept
{
return this->address != 0;
}
static constexpr int marshalSizeMax() noexcept
{ return ZT_FINGERPRINT_MARSHAL_SIZE; }
static constexpr int marshalSizeMax() noexcept
{
return ZT_FINGERPRINT_MARSHAL_SIZE;
}
ZT_INLINE int marshal(uint8_t data[ZT_FINGERPRINT_MARSHAL_SIZE]) const noexcept
{
Address(this->address).copyTo(data);
Utils::copy< ZT_FINGERPRINT_HASH_SIZE >(data + ZT_ADDRESS_LENGTH, this->hash);
return ZT_FINGERPRINT_MARSHAL_SIZE;
}
ZT_INLINE int marshal(uint8_t data[ZT_FINGERPRINT_MARSHAL_SIZE]) const noexcept
{
Address(this->address).copyTo(data);
Utils::copy<ZT_FINGERPRINT_HASH_SIZE>(data + ZT_ADDRESS_LENGTH, this->hash);
return ZT_FINGERPRINT_MARSHAL_SIZE;
}
ZT_INLINE int unmarshal(const uint8_t *const data, int len) noexcept
{
if (unlikely(len < ZT_FINGERPRINT_MARSHAL_SIZE))
return -1;
this->address = Address(data);
Utils::copy< ZT_FINGERPRINT_HASH_SIZE >(hash, data + ZT_ADDRESS_LENGTH);
return ZT_FINGERPRINT_MARSHAL_SIZE;
}
ZT_INLINE int unmarshal(const uint8_t* const data, int len) noexcept
{
if (unlikely(len < ZT_FINGERPRINT_MARSHAL_SIZE))
return -1;
this->address = Address(data);
Utils::copy<ZT_FINGERPRINT_HASH_SIZE>(hash, data + ZT_ADDRESS_LENGTH);
return ZT_FINGERPRINT_MARSHAL_SIZE;
}
ZT_INLINE bool operator==(const ZT_Fingerprint &h) const noexcept
{ return ((this->address == h.address) && (memcmp(this->hash, h.hash, ZT_FINGERPRINT_HASH_SIZE) == 0)); }
ZT_INLINE bool operator==(const ZT_Fingerprint& h) const noexcept
{
return ((this->address == h.address) && (memcmp(this->hash, h.hash, ZT_FINGERPRINT_HASH_SIZE) == 0));
}
ZT_INLINE bool operator!=(const ZT_Fingerprint &h) const noexcept
{ return !(*this == h); }
ZT_INLINE bool operator!=(const ZT_Fingerprint& h) const noexcept
{
return ! (*this == h);
}
ZT_INLINE bool operator<(const ZT_Fingerprint &h) const noexcept
{ return ((this->address < h.address) || ((this->address == h.address) && (memcmp(this->hash, h.hash, ZT_FINGERPRINT_HASH_SIZE) < 0))); }
ZT_INLINE bool operator<(const ZT_Fingerprint& h) const noexcept
{
return (
(this->address < h.address)
|| ((this->address == h.address) && (memcmp(this->hash, h.hash, ZT_FINGERPRINT_HASH_SIZE) < 0)));
}
ZT_INLINE bool operator>(const ZT_Fingerprint &h) const noexcept
{ return (*reinterpret_cast<const Fingerprint *>(&h) < *this); }
ZT_INLINE bool operator>(const ZT_Fingerprint& h) const noexcept
{
return (*reinterpret_cast<const Fingerprint*>(&h) < *this);
}
ZT_INLINE bool operator<=(const ZT_Fingerprint &h) const noexcept
{ return !(*reinterpret_cast<const Fingerprint *>(&h) < *this); }
ZT_INLINE bool operator<=(const ZT_Fingerprint& h) const noexcept
{
return ! (*reinterpret_cast<const Fingerprint*>(&h) < *this);
}
ZT_INLINE bool operator>=(const ZT_Fingerprint &h) const noexcept
{ return !(*this < h); }
ZT_INLINE bool operator>=(const ZT_Fingerprint& h) const noexcept
{
return ! (*this < h);
}
};
static_assert(sizeof(Fingerprint) == sizeof(ZT_Fingerprint), "size mismatch");
} // namespace ZeroTier
} // namespace ZeroTier
#endif
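A round-trip sketch for the fingerprint string form (illustrative only, not part of this commit), assuming the address-plus-optional-base32-hash format documented above; the example string in the comment is hypothetical.

// Convert a fingerprint to its string form and parse it back.
#include "Fingerprint.hpp"

using namespace ZeroTier;

bool fingerprintStringRoundTrip(const Fingerprint& fp)
{
    char tmp[ZT_FINGERPRINT_STRING_SIZE_MAX];
    fp.toString(tmp);                 // e.g. "aabbccddee" or "aabbccddee-<base32 hash>" when a hash is present

    Fingerprint parsed;
    if (! parsed.fromString(tmp))
        return false;
    return parsed == fp;              // address and hash should survive the round trip
}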


@@ -11,13 +11,14 @@
*/
/****/
#include "Constants.hpp"
#include "Identity.hpp"
#include "Constants.hpp"
#include "Endpoint.hpp"
#include "MIMC52.hpp"
#include "SHA512.hpp"
#include "Salsa20.hpp"
#include "Utils.hpp"
#include "Endpoint.hpp"
#include "MIMC52.hpp"
#include <memory>
#include <utility>
@@ -29,479 +30,511 @@ namespace {
// This is the memory-intensive hash function used to compute v0 identities from v0 public keys.
#define ZT_V0_IDENTITY_GEN_MEMORY 2097152
void identityV0ProofOfWorkFrankenhash(const void *const restrict c25519CombinedPublicKey, void *const restrict digest, void *const restrict genmem) noexcept
void identityV0ProofOfWorkFrankenhash(
const void* const restrict c25519CombinedPublicKey,
void* const restrict digest,
void* const restrict genmem) noexcept
{
// Digest publicKey[] to obtain initial digest
SHA512(digest, c25519CombinedPublicKey, ZT_C25519_COMBINED_PUBLIC_KEY_SIZE);
// Digest publicKey[] to obtain initial digest
SHA512(digest, c25519CombinedPublicKey, ZT_C25519_COMBINED_PUBLIC_KEY_SIZE);
// Initialize genmem[] using Salsa20 in a CBC-like configuration since
// ordinary Salsa20 is randomly seek-able. This is good for a cipher
// but is not what we want for sequential memory-hardness.
Utils::zero< ZT_V0_IDENTITY_GEN_MEMORY >(genmem);
Salsa20 s20(digest, (char *)digest + 32);
s20.crypt20((char *)genmem, (char *)genmem, 64);
for (unsigned long i = 64; i < ZT_V0_IDENTITY_GEN_MEMORY; i += 64) {
unsigned long k = i - 64;
*((uint64_t *)((char *)genmem + i)) = *((uint64_t *)((char *)genmem + k));
*((uint64_t *)((char *)genmem + i + 8)) = *((uint64_t *)((char *)genmem + k + 8));
*((uint64_t *)((char *)genmem + i + 16)) = *((uint64_t *)((char *)genmem + k + 16));
*((uint64_t *)((char *)genmem + i + 24)) = *((uint64_t *)((char *)genmem + k + 24));
*((uint64_t *)((char *)genmem + i + 32)) = *((uint64_t *)((char *)genmem + k + 32));
*((uint64_t *)((char *)genmem + i + 40)) = *((uint64_t *)((char *)genmem + k + 40));
*((uint64_t *)((char *)genmem + i + 48)) = *((uint64_t *)((char *)genmem + k + 48));
*((uint64_t *)((char *)genmem + i + 56)) = *((uint64_t *)((char *)genmem + k + 56));
s20.crypt20((char *)genmem + i, (char *)genmem + i, 64);
}
// Initialize genmem[] using Salsa20 in a CBC-like configuration since
// ordinary Salsa20 is randomly seek-able. This is good for a cipher
// but is not what we want for sequential memory-hardness.
Utils::zero<ZT_V0_IDENTITY_GEN_MEMORY>(genmem);
Salsa20 s20(digest, (char*)digest + 32);
s20.crypt20((char*)genmem, (char*)genmem, 64);
for (unsigned long i = 64; i < ZT_V0_IDENTITY_GEN_MEMORY; i += 64) {
unsigned long k = i - 64;
*((uint64_t*)((char*)genmem + i)) = *((uint64_t*)((char*)genmem + k));
*((uint64_t*)((char*)genmem + i + 8)) = *((uint64_t*)((char*)genmem + k + 8));
*((uint64_t*)((char*)genmem + i + 16)) = *((uint64_t*)((char*)genmem + k + 16));
*((uint64_t*)((char*)genmem + i + 24)) = *((uint64_t*)((char*)genmem + k + 24));
*((uint64_t*)((char*)genmem + i + 32)) = *((uint64_t*)((char*)genmem + k + 32));
*((uint64_t*)((char*)genmem + i + 40)) = *((uint64_t*)((char*)genmem + k + 40));
*((uint64_t*)((char*)genmem + i + 48)) = *((uint64_t*)((char*)genmem + k + 48));
*((uint64_t*)((char*)genmem + i + 56)) = *((uint64_t*)((char*)genmem + k + 56));
s20.crypt20((char*)genmem + i, (char*)genmem + i, 64);
}
// Render final digest using genmem as a lookup table
for (unsigned long i = 0; i < (ZT_V0_IDENTITY_GEN_MEMORY / sizeof(uint64_t));) {
unsigned long idx1 = (unsigned long)(Utils::ntoh(((uint64_t *)genmem)[i++]) % (64 / sizeof(uint64_t)));
unsigned long idx2 = (unsigned long)(Utils::ntoh(((uint64_t *)genmem)[i++]) % (ZT_V0_IDENTITY_GEN_MEMORY / sizeof(uint64_t)));
uint64_t tmp = ((uint64_t *)genmem)[idx2];
((uint64_t *)genmem)[idx2] = ((uint64_t *)digest)[idx1];
((uint64_t *)digest)[idx1] = tmp;
s20.crypt20(digest, digest, 64);
}
// Render final digest using genmem as a lookup table
for (unsigned long i = 0; i < (ZT_V0_IDENTITY_GEN_MEMORY / sizeof(uint64_t));) {
unsigned long idx1 = (unsigned long)(Utils::ntoh(((uint64_t*)genmem)[i++]) % (64 / sizeof(uint64_t)));
unsigned long idx2 =
(unsigned long)(Utils::ntoh(((uint64_t*)genmem)[i++]) % (ZT_V0_IDENTITY_GEN_MEMORY / sizeof(uint64_t)));
uint64_t tmp = ((uint64_t*)genmem)[idx2];
((uint64_t*)genmem)[idx2] = ((uint64_t*)digest)[idx1];
((uint64_t*)digest)[idx1] = tmp;
s20.crypt20(digest, digest, 64);
}
}
struct identityV0ProofOfWorkCriteria
{
ZT_INLINE identityV0ProofOfWorkCriteria(unsigned char *restrict sb, char *restrict gm) noexcept: digest(sb), genmem(gm)
{}
struct identityV0ProofOfWorkCriteria {
ZT_INLINE identityV0ProofOfWorkCriteria(unsigned char* restrict sb, char* restrict gm) noexcept
: digest(sb)
, genmem(gm)
{
}
ZT_INLINE bool operator()(const uint8_t pub[ZT_C25519_COMBINED_PUBLIC_KEY_SIZE]) const noexcept
{
identityV0ProofOfWorkFrankenhash(pub, digest, genmem);
return (digest[0] < 17);
}
ZT_INLINE bool operator()(const uint8_t pub[ZT_C25519_COMBINED_PUBLIC_KEY_SIZE]) const noexcept
{
identityV0ProofOfWorkFrankenhash(pub, digest, genmem);
return (digest[0] < 17);
}
unsigned char *restrict digest;
char *restrict genmem;
unsigned char* restrict digest;
char* restrict genmem;
};
void v1ChallengeFromPub(const uint8_t pub[ZT_IDENTITY_P384_COMPOUND_PUBLIC_KEY_SIZE], uint64_t challenge[4])
{
// This builds a 256-bit challenge by XORing the two public keys together. This doesn't need to be
// a hash, just different for different public keys. Public keys are basically kind of hashes of
// private keys, so that's good enough. This is only used to seed a PRNG in MIMC52 for a proof of
// sequential work. It's not used for authentication beyond checking PoW.
Utils::copy< 32 >(challenge, pub + 7);
challenge[0] ^= Utils::loadMachineEndian< uint64_t >(pub + 40);
challenge[1] ^= Utils::loadMachineEndian< uint64_t >(pub + 48);
challenge[2] ^= Utils::loadMachineEndian< uint64_t >(pub + 56);
challenge[3] ^= Utils::loadMachineEndian< uint64_t >(pub + 64);
challenge[0] ^= Utils::loadMachineEndian< uint64_t >(pub + 72);
challenge[1] ^= Utils::loadMachineEndian< uint64_t >(pub + 80);
challenge[2] ^= Utils::loadMachineEndian< uint64_t >(pub + 88);
challenge[3] ^= Utils::loadMachineEndian< uint64_t >(pub + 96);
challenge[0] ^= Utils::loadMachineEndian< uint64_t >(pub + 104);
challenge[1] ^= Utils::loadMachineEndian< uint64_t >(pub + 112);
// This builds a 256-bit challenge by XORing the two public keys together. This doesn't need to be
// a hash, just different for different public keys. Public keys are basically kind of hashes of
// private keys, so that's good enough. This is only used to seed a PRNG in MIMC52 for a proof of
// sequential work. It's not used for authentication beyond checking PoW.
Utils::copy<32>(challenge, pub + 7);
challenge[0] ^= Utils::loadMachineEndian<uint64_t>(pub + 40);
challenge[1] ^= Utils::loadMachineEndian<uint64_t>(pub + 48);
challenge[2] ^= Utils::loadMachineEndian<uint64_t>(pub + 56);
challenge[3] ^= Utils::loadMachineEndian<uint64_t>(pub + 64);
challenge[0] ^= Utils::loadMachineEndian<uint64_t>(pub + 72);
challenge[1] ^= Utils::loadMachineEndian<uint64_t>(pub + 80);
challenge[2] ^= Utils::loadMachineEndian<uint64_t>(pub + 88);
challenge[3] ^= Utils::loadMachineEndian<uint64_t>(pub + 96);
challenge[0] ^= Utils::loadMachineEndian<uint64_t>(pub + 104);
challenge[1] ^= Utils::loadMachineEndian<uint64_t>(pub + 112);
}
} // anonymous namespace
} // anonymous namespace
const Identity Identity::NIL;
bool Identity::generate(const Type t)
{
m_type = t;
m_hasPrivate = true;
m_type = t;
m_hasPrivate = true;
switch (t) {
switch (t) {
case C25519: {
// Generate C25519/Ed25519 key pair whose hash satisfies a "hashcash" criterion and generate the
// address from the last 40 bits of this hash. This is different from the fingerprint hash for V0.
uint8_t digest[64];
char* const genmem = new char[ZT_V0_IDENTITY_GEN_MEMORY];
Address address;
do {
C25519::generateSatisfying(identityV0ProofOfWorkCriteria(digest, genmem), m_pub, m_priv);
address.setTo(digest + 59);
} while (address.isReserved());
delete[] genmem;
m_fp.address = address; // address comes from PoW hash for type 0 identities
m_computeHash();
} break;
case C25519: {
// Generate C25519/Ed25519 key pair whose hash satisfies a "hashcash" criterion and generate the
// address from the last 40 bits of this hash. This is different from the fingerprint hash for V0.
uint8_t digest[64];
char *const genmem = new char[ZT_V0_IDENTITY_GEN_MEMORY];
Address address;
do {
C25519::generateSatisfying(identityV0ProofOfWorkCriteria(digest, genmem), m_pub, m_priv);
address.setTo(digest + 59);
} while (address.isReserved());
delete[] genmem;
m_fp.address = address; // address comes from PoW hash for type 0 identities
m_computeHash();
}
break;
case P384:
for (;;) {
C25519::generateCombined(m_pub + 7, m_priv);
ECC384GenerateKey(
m_pub + 7 + ZT_C25519_COMBINED_PUBLIC_KEY_SIZE,
m_priv + ZT_C25519_COMBINED_PRIVATE_KEY_SIZE);
case P384:
for (;;) {
C25519::generateCombined(m_pub + 7, m_priv);
ECC384GenerateKey(m_pub + 7 + ZT_C25519_COMBINED_PUBLIC_KEY_SIZE, m_priv + ZT_C25519_COMBINED_PRIVATE_KEY_SIZE);
uint64_t challenge[4];
v1ChallengeFromPub(m_pub, challenge);
const uint64_t proof =
MIMC52::delay(reinterpret_cast<const uint8_t*>(challenge), ZT_IDENTITY_TYPE1_MIMC52_ROUNDS);
m_pub[0] = (uint8_t)(proof >> 48U);
m_pub[1] = (uint8_t)(proof >> 40U);
m_pub[2] = (uint8_t)(proof >> 32U);
m_pub[3] = (uint8_t)(proof >> 24U);
m_pub[4] = (uint8_t)(proof >> 16U);
m_pub[5] = (uint8_t)(proof >> 8U);
m_pub[6] = (uint8_t)proof;
uint64_t challenge[4];
v1ChallengeFromPub(m_pub, challenge);
const uint64_t proof = MIMC52::delay(reinterpret_cast<const uint8_t *>(challenge), ZT_IDENTITY_TYPE1_MIMC52_ROUNDS);
m_pub[0] = (uint8_t)(proof >> 48U);
m_pub[1] = (uint8_t)(proof >> 40U);
m_pub[2] = (uint8_t)(proof >> 32U);
m_pub[3] = (uint8_t)(proof >> 24U);
m_pub[4] = (uint8_t)(proof >> 16U);
m_pub[5] = (uint8_t)(proof >> 8U);
m_pub[6] = (uint8_t)proof;
m_computeHash();
const Address addr(m_fp.hash);
if (! addr.isReserved()) {
m_fp.address = addr;
break;
}
}
break;
m_computeHash();
const Address addr(m_fp.hash);
if (!addr.isReserved()) {
m_fp.address = addr;
break;
}
}
break;
default:
return false;
}
default:
return false;
}
return true;
return true;
}
bool Identity::locallyValidate() const noexcept
{
try {
if ((m_fp) && ((!Address(m_fp.address).isReserved()))) {
switch (m_type) {
try {
if ((m_fp) && ((! Address(m_fp.address).isReserved()))) {
switch (m_type) {
case C25519: {
uint8_t digest[64];
char* const genmem = (char*)malloc(ZT_V0_IDENTITY_GEN_MEMORY);
if (! genmem)
return false;
identityV0ProofOfWorkFrankenhash(m_pub, digest, genmem);
free(genmem);
return ((Address(digest + 59) == m_fp.address) && (digest[0] < 17));
}
case C25519: {
uint8_t digest[64];
char *const genmem = (char *)malloc(ZT_V0_IDENTITY_GEN_MEMORY);
if (!genmem)
return false;
identityV0ProofOfWorkFrankenhash(m_pub, digest, genmem);
free(genmem);
return ((Address(digest + 59) == m_fp.address) && (digest[0] < 17));
}
case P384:
if (Address(m_fp.hash) == m_fp.address) {
uint64_t challenge[4];
v1ChallengeFromPub(m_pub, challenge);
return MIMC52::verify(reinterpret_cast<const uint8_t *>(challenge), ZT_IDENTITY_TYPE1_MIMC52_ROUNDS, ((uint64_t)m_pub[0] << 48U) | ((uint64_t)m_pub[1] << 40U) | ((uint64_t)m_pub[2] << 32U) | ((uint64_t)m_pub[3] << 24U) | ((uint64_t)m_pub[4] << 16U) | ((uint64_t)m_pub[5] << 8U) | (uint64_t)m_pub[6]);
}
return false;
}
}
} catch (...) {}
return false;
case P384:
if (Address(m_fp.hash) == m_fp.address) {
uint64_t challenge[4];
v1ChallengeFromPub(m_pub, challenge);
return MIMC52::verify(
reinterpret_cast<const uint8_t*>(challenge),
ZT_IDENTITY_TYPE1_MIMC52_ROUNDS,
((uint64_t)m_pub[0] << 48U) | ((uint64_t)m_pub[1] << 40U) | ((uint64_t)m_pub[2] << 32U)
| ((uint64_t)m_pub[3] << 24U) | ((uint64_t)m_pub[4] << 16U) | ((uint64_t)m_pub[5] << 8U)
| (uint64_t)m_pub[6]);
}
return false;
}
}
}
catch (...) {
}
return false;
}
void Identity::hashWithPrivate(uint8_t h[ZT_FINGERPRINT_HASH_SIZE]) const
{
if (m_hasPrivate) {
switch (m_type) {
if (m_hasPrivate) {
switch (m_type) {
case C25519:
SHA384(h, m_pub, ZT_C25519_COMBINED_PUBLIC_KEY_SIZE, m_priv, ZT_C25519_COMBINED_PRIVATE_KEY_SIZE);
return;
case C25519:
SHA384(h, m_pub, ZT_C25519_COMBINED_PUBLIC_KEY_SIZE, m_priv, ZT_C25519_COMBINED_PRIVATE_KEY_SIZE);
return;
case P384:
SHA384(h, m_pub, sizeof(m_pub), m_priv, sizeof(m_priv));
return;
}
}
Utils::zero< ZT_FINGERPRINT_HASH_SIZE >(h);
case P384:
SHA384(h, m_pub, sizeof(m_pub), m_priv, sizeof(m_priv));
return;
}
}
Utils::zero<ZT_FINGERPRINT_HASH_SIZE>(h);
}
unsigned int Identity::sign(const void *data, unsigned int len, void *sig, unsigned int siglen) const
unsigned int Identity::sign(const void* data, unsigned int len, void* sig, unsigned int siglen) const
{
if (m_hasPrivate) {
switch (m_type) {
if (m_hasPrivate) {
switch (m_type) {
case C25519:
if (siglen >= ZT_C25519_SIGNATURE_LEN) {
C25519::sign(m_priv, m_pub, data, len, sig);
return ZT_C25519_SIGNATURE_LEN;
}
break;
case C25519:
if (siglen >= ZT_C25519_SIGNATURE_LEN) {
C25519::sign(m_priv, m_pub, data, len, sig);
return ZT_C25519_SIGNATURE_LEN;
}
break;
case P384:
if (siglen >= ZT_ECC384_SIGNATURE_SIZE) {
static_assert(ZT_ECC384_SIGNATURE_HASH_SIZE == ZT_SHA384_DIGEST_SIZE, "weird!");
uint8_t h[ZT_ECC384_SIGNATURE_HASH_SIZE];
SHA384(h, data, len, m_pub, ZT_IDENTITY_P384_COMPOUND_PUBLIC_KEY_SIZE);
ECC384ECDSASign(m_priv + ZT_C25519_COMBINED_PRIVATE_KEY_SIZE, h, (uint8_t *)sig);
return ZT_ECC384_SIGNATURE_SIZE;
}
break;
}
}
return 0;
case P384:
if (siglen >= ZT_ECC384_SIGNATURE_SIZE) {
static_assert(ZT_ECC384_SIGNATURE_HASH_SIZE == ZT_SHA384_DIGEST_SIZE, "weird!");
uint8_t h[ZT_ECC384_SIGNATURE_HASH_SIZE];
SHA384(h, data, len, m_pub, ZT_IDENTITY_P384_COMPOUND_PUBLIC_KEY_SIZE);
ECC384ECDSASign(m_priv + ZT_C25519_COMBINED_PRIVATE_KEY_SIZE, h, (uint8_t*)sig);
return ZT_ECC384_SIGNATURE_SIZE;
}
break;
}
}
return 0;
}
bool Identity::verify(const void *data, unsigned int len, const void *sig, unsigned int siglen) const
bool Identity::verify(const void* data, unsigned int len, const void* sig, unsigned int siglen) const
{
switch (m_type) {
switch (m_type) {
case C25519:
return C25519::verify(m_pub, data, len, sig, siglen);
case C25519:
return C25519::verify(m_pub, data, len, sig, siglen);
case P384:
if (siglen == ZT_ECC384_SIGNATURE_SIZE) {
uint8_t h[ZT_ECC384_SIGNATURE_HASH_SIZE];
SHA384(h, data, len, m_pub, ZT_IDENTITY_P384_COMPOUND_PUBLIC_KEY_SIZE);
return ECC384ECDSAVerify(m_pub + 7 + ZT_C25519_COMBINED_PUBLIC_KEY_SIZE, h, (const uint8_t *)sig);
}
break;
}
return false;
case P384:
if (siglen == ZT_ECC384_SIGNATURE_SIZE) {
uint8_t h[ZT_ECC384_SIGNATURE_HASH_SIZE];
SHA384(h, data, len, m_pub, ZT_IDENTITY_P384_COMPOUND_PUBLIC_KEY_SIZE);
return ECC384ECDSAVerify(m_pub + 7 + ZT_C25519_COMBINED_PUBLIC_KEY_SIZE, h, (const uint8_t*)sig);
}
break;
}
return false;
}
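
For reference, sign() and verify() are meant to be used as a pair. The sketch below is illustrative only; it assumes an identity that still holds its private key, and the 96-byte buffer matches the current maximum signature size noted in Identity.hpp.

// Illustrative only: pairing sign() with verify() on an identity that holds its private key.
#include "Identity.hpp"

using namespace ZeroTier;

static bool signAndCheck(const Identity& id, const void* msg, unsigned int len)
{
    uint8_t sig[96];   // large enough for the current maximum (ECC384) signature
    const unsigned int sl = id.sign(msg, len, sig, sizeof(sig));
    if (sl == 0)
        return false;   // no private key or buffer too small
    return id.verify(msg, len, sig, sl);
}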
bool Identity::agree(const Identity &id, uint8_t key[ZT_SYMMETRIC_KEY_SIZE]) const
bool Identity::agree(const Identity& id, uint8_t key[ZT_SYMMETRIC_KEY_SIZE]) const
{
uint8_t rawkey[128], h[64];
if (m_hasPrivate) {
if ((m_type == C25519) || (id.m_type == C25519)) {
// If we are a C25519 key we can agree with another C25519 key or with only the
// C25519 portion of a type 1 P-384 key.
C25519::agree(m_priv, id.m_pub, rawkey);
SHA512(h, rawkey, ZT_C25519_ECDH_SHARED_SECRET_SIZE);
Utils::copy< ZT_SYMMETRIC_KEY_SIZE >(key, h);
return true;
} else if ((m_type == P384) && (id.m_type == P384)) {
// For another P384 identity we execute DH agreement with BOTH keys and then
// hash the results together. For those (cough FIPS cough) who only consider
// P384 to be kosher, the C25519 secret can be considered a "salt"
// or something. For those who don't trust P384 this means the privacy of
// your traffic is also protected by C25519.
C25519::agree(m_priv, id.m_pub, rawkey);
ECC384ECDH(id.m_pub + 7 + ZT_C25519_COMBINED_PUBLIC_KEY_SIZE, m_priv + ZT_C25519_COMBINED_PRIVATE_KEY_SIZE, rawkey + ZT_C25519_ECDH_SHARED_SECRET_SIZE);
SHA384(key, rawkey, ZT_C25519_ECDH_SHARED_SECRET_SIZE + ZT_ECC384_SHARED_SECRET_SIZE);
return true;
}
}
return false;
uint8_t rawkey[128], h[64];
if (m_hasPrivate) {
if ((m_type == C25519) || (id.m_type == C25519)) {
// If we are a C25519 key we can agree with another C25519 key or with only the
// C25519 portion of a type 1 P-384 key.
C25519::agree(m_priv, id.m_pub, rawkey);
SHA512(h, rawkey, ZT_C25519_ECDH_SHARED_SECRET_SIZE);
Utils::copy<ZT_SYMMETRIC_KEY_SIZE>(key, h);
return true;
}
else if ((m_type == P384) && (id.m_type == P384)) {
// For another P384 identity we execute DH agreement with BOTH keys and then
// hash the results together. For those (cough FIPS cough) who only consider
// P384 to be kosher, the C25519 secret can be considered a "salt"
// or something. For those who don't trust P384 this means the privacy of
// your traffic is also protected by C25519.
C25519::agree(m_priv, id.m_pub, rawkey);
ECC384ECDH(
id.m_pub + 7 + ZT_C25519_COMBINED_PUBLIC_KEY_SIZE,
m_priv + ZT_C25519_COMBINED_PRIVATE_KEY_SIZE,
rawkey + ZT_C25519_ECDH_SHARED_SECRET_SIZE);
SHA384(key, rawkey, ZT_C25519_ECDH_SHARED_SECRET_SIZE + ZT_ECC384_SHARED_SECRET_SIZE);
return true;
}
}
return false;
}
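
Key agreement is symmetric, so a quick sanity sketch (assuming both identities were generated locally and therefore hold private keys) is that both directions derive the same ZT_SYMMETRIC_KEY_SIZE-byte key.

// Illustrative only: agreement must produce the same key in both directions.
#include "Identity.hpp"

#include <cstring>

using namespace ZeroTier;

static bool sameKeyBothWays(const Identity& a, const Identity& b)
{
    uint8_t k1[ZT_SYMMETRIC_KEY_SIZE], k2[ZT_SYMMETRIC_KEY_SIZE];
    if (! (a.agree(b, k1) && b.agree(a, k2)))
        return false;
    return std::memcmp(k1, k2, ZT_SYMMETRIC_KEY_SIZE) == 0;
}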
char *Identity::toString(bool includePrivate, char buf[ZT_IDENTITY_STRING_BUFFER_LENGTH]) const
char* Identity::toString(bool includePrivate, char buf[ZT_IDENTITY_STRING_BUFFER_LENGTH]) const
{
char *p = buf;
Address(m_fp.address).toString(p);
p += 10;
*(p++) = ':';
char* p = buf;
Address(m_fp.address).toString(p);
p += 10;
*(p++) = ':';
switch (m_type) {
case C25519: {
*(p++) = '0';
*(p++) = ':';
Utils::hex(m_pub, ZT_C25519_COMBINED_PUBLIC_KEY_SIZE, p);
p += ZT_C25519_COMBINED_PUBLIC_KEY_SIZE * 2;
if ((m_hasPrivate) && (includePrivate)) {
*(p++) = ':';
Utils::hex(m_priv, ZT_C25519_COMBINED_PRIVATE_KEY_SIZE, p);
p += ZT_C25519_COMBINED_PRIVATE_KEY_SIZE * 2;
}
*p = (char)0;
return buf;
}
case P384: {
*(p++) = '1';
*(p++) = ':';
int el = Utils::b32e(m_pub, sizeof(m_pub), p, (int)(ZT_IDENTITY_STRING_BUFFER_LENGTH - (uintptr_t)(p - buf)));
if (el <= 0) return nullptr;
p += el;
if ((m_hasPrivate) && (includePrivate)) {
*(p++) = ':';
el = Utils::b32e(m_priv, sizeof(m_priv), p, (int)(ZT_IDENTITY_STRING_BUFFER_LENGTH - (uintptr_t)(p - buf)));
if (el <= 0) return nullptr;
p += el;
}
*p = (char)0;
return buf;
}
default:
buf[0] = 0;
}
switch (m_type) {
case C25519: {
*(p++) = '0';
*(p++) = ':';
Utils::hex(m_pub, ZT_C25519_COMBINED_PUBLIC_KEY_SIZE, p);
p += ZT_C25519_COMBINED_PUBLIC_KEY_SIZE * 2;
if ((m_hasPrivate) && (includePrivate)) {
*(p++) = ':';
Utils::hex(m_priv, ZT_C25519_COMBINED_PRIVATE_KEY_SIZE, p);
p += ZT_C25519_COMBINED_PRIVATE_KEY_SIZE * 2;
}
*p = (char)0;
return buf;
}
case P384: {
*(p++) = '1';
*(p++) = ':';
int el =
Utils::b32e(m_pub, sizeof(m_pub), p, (int)(ZT_IDENTITY_STRING_BUFFER_LENGTH - (uintptr_t)(p - buf)));
if (el <= 0)
return nullptr;
p += el;
if ((m_hasPrivate) && (includePrivate)) {
*(p++) = ':';
el = Utils::b32e(
m_priv,
sizeof(m_priv),
p,
(int)(ZT_IDENTITY_STRING_BUFFER_LENGTH - (uintptr_t)(p - buf)));
if (el <= 0)
return nullptr;
p += el;
}
*p = (char)0;
return buf;
}
default:
buf[0] = 0;
}
return nullptr;
return nullptr;
}
bool Identity::fromString(const char *str)
bool Identity::fromString(const char* str)
{
char tmp[ZT_IDENTITY_STRING_BUFFER_LENGTH];
memoryZero(this);
if ((!str) || (!Utils::scopy(tmp, sizeof(tmp), str)))
return false;
char tmp[ZT_IDENTITY_STRING_BUFFER_LENGTH];
memoryZero(this);
if ((! str) || (! Utils::scopy(tmp, sizeof(tmp), str)))
return false;
int fno = 0;
char *saveptr = nullptr;
for (char *f = Utils::stok(tmp, ":", &saveptr); ((f) && (fno < 4)); f = Utils::stok(nullptr, ":", &saveptr)) {
switch (fno++) {
int fno = 0;
char* saveptr = nullptr;
for (char* f = Utils::stok(tmp, ":", &saveptr); ((f) && (fno < 4)); f = Utils::stok(nullptr, ":", &saveptr)) {
switch (fno++) {
case 0:
m_fp.address = Utils::hexStrToU64(f) & ZT_ADDRESS_MASK;
if (Address(m_fp.address).isReserved())
return false;
break;
case 0:
m_fp.address = Utils::hexStrToU64(f) & ZT_ADDRESS_MASK;
if (Address(m_fp.address).isReserved())
return false;
break;
case 1:
if ((f[0] == '0') && (! f[1])) {
m_type = C25519;
}
else if ((f[0] == '1') && (! f[1])) {
m_type = P384;
}
else {
return false;
}
break;
case 1:
if ((f[0] == '0') && (!f[1])) {
m_type = C25519;
} else if ((f[0] == '1') && (!f[1])) {
m_type = P384;
} else {
return false;
}
break;
case 2:
switch (m_type) {
case C25519:
if (Utils::unhex(f, strlen(f), m_pub, ZT_C25519_COMBINED_PUBLIC_KEY_SIZE)
!= ZT_C25519_COMBINED_PUBLIC_KEY_SIZE)
return false;
break;
case 2:
switch (m_type) {
case P384:
if (Utils::b32d(f, m_pub, sizeof(m_pub)) != sizeof(m_pub))
return false;
break;
}
break;
case C25519:
if (Utils::unhex(f, strlen(f), m_pub, ZT_C25519_COMBINED_PUBLIC_KEY_SIZE) != ZT_C25519_COMBINED_PUBLIC_KEY_SIZE)
return false;
break;
case 3:
if (strlen(f) > 1) {
switch (m_type) {
case C25519:
if (Utils::unhex(f, strlen(f), m_priv, ZT_C25519_COMBINED_PRIVATE_KEY_SIZE)
!= ZT_C25519_COMBINED_PRIVATE_KEY_SIZE) {
return false;
}
else {
m_hasPrivate = true;
}
break;
case P384:
if (Utils::b32d(f, m_pub, sizeof(m_pub)) != sizeof(m_pub))
return false;
break;
case P384:
if (Utils::b32d(f, m_priv, sizeof(m_priv)) != sizeof(m_priv)) {
return false;
}
else {
m_hasPrivate = true;
}
break;
}
break;
}
}
}
}
break;
if (fno < 3)
return false;
case 3:
if (strlen(f) > 1) {
switch (m_type) {
case C25519:
if (Utils::unhex(f, strlen(f), m_priv, ZT_C25519_COMBINED_PRIVATE_KEY_SIZE) != ZT_C25519_COMBINED_PRIVATE_KEY_SIZE) {
return false;
} else {
m_hasPrivate = true;
}
break;
case P384:
if (Utils::b32d(f, m_priv, sizeof(m_priv)) != sizeof(m_priv)) {
return false;
} else {
m_hasPrivate = true;
}
break;
}
break;
}
}
}
if (fno < 3)
return false;
m_computeHash();
return !((m_type == P384) && (Address(m_fp.hash) != m_fp.address));
m_computeHash();
return ! ((m_type == P384) && (Address(m_fp.hash) != m_fp.address));
}
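
Taken together, toString() and fromString() allow a lossless round trip of the public portion of an identity; a minimal sketch:

// Illustrative only: round-trip through the canonical string form (public part only).
#include "Identity.hpp"

using namespace ZeroTier;

static bool stringRoundTrip(const Identity& id)
{
    char buf[ZT_IDENTITY_STRING_BUFFER_LENGTH];
    if (! id.toString(false, buf))
        return false;
    Identity copy;
    return copy.fromString(buf) && copy.locallyValidate() && (copy == id);
}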
int Identity::marshal(uint8_t data[ZT_IDENTITY_MARSHAL_SIZE_MAX], const bool includePrivate) const noexcept
{
Address(m_fp.address).copyTo(data);
switch (m_type) {
Address(m_fp.address).copyTo(data);
switch (m_type) {
case C25519:
data[ZT_ADDRESS_LENGTH] = (uint8_t)C25519;
Utils::copy<ZT_C25519_COMBINED_PUBLIC_KEY_SIZE>(data + ZT_ADDRESS_LENGTH + 1, m_pub);
if ((includePrivate) && (m_hasPrivate)) {
data[ZT_ADDRESS_LENGTH + 1 + ZT_C25519_COMBINED_PUBLIC_KEY_SIZE] = ZT_C25519_COMBINED_PRIVATE_KEY_SIZE;
Utils::copy<ZT_C25519_COMBINED_PRIVATE_KEY_SIZE>(
data + ZT_ADDRESS_LENGTH + 1 + ZT_C25519_COMBINED_PUBLIC_KEY_SIZE + 1,
m_priv);
return ZT_ADDRESS_LENGTH + 1 + ZT_C25519_COMBINED_PUBLIC_KEY_SIZE + 1
+ ZT_C25519_COMBINED_PRIVATE_KEY_SIZE;
}
data[ZT_ADDRESS_LENGTH + 1 + ZT_C25519_COMBINED_PUBLIC_KEY_SIZE] = 0;
return ZT_ADDRESS_LENGTH + 1 + ZT_C25519_COMBINED_PUBLIC_KEY_SIZE + 1;
case C25519:
data[ZT_ADDRESS_LENGTH] = (uint8_t)C25519;
Utils::copy< ZT_C25519_COMBINED_PUBLIC_KEY_SIZE >(data + ZT_ADDRESS_LENGTH + 1, m_pub);
if ((includePrivate) && (m_hasPrivate)) {
data[ZT_ADDRESS_LENGTH + 1 + ZT_C25519_COMBINED_PUBLIC_KEY_SIZE] = ZT_C25519_COMBINED_PRIVATE_KEY_SIZE;
Utils::copy< ZT_C25519_COMBINED_PRIVATE_KEY_SIZE >(data + ZT_ADDRESS_LENGTH + 1 + ZT_C25519_COMBINED_PUBLIC_KEY_SIZE + 1, m_priv);
return ZT_ADDRESS_LENGTH + 1 + ZT_C25519_COMBINED_PUBLIC_KEY_SIZE + 1 + ZT_C25519_COMBINED_PRIVATE_KEY_SIZE;
}
data[ZT_ADDRESS_LENGTH + 1 + ZT_C25519_COMBINED_PUBLIC_KEY_SIZE] = 0;
return ZT_ADDRESS_LENGTH + 1 + ZT_C25519_COMBINED_PUBLIC_KEY_SIZE + 1;
case P384:
data[ZT_ADDRESS_LENGTH] = (uint8_t)P384;
Utils::copy< ZT_IDENTITY_P384_COMPOUND_PUBLIC_KEY_SIZE >(data + ZT_ADDRESS_LENGTH + 1, m_pub);
if ((includePrivate) && (m_hasPrivate)) {
data[ZT_ADDRESS_LENGTH + 1 + ZT_IDENTITY_P384_COMPOUND_PUBLIC_KEY_SIZE] = ZT_IDENTITY_P384_COMPOUND_PRIVATE_KEY_SIZE;
Utils::copy< ZT_IDENTITY_P384_COMPOUND_PRIVATE_KEY_SIZE >(data + ZT_ADDRESS_LENGTH + 1 + ZT_IDENTITY_P384_COMPOUND_PUBLIC_KEY_SIZE + 1, m_priv);
return ZT_ADDRESS_LENGTH + 1 + ZT_IDENTITY_P384_COMPOUND_PUBLIC_KEY_SIZE + 1 + ZT_IDENTITY_P384_COMPOUND_PRIVATE_KEY_SIZE;
}
data[ZT_ADDRESS_LENGTH + 1 + ZT_IDENTITY_P384_COMPOUND_PUBLIC_KEY_SIZE] = 0;
return ZT_ADDRESS_LENGTH + 1 + ZT_IDENTITY_P384_COMPOUND_PUBLIC_KEY_SIZE + 1;
}
return -1;
case P384:
data[ZT_ADDRESS_LENGTH] = (uint8_t)P384;
Utils::copy<ZT_IDENTITY_P384_COMPOUND_PUBLIC_KEY_SIZE>(data + ZT_ADDRESS_LENGTH + 1, m_pub);
if ((includePrivate) && (m_hasPrivate)) {
data[ZT_ADDRESS_LENGTH + 1 + ZT_IDENTITY_P384_COMPOUND_PUBLIC_KEY_SIZE] =
ZT_IDENTITY_P384_COMPOUND_PRIVATE_KEY_SIZE;
Utils::copy<ZT_IDENTITY_P384_COMPOUND_PRIVATE_KEY_SIZE>(
data + ZT_ADDRESS_LENGTH + 1 + ZT_IDENTITY_P384_COMPOUND_PUBLIC_KEY_SIZE + 1,
m_priv);
return ZT_ADDRESS_LENGTH + 1 + ZT_IDENTITY_P384_COMPOUND_PUBLIC_KEY_SIZE + 1
+ ZT_IDENTITY_P384_COMPOUND_PRIVATE_KEY_SIZE;
}
data[ZT_ADDRESS_LENGTH + 1 + ZT_IDENTITY_P384_COMPOUND_PUBLIC_KEY_SIZE] = 0;
return ZT_ADDRESS_LENGTH + 1 + ZT_IDENTITY_P384_COMPOUND_PUBLIC_KEY_SIZE + 1;
}
return -1;
}
int Identity::unmarshal(const uint8_t *data, const int len) noexcept
int Identity::unmarshal(const uint8_t* data, const int len) noexcept
{
memoryZero(this);
memoryZero(this);
if (len < (1 + ZT_ADDRESS_LENGTH))
return -1;
m_fp.address = Address(data);
if (len < (1 + ZT_ADDRESS_LENGTH))
return -1;
m_fp.address = Address(data);
unsigned int privlen;
switch ((m_type = (Type)data[ZT_ADDRESS_LENGTH])) {
unsigned int privlen;
switch ((m_type = (Type)data[ZT_ADDRESS_LENGTH])) {
case C25519:
if (len < (ZT_ADDRESS_LENGTH + 1 + ZT_C25519_COMBINED_PUBLIC_KEY_SIZE + 1))
return -1;
case C25519:
if (len < (ZT_ADDRESS_LENGTH + 1 + ZT_C25519_COMBINED_PUBLIC_KEY_SIZE + 1))
return -1;
Utils::copy<ZT_C25519_COMBINED_PUBLIC_KEY_SIZE>(m_pub, data + ZT_ADDRESS_LENGTH + 1);
m_computeHash();
Utils::copy< ZT_C25519_COMBINED_PUBLIC_KEY_SIZE >(m_pub, data + ZT_ADDRESS_LENGTH + 1);
m_computeHash();
privlen = data[ZT_ADDRESS_LENGTH + 1 + ZT_C25519_COMBINED_PUBLIC_KEY_SIZE];
if (privlen == ZT_C25519_COMBINED_PRIVATE_KEY_SIZE) {
if (len
< (ZT_ADDRESS_LENGTH + 1 + ZT_C25519_COMBINED_PUBLIC_KEY_SIZE + 1
+ ZT_C25519_COMBINED_PRIVATE_KEY_SIZE))
return -1;
m_hasPrivate = true;
Utils::copy<ZT_C25519_COMBINED_PRIVATE_KEY_SIZE>(
m_priv,
data + ZT_ADDRESS_LENGTH + 1 + ZT_C25519_COMBINED_PUBLIC_KEY_SIZE + 1);
return ZT_ADDRESS_LENGTH + 1 + ZT_C25519_COMBINED_PUBLIC_KEY_SIZE + 1
+ ZT_C25519_COMBINED_PRIVATE_KEY_SIZE;
}
else if (privlen == 0) {
m_hasPrivate = false;
return ZT_ADDRESS_LENGTH + 1 + ZT_C25519_COMBINED_PUBLIC_KEY_SIZE + 1;
}
break;
privlen = data[ZT_ADDRESS_LENGTH + 1 + ZT_C25519_COMBINED_PUBLIC_KEY_SIZE];
if (privlen == ZT_C25519_COMBINED_PRIVATE_KEY_SIZE) {
if (len < (ZT_ADDRESS_LENGTH + 1 + ZT_C25519_COMBINED_PUBLIC_KEY_SIZE + 1 + ZT_C25519_COMBINED_PRIVATE_KEY_SIZE))
return -1;
m_hasPrivate = true;
Utils::copy< ZT_C25519_COMBINED_PRIVATE_KEY_SIZE >(m_priv, data + ZT_ADDRESS_LENGTH + 1 + ZT_C25519_COMBINED_PUBLIC_KEY_SIZE + 1);
return ZT_ADDRESS_LENGTH + 1 + ZT_C25519_COMBINED_PUBLIC_KEY_SIZE + 1 + ZT_C25519_COMBINED_PRIVATE_KEY_SIZE;
} else if (privlen == 0) {
m_hasPrivate = false;
return ZT_ADDRESS_LENGTH + 1 + ZT_C25519_COMBINED_PUBLIC_KEY_SIZE + 1;
}
break;
case P384:
if (len < (ZT_ADDRESS_LENGTH + 1 + ZT_IDENTITY_P384_COMPOUND_PUBLIC_KEY_SIZE + 1))
return -1;
case P384:
if (len < (ZT_ADDRESS_LENGTH + 1 + ZT_IDENTITY_P384_COMPOUND_PUBLIC_KEY_SIZE + 1))
return -1;
Utils::copy<ZT_IDENTITY_P384_COMPOUND_PUBLIC_KEY_SIZE>(m_pub, data + ZT_ADDRESS_LENGTH + 1);
m_computeHash(); // this sets the address for P384
if (Address(m_fp.hash) != m_fp.address) // this sanity check is possible with V1 identities
return -1;
Utils::copy< ZT_IDENTITY_P384_COMPOUND_PUBLIC_KEY_SIZE >(m_pub, data + ZT_ADDRESS_LENGTH + 1);
m_computeHash(); // this sets the address for P384
if (Address(m_fp.hash) != m_fp.address) // this sanity check is possible with V1 identities
return -1;
privlen = data[ZT_ADDRESS_LENGTH + 1 + ZT_IDENTITY_P384_COMPOUND_PUBLIC_KEY_SIZE];
if (privlen == 0) {
m_hasPrivate = false;
return ZT_ADDRESS_LENGTH + 1 + ZT_IDENTITY_P384_COMPOUND_PUBLIC_KEY_SIZE + 1;
}
else if (privlen == ZT_IDENTITY_P384_COMPOUND_PRIVATE_KEY_SIZE) {
if (len
< (ZT_ADDRESS_LENGTH + 1 + ZT_IDENTITY_P384_COMPOUND_PUBLIC_KEY_SIZE + 1
+ ZT_IDENTITY_P384_COMPOUND_PRIVATE_KEY_SIZE))
return -1;
m_hasPrivate = true;
Utils::copy<ZT_IDENTITY_P384_COMPOUND_PRIVATE_KEY_SIZE>(
&m_priv,
data + ZT_ADDRESS_LENGTH + 1 + ZT_IDENTITY_P384_COMPOUND_PUBLIC_KEY_SIZE + 1);
return ZT_ADDRESS_LENGTH + 1 + ZT_IDENTITY_P384_COMPOUND_PUBLIC_KEY_SIZE + 1
+ ZT_IDENTITY_P384_COMPOUND_PRIVATE_KEY_SIZE;
}
break;
}
privlen = data[ZT_ADDRESS_LENGTH + 1 + ZT_IDENTITY_P384_COMPOUND_PUBLIC_KEY_SIZE];
if (privlen == 0) {
m_hasPrivate = false;
return ZT_ADDRESS_LENGTH + 1 + ZT_IDENTITY_P384_COMPOUND_PUBLIC_KEY_SIZE + 1;
} else if (privlen == ZT_IDENTITY_P384_COMPOUND_PRIVATE_KEY_SIZE) {
if (len < (ZT_ADDRESS_LENGTH + 1 + ZT_IDENTITY_P384_COMPOUND_PUBLIC_KEY_SIZE + 1 + ZT_IDENTITY_P384_COMPOUND_PRIVATE_KEY_SIZE))
return -1;
m_hasPrivate = true;
Utils::copy< ZT_IDENTITY_P384_COMPOUND_PRIVATE_KEY_SIZE >(&m_priv, data + ZT_ADDRESS_LENGTH + 1 + ZT_IDENTITY_P384_COMPOUND_PUBLIC_KEY_SIZE + 1);
return ZT_ADDRESS_LENGTH + 1 + ZT_IDENTITY_P384_COMPOUND_PUBLIC_KEY_SIZE + 1 + ZT_IDENTITY_P384_COMPOUND_PRIVATE_KEY_SIZE;
}
break;
}
return -1;
return -1;
}
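
marshal() and unmarshal() give the equivalent binary round trip; a minimal sketch using the size constant from Identity.hpp:

// Illustrative only: binary round trip of the public portion of an identity.
#include "Identity.hpp"

using namespace ZeroTier;

static bool wireRoundTrip(const Identity& id)
{
    uint8_t buf[ZT_IDENTITY_MARSHAL_SIZE_MAX];
    const int n = id.marshal(buf, false);   // public portion only
    if (n <= 0)
        return false;
    Identity copy;
    return (copy.unmarshal(buf, n) == n) && (copy == id);
}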
void Identity::m_computeHash()
{
switch (m_type) {
default:
m_fp.zero();
break;
switch (m_type) {
default:
m_fp.zero();
break;
case C25519:
SHA384(m_fp.hash, m_pub, ZT_C25519_COMBINED_PUBLIC_KEY_SIZE);
break;
case C25519:
SHA384(m_fp.hash, m_pub, ZT_C25519_COMBINED_PUBLIC_KEY_SIZE);
break;
case P384:
SHA384(m_fp.hash, m_pub, ZT_IDENTITY_P384_COMPOUND_PUBLIC_KEY_SIZE);
break;
}
case P384:
SHA384(m_fp.hash, m_pub, ZT_IDENTITY_P384_COMPOUND_PUBLIC_KEY_SIZE);
break;
}
}
} // namespace ZeroTier
} // namespace ZeroTier


@@ -14,21 +14,22 @@
#ifndef ZT_IDENTITY_HPP
#define ZT_IDENTITY_HPP
#include "Constants.hpp"
#include "Utils.hpp"
#include "Address.hpp"
#include "C25519.hpp"
#include "SHA512.hpp"
#include "ECC384.hpp"
#include "TriviallyCopyable.hpp"
#include "Fingerprint.hpp"
#include "Constants.hpp"
#include "Containers.hpp"
#include "ECC384.hpp"
#include "Fingerprint.hpp"
#include "SHA512.hpp"
#include "TriviallyCopyable.hpp"
#include "Utils.hpp"
#define ZT_IDENTITY_STRING_BUFFER_LENGTH 1024
#define ZT_IDENTITY_P384_COMPOUND_PUBLIC_KEY_SIZE (7 + ZT_C25519_COMBINED_PUBLIC_KEY_SIZE + ZT_ECC384_PUBLIC_KEY_SIZE)
#define ZT_IDENTITY_P384_COMPOUND_PRIVATE_KEY_SIZE (ZT_C25519_COMBINED_PRIVATE_KEY_SIZE + ZT_ECC384_PRIVATE_KEY_SIZE)
#define ZT_IDENTITY_MARSHAL_SIZE_MAX (ZT_ADDRESS_LENGTH + 4 + ZT_IDENTITY_P384_COMPOUND_PUBLIC_KEY_SIZE + ZT_IDENTITY_P384_COMPOUND_PRIVATE_KEY_SIZE)
#define ZT_IDENTITY_TYPE1_MIMC52_ROUNDS 262144
#define ZT_IDENTITY_MARSHAL_SIZE_MAX \
(ZT_ADDRESS_LENGTH + 4 + ZT_IDENTITY_P384_COMPOUND_PUBLIC_KEY_SIZE + ZT_IDENTITY_P384_COMPOUND_PRIVATE_KEY_SIZE)
#define ZT_IDENTITY_TYPE1_MIMC52_ROUNDS 262144
namespace ZeroTier {
@@ -44,225 +45,259 @@ namespace ZeroTier {
* Type 1 identities are better in many ways but type 0 will remain the default until
* 1.x nodes are pretty much dead in the wild.
*/
class Identity : public TriviallyCopyable
{
public:
/**
* Identity type -- numeric values of these enums are protocol constants
*/
enum Type
{
C25519 = ZT_IDENTITY_TYPE_C25519, // Type 0 -- Curve25519 and Ed25519 (1.x and 2.x, default)
P384 = ZT_IDENTITY_TYPE_P384 // Type 1 -- NIST P-384 with linked Curve25519/Ed25519 secondaries (2.x+)
};
class Identity : public TriviallyCopyable {
public:
/**
* Identity type -- numeric values of these enums are protocol constants
*/
enum Type {
C25519 = ZT_IDENTITY_TYPE_C25519, // Type 0 -- Curve25519 and Ed25519 (1.x and 2.x, default)
P384 = ZT_IDENTITY_TYPE_P384 // Type 1 -- NIST P-384 with linked Curve25519/Ed25519 secondaries (2.x+)
};
/**
* A nil/empty identity instance
*/
static const Identity NIL;
/**
* A nil/empty identity instance
*/
static const Identity NIL;
ZT_INLINE Identity() noexcept
{ memoryZero(this); }
ZT_INLINE Identity() noexcept
{
memoryZero(this);
}
ZT_INLINE Identity(const Identity &id) noexcept
{ Utils::copy< sizeof(Identity) >(this, &id); }
ZT_INLINE Identity(const Identity& id) noexcept
{
Utils::copy<sizeof(Identity)>(this, &id);
}
/**
* Construct identity from string
*
* If the identity is not basically valid (no deep checking is done) the result will
* be a null identity.
*
* @param str Identity in canonical string format
*/
explicit ZT_INLINE Identity(const char *str)
{ fromString(str); }
/**
* Construct identity from string
*
* If the identity is not basically valid (no deep checking is done) the result will
* be a null identity.
*
* @param str Identity in canonical string format
*/
explicit ZT_INLINE Identity(const char* str)
{
fromString(str);
}
ZT_INLINE ~Identity()
{ Utils::burn(reinterpret_cast<void *>(&this->m_priv), sizeof(this->m_priv)); }
ZT_INLINE ~Identity()
{
Utils::burn(reinterpret_cast<void*>(&this->m_priv), sizeof(this->m_priv));
}
ZT_INLINE Identity &operator=(const Identity &id) noexcept
{
if (likely(this != &id))
Utils::copy< sizeof(Identity) >(this, &id);
return *this;
}
ZT_INLINE Identity& operator=(const Identity& id) noexcept
{
if (likely(this != &id))
Utils::copy<sizeof(Identity)>(this, &id);
return *this;
}
/**
* Set identity to NIL value (all zero)
*/
ZT_INLINE void zero() noexcept
{ memoryZero(this); }
/**
* Set identity to NIL value (all zero)
*/
ZT_INLINE void zero() noexcept
{
memoryZero(this);
}
/**
* @return Identity type (undefined if identity is null or invalid)
*/
ZT_INLINE Type type() const noexcept
{ return m_type; }
/**
* @return Identity type (undefined if identity is null or invalid)
*/
ZT_INLINE Type type() const noexcept
{
return m_type;
}
/**
* Generate a new identity (address, key pair)
*
* This is a time consuming operation taking up to 5-10 seconds on some slower systems.
*
* @param t Type of identity to generate
* @return False if there was an error such as type being an invalid value
*/
bool generate(Type t);
/**
* Generate a new identity (address, key pair)
*
* This is a time consuming operation taking up to 5-10 seconds on some slower systems.
*
* @param t Type of identity to generate
* @return False if there was an error such as type being an invalid value
*/
bool generate(Type t);
/**
* Check the validity of this identity's address
*
* For type 0 identities this is slightly time consuming. For type 1 identities it's
* instantaneous. It should be done when a new identity is accepted for the very first
* time.
*
* @return True if validation check passes
*/
bool locallyValidate() const noexcept;
/**
* Check the validity of this identity's address
*
* For type 0 identities this is slightly time consuming. For type 1 identities it's
* instantaneous. It should be done when a new identity is accepted for the very first
* time.
*
* @return True if validation check passes
*/
bool locallyValidate() const noexcept;
/**
* @return True if this identity contains a private key
*/
ZT_INLINE bool hasPrivate() const noexcept
{ return m_hasPrivate; }
/**
* @return True if this identity contains a private key
*/
ZT_INLINE bool hasPrivate() const noexcept
{
return m_hasPrivate;
}
/**
* @return This identity's address
*/
ZT_INLINE Address address() const noexcept
{ return Address(m_fp.address); }
/**
* @return This identity's address
*/
ZT_INLINE Address address() const noexcept
{
return Address(m_fp.address);
}
/**
* @return Full fingerprint of this identity (address plus SHA384 of keys)
*/
ZT_INLINE const Fingerprint &fingerprint() const noexcept
{ return m_fp; }
/**
* @return Full fingerprint of this identity (address plus SHA384 of keys)
*/
ZT_INLINE const Fingerprint& fingerprint() const noexcept
{
return m_fp;
}
/**
* Compute a hash of this identity's public and private keys.
*
* If there is no private key or the identity is NIL the buffer is filled with zero.
*
* @param h Buffer to store SHA384 hash
*/
void hashWithPrivate(uint8_t h[ZT_FINGERPRINT_HASH_SIZE]) const;
/**
* Compute a hash of this identity's public and private keys.
*
* If there is no private key or the identity is NIL the buffer is filled with zero.
*
* @param h Buffer to store SHA384 hash
*/
void hashWithPrivate(uint8_t h[ZT_FINGERPRINT_HASH_SIZE]) const;
/**
* Sign a message with this identity (private key required)
*
* The signature buffer should be large enough for the largest
* signature, which is currently 96 bytes.
*
* @param data Data to sign
* @param len Length of data
* @param sig Buffer to receive signature
* @param siglen Length of buffer
* @return Number of bytes actually written to sig or 0 on error
*/
unsigned int sign(const void *data, unsigned int len, void *sig, unsigned int siglen) const;
/**
* Sign a message with this identity (private key required)
*
* The signature buffer should be large enough for the largest
* signature, which is currently 96 bytes.
*
* @param data Data to sign
* @param len Length of data
* @param sig Buffer to receive signature
* @param siglen Length of buffer
* @return Number of bytes actually written to sig or 0 on error
*/
unsigned int sign(const void* data, unsigned int len, void* sig, unsigned int siglen) const;
/**
* Verify a message signature against this identity
*
* @param data Data to check
* @param len Length of data
* @param sig Signature bytes
* @param siglen Length of signature in bytes
* @return True if signature validates and data integrity checks
*/
bool verify(const void *data, unsigned int len, const void *sig, unsigned int siglen) const;
/**
* Verify a message signature against this identity
*
* @param data Data to check
* @param len Length of data
* @param sig Signature bytes
* @param siglen Length of signature in bytes
* @return True if signature validates and data integrity checks
*/
bool verify(const void* data, unsigned int len, const void* sig, unsigned int siglen) const;
/**
* Shortcut method to perform key agreement with another identity
*
* This identity must have a private key. (Check hasPrivate())
*
* @param id Identity to agree with
* @param key Result parameter to fill with key bytes
* @return Was agreement successful?
*/
bool agree(const Identity &id, uint8_t key[ZT_SYMMETRIC_KEY_SIZE]) const;
/**
* Shortcut method to perform key agreement with another identity
*
* This identity must have a private key. (Check hasPrivate())
*
* @param id Identity to agree with
* @param key Result parameter to fill with key bytes
* @return Was agreement successful?
*/
bool agree(const Identity& id, uint8_t key[ZT_SYMMETRIC_KEY_SIZE]) const;
/**
* Serialize to a more human-friendly string
*
* @param includePrivate If true, include private key (if it exists)
* @param buf Buffer to store string
* @return ASCII string representation of identity (pointer to buf)
*/
char *toString(bool includePrivate, char buf[ZT_IDENTITY_STRING_BUFFER_LENGTH]) const;
/**
* Serialize to a more human-friendly string
*
* @param includePrivate If true, include private key (if it exists)
* @param buf Buffer to store string
* @return ASCII string representation of identity (pointer to buf)
*/
char* toString(bool includePrivate, char buf[ZT_IDENTITY_STRING_BUFFER_LENGTH]) const;
ZT_INLINE String toString(const bool includePrivate = false) const
{
char buf[ZT_IDENTITY_STRING_BUFFER_LENGTH];
toString(includePrivate, buf);
return String(buf);
}
ZT_INLINE String toString(const bool includePrivate = false) const
{
char buf[ZT_IDENTITY_STRING_BUFFER_LENGTH];
toString(includePrivate, buf);
return String(buf);
}
/**
* Deserialize a human-friendly string
*
* Note: validation is for the format only. The locallyValidate() method
* must be used to check signature and address/key correspondence.
*
* @param str String to deserialize
* @return True if deserialization appears successful
*/
bool fromString(const char *str);
/**
* Deserialize a human-friendly string
*
* Note: validation is for the format only. The locallyValidate() method
* must be used to check signature and address/key correspondence.
*
* @param str String to deserialize
* @return True if deserialization appears successful
*/
bool fromString(const char* str);
/**
* Erase any private key in this identity object
*/
ZT_INLINE void erasePrivateKey() noexcept
{
Utils::burn(m_priv, sizeof(m_priv));
m_hasPrivate = false;
}
/**
* Erase any private key in this identity object
*/
ZT_INLINE void erasePrivateKey() noexcept
{
Utils::burn(m_priv, sizeof(m_priv));
m_hasPrivate = false;
}
/**
* @return True if this identity contains something
*/
explicit ZT_INLINE operator bool() const noexcept
{ return (m_fp); }
/**
* @return True if this identity contains something
*/
explicit ZT_INLINE operator bool() const noexcept
{
return (m_fp);
}
ZT_INLINE unsigned long hashCode() const noexcept
{ return m_fp.hashCode(); }
ZT_INLINE unsigned long hashCode() const noexcept
{
return m_fp.hashCode();
}
ZT_INLINE bool operator==(const Identity &id) const noexcept
{ return (m_fp == id.m_fp); }
ZT_INLINE bool operator==(const Identity& id) const noexcept
{
return (m_fp == id.m_fp);
}
ZT_INLINE bool operator!=(const Identity &id) const noexcept
{ return !(*this == id); }
ZT_INLINE bool operator!=(const Identity& id) const noexcept
{
return ! (*this == id);
}
ZT_INLINE bool operator<(const Identity &id) const noexcept
{ return (m_fp < id.m_fp); }
ZT_INLINE bool operator<(const Identity& id) const noexcept
{
return (m_fp < id.m_fp);
}
ZT_INLINE bool operator>(const Identity &id) const noexcept
{ return (id < *this); }
ZT_INLINE bool operator>(const Identity& id) const noexcept
{
return (id < *this);
}
ZT_INLINE bool operator<=(const Identity &id) const noexcept
{ return !(id < *this); }
ZT_INLINE bool operator<=(const Identity& id) const noexcept
{
return ! (id < *this);
}
ZT_INLINE bool operator>=(const Identity &id) const noexcept
{ return !(*this < id); }
ZT_INLINE bool operator>=(const Identity& id) const noexcept
{
return ! (*this < id);
}
static constexpr int marshalSizeMax() noexcept
{ return ZT_IDENTITY_MARSHAL_SIZE_MAX; }
static constexpr int marshalSizeMax() noexcept
{
return ZT_IDENTITY_MARSHAL_SIZE_MAX;
}
int marshal(uint8_t data[ZT_IDENTITY_MARSHAL_SIZE_MAX], bool includePrivate = false) const noexcept;
int unmarshal(const uint8_t *data, int len) noexcept;
int marshal(uint8_t data[ZT_IDENTITY_MARSHAL_SIZE_MAX], bool includePrivate = false) const noexcept;
int unmarshal(const uint8_t* data, int len) noexcept;
private:
void m_computeHash();
private:
void m_computeHash();
Fingerprint m_fp;
uint8_t m_priv[ZT_IDENTITY_P384_COMPOUND_PRIVATE_KEY_SIZE];
uint8_t m_pub[ZT_IDENTITY_P384_COMPOUND_PUBLIC_KEY_SIZE];
Type m_type; // _type determines which fields in _priv and _pub are used
bool m_hasPrivate;
Fingerprint m_fp;
uint8_t m_priv[ZT_IDENTITY_P384_COMPOUND_PRIVATE_KEY_SIZE];
uint8_t m_pub[ZT_IDENTITY_P384_COMPOUND_PUBLIC_KEY_SIZE];
Type m_type; // _type determines which fields in _priv and _pub are used
bool m_hasPrivate;
};
} // namespace ZeroTier
} // namespace ZeroTier
#endif
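
A minimal end-to-end sketch of the API declared above (type 1 generation plus local validation); generate() runs a proof-of-work and may take several seconds.

// Illustrative only: generate a type 1 (P384) identity and print its public string form.
#include "Identity.hpp"

#include <cstdio>

using namespace ZeroTier;

int main()
{
    Identity id;
    if (! id.generate(Identity::P384))
        return 1;
    char buf[ZT_IDENTITY_STRING_BUFFER_LENGTH];
    const char* s = id.toString(false, buf);
    std::printf("%s\n", (s != nullptr) ? s : "(error)");
    return id.locallyValidate() ? 0 : 1;
}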


@@ -13,454 +13,481 @@
#define _WIN32_WINNT 0x06010000
#include "Constants.hpp"
#include "InetAddress.hpp"
#include "Constants.hpp"
#include "Utils.hpp"
namespace ZeroTier {
static_assert(ZT_SOCKADDR_STORAGE_SIZE == sizeof(sockaddr_storage), "ZT_SOCKADDR_STORAGE_SIZE is incorrect on this platform, must be size of sockaddr_storage");
static_assert(ZT_SOCKADDR_STORAGE_SIZE == sizeof(InetAddress), "ZT_SOCKADDR_STORAGE_SIZE should equal InetAddress, which should equal size of sockaddr_storage");
static_assert(ZT_SOCKADDR_STORAGE_SIZE == sizeof(ZT_InetAddress), "ZT_SOCKADDR_STORAGE_SIZE should equal ZT_InetAddress, which should equal size of sockaddr_storage");
static_assert(
ZT_SOCKADDR_STORAGE_SIZE == sizeof(sockaddr_storage),
"ZT_SOCKADDR_STORAGE_SIZE is incorrect on this platform, must be size of sockaddr_storage");
static_assert(
ZT_SOCKADDR_STORAGE_SIZE == sizeof(InetAddress),
"ZT_SOCKADDR_STORAGE_SIZE should equal InetAddress, which should equal size of sockaddr_storage");
static_assert(
ZT_SOCKADDR_STORAGE_SIZE == sizeof(ZT_InetAddress),
"ZT_SOCKADDR_STORAGE_SIZE should equal ZT_InetAddress, which should equal size of sockaddr_storage");
const InetAddress InetAddress::LO4((const void *) ("\x7f\x00\x00\x01"), 4, 0);
const InetAddress InetAddress::LO6((const void *) ("\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01"), 16, 0);
const InetAddress InetAddress::LO4((const void*)("\x7f\x00\x00\x01"), 4, 0);
const InetAddress
InetAddress::LO6((const void*)("\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01"), 16, 0);
const InetAddress InetAddress::NIL;
InetAddress::IpScope InetAddress::ipScope() const noexcept
{
switch (as.ss.ss_family) {
switch (as.ss.ss_family) {
case AF_INET: {
const uint32_t ip = Utils::ntoh((uint32_t)as.sa_in.sin_addr.s_addr);
switch (ip >> 24U) {
case 0x00:
return ZT_IP_SCOPE_NONE; // 0.0.0.0/8 (reserved, never used)
case 0x06:
return ZT_IP_SCOPE_PSEUDOPRIVATE; // 6.0.0.0/8 (US Army)
case 0x0a:
return ZT_IP_SCOPE_PRIVATE; // 10.0.0.0/8
case 0x15: // return IP_SCOPE_PSEUDOPRIVATE; // 21.0.0.0/8 (US DDN-RVN)
case 0x16: // return IP_SCOPE_PSEUDOPRIVATE; // 22.0.0.0/8 (US DISA)
case 0x19: // return IP_SCOPE_PSEUDOPRIVATE; // 25.0.0.0/8 (UK Ministry of
// Defense)
case 0x1a: // return IP_SCOPE_PSEUDOPRIVATE; // 26.0.0.0/8 (US DISA)
case 0x1c: // return IP_SCOPE_PSEUDOPRIVATE; // 28.0.0.0/8 (US DSI-North)
case 0x1d: // return IP_SCOPE_PSEUDOPRIVATE; // 29.0.0.0/8 (US DISA)
case 0x1e: // return IP_SCOPE_PSEUDOPRIVATE; // 30.0.0.0/8 (US DISA)
case 0x33: // return IP_SCOPE_PSEUDOPRIVATE; // 51.0.0.0/8 (UK Department of
// Social Security)
case 0x37: // return IP_SCOPE_PSEUDOPRIVATE; // 55.0.0.0/8 (US DoD)
case 0x38: // 56.0.0.0/8 (US Postal Service)
return ZT_IP_SCOPE_PSEUDOPRIVATE;
case 0x64:
if ((ip & 0xffc00000) == 0x64400000)
return ZT_IP_SCOPE_PRIVATE; // 100.64.0.0/10
break;
case 0x7f:
return ZT_IP_SCOPE_LOOPBACK; // 127.0.0.0/8
case 0xa9:
if ((ip & 0xffff0000) == 0xa9fe0000)
return ZT_IP_SCOPE_LINK_LOCAL; // 169.254.0.0/16
break;
case 0xac:
if ((ip & 0xfff00000) == 0xac100000)
return ZT_IP_SCOPE_PRIVATE; // 172.16.0.0/12
break;
case 0xc0:
if ((ip & 0xffff0000) == 0xc0a80000)
return ZT_IP_SCOPE_PRIVATE; // 192.168.0.0/16
if ((ip & 0xffffff00) == 0xc0000200)
return ZT_IP_SCOPE_PRIVATE; // 192.0.2.0/24
break;
case 0xc6:
if ((ip & 0xfffe0000) == 0xc6120000)
return ZT_IP_SCOPE_PRIVATE; // 198.18.0.0/15
if ((ip & 0xffffff00) == 0xc6336400)
return ZT_IP_SCOPE_PRIVATE; // 198.51.100.0/24
break;
case 0xcb:
if ((ip & 0xffffff00) == 0xcb007100)
return ZT_IP_SCOPE_PRIVATE; // 203.0.113.0/24
break;
case 0xff:
return ZT_IP_SCOPE_NONE; // 255.0.0.0/8 (broadcast, or unused/unusable)
}
switch (ip >> 28U) {
case 0xe:
return ZT_IP_SCOPE_MULTICAST; // 224.0.0.0/4
case 0xf:
return ZT_IP_SCOPE_PSEUDOPRIVATE; // 240.0.0.0/4 ("reserved," usually unusable)
}
return ZT_IP_SCOPE_GLOBAL;
}
case AF_INET: {
const uint32_t ip = Utils::ntoh((uint32_t)as.sa_in.sin_addr.s_addr);
switch (ip >> 24U) {
case 0x00:
return ZT_IP_SCOPE_NONE; // 0.0.0.0/8 (reserved, never used)
case 0x06:
return ZT_IP_SCOPE_PSEUDOPRIVATE; // 6.0.0.0/8 (US Army)
case 0x0a:
return ZT_IP_SCOPE_PRIVATE; // 10.0.0.0/8
case 0x15: //return IP_SCOPE_PSEUDOPRIVATE; // 21.0.0.0/8 (US DDN-RVN)
case 0x16: //return IP_SCOPE_PSEUDOPRIVATE; // 22.0.0.0/8 (US DISA)
case 0x19: //return IP_SCOPE_PSEUDOPRIVATE; // 25.0.0.0/8 (UK Ministry of Defense)
case 0x1a: //return IP_SCOPE_PSEUDOPRIVATE; // 26.0.0.0/8 (US DISA)
case 0x1c: //return IP_SCOPE_PSEUDOPRIVATE; // 28.0.0.0/8 (US DSI-North)
case 0x1d: //return IP_SCOPE_PSEUDOPRIVATE; // 29.0.0.0/8 (US DISA)
case 0x1e: //return IP_SCOPE_PSEUDOPRIVATE; // 30.0.0.0/8 (US DISA)
case 0x33: //return IP_SCOPE_PSEUDOPRIVATE; // 51.0.0.0/8 (UK Department of Social Security)
case 0x37: //return IP_SCOPE_PSEUDOPRIVATE; // 55.0.0.0/8 (US DoD)
case 0x38: // 56.0.0.0/8 (US Postal Service)
return ZT_IP_SCOPE_PSEUDOPRIVATE;
case 0x64:
if ((ip & 0xffc00000) == 0x64400000) return ZT_IP_SCOPE_PRIVATE; // 100.64.0.0/10
break;
case 0x7f:
return ZT_IP_SCOPE_LOOPBACK; // 127.0.0.0/8
case 0xa9:
if ((ip & 0xffff0000) == 0xa9fe0000) return ZT_IP_SCOPE_LINK_LOCAL; // 169.254.0.0/16
break;
case 0xac:
if ((ip & 0xfff00000) == 0xac100000) return ZT_IP_SCOPE_PRIVATE; // 172.16.0.0/12
break;
case 0xc0:
if ((ip & 0xffff0000) == 0xc0a80000) return ZT_IP_SCOPE_PRIVATE; // 192.168.0.0/16
if ((ip & 0xffffff00) == 0xc0000200) return ZT_IP_SCOPE_PRIVATE; // 192.0.2.0/24
break;
case 0xc6:
if ((ip & 0xfffe0000) == 0xc6120000) return ZT_IP_SCOPE_PRIVATE; // 198.18.0.0/15
if ((ip & 0xffffff00) == 0xc6336400) return ZT_IP_SCOPE_PRIVATE; // 198.51.100.0/24
break;
case 0xcb:
if ((ip & 0xffffff00) == 0xcb007100) return ZT_IP_SCOPE_PRIVATE; // 203.0.113.0/24
break;
case 0xff:
return ZT_IP_SCOPE_NONE; // 255.0.0.0/8 (broadcast, or unused/unusable)
}
switch (ip >> 28U) {
case 0xe:
return ZT_IP_SCOPE_MULTICAST; // 224.0.0.0/4
case 0xf:
return ZT_IP_SCOPE_PSEUDOPRIVATE; // 240.0.0.0/4 ("reserved," usually unusable)
}
return ZT_IP_SCOPE_GLOBAL;
}
case AF_INET6: {
const uint8_t *const ip = as.sa_in6.sin6_addr.s6_addr;
if ((ip[0] & 0xf0U) == 0xf0) {
if (ip[0] == 0xff) return ZT_IP_SCOPE_MULTICAST; // ff00::/8
if ((ip[0] == 0xfe) && ((ip[1] & 0xc0U) == 0x80)) {
unsigned int k = 2;
while ((!ip[k]) && (k < 15)) ++k;
if ((k == 15) && (ip[15] == 0x01))
return ZT_IP_SCOPE_LOOPBACK; // fe80::1/128
else return ZT_IP_SCOPE_LINK_LOCAL; // fe80::/10
}
if ((ip[0] & 0xfeU) == 0xfc) return ZT_IP_SCOPE_PRIVATE; // fc00::/7
}
unsigned int k = 0;
while ((!ip[k]) && (k < 15)) ++k;
if (k == 15) { // all 0's except last byte
if (ip[15] == 0x01) return ZT_IP_SCOPE_LOOPBACK; // ::1/128
if (ip[15] == 0x00) return ZT_IP_SCOPE_NONE; // ::/128
}
return ZT_IP_SCOPE_GLOBAL;
}
}
return ZT_IP_SCOPE_NONE;
case AF_INET6: {
const uint8_t* const ip = as.sa_in6.sin6_addr.s6_addr;
if ((ip[0] & 0xf0U) == 0xf0) {
if (ip[0] == 0xff)
return ZT_IP_SCOPE_MULTICAST; // ff00::/8
if ((ip[0] == 0xfe) && ((ip[1] & 0xc0U) == 0x80)) {
unsigned int k = 2;
while ((! ip[k]) && (k < 15))
++k;
if ((k == 15) && (ip[15] == 0x01))
return ZT_IP_SCOPE_LOOPBACK; // fe80::1/128
else
return ZT_IP_SCOPE_LINK_LOCAL; // fe80::/10
}
if ((ip[0] & 0xfeU) == 0xfc)
return ZT_IP_SCOPE_PRIVATE; // fc00::/7
}
unsigned int k = 0;
while ((! ip[k]) && (k < 15))
++k;
if (k == 15) { // all 0's except last byte
if (ip[15] == 0x01)
return ZT_IP_SCOPE_LOOPBACK; // ::1/128
if (ip[15] == 0x00)
return ZT_IP_SCOPE_NONE; // ::/128
}
return ZT_IP_SCOPE_GLOBAL;
}
}
return ZT_IP_SCOPE_NONE;
}
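
A small sanity sketch of ipScope(), using the ip/port string form parsed by fromString() elsewhere in this file: 10.0.0.0/8 is RFC1918 private space while 8.8.8.8 is globally routable.

// Illustrative only: scope classification of a private and a global IPv4 address.
#include "InetAddress.hpp"

using namespace ZeroTier;

static bool scopeSanityCheck()
{
    InetAddress a, b;
    if (! (a.fromString("10.1.2.3/9993") && b.fromString("8.8.8.8/9993")))
        return false;
    return (a.ipScope() == ZT_IP_SCOPE_PRIVATE) && (b.ipScope() == ZT_IP_SCOPE_GLOBAL);
}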
void InetAddress::set(const void *ipBytes, unsigned int ipLen, unsigned int port) noexcept
void InetAddress::set(const void* ipBytes, unsigned int ipLen, unsigned int port) noexcept
{
memoryZero(this);
if (ipLen == 4) {
as.sa_in.sin_family = AF_INET;
as.sa_in.sin_port = Utils::hton((uint16_t) port);
as.sa_in.sin_addr.s_addr = Utils::loadMachineEndian< uint32_t >(ipBytes);
} else if (ipLen == 16) {
as.sa_in6.sin6_family = AF_INET6;
as.sa_in6.sin6_port = Utils::hton((uint16_t) port);
Utils::copy<16>(as.sa_in6.sin6_addr.s6_addr, ipBytes);
}
memoryZero(this);
if (ipLen == 4) {
as.sa_in.sin_family = AF_INET;
as.sa_in.sin_port = Utils::hton((uint16_t)port);
as.sa_in.sin_addr.s_addr = Utils::loadMachineEndian<uint32_t>(ipBytes);
}
else if (ipLen == 16) {
as.sa_in6.sin6_family = AF_INET6;
as.sa_in6.sin6_port = Utils::hton((uint16_t)port);
Utils::copy<16>(as.sa_in6.sin6_addr.s6_addr, ipBytes);
}
}
bool InetAddress::isDefaultRoute() const noexcept
{
switch (as.ss.ss_family) {
case AF_INET:
return ((as.sa_in.sin_port == 0) && (as.sa_in.sin_addr.s_addr == 0));
case AF_INET6:
if (as.sa_in6.sin6_port == 0) {
for (unsigned int i = 0;i < 16;++i) {
if (as.sa_in6.sin6_addr.s6_addr[i])
return false;
}
return true;
}
return false;
default:
return false;
}
switch (as.ss.ss_family) {
case AF_INET:
return ((as.sa_in.sin_port == 0) && (as.sa_in.sin_addr.s_addr == 0));
case AF_INET6:
if (as.sa_in6.sin6_port == 0) {
for (unsigned int i = 0; i < 16; ++i) {
if (as.sa_in6.sin6_addr.s6_addr[i])
return false;
}
return true;
}
return false;
default:
return false;
}
}
char *InetAddress::toString(char buf[ZT_INETADDRESS_STRING_SIZE_MAX]) const noexcept
char* InetAddress::toString(char buf[ZT_INETADDRESS_STRING_SIZE_MAX]) const noexcept
{
char *p = toIpString(buf);
if (*p) {
while (*p) ++p;
*(p++) = '/';
Utils::decimal(port(), p);
}
return buf;
char* p = toIpString(buf);
if (*p) {
while (*p)
++p;
*(p++) = '/';
Utils::decimal(port(), p);
}
return buf;
}
char *InetAddress::toIpString(char buf[ZT_INETADDRESS_STRING_SIZE_MAX]) const noexcept
char* InetAddress::toIpString(char buf[ZT_INETADDRESS_STRING_SIZE_MAX]) const noexcept
{
buf[0] = (char) 0;
switch (as.ss.ss_family) {
case AF_INET:
inet_ntop(AF_INET, &as.sa_in.sin_addr.s_addr, buf, INET_ADDRSTRLEN);
break;
case AF_INET6:
inet_ntop(AF_INET6, as.sa_in6.sin6_addr.s6_addr, buf, INET6_ADDRSTRLEN);
break;
}
return buf;
buf[0] = (char)0;
switch (as.ss.ss_family) {
case AF_INET:
inet_ntop(AF_INET, &as.sa_in.sin_addr.s_addr, buf, INET_ADDRSTRLEN);
break;
case AF_INET6:
inet_ntop(AF_INET6, as.sa_in6.sin6_addr.s6_addr, buf, INET6_ADDRSTRLEN);
break;
}
return buf;
}
bool InetAddress::fromString(const char *ipSlashPort) noexcept
bool InetAddress::fromString(const char* ipSlashPort) noexcept
{
char buf[64];
char buf[64];
memoryZero(this);
memoryZero(this);
if (!*ipSlashPort)
return true;
if (!Utils::scopy(buf, sizeof(buf), ipSlashPort))
return false;
if (! *ipSlashPort)
return true;
if (! Utils::scopy(buf, sizeof(buf), ipSlashPort))
return false;
char *portAt = buf;
while ((*portAt) && (*portAt != '/'))
++portAt;
unsigned int port = 0;
if (*portAt) {
*(portAt++) = (char) 0;
port = Utils::strToUInt(portAt) & 0xffffU;
}
char* portAt = buf;
while ((*portAt) && (*portAt != '/'))
++portAt;
unsigned int port = 0;
if (*portAt) {
*(portAt++) = (char)0;
port = Utils::strToUInt(portAt) & 0xffffU;
}
if (strchr(buf, ':')) {
as.sa_in6.sin6_family = AF_INET6;
as.sa_in6.sin6_port = Utils::hton((uint16_t) port);
inet_pton(AF_INET6, buf, as.sa_in6.sin6_addr.s6_addr);
return true;
} else if (strchr(buf, '.')) {
as.sa_in.sin_family = AF_INET;
as.sa_in.sin_port = Utils::hton((uint16_t) port);
inet_pton(AF_INET, buf, &as.sa_in.sin_addr.s_addr);
return true;
}
if (strchr(buf, ':')) {
as.sa_in6.sin6_family = AF_INET6;
as.sa_in6.sin6_port = Utils::hton((uint16_t)port);
inet_pton(AF_INET6, buf, as.sa_in6.sin6_addr.s6_addr);
return true;
}
else if (strchr(buf, '.')) {
as.sa_in.sin_family = AF_INET;
as.sa_in.sin_port = Utils::hton((uint16_t)port);
inet_pton(AF_INET, buf, &as.sa_in.sin_addr.s_addr);
return true;
}
return false;
return false;
}
InetAddress InetAddress::netmask() const noexcept
{
InetAddress r(*this);
switch (r.as.ss.ss_family) {
case AF_INET:
r.as.sa_in.sin_addr.s_addr = Utils::hton((uint32_t) (0xffffffffU << (32 - netmaskBits())));
break;
case AF_INET6: {
uint64_t nm[2];
const unsigned int bits = netmaskBits();
if (bits) {
nm[0] = Utils::hton((uint64_t) ((bits >= 64) ? 0xffffffffffffffffULL : (0xffffffffffffffffULL << (64 - bits))));
nm[1] = Utils::hton((uint64_t) ((bits <= 64) ? 0ULL : (0xffffffffffffffffULL << (128 - bits))));
} else {
nm[0] = 0;
nm[1] = 0;
}
Utils::copy<16>(r.as.sa_in6.sin6_addr.s6_addr, nm);
}
break;
}
return r;
InetAddress r(*this);
switch (r.as.ss.ss_family) {
case AF_INET:
r.as.sa_in.sin_addr.s_addr = Utils::hton((uint32_t)(0xffffffffU << (32 - netmaskBits())));
break;
case AF_INET6: {
uint64_t nm[2];
const unsigned int bits = netmaskBits();
if (bits) {
nm[0] = Utils::hton(
(uint64_t)((bits >= 64) ? 0xffffffffffffffffULL : (0xffffffffffffffffULL << (64 - bits))));
nm[1] = Utils::hton((uint64_t)((bits <= 64) ? 0ULL : (0xffffffffffffffffULL << (128 - bits))));
}
else {
nm[0] = 0;
nm[1] = 0;
}
Utils::copy<16>(r.as.sa_in6.sin6_addr.s6_addr, nm);
} break;
}
return r;
}
InetAddress InetAddress::broadcast() const noexcept
{
if (as.ss.ss_family == AF_INET) {
InetAddress r(*this);
reinterpret_cast<sockaddr_in *>(&r)->sin_addr.s_addr |= Utils::hton((uint32_t) (0xffffffffU >> netmaskBits()));
return r;
}
return InetAddress();
if (as.ss.ss_family == AF_INET) {
InetAddress r(*this);
reinterpret_cast<sockaddr_in*>(&r)->sin_addr.s_addr |= Utils::hton((uint32_t)(0xffffffffU >> netmaskBits()));
return r;
}
return InetAddress();
}
InetAddress InetAddress::network() const noexcept
{
InetAddress r(*this);
switch (r.as.ss.ss_family) {
case AF_INET:
r.as.sa_in.sin_addr.s_addr &= Utils::hton((uint32_t) (0xffffffffU << (32 - netmaskBits())));
break;
case AF_INET6: {
uint64_t nm[2];
const unsigned int bits = netmaskBits();
Utils::copy<16>(nm, reinterpret_cast<sockaddr_in6 *>(&r)->sin6_addr.s6_addr);
nm[0] &= Utils::hton((uint64_t) ((bits >= 64) ? 0xffffffffffffffffULL : (0xffffffffffffffffULL << (64 - bits))));
nm[1] &= Utils::hton((uint64_t) ((bits <= 64) ? 0ULL : (0xffffffffffffffffULL << (128 - bits))));
Utils::copy<16>(r.as.sa_in6.sin6_addr.s6_addr, nm);
}
break;
}
return r;
InetAddress r(*this);
switch (r.as.ss.ss_family) {
case AF_INET:
r.as.sa_in.sin_addr.s_addr &= Utils::hton((uint32_t)(0xffffffffU << (32 - netmaskBits())));
break;
case AF_INET6: {
uint64_t nm[2];
const unsigned int bits = netmaskBits();
Utils::copy<16>(nm, reinterpret_cast<sockaddr_in6*>(&r)->sin6_addr.s6_addr);
nm[0] &=
Utils::hton((uint64_t)((bits >= 64) ? 0xffffffffffffffffULL : (0xffffffffffffffffULL << (64 - bits))));
nm[1] &= Utils::hton((uint64_t)((bits <= 64) ? 0ULL : (0xffffffffffffffffULL << (128 - bits))));
Utils::copy<16>(r.as.sa_in6.sin6_addr.s6_addr, nm);
} break;
}
return r;
}
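
netmask(), broadcast() and network() all treat the port field as the CIDR prefix length via netmaskBits(), as the code above assumes; a minimal sketch for a /24:

// Illustrative only: derived addresses for 192.168.1.23/24
// (expected: mask 255.255.255.0, broadcast 192.168.1.255, network 192.168.1.0).
#include "InetAddress.hpp"

#include <cstdio>

using namespace ZeroTier;

int main()
{
    InetAddress nw;
    nw.fromString("192.168.1.23/24");   // here the "port" doubles as the prefix length
    char m[ZT_INETADDRESS_STRING_SIZE_MAX], b[ZT_INETADDRESS_STRING_SIZE_MAX], n[ZT_INETADDRESS_STRING_SIZE_MAX];
    std::printf("%s %s %s\n", nw.netmask().toIpString(m), nw.broadcast().toIpString(b), nw.network().toIpString(n));
    return 0;
}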
bool InetAddress::isEqualPrefix(const InetAddress &addr) const noexcept
bool InetAddress::isEqualPrefix(const InetAddress& addr) const noexcept
{
if (addr.as.ss.ss_family == as.ss.ss_family) {
switch (as.ss.ss_family) {
case AF_INET6: {
const InetAddress mask(netmask());
InetAddress addr_mask(addr.netmask());
const uint8_t *const n = addr_mask.as.sa_in6.sin6_addr.s6_addr;
const uint8_t *const m = mask.as.sa_in6.sin6_addr.s6_addr;
const uint8_t *const a = addr.as.sa_in6.sin6_addr.s6_addr;
const uint8_t *const b = as.sa_in6.sin6_addr.s6_addr;
for (unsigned int i = 0;i < 16;++i) {
if ((a[i] & m[i]) != (b[i] & n[i]))
return false;
}
return true;
}
}
}
return false;
if (addr.as.ss.ss_family == as.ss.ss_family) {
switch (as.ss.ss_family) {
case AF_INET6: {
const InetAddress mask(netmask());
InetAddress addr_mask(addr.netmask());
const uint8_t* const n = addr_mask.as.sa_in6.sin6_addr.s6_addr;
const uint8_t* const m = mask.as.sa_in6.sin6_addr.s6_addr;
const uint8_t* const a = addr.as.sa_in6.sin6_addr.s6_addr;
const uint8_t* const b = as.sa_in6.sin6_addr.s6_addr;
for (unsigned int i = 0; i < 16; ++i) {
if ((a[i] & m[i]) != (b[i] & n[i]))
return false;
}
return true;
}
}
}
return false;
}
bool InetAddress::containsAddress(const InetAddress &addr) const noexcept
bool InetAddress::containsAddress(const InetAddress& addr) const noexcept
{
if (addr.as.ss.ss_family == as.ss.ss_family) {
switch (as.ss.ss_family) {
case AF_INET: {
const unsigned int bits = netmaskBits();
if (bits == 0)
return true;
return (
(Utils::ntoh((uint32_t) addr.as.sa_in.sin_addr.s_addr) >> (32 - bits)) ==
(Utils::ntoh((uint32_t) as.sa_in.sin_addr.s_addr) >> (32 - bits))
);
}
case AF_INET6: {
const InetAddress mask(netmask());
const uint8_t *const m = mask.as.sa_in6.sin6_addr.s6_addr;
const uint8_t *const a = addr.as.sa_in6.sin6_addr.s6_addr;
const uint8_t *const b = as.sa_in6.sin6_addr.s6_addr;
for (unsigned int i = 0;i < 16;++i) {
if ((a[i] & m[i]) != b[i])
return false;
}
return true;
}
}
}
return false;
if (addr.as.ss.ss_family == as.ss.ss_family) {
switch (as.ss.ss_family) {
case AF_INET: {
const unsigned int bits = netmaskBits();
if (bits == 0)
return true;
return (
(Utils::ntoh((uint32_t)addr.as.sa_in.sin_addr.s_addr) >> (32 - bits))
== (Utils::ntoh((uint32_t)as.sa_in.sin_addr.s_addr) >> (32 - bits)));
}
case AF_INET6: {
const InetAddress mask(netmask());
const uint8_t* const m = mask.as.sa_in6.sin6_addr.s6_addr;
const uint8_t* const a = addr.as.sa_in6.sin6_addr.s6_addr;
const uint8_t* const b = as.sa_in6.sin6_addr.s6_addr;
for (unsigned int i = 0; i < 16; ++i) {
if ((a[i] & m[i]) != b[i])
return false;
}
return true;
}
}
}
return false;
}
bool InetAddress::isNetwork() const noexcept
{
switch (as.ss.ss_family) {
case AF_INET: {
unsigned int bits = netmaskBits();
if (bits <= 0)
return false;
if (bits >= 32)
return false;
const uint32_t ip = Utils::ntoh((uint32_t) as.sa_in.sin_addr.s_addr);
return ((ip & (0xffffffffU >> bits)) == 0);
}
case AF_INET6: {
unsigned int bits = netmaskBits();
if (bits <= 0)
return false;
if (bits >= 128)
return false;
const uint8_t *const ip = as.sa_in6.sin6_addr.s6_addr;
unsigned int p = bits / 8;
if ((ip[p++] & (0xffU >> (bits % 8))) != 0)
return false;
while (p < 16) {
if (ip[p++])
return false;
}
return true;
}
}
return false;
switch (as.ss.ss_family) {
case AF_INET: {
unsigned int bits = netmaskBits();
if (bits <= 0)
return false;
if (bits >= 32)
return false;
const uint32_t ip = Utils::ntoh((uint32_t)as.sa_in.sin_addr.s_addr);
return ((ip & (0xffffffffU >> bits)) == 0);
}
case AF_INET6: {
unsigned int bits = netmaskBits();
if (bits <= 0)
return false;
if (bits >= 128)
return false;
const uint8_t* const ip = as.sa_in6.sin6_addr.s6_addr;
unsigned int p = bits / 8;
if ((ip[p++] & (0xffU >> (bits % 8))) != 0)
return false;
while (p < 16) {
if (ip[p++])
return false;
}
return true;
}
}
return false;
}
int InetAddress::marshal(uint8_t data[ZT_INETADDRESS_MARSHAL_SIZE_MAX]) const noexcept
{
unsigned int port;
switch (as.ss.ss_family) {
case AF_INET:
port = Utils::ntoh((uint16_t) reinterpret_cast<const sockaddr_in *>(this)->sin_port);
data[0] = 4;
data[1] = reinterpret_cast<const uint8_t *>(&as.sa_in.sin_addr.s_addr)[0];
data[2] = reinterpret_cast<const uint8_t *>(&as.sa_in.sin_addr.s_addr)[1];
data[3] = reinterpret_cast<const uint8_t *>(&as.sa_in.sin_addr.s_addr)[2];
data[4] = reinterpret_cast<const uint8_t *>(&as.sa_in.sin_addr.s_addr)[3];
data[5] = (uint8_t) (port >> 8U);
data[6] = (uint8_t) port;
return 7;
case AF_INET6:
port = Utils::ntoh((uint16_t) as.sa_in6.sin6_port);
data[0] = 6;
Utils::copy<16>(data + 1, as.sa_in6.sin6_addr.s6_addr);
data[17] = (uint8_t) (port >> 8U);
data[18] = (uint8_t) port;
return 19;
default:
data[0] = 0;
return 1;
}
unsigned int port;
switch (as.ss.ss_family) {
case AF_INET:
port = Utils::ntoh((uint16_t) reinterpret_cast<const sockaddr_in*>(this)->sin_port);
data[0] = 4;
data[1] = reinterpret_cast<const uint8_t*>(&as.sa_in.sin_addr.s_addr)[0];
data[2] = reinterpret_cast<const uint8_t*>(&as.sa_in.sin_addr.s_addr)[1];
data[3] = reinterpret_cast<const uint8_t*>(&as.sa_in.sin_addr.s_addr)[2];
data[4] = reinterpret_cast<const uint8_t*>(&as.sa_in.sin_addr.s_addr)[3];
data[5] = (uint8_t)(port >> 8U);
data[6] = (uint8_t)port;
return 7;
case AF_INET6:
port = Utils::ntoh((uint16_t)as.sa_in6.sin6_port);
data[0] = 6;
Utils::copy<16>(data + 1, as.sa_in6.sin6_addr.s6_addr);
data[17] = (uint8_t)(port >> 8U);
data[18] = (uint8_t)port;
return 19;
default:
data[0] = 0;
return 1;
}
}
int InetAddress::unmarshal(const uint8_t *restrict data, const int len) noexcept
int InetAddress::unmarshal(const uint8_t* restrict data, const int len) noexcept
{
memoryZero(this);
if (unlikely(len <= 0))
return -1;
switch (data[0]) {
case 0:
return 1;
case 4:
if (unlikely(len < 7))
return -1;
as.sa_in.sin_family = AF_INET;
as.sa_in.sin_port = Utils::loadMachineEndian< uint16_t >(data + 5);
as.sa_in.sin_addr.s_addr = Utils::loadMachineEndian< uint32_t >(data + 1);
return 7;
case 6:
if (unlikely(len < 19))
return -1;
as.sa_in6.sin6_family = AF_INET6;
as.sa_in6.sin6_port = Utils::loadMachineEndian< uint16_t >(data + 17);
Utils::copy<16>(as.sa_in6.sin6_addr.s6_addr, data + 1);
return 19;
default:
return -1;
}
memoryZero(this);
if (unlikely(len <= 0))
return -1;
switch (data[0]) {
case 0:
return 1;
case 4:
if (unlikely(len < 7))
return -1;
as.sa_in.sin_family = AF_INET;
as.sa_in.sin_port = Utils::loadMachineEndian<uint16_t>(data + 5);
as.sa_in.sin_addr.s_addr = Utils::loadMachineEndian<uint32_t>(data + 1);
return 7;
case 6:
if (unlikely(len < 19))
return -1;
as.sa_in6.sin6_family = AF_INET6;
as.sa_in6.sin6_port = Utils::loadMachineEndian<uint16_t>(data + 17);
Utils::copy<16>(as.sa_in6.sin6_addr.s6_addr, data + 1);
return 19;
default:
return -1;
}
}
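
The wire format produced here is compact: 7 bytes for IPv4 (type tag 4, four address bytes, big-endian port) and 19 bytes for IPv6. A minimal round-trip sketch:

// Illustrative only: IPv4 marshal/unmarshal round trip and byte layout check.
#include "InetAddress.hpp"

#include <cstdint>

using namespace ZeroTier;

static bool wireFormatCheck()
{
    InetAddress a;
    if (! a.fromString("1.2.3.4/9993"))
        return false;
    uint8_t w[ZT_INETADDRESS_MARSHAL_SIZE_MAX];
    if (a.marshal(w) != 7)
        return false;
    InetAddress b;
    return (b.unmarshal(w, 7) == 7) && (w[0] == 4) && (w[5] == (9993 >> 8)) && (w[6] == (9993 & 0xff));
}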
InetAddress InetAddress::makeIpv6LinkLocal(const MAC &mac) noexcept
InetAddress InetAddress::makeIpv6LinkLocal(const MAC& mac) noexcept
{
InetAddress r;
r.as.sa_in6.sin6_family = AF_INET6;
r.as.sa_in6.sin6_port = ZT_CONST_TO_BE_UINT16(64);
r.as.sa_in6.sin6_addr.s6_addr[0] = 0xfe;
r.as.sa_in6.sin6_addr.s6_addr[1] = 0x80;
r.as.sa_in6.sin6_addr.s6_addr[2] = 0x00;
r.as.sa_in6.sin6_addr.s6_addr[3] = 0x00;
r.as.sa_in6.sin6_addr.s6_addr[4] = 0x00;
r.as.sa_in6.sin6_addr.s6_addr[5] = 0x00;
r.as.sa_in6.sin6_addr.s6_addr[6] = 0x00;
r.as.sa_in6.sin6_addr.s6_addr[7] = 0x00;
r.as.sa_in6.sin6_addr.s6_addr[8] = mac[0] & 0xfdU;
r.as.sa_in6.sin6_addr.s6_addr[9] = mac[1];
r.as.sa_in6.sin6_addr.s6_addr[10] = mac[2];
r.as.sa_in6.sin6_addr.s6_addr[11] = 0xff;
r.as.sa_in6.sin6_addr.s6_addr[12] = 0xfe;
r.as.sa_in6.sin6_addr.s6_addr[13] = mac[3];
r.as.sa_in6.sin6_addr.s6_addr[14] = mac[4];
r.as.sa_in6.sin6_addr.s6_addr[15] = mac[5];
return r;
InetAddress r;
r.as.sa_in6.sin6_family = AF_INET6;
r.as.sa_in6.sin6_port = ZT_CONST_TO_BE_UINT16(64);
r.as.sa_in6.sin6_addr.s6_addr[0] = 0xfe;
r.as.sa_in6.sin6_addr.s6_addr[1] = 0x80;
r.as.sa_in6.sin6_addr.s6_addr[2] = 0x00;
r.as.sa_in6.sin6_addr.s6_addr[3] = 0x00;
r.as.sa_in6.sin6_addr.s6_addr[4] = 0x00;
r.as.sa_in6.sin6_addr.s6_addr[5] = 0x00;
r.as.sa_in6.sin6_addr.s6_addr[6] = 0x00;
r.as.sa_in6.sin6_addr.s6_addr[7] = 0x00;
r.as.sa_in6.sin6_addr.s6_addr[8] = mac[0] & 0xfdU;
r.as.sa_in6.sin6_addr.s6_addr[9] = mac[1];
r.as.sa_in6.sin6_addr.s6_addr[10] = mac[2];
r.as.sa_in6.sin6_addr.s6_addr[11] = 0xff;
r.as.sa_in6.sin6_addr.s6_addr[12] = 0xfe;
r.as.sa_in6.sin6_addr.s6_addr[13] = mac[3];
r.as.sa_in6.sin6_addr.s6_addr[14] = mac[4];
r.as.sa_in6.sin6_addr.s6_addr[15] = mac[5];
return r;
}
InetAddress InetAddress::makeIpv6rfc4193(uint64_t nwid, uint64_t zeroTierAddress) noexcept
{
InetAddress r;
r.as.sa_in6.sin6_family = AF_INET6;
r.as.sa_in6.sin6_port = ZT_CONST_TO_BE_UINT16(88); // /88 includes 0xfd + network ID, discriminating by device ID below that
r.as.sa_in6.sin6_addr.s6_addr[0] = 0xfd;
r.as.sa_in6.sin6_addr.s6_addr[1] = (uint8_t) (nwid >> 56U);
r.as.sa_in6.sin6_addr.s6_addr[2] = (uint8_t) (nwid >> 48U);
r.as.sa_in6.sin6_addr.s6_addr[3] = (uint8_t) (nwid >> 40U);
r.as.sa_in6.sin6_addr.s6_addr[4] = (uint8_t) (nwid >> 32U);
r.as.sa_in6.sin6_addr.s6_addr[5] = (uint8_t) (nwid >> 24U);
r.as.sa_in6.sin6_addr.s6_addr[6] = (uint8_t) (nwid >> 16U);
r.as.sa_in6.sin6_addr.s6_addr[7] = (uint8_t) (nwid >> 8U);
r.as.sa_in6.sin6_addr.s6_addr[8] = (uint8_t) nwid;
r.as.sa_in6.sin6_addr.s6_addr[9] = 0x99;
r.as.sa_in6.sin6_addr.s6_addr[10] = 0x93;
r.as.sa_in6.sin6_addr.s6_addr[11] = (uint8_t) (zeroTierAddress >> 32U);
r.as.sa_in6.sin6_addr.s6_addr[12] = (uint8_t) (zeroTierAddress >> 24U);
r.as.sa_in6.sin6_addr.s6_addr[13] = (uint8_t) (zeroTierAddress >> 16U);
r.as.sa_in6.sin6_addr.s6_addr[14] = (uint8_t) (zeroTierAddress >> 8U);
r.as.sa_in6.sin6_addr.s6_addr[15] = (uint8_t) zeroTierAddress;
return r;
InetAddress r;
r.as.sa_in6.sin6_family = AF_INET6;
r.as.sa_in6.sin6_port =
ZT_CONST_TO_BE_UINT16(88); // /88 includes 0xfd + network ID, discriminating by device ID below that
r.as.sa_in6.sin6_addr.s6_addr[0] = 0xfd;
r.as.sa_in6.sin6_addr.s6_addr[1] = (uint8_t)(nwid >> 56U);
r.as.sa_in6.sin6_addr.s6_addr[2] = (uint8_t)(nwid >> 48U);
r.as.sa_in6.sin6_addr.s6_addr[3] = (uint8_t)(nwid >> 40U);
r.as.sa_in6.sin6_addr.s6_addr[4] = (uint8_t)(nwid >> 32U);
r.as.sa_in6.sin6_addr.s6_addr[5] = (uint8_t)(nwid >> 24U);
r.as.sa_in6.sin6_addr.s6_addr[6] = (uint8_t)(nwid >> 16U);
r.as.sa_in6.sin6_addr.s6_addr[7] = (uint8_t)(nwid >> 8U);
r.as.sa_in6.sin6_addr.s6_addr[8] = (uint8_t)nwid;
r.as.sa_in6.sin6_addr.s6_addr[9] = 0x99;
r.as.sa_in6.sin6_addr.s6_addr[10] = 0x93;
r.as.sa_in6.sin6_addr.s6_addr[11] = (uint8_t)(zeroTierAddress >> 32U);
r.as.sa_in6.sin6_addr.s6_addr[12] = (uint8_t)(zeroTierAddress >> 24U);
r.as.sa_in6.sin6_addr.s6_addr[13] = (uint8_t)(zeroTierAddress >> 16U);
r.as.sa_in6.sin6_addr.s6_addr[14] = (uint8_t)(zeroTierAddress >> 8U);
r.as.sa_in6.sin6_addr.s6_addr[15] = (uint8_t)zeroTierAddress;
return r;
}
InetAddress InetAddress::makeIpv66plane(uint64_t nwid, uint64_t zeroTierAddress) noexcept
{
nwid ^= (nwid >> 32U);
InetAddress r;
r.as.sa_in6.sin6_family = AF_INET6;
r.as.sa_in6.sin6_port = ZT_CONST_TO_BE_UINT16(40);
r.as.sa_in6.sin6_addr.s6_addr[0] = 0xfc;
r.as.sa_in6.sin6_addr.s6_addr[1] = (uint8_t) (nwid >> 24U);
r.as.sa_in6.sin6_addr.s6_addr[2] = (uint8_t) (nwid >> 16U);
r.as.sa_in6.sin6_addr.s6_addr[3] = (uint8_t) (nwid >> 8U);
r.as.sa_in6.sin6_addr.s6_addr[4] = (uint8_t) nwid;
r.as.sa_in6.sin6_addr.s6_addr[5] = (uint8_t) (zeroTierAddress >> 32U);
r.as.sa_in6.sin6_addr.s6_addr[6] = (uint8_t) (zeroTierAddress >> 24U);
r.as.sa_in6.sin6_addr.s6_addr[7] = (uint8_t) (zeroTierAddress >> 16U);
r.as.sa_in6.sin6_addr.s6_addr[8] = (uint8_t) (zeroTierAddress >> 8U);
r.as.sa_in6.sin6_addr.s6_addr[9] = (uint8_t) zeroTierAddress;
r.as.sa_in6.sin6_addr.s6_addr[15] = 0x01;
return r;
nwid ^= (nwid >> 32U);
InetAddress r;
r.as.sa_in6.sin6_family = AF_INET6;
r.as.sa_in6.sin6_port = ZT_CONST_TO_BE_UINT16(40);
r.as.sa_in6.sin6_addr.s6_addr[0] = 0xfc;
r.as.sa_in6.sin6_addr.s6_addr[1] = (uint8_t)(nwid >> 24U);
r.as.sa_in6.sin6_addr.s6_addr[2] = (uint8_t)(nwid >> 16U);
r.as.sa_in6.sin6_addr.s6_addr[3] = (uint8_t)(nwid >> 8U);
r.as.sa_in6.sin6_addr.s6_addr[4] = (uint8_t)nwid;
r.as.sa_in6.sin6_addr.s6_addr[5] = (uint8_t)(zeroTierAddress >> 32U);
r.as.sa_in6.sin6_addr.s6_addr[6] = (uint8_t)(zeroTierAddress >> 24U);
r.as.sa_in6.sin6_addr.s6_addr[7] = (uint8_t)(zeroTierAddress >> 16U);
r.as.sa_in6.sin6_addr.s6_addr[8] = (uint8_t)(zeroTierAddress >> 8U);
r.as.sa_in6.sin6_addr.s6_addr[9] = (uint8_t)zeroTierAddress;
r.as.sa_in6.sin6_addr.s6_addr[15] = 0x01;
return r;
}
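
Both helpers derive deterministic IPv6 addresses from a network ID and node address; a minimal sketch with arbitrary example values:

// Illustrative only: RFC4193 and 6plane addresses for an example network/node pair.
#include "InetAddress.hpp"

#include <cstdint>
#include <cstdio>

using namespace ZeroTier;

int main()
{
    const uint64_t nwid = 0x8056c2e21c000001ULL;   // arbitrary example network ID
    const uint64_t node = 0x1122334455ULL;         // arbitrary example node address
    char a[ZT_INETADDRESS_STRING_SIZE_MAX], b[ZT_INETADDRESS_STRING_SIZE_MAX];
    std::printf("rfc4193 %s\n", InetAddress::makeIpv6rfc4193(nwid, node).toIpString(a));
    std::printf("6plane  %s\n", InetAddress::makeIpv66plane(nwid, node).toIpString(b));
    return 0;
}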
extern "C" {
@@ -468,4 +495,4 @@ extern const int ZT_AF_INET = (int)AF_INET;
extern const int ZT_AF_INET6 = (int)AF_INET6;
}
} // namespace ZeroTier
} // namespace ZeroTier

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -18,12 +18,12 @@
namespace ZeroTier {
#define LZ4_MAX_INPUT_SIZE 0x7E000000
#define LZ4_COMPRESSBOUND(isize) ((unsigned)(isize) > (unsigned)LZ4_MAX_INPUT_SIZE ? 0 : (isize) + ((isize)/255) + 16)
#define LZ4_MAX_INPUT_SIZE 0x7E000000
#define LZ4_COMPRESSBOUND(isize) ((unsigned)(isize) > (unsigned)LZ4_MAX_INPUT_SIZE ? 0 : (isize) + ((isize) / 255) + 16)
int LZ4_compress_fast(const char *source,char *dest,int inputSize,int maxOutputSize,int acceleration = 1) noexcept;
int LZ4_decompress_safe(const char *source,char *dest,int compressedSize,int maxDecompressedSize) noexcept;
int LZ4_compress_fast(const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration = 1) noexcept;
int LZ4_decompress_safe(const char* source, char* dest, int compressedSize, int maxDecompressedSize) noexcept;
} // namespace ZeroTier
} // namespace ZeroTier
#endif
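A quick worked example of the worst-case bound formula above, evaluated for an arbitrary 1 MiB input; the sketch just re-states the macro locally so it can run on its own.

#include <cstdio>

#define LZ4_MAX_INPUT_SIZE 0x7E000000
#define LZ4_COMPRESSBOUND(isize) ((unsigned)(isize) > (unsigned)LZ4_MAX_INPUT_SIZE ? 0 : (isize) + ((isize) / 255) + 16)

int main()
{
    unsigned in = 1048576U;                                  // 1 MiB of input
    printf("%u -> bound %u\n", in, LZ4_COMPRESSBOUND(in));   // 1048576 + 4112 + 16 = 1052704
    return 0;
}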

View file

@@ -12,194 +12,216 @@
/****/
#include "Locator.hpp"
#include "Identity.hpp"
#include <algorithm>
namespace ZeroTier {
const SharedPtr< const Locator::EndpointAttributes > Locator::EndpointAttributes::DEFAULT(new Locator::EndpointAttributes());
const SharedPtr<const Locator::EndpointAttributes>
Locator::EndpointAttributes::DEFAULT(new Locator::EndpointAttributes());
Locator::Locator(const char *const str) noexcept :
__refCount(0)
Locator::Locator(const char* const str) noexcept : __refCount(0)
{
if (!fromString(str)) {
m_revision = 0;
m_signer.zero();
m_endpoints.clear();
m_signature.clear();
}
if (! fromString(str)) {
m_revision = 0;
m_signer.zero();
m_endpoints.clear();
m_signature.clear();
}
}
bool Locator::add(const Endpoint &ep, const SharedPtr< const EndpointAttributes > &a)
bool Locator::add(const Endpoint& ep, const SharedPtr<const EndpointAttributes>& a)
{
for (Vector< std::pair< Endpoint, SharedPtr< const EndpointAttributes > > >::iterator i(m_endpoints.begin());i!=m_endpoints.end();++i) {
if (i->first == ep) {
i->second = ((a) && (a->data[0] != 0)) ? a : EndpointAttributes::DEFAULT;
return true;
}
}
if (m_endpoints.size() < ZT_LOCATOR_MAX_ENDPOINTS) {
m_endpoints.push_back(std::pair<Endpoint, SharedPtr< const EndpointAttributes > >(ep, ((a) && (a->data[0] != 0)) ? a : EndpointAttributes::DEFAULT));
return true;
}
return false;
for (Vector<std::pair<Endpoint, SharedPtr<const EndpointAttributes> > >::iterator i(m_endpoints.begin());
i != m_endpoints.end();
++i) {
if (i->first == ep) {
i->second = ((a) && (a->data[0] != 0)) ? a : EndpointAttributes::DEFAULT;
return true;
}
}
if (m_endpoints.size() < ZT_LOCATOR_MAX_ENDPOINTS) {
m_endpoints.push_back(std::pair<Endpoint, SharedPtr<const EndpointAttributes> >(
ep,
((a) && (a->data[0] != 0)) ? a : EndpointAttributes::DEFAULT));
return true;
}
return false;
}
bool Locator::sign(const int64_t rev, const Identity &id) noexcept
bool Locator::sign(const int64_t rev, const Identity& id) noexcept
{
m_revision = rev;
m_signer = id.address();
m_revision = rev;
m_signer = id.address();
m_sortEndpoints();
m_sortEndpoints();
uint8_t signdata[ZT_LOCATOR_MARSHAL_SIZE_MAX];
const unsigned int signlen = marshal(signdata, true);
uint8_t signdata[ZT_LOCATOR_MARSHAL_SIZE_MAX];
const unsigned int signlen = marshal(signdata, true);
const unsigned int siglen = id.sign(signdata, signlen, m_signature.data(), m_signature.capacity());
if (siglen == 0)
return false;
m_signature.unsafeSetSize(siglen);
const unsigned int siglen = id.sign(signdata, signlen, m_signature.data(), m_signature.capacity());
if (siglen == 0)
return false;
m_signature.unsafeSetSize(siglen);
return true;
return true;
}
bool Locator::verify(const Identity &id) const noexcept
bool Locator::verify(const Identity& id) const noexcept
{
try {
if ((m_revision > 0) && (m_signer == id.address())) {
uint8_t signdata[ZT_LOCATOR_MARSHAL_SIZE_MAX];
const unsigned int signlen = marshal(signdata, true);
return id.verify(signdata, signlen, m_signature.data(), m_signature.size());
}
} catch (...) {} // fail verify on any unexpected exception
return false;
try {
if ((m_revision > 0) && (m_signer == id.address())) {
uint8_t signdata[ZT_LOCATOR_MARSHAL_SIZE_MAX];
const unsigned int signlen = marshal(signdata, true);
return id.verify(signdata, signlen, m_signature.data(), m_signature.size());
}
}
catch (...) {
} // fail verify on any unexpected exception
return false;
}
char *Locator::toString(char s[ZT_LOCATOR_STRING_SIZE_MAX]) const noexcept
char* Locator::toString(char s[ZT_LOCATOR_STRING_SIZE_MAX]) const noexcept
{
static_assert(ZT_LOCATOR_STRING_SIZE_MAX > ((((ZT_LOCATOR_MARSHAL_SIZE_MAX / 5) + 1) * 8) + ZT_ADDRESS_LENGTH_HEX + 1), "overflow");
uint8_t bin[ZT_LOCATOR_MARSHAL_SIZE_MAX];
m_signer.toString(s);
s[ZT_ADDRESS_LENGTH_HEX] = '@';
Utils::b32e(bin, marshal(bin, false), s + (ZT_ADDRESS_LENGTH_HEX + 1), ZT_LOCATOR_STRING_SIZE_MAX - (ZT_ADDRESS_LENGTH_HEX + 1));
return s;
static_assert(
ZT_LOCATOR_STRING_SIZE_MAX > ((((ZT_LOCATOR_MARSHAL_SIZE_MAX / 5) + 1) * 8) + ZT_ADDRESS_LENGTH_HEX + 1),
"overflow");
uint8_t bin[ZT_LOCATOR_MARSHAL_SIZE_MAX];
m_signer.toString(s);
s[ZT_ADDRESS_LENGTH_HEX] = '@';
Utils::b32e(
bin,
marshal(bin, false),
s + (ZT_ADDRESS_LENGTH_HEX + 1),
ZT_LOCATOR_STRING_SIZE_MAX - (ZT_ADDRESS_LENGTH_HEX + 1));
return s;
}
bool Locator::fromString(const char *s) noexcept
bool Locator::fromString(const char* s) noexcept
{
if (!s)
return false;
if (strlen(s) < (ZT_ADDRESS_LENGTH_HEX + 1))
return false;
uint8_t bin[ZT_LOCATOR_MARSHAL_SIZE_MAX];
const int bl = Utils::b32d(s + (ZT_ADDRESS_LENGTH_HEX + 1), bin, ZT_LOCATOR_MARSHAL_SIZE_MAX);
if ((bl <= 0) || (bl > ZT_LOCATOR_MARSHAL_SIZE_MAX))
return false;
return unmarshal(bin, bl) > 0;
if (! s)
return false;
if (strlen(s) < (ZT_ADDRESS_LENGTH_HEX + 1))
return false;
uint8_t bin[ZT_LOCATOR_MARSHAL_SIZE_MAX];
const int bl = Utils::b32d(s + (ZT_ADDRESS_LENGTH_HEX + 1), bin, ZT_LOCATOR_MARSHAL_SIZE_MAX);
if ((bl <= 0) || (bl > ZT_LOCATOR_MARSHAL_SIZE_MAX))
return false;
return unmarshal(bin, bl) > 0;
}
int Locator::marshal(uint8_t data[ZT_LOCATOR_MARSHAL_SIZE_MAX], const bool excludeSignature) const noexcept
{
Utils::storeBigEndian<uint64_t>(data, (uint64_t) m_revision);
m_signer.copyTo(data + 8);
int p = 8 + ZT_ADDRESS_LENGTH;
Utils::storeBigEndian<uint64_t>(data, (uint64_t)m_revision);
m_signer.copyTo(data + 8);
int p = 8 + ZT_ADDRESS_LENGTH;
Utils::storeBigEndian<uint16_t>(data + p, (uint16_t)m_endpoints.size());
p += 2;
for (Vector< std::pair< Endpoint, SharedPtr< const EndpointAttributes > > >::const_iterator e(m_endpoints.begin());e != m_endpoints.end();++e) {
int l = e->first.marshal(data + p);
if (l <= 0)
return -1;
p += l;
Utils::storeBigEndian<uint16_t>(data + p, (uint16_t)m_endpoints.size());
p += 2;
for (Vector<std::pair<Endpoint, SharedPtr<const EndpointAttributes> > >::const_iterator e(m_endpoints.begin());
e != m_endpoints.end();
++e) {
int l = e->first.marshal(data + p);
if (l <= 0)
return -1;
p += l;
l = (int)e->second->data[0];
if (l > 0) {
Utils::copy(data + p, e->second->data, (unsigned int)l);
p += l;
} else {
data[p++] = 0;
}
}
l = (int)e->second->data[0];
if (l > 0) {
Utils::copy(data + p, e->second->data, (unsigned int)l);
p += l;
}
else {
data[p++] = 0;
}
}
Utils::storeMachineEndian< uint16_t >(data + p, 0); // length of meta-data, currently always 0
p += 2;
Utils::storeMachineEndian<uint16_t>(data + p, 0); // length of meta-data, currently always 0
p += 2;
if (!excludeSignature) {
Utils::storeBigEndian<uint16_t>(data + p, (uint16_t) m_signature.size());
p += 2;
Utils::copy(data + p, m_signature.data(), m_signature.size());
p += (int) m_signature.size();
}
if (! excludeSignature) {
Utils::storeBigEndian<uint16_t>(data + p, (uint16_t)m_signature.size());
p += 2;
Utils::copy(data + p, m_signature.data(), m_signature.size());
p += (int)m_signature.size();
}
return p;
return p;
}
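As a minimal sketch of the wire layout written by Locator::marshal() above, assuming a 5-byte ZeroTier address, zero endpoints and an empty signature; the revision and signer values here are made up.

#include <cstdint>
#include <cstdio>

// Write an n-byte big-endian integer, as the marshal code above does per field.
static void storeBE(uint8_t* p, uint64_t v, int n)
{
    for (int i = 0; i < n; ++i)
        p[i] = (uint8_t)(v >> (8 * (n - 1 - i)));
}

int main()
{
    uint8_t out[64];
    int p = 0;
    storeBE(out + p, 1619000000000ULL, 8); p += 8;   // revision (timestamp), sample value
    storeBE(out + p, 0x1122334455ULL, 5); p += 5;    // signer address (40 bits), sample value
    storeBE(out + p, 0, 2); p += 2;                  // endpoint count = 0
    storeBE(out + p, 0, 2); p += 2;                  // meta-data length, currently always 0
    storeBE(out + p, 0, 2); p += 2;                  // signature length = 0 (unsigned locator)
    printf("marshaled %d bytes\n", p);               // 19 bytes for this empty locator
    return 0;
}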
int Locator::unmarshal(const uint8_t *data, const int len) noexcept
int Locator::unmarshal(const uint8_t* data, const int len) noexcept
{
if (unlikely(len < (8 + ZT_ADDRESS_LENGTH)))
return -1;
m_revision = (int64_t)Utils::loadBigEndian<uint64_t>(data);
m_signer.setTo(data + 8);
int p = 8 + ZT_ADDRESS_LENGTH;
if (unlikely(len < (8 + ZT_ADDRESS_LENGTH)))
return -1;
m_revision = (int64_t)Utils::loadBigEndian<uint64_t>(data);
m_signer.setTo(data + 8);
int p = 8 + ZT_ADDRESS_LENGTH;
if (unlikely(p + 2) > len)
return -1;
unsigned int endpointCount = Utils::loadBigEndian<uint16_t>(data + p);
p += 2;
if (unlikely(endpointCount > ZT_LOCATOR_MAX_ENDPOINTS))
return -1;
m_endpoints.resize(endpointCount);
m_endpoints.shrink_to_fit();
for (unsigned int i = 0;i < endpointCount;++i) {
int l = m_endpoints[i].first.unmarshal(data + p, len - p);
if (l <= 0)
return -1;
p += l;
if (unlikely(p + 2) > len)
return -1;
unsigned int endpointCount = Utils::loadBigEndian<uint16_t>(data + p);
p += 2;
if (unlikely(endpointCount > ZT_LOCATOR_MAX_ENDPOINTS))
return -1;
m_endpoints.resize(endpointCount);
m_endpoints.shrink_to_fit();
for (unsigned int i = 0; i < endpointCount; ++i) {
int l = m_endpoints[i].first.unmarshal(data + p, len - p);
if (l <= 0)
return -1;
p += l;
if (unlikely(p + 1) > len)
return -1;
l = (int)data[p];
if (l <= 0) {
m_endpoints[i].second = EndpointAttributes::DEFAULT;
++p;
} else {
m_endpoints[i].second.set(new EndpointAttributes());
Utils::copy(const_cast< uint8_t * >(m_endpoints[i].second->data), data + p, (unsigned int)l);
p += l;
}
}
if (unlikely(p + 1) > len)
return -1;
l = (int)data[p];
if (l <= 0) {
m_endpoints[i].second = EndpointAttributes::DEFAULT;
++p;
}
else {
m_endpoints[i].second.set(new EndpointAttributes());
Utils::copy(const_cast<uint8_t*>(m_endpoints[i].second->data), data + p, (unsigned int)l);
p += l;
}
}
if (unlikely((p + 2) > len))
return -1;
p += 2 + (int)Utils::loadBigEndian<uint16_t>(data + p);
if (unlikely((p + 2) > len))
return -1;
p += 2 + (int)Utils::loadBigEndian<uint16_t>(data + p);
if (unlikely((p + 2) > len))
return -1;
const unsigned int siglen = Utils::loadBigEndian<uint16_t>(data + p);
p += 2;
if (unlikely((siglen > ZT_SIGNATURE_BUFFER_SIZE) || ((p + (int)siglen) > len)))
return -1;
m_signature.unsafeSetSize(siglen);
Utils::copy(m_signature.data(), data + p, siglen);
p += (int)siglen;
if (unlikely(p > len))
return -1;
if (unlikely((p + 2) > len))
return -1;
const unsigned int siglen = Utils::loadBigEndian<uint16_t>(data + p);
p += 2;
if (unlikely((siglen > ZT_SIGNATURE_BUFFER_SIZE) || ((p + (int)siglen) > len)))
return -1;
m_signature.unsafeSetSize(siglen);
Utils::copy(m_signature.data(), data + p, siglen);
p += (int)siglen;
if (unlikely(p > len))
return -1;
m_sortEndpoints();
m_sortEndpoints();
return p;
return p;
}
struct p_SortByEndpoint
{
// There can't be more than one of the same endpoint, so only need to sort
// by endpoint.
ZT_INLINE bool operator()(const std::pair< Endpoint, SharedPtr< const Locator::EndpointAttributes > > &a,const std::pair< Endpoint, SharedPtr< const Locator::EndpointAttributes > > &b) const noexcept
{ return a.first < b.first; }
struct p_SortByEndpoint {
// There can't be more than one of the same endpoint, so only need to sort
// by endpoint.
ZT_INLINE bool operator()(
const std::pair<Endpoint, SharedPtr<const Locator::EndpointAttributes> >& a,
const std::pair<Endpoint, SharedPtr<const Locator::EndpointAttributes> >& b) const noexcept
{
return a.first < b.first;
}
};
void Locator::m_sortEndpoints() noexcept
{ std::sort(m_endpoints.begin(), m_endpoints.end(), p_SortByEndpoint()); }
{
std::sort(m_endpoints.begin(), m_endpoints.end(), p_SortByEndpoint());
}
} // namespace ZeroTier
} // namespace ZeroTier

View file

@@ -15,13 +15,13 @@
#define ZT_LOCATOR_HPP
#include "Constants.hpp"
#include "Endpoint.hpp"
#include "Identity.hpp"
#include "TriviallyCopyable.hpp"
#include "SharedPtr.hpp"
#include "FCV.hpp"
#include "Containers.hpp"
#include "Dictionary.hpp"
#include "Endpoint.hpp"
#include "FCV.hpp"
#include "Identity.hpp"
#include "SharedPtr.hpp"
#include "TriviallyCopyable.hpp"
/**
* Maximum size of endpoint attributes dictionary plus one byte for size.
@@ -35,7 +35,10 @@
*/
#define ZT_LOCATOR_MAX_ENDPOINTS 16
#define ZT_LOCATOR_MARSHAL_SIZE_MAX (8 + ZT_ADDRESS_LENGTH + 2 + (ZT_LOCATOR_MAX_ENDPOINTS * (ZT_ENDPOINT_MARSHAL_SIZE_MAX + ZT_LOCATOR_MAX_ENDPOINT_ATTRIBUTES_SIZE)) + 2 + 2 + ZT_SIGNATURE_BUFFER_SIZE)
#define ZT_LOCATOR_MARSHAL_SIZE_MAX \
(8 + ZT_ADDRESS_LENGTH + 2 \
+ (ZT_LOCATOR_MAX_ENDPOINTS * (ZT_ENDPOINT_MARSHAL_SIZE_MAX + ZT_LOCATOR_MAX_ENDPOINT_ATTRIBUTES_SIZE)) + 2 + 2 \
+ ZT_SIGNATURE_BUFFER_SIZE)
/**
* Maximum size of a string format Locator (this is way larger than needed)
@@ -50,194 +50,223 @@ namespace ZeroTier {
* A locator contains long-lived endpoints for a node such as IP/port pairs,
* URLs, or other nodes, and is signed by the node it describes.
*/
class Locator
{
friend class SharedPtr< Locator >;
friend class SharedPtr< const Locator >;
class Locator {
friend class SharedPtr<Locator>;
friend class SharedPtr<const Locator>;
public:
/**
* Attributes of an endpoint in this locator
*
* This is specified for future use, but there are currently no attributes
* defined. A Dictionary is used for serialization for extensibility.
*/
struct EndpointAttributes
{
friend class SharedPtr< Locator::EndpointAttributes >;
friend class SharedPtr< const Locator::EndpointAttributes >;
public:
/**
* Attributes of an endpoint in this locator
*
* This is specified for future use, but there are currently no attributes
* defined. A Dictionary is used for serialization for extensibility.
*/
struct EndpointAttributes {
friend class SharedPtr<Locator::EndpointAttributes>;
friend class SharedPtr<const Locator::EndpointAttributes>;
/**
* Default endpoint attributes
*/
static const SharedPtr< const Locator::EndpointAttributes > DEFAULT;
/**
* Default endpoint attributes
*/
static const SharedPtr<const Locator::EndpointAttributes> DEFAULT;
/**
* Raw attributes data in the form of a dictionary prefixed by its size.
*
* The maximum size of attributes is 255, which is more than enough for
* tiny things like bandwidth and priority.
*/
uint8_t data[ZT_LOCATOR_MAX_ENDPOINT_ATTRIBUTES_SIZE];
/**
* Raw attributes data in the form of a dictionary prefixed by its size.
*
* The maximum size of attributes is 255, which is more than enough for
* tiny things like bandwidth and priority.
*/
uint8_t data[ZT_LOCATOR_MAX_ENDPOINT_ATTRIBUTES_SIZE];
ZT_INLINE EndpointAttributes() noexcept
{ Utils::zero< ZT_LOCATOR_MAX_ENDPOINT_ATTRIBUTES_SIZE >(data); }
ZT_INLINE EndpointAttributes() noexcept
{
Utils::zero<ZT_LOCATOR_MAX_ENDPOINT_ATTRIBUTES_SIZE>(data);
}
ZT_INLINE bool operator==(const EndpointAttributes &a) const noexcept
{ return ((data[0] == a.data[0]) && (memcmp(data, a.data, data[0]) == 0)); }
ZT_INLINE bool operator==(const EndpointAttributes& a) const noexcept
{
return ((data[0] == a.data[0]) && (memcmp(data, a.data, data[0]) == 0));
}
ZT_INLINE bool operator<(const EndpointAttributes &a) const noexcept
{ return ((data[0] < a.data[0]) || ((data[0] == a.data[0]) && (memcmp(data, a.data, data[0]) < 0))); }
ZT_INLINE bool operator<(const EndpointAttributes& a) const noexcept
{
return ((data[0] < a.data[0]) || ((data[0] == a.data[0]) && (memcmp(data, a.data, data[0]) < 0)));
}
ZT_INLINE bool operator!=(const EndpointAttributes &a) const noexcept
{ return !(*this == a); }
ZT_INLINE bool operator!=(const EndpointAttributes& a) const noexcept
{
return ! (*this == a);
}
ZT_INLINE bool operator>(const EndpointAttributes &a) const noexcept
{ return (a < *this); }
ZT_INLINE bool operator>(const EndpointAttributes& a) const noexcept
{
return (a < *this);
}
ZT_INLINE bool operator<=(const EndpointAttributes &a) const noexcept
{ return !(a < *this); }
ZT_INLINE bool operator<=(const EndpointAttributes& a) const noexcept
{
return ! (a < *this);
}
ZT_INLINE bool operator>=(const EndpointAttributes &a) const noexcept
{ return !(*this < a); }
ZT_INLINE bool operator>=(const EndpointAttributes& a) const noexcept
{
return ! (*this < a);
}
private:
std::atomic< int > __refCount;
};
private:
std::atomic<int> __refCount;
};
ZT_INLINE Locator() noexcept:
m_revision(0)
{}
ZT_INLINE Locator() noexcept : m_revision(0)
{
}
ZT_INLINE Locator(const Locator &l) noexcept:
m_revision(l.m_revision),
m_signer(l.m_signer),
m_endpoints(l.m_endpoints),
m_signature(l.m_signature),
__refCount(0)
{}
ZT_INLINE Locator(const Locator& l) noexcept
: m_revision(l.m_revision)
, m_signer(l.m_signer)
, m_endpoints(l.m_endpoints)
, m_signature(l.m_signature)
, __refCount(0)
{
}
explicit Locator(const char *const str) noexcept;
explicit Locator(const char* const str) noexcept;
/**
* @return Timestamp (a.k.a. revision number) set by Location signer
*/
ZT_INLINE int64_t revision() const noexcept
{ return m_revision; }
/**
* @return Timestamp (a.k.a. revision number) set by Location signer
*/
ZT_INLINE int64_t revision() const noexcept
{
return m_revision;
}
/**
* @return ZeroTier address of signer
*/
ZT_INLINE Address signer() const noexcept
{ return m_signer; }
/**
* @return ZeroTier address of signer
*/
ZT_INLINE Address signer() const noexcept
{
return m_signer;
}
/**
* @return Endpoints specified in locator
*/
ZT_INLINE const Vector <std::pair< Endpoint, SharedPtr< const EndpointAttributes > >> &endpoints() const noexcept
{ return m_endpoints; }
/**
* @return Endpoints specified in locator
*/
ZT_INLINE const Vector<std::pair<Endpoint, SharedPtr<const EndpointAttributes> > >& endpoints() const noexcept
{
return m_endpoints;
}
/**
* @return Signature data
*/
ZT_INLINE const FCV< uint8_t, ZT_SIGNATURE_BUFFER_SIZE > &signature() const noexcept
{ return m_signature; }
/**
* @return Signature data
*/
ZT_INLINE const FCV<uint8_t, ZT_SIGNATURE_BUFFER_SIZE>& signature() const noexcept
{
return m_signature;
}
/**
* Add an endpoint to this locator
*
* This doesn't check for the presence of the endpoint, so take
* care not to add duplicates.
*
* @param ep Endpoint to add
* @param a Endpoint attributes or NULL to use default
* @return True if endpoint was added (or already present), false if locator is full
*/
bool add(const Endpoint &ep, const SharedPtr< const EndpointAttributes > &a);
/**
* Add an endpoint to this locator
*
* This doesn't check for the presence of the endpoint, so take
* care not to add duplicates.
*
* @param ep Endpoint to add
* @param a Endpoint attributes or NULL to use default
* @return True if endpoint was added (or already present), false if locator is full
*/
bool add(const Endpoint& ep, const SharedPtr<const EndpointAttributes>& a);
/**
* Sign this locator
*
* This sets timestamp, sorts endpoints so that the same set of endpoints
* will always produce the same locator, and signs.
*
* @param id Identity that includes private key
* @return True if signature successful
*/
bool sign(int64_t rev, const Identity &id) noexcept;
/**
* Sign this locator
*
* This sets timestamp, sorts endpoints so that the same set of endpoints
* will always produce the same locator, and signs.
*
* @param id Identity that includes private key
* @return True if signature successful
*/
bool sign(int64_t rev, const Identity& id) noexcept;
/**
* Verify this Locator's validity and signature
*
* @param id Identity corresponding to hash
* @return True if valid and signature checks out
*/
bool verify(const Identity &id) const noexcept;
/**
* Verify this Locator's validity and signature
*
* @param id Identity corresponding to hash
* @return True if valid and signature checks out
*/
bool verify(const Identity& id) const noexcept;
/**
* Convert this locator to a string
*
* @param s String buffer
* @return Pointer to buffer
*/
char *toString(char s[ZT_LOCATOR_STRING_SIZE_MAX]) const noexcept;
/**
* Convert this locator to a string
*
* @param s String buffer
* @return Pointer to buffer
*/
char* toString(char s[ZT_LOCATOR_STRING_SIZE_MAX]) const noexcept;
ZT_INLINE String toString() const
{
char tmp[ZT_LOCATOR_STRING_SIZE_MAX];
return String(toString(tmp));
}
ZT_INLINE String toString() const
{
char tmp[ZT_LOCATOR_STRING_SIZE_MAX];
return String(toString(tmp));
}
/**
* Decode a string format locator
*
* @param s Locator from toString()
* @return True if format was valid
*/
bool fromString(const char *s) noexcept;
/**
* Decode a string format locator
*
* @param s Locator from toString()
* @return True if format was valid
*/
bool fromString(const char* s) noexcept;
explicit ZT_INLINE operator bool() const noexcept
{ return m_revision > 0; }
explicit ZT_INLINE operator bool() const noexcept
{
return m_revision > 0;
}
static constexpr int marshalSizeMax() noexcept
{ return ZT_LOCATOR_MARSHAL_SIZE_MAX; }
static constexpr int marshalSizeMax() noexcept
{
return ZT_LOCATOR_MARSHAL_SIZE_MAX;
}
int marshal(uint8_t data[ZT_LOCATOR_MARSHAL_SIZE_MAX], bool excludeSignature = false) const noexcept;
int unmarshal(const uint8_t *data, int len) noexcept;
int marshal(uint8_t data[ZT_LOCATOR_MARSHAL_SIZE_MAX], bool excludeSignature = false) const noexcept;
int unmarshal(const uint8_t* data, int len) noexcept;
ZT_INLINE bool operator==(const Locator &l) const noexcept
{
const unsigned long es = (unsigned long)m_endpoints.size();
if ((m_revision == l.m_revision) && (m_signer == l.m_signer) && (es == (unsigned long)l.m_endpoints.size()) && (m_signature == l.m_signature)) {
for (unsigned long i = 0; i < es; ++i) {
if (m_endpoints[i].first != l.m_endpoints[i].first)
return false;
if (!m_endpoints[i].second) {
if (l.m_endpoints[i].second)
return false;
} else {
if ((!l.m_endpoints[i].second) || (*(m_endpoints[i].second) != *(l.m_endpoints[i].second)))
return false;
}
}
return true;
}
return false;
}
ZT_INLINE bool operator==(const Locator& l) const noexcept
{
const unsigned long es = (unsigned long)m_endpoints.size();
if ((m_revision == l.m_revision) && (m_signer == l.m_signer) && (es == (unsigned long)l.m_endpoints.size())
&& (m_signature == l.m_signature)) {
for (unsigned long i = 0; i < es; ++i) {
if (m_endpoints[i].first != l.m_endpoints[i].first)
return false;
if (! m_endpoints[i].second) {
if (l.m_endpoints[i].second)
return false;
}
else {
if ((! l.m_endpoints[i].second) || (*(m_endpoints[i].second) != *(l.m_endpoints[i].second)))
return false;
}
}
return true;
}
return false;
}
ZT_INLINE bool operator!=(const Locator &l) const noexcept
{ return !(*this == l); }
ZT_INLINE bool operator!=(const Locator& l) const noexcept
{
return ! (*this == l);
}
private:
void m_sortEndpoints() noexcept;
private:
void m_sortEndpoints() noexcept;
int64_t m_revision;
Address m_signer;
Vector <std::pair< Endpoint, SharedPtr< const EndpointAttributes > >> m_endpoints;
FCV< uint8_t, ZT_SIGNATURE_BUFFER_SIZE > m_signature;
std::atomic< int > __refCount;
int64_t m_revision;
Address m_signer;
Vector<std::pair<Endpoint, SharedPtr<const EndpointAttributes> > > m_endpoints;
FCV<uint8_t, ZT_SIGNATURE_BUFFER_SIZE> m_signature;
std::atomic<int> __refCount;
};
} // namespace ZeroTier
} // namespace ZeroTier
#endif

View file

@@ -14,258 +14,315 @@
#ifndef ZT_MAC_HPP
#define ZT_MAC_HPP
#include "Constants.hpp"
#include "Utils.hpp"
#include "Address.hpp"
#include "TriviallyCopyable.hpp"
#include "Constants.hpp"
#include "Containers.hpp"
#include "TriviallyCopyable.hpp"
#include "Utils.hpp"
namespace ZeroTier {
/**
* 48-byte Ethernet MAC address
*/
class MAC : public TriviallyCopyable
{
public:
ZT_INLINE MAC() noexcept: m_mac(0ULL)
{}
class MAC : public TriviallyCopyable {
public:
ZT_INLINE MAC() noexcept : m_mac(0ULL)
{
}
ZT_INLINE MAC(const uint8_t a, const uint8_t b, const uint8_t c, const uint8_t d, const uint8_t e, const uint8_t f) noexcept:
m_mac((((uint64_t)a) << 40U) | (((uint64_t)b) << 32U) | (((uint64_t)c) << 24U) | (((uint64_t)d) << 16U) | (((uint64_t)e) << 8U) | ((uint64_t)f))
{}
ZT_INLINE
MAC(const uint8_t a, const uint8_t b, const uint8_t c, const uint8_t d, const uint8_t e, const uint8_t f) noexcept
: m_mac(
(((uint64_t)a) << 40U) | (((uint64_t)b) << 32U) | (((uint64_t)c) << 24U) | (((uint64_t)d) << 16U)
| (((uint64_t)e) << 8U) | ((uint64_t)f))
{
}
explicit ZT_INLINE MAC(const uint64_t m) noexcept:
m_mac(m)
{}
explicit ZT_INLINE MAC(const uint64_t m) noexcept : m_mac(m)
{
}
explicit ZT_INLINE MAC(const uint8_t b[6]) noexcept
{ setTo(b); }
explicit ZT_INLINE MAC(const uint8_t b[6]) noexcept
{
setTo(b);
}
ZT_INLINE MAC(const Address &ztaddr, const uint64_t nwid) noexcept
{ fromAddress(ztaddr, nwid); }
ZT_INLINE MAC(const Address& ztaddr, const uint64_t nwid) noexcept
{
fromAddress(ztaddr, nwid);
}
/**
* @return MAC in 64-bit integer
*/
ZT_INLINE uint64_t toInt() const noexcept
{ return m_mac; }
/**
* @return MAC in 64-bit integer
*/
ZT_INLINE uint64_t toInt() const noexcept
{
return m_mac;
}
/**
* Set MAC to zero
*/
ZT_INLINE void zero() noexcept
{ m_mac = 0ULL; }
/**
* Set MAC to zero
*/
ZT_INLINE void zero() noexcept
{
m_mac = 0ULL;
}
/**
* @param bits Raw MAC in big-endian byte order
* @param len Length, must be >= 6 or result is zero
*/
ZT_INLINE void setTo(const uint8_t b[6]) noexcept
{ m_mac = ((uint64_t)b[0] << 40U) | ((uint64_t)b[1] << 32U) | ((uint64_t)b[2] << 24U) | ((uint64_t)b[3] << 16U) | ((uint64_t)b[4] << 8U) | (uint64_t)b[5]; }
/**
* @param bits Raw MAC in big-endian byte order
* @param len Length, must be >= 6 or result is zero
*/
ZT_INLINE void setTo(const uint8_t b[6]) noexcept
{
m_mac = ((uint64_t)b[0] << 40U) | ((uint64_t)b[1] << 32U) | ((uint64_t)b[2] << 24U) | ((uint64_t)b[3] << 16U)
| ((uint64_t)b[4] << 8U) | (uint64_t)b[5];
}
/**
* @param buf Destination buffer for MAC in big-endian byte order
* @param len Length of buffer, must be >= 6 or nothing is copied
*/
ZT_INLINE void copyTo(uint8_t b[6]) const noexcept
{
b[0] = (uint8_t)(m_mac >> 40U);
b[1] = (uint8_t)(m_mac >> 32U);
b[2] = (uint8_t)(m_mac >> 24U);
b[3] = (uint8_t)(m_mac >> 16U);
b[4] = (uint8_t)(m_mac >> 8U);
b[5] = (uint8_t)m_mac;
}
/**
* @param buf Destination buffer for MAC in big-endian byte order
* @param len Length of buffer, must be >= 6 or nothing is copied
*/
ZT_INLINE void copyTo(uint8_t b[6]) const noexcept
{
b[0] = (uint8_t)(m_mac >> 40U);
b[1] = (uint8_t)(m_mac >> 32U);
b[2] = (uint8_t)(m_mac >> 24U);
b[3] = (uint8_t)(m_mac >> 16U);
b[4] = (uint8_t)(m_mac >> 8U);
b[5] = (uint8_t)m_mac;
}
/**
* @return True if this is broadcast (all 0xff)
*/
ZT_INLINE bool isBroadcast() const noexcept
{ return m_mac; }
/**
* @return True if this is broadcast (all 0xff)
*/
ZT_INLINE bool isBroadcast() const noexcept
{
return m_mac;
}
/**
* @return True if this is a multicast MAC
*/
ZT_INLINE bool isMulticast() const noexcept
{ return ((m_mac & 0x010000000000ULL) != 0ULL); }
/**
* @return True if this is a multicast MAC
*/
ZT_INLINE bool isMulticast() const noexcept
{
return ((m_mac & 0x010000000000ULL) != 0ULL);
}
/**
* Set this MAC to a MAC derived from an address and a network ID
*
* @param ztaddr ZeroTier address
* @param nwid 64-bit network ID
*/
ZT_INLINE void fromAddress(const Address &ztaddr, uint64_t nwid) noexcept
{
uint64_t m = ((uint64_t)firstOctetForNetwork(nwid)) << 40U;
m |= ztaddr.toInt(); // a is 40 bits
m ^= ((nwid >> 8U) & 0xffU) << 32U;
m ^= ((nwid >> 16U) & 0xffU) << 24U;
m ^= ((nwid >> 24U) & 0xffU) << 16U;
m ^= ((nwid >> 32U) & 0xffU) << 8U;
m ^= (nwid >> 40U) & 0xffU;
m_mac = m;
}
/**
* Set this MAC to a MAC derived from an address and a network ID
*
* @param ztaddr ZeroTier address
* @param nwid 64-bit network ID
*/
ZT_INLINE void fromAddress(const Address& ztaddr, uint64_t nwid) noexcept
{
uint64_t m = ((uint64_t)firstOctetForNetwork(nwid)) << 40U;
m |= ztaddr.toInt(); // a is 40 bits
m ^= ((nwid >> 8U) & 0xffU) << 32U;
m ^= ((nwid >> 16U) & 0xffU) << 24U;
m ^= ((nwid >> 24U) & 0xffU) << 16U;
m ^= ((nwid >> 32U) & 0xffU) << 8U;
m ^= (nwid >> 40U) & 0xffU;
m_mac = m;
}
/**
* Get the ZeroTier address for this MAC on this network (assuming no bridging of course, basic unicast)
*
* This just XORs the next-least-significant 5 bytes of the network ID again to unmask.
*
* @param nwid Network ID
*/
ZT_INLINE Address toAddress(uint64_t nwid) const noexcept
{
uint64_t a = m_mac & 0xffffffffffULL; // least significant 40 bits of MAC are formed from address
a ^= ((nwid >> 8U) & 0xffU) << 32U; // ... XORed with bits 8-48 of the nwid in little-endian byte order, so unmask it
a ^= ((nwid >> 16U) & 0xffU) << 24U;
a ^= ((nwid >> 24U) & 0xffU) << 16U;
a ^= ((nwid >> 32U) & 0xffU) << 8U;
a ^= (nwid >> 40U) & 0xffU;
return Address(a);
}
/**
* Get the ZeroTier address for this MAC on this network (assuming no bridging of course, basic unicast)
*
* This just XORs the next-least-significant 5 bytes of the network ID again to unmask.
*
* @param nwid Network ID
*/
ZT_INLINE Address toAddress(uint64_t nwid) const noexcept
{
uint64_t a = m_mac & 0xffffffffffULL; // least significant 40 bits of MAC are formed from address
a ^= ((nwid >> 8U) & 0xffU)
<< 32U; // ... XORed with bits 8-48 of the nwid in little-endian byte order, so unmask it
a ^= ((nwid >> 16U) & 0xffU) << 24U;
a ^= ((nwid >> 24U) & 0xffU) << 16U;
a ^= ((nwid >> 32U) & 0xffU) << 8U;
a ^= (nwid >> 40U) & 0xffU;
return Address(a);
}
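A self-contained round-trip sketch of the two derivations above, restated outside the MAC class; the node address and network ID are hypothetical sample values.

#include <cassert>
#include <cstdint>
#include <cstdio>

// Derive a MAC from a 40-bit node address and network ID, as fromAddress() does above.
static uint64_t macFromAddress(uint64_t addr, uint64_t nwid)
{
    const uint8_t fo = (uint8_t)((uint8_t)(nwid & 0xfeU) | 0x02U);   // locally administered, not multicast
    uint64_t m = ((uint64_t)((fo == 0x52) ? 0x32 : fo)) << 40U;      // remap the KVM/libvirt prefix
    m |= addr;                                                       // 40-bit node address
    m ^= ((nwid >> 8U) & 0xffU) << 32U;                              // mask with bytes of the network ID
    m ^= ((nwid >> 16U) & 0xffU) << 24U;
    m ^= ((nwid >> 24U) & 0xffU) << 16U;
    m ^= ((nwid >> 32U) & 0xffU) << 8U;
    m ^= (nwid >> 40U) & 0xffU;
    return m;
}

// Recover the node address from the MAC, as toAddress() does above.
static uint64_t addressFromMac(uint64_t mac, uint64_t nwid)
{
    uint64_t a = mac & 0xffffffffffULL;    // drop the first octet
    a ^= ((nwid >> 8U) & 0xffU) << 32U;    // XOR the same bytes again to unmask
    a ^= ((nwid >> 16U) & 0xffU) << 24U;
    a ^= ((nwid >> 24U) & 0xffU) << 16U;
    a ^= ((nwid >> 32U) & 0xffU) << 8U;
    a ^= (nwid >> 40U) & 0xffU;
    return a;
}

int main()
{
    const uint64_t addr = 0x1122334455ULL;         // hypothetical node address
    const uint64_t nwid = 0x8056c2e21c000001ULL;   // hypothetical network ID
    const uint64_t mac = macFromAddress(addr, nwid);
    assert(addressFromMac(mac, nwid) == addr);     // round trip recovers the address
    printf("mac %012llx -> addr %010llx\n", (unsigned long long)mac, (unsigned long long)addr);
    return 0;
}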
/**
* @param nwid Network ID
* @return First octet of MAC for this network
*/
static ZT_INLINE unsigned char firstOctetForNetwork(uint64_t nwid) noexcept
{
const uint8_t a = ((uint8_t)(nwid & 0xfeU) | 0x02U); // locally administered, not multicast, from LSB of network ID
return ((a == 0x52) ? 0x32 : a); // blacklist 0x52 since it's used by KVM, libvirt, and other popular virtualization engines... seems de-facto standard on Linux
}
/**
* @param nwid Network ID
* @return First octet of MAC for this network
*/
static ZT_INLINE unsigned char firstOctetForNetwork(uint64_t nwid) noexcept
{
const uint8_t a =
((uint8_t)(nwid & 0xfeU) | 0x02U); // locally administered, not multicast, from LSB of network ID
return ((a == 0x52) ? 0x32 : a); // blacklist 0x52 since it's used by KVM, libvirt, and other popular
// virtualization engines... seems de-facto standard on Linux
}
/**
* @param i Value from 0 to 5 (inclusive)
* @return Byte at said position (address interpreted in big-endian order)
*/
ZT_INLINE uint8_t operator[](unsigned int i) const noexcept
{ return (uint8_t)(m_mac >> (unsigned int)(40 - (i * 8))); }
/**
* @param i Value from 0 to 5 (inclusive)
* @return Byte at said position (address interpreted in big-endian order)
*/
ZT_INLINE uint8_t operator[](unsigned int i) const noexcept
{
return (uint8_t)(m_mac >> (unsigned int)(40 - (i * 8)));
}
/**
* @return 6, which is the number of bytes in a MAC, for container compliance
*/
ZT_INLINE unsigned int size() const noexcept
{ return 6; }
/**
* @return 6, which is the number of bytes in a MAC, for container compliance
*/
ZT_INLINE unsigned int size() const noexcept
{
return 6;
}
ZT_INLINE unsigned long hashCode() const noexcept
{ return (unsigned long)Utils::hash64(m_mac); }
ZT_INLINE unsigned long hashCode() const noexcept
{
return (unsigned long)Utils::hash64(m_mac);
}
ZT_INLINE operator bool() const noexcept
{ return (m_mac != 0ULL); }
ZT_INLINE operator bool() const noexcept
{
return (m_mac != 0ULL);
}
ZT_INLINE operator uint64_t() const noexcept
{ return m_mac; }
ZT_INLINE operator uint64_t() const noexcept
{
return m_mac;
}
/**
* Convert this MAC to a standard format colon-separated hex string
*
* @param buf Buffer to store string
* @return Pointer to buf
*/
ZT_INLINE char *toString(char buf[18]) const noexcept
{
buf[0] = Utils::HEXCHARS[(m_mac >> 44U) & 0xfU];
buf[1] = Utils::HEXCHARS[(m_mac >> 40U) & 0xfU];
buf[2] = ':';
buf[3] = Utils::HEXCHARS[(m_mac >> 36U) & 0xfU];
buf[4] = Utils::HEXCHARS[(m_mac >> 32U) & 0xfU];
buf[5] = ':';
buf[6] = Utils::HEXCHARS[(m_mac >> 28U) & 0xfU];
buf[7] = Utils::HEXCHARS[(m_mac >> 24U) & 0xfU];
buf[8] = ':';
buf[9] = Utils::HEXCHARS[(m_mac >> 20U) & 0xfU];
buf[10] = Utils::HEXCHARS[(m_mac >> 16U) & 0xfU];
buf[11] = ':';
buf[12] = Utils::HEXCHARS[(m_mac >> 12U) & 0xfU];
buf[13] = Utils::HEXCHARS[(m_mac >> 8U) & 0xfU];
buf[14] = ':';
buf[15] = Utils::HEXCHARS[(m_mac >> 4U) & 0xfU];
buf[16] = Utils::HEXCHARS[m_mac & 0xfU];
buf[17] = (char)0;
return buf;
}
/**
* Convert this MAC to a standard format colon-separated hex string
*
* @param buf Buffer to store string
* @return Pointer to buf
*/
ZT_INLINE char* toString(char buf[18]) const noexcept
{
buf[0] = Utils::HEXCHARS[(m_mac >> 44U) & 0xfU];
buf[1] = Utils::HEXCHARS[(m_mac >> 40U) & 0xfU];
buf[2] = ':';
buf[3] = Utils::HEXCHARS[(m_mac >> 36U) & 0xfU];
buf[4] = Utils::HEXCHARS[(m_mac >> 32U) & 0xfU];
buf[5] = ':';
buf[6] = Utils::HEXCHARS[(m_mac >> 28U) & 0xfU];
buf[7] = Utils::HEXCHARS[(m_mac >> 24U) & 0xfU];
buf[8] = ':';
buf[9] = Utils::HEXCHARS[(m_mac >> 20U) & 0xfU];
buf[10] = Utils::HEXCHARS[(m_mac >> 16U) & 0xfU];
buf[11] = ':';
buf[12] = Utils::HEXCHARS[(m_mac >> 12U) & 0xfU];
buf[13] = Utils::HEXCHARS[(m_mac >> 8U) & 0xfU];
buf[14] = ':';
buf[15] = Utils::HEXCHARS[(m_mac >> 4U) & 0xfU];
buf[16] = Utils::HEXCHARS[m_mac & 0xfU];
buf[17] = (char)0;
return buf;
}
ZT_INLINE String toString() const
{
char tmp[18];
return String(toString(tmp));
}
ZT_INLINE String toString() const
{
char tmp[18];
return String(toString(tmp));
}
/**
* Parse a MAC address in hex format with or without : separators and ignoring non-hex characters.
*
* @param s String to parse
*/
ZT_INLINE void fromString(const char *s) noexcept
{
m_mac = 0;
if (s) {
while (*s) {
uint64_t c;
const char hc = *s++;
if ((hc >= 48) && (hc <= 57))
c = (uint64_t)hc - 48;
else if ((hc >= 97) && (hc <= 102))
c = (uint64_t)hc - 87;
else if ((hc >= 65) && (hc <= 70))
c = (uint64_t)hc - 55;
else continue;
m_mac = (m_mac << 4U) | c;
}
m_mac &= 0xffffffffffffULL;
}
}
/**
* Parse a MAC address in hex format with or without : separators and ignoring non-hex characters.
*
* @param s String to parse
*/
ZT_INLINE void fromString(const char* s) noexcept
{
m_mac = 0;
if (s) {
while (*s) {
uint64_t c;
const char hc = *s++;
if ((hc >= 48) && (hc <= 57))
c = (uint64_t)hc - 48;
else if ((hc >= 97) && (hc <= 102))
c = (uint64_t)hc - 87;
else if ((hc >= 65) && (hc <= 70))
c = (uint64_t)hc - 55;
else
continue;
m_mac = (m_mac << 4U) | c;
}
m_mac &= 0xffffffffffffULL;
}
}
ZT_INLINE MAC &operator=(const uint64_t m) noexcept
{
m_mac = m;
return *this;
}
ZT_INLINE MAC& operator=(const uint64_t m) noexcept
{
m_mac = m;
return *this;
}
ZT_INLINE bool operator==(const MAC &m) const noexcept
{ return (m_mac == m.m_mac); }
ZT_INLINE bool operator==(const MAC& m) const noexcept
{
return (m_mac == m.m_mac);
}
ZT_INLINE bool operator!=(const MAC &m) const noexcept
{ return (m_mac != m.m_mac); }
ZT_INLINE bool operator!=(const MAC& m) const noexcept
{
return (m_mac != m.m_mac);
}
ZT_INLINE bool operator<(const MAC &m) const noexcept
{ return (m_mac < m.m_mac); }
ZT_INLINE bool operator<(const MAC& m) const noexcept
{
return (m_mac < m.m_mac);
}
ZT_INLINE bool operator<=(const MAC &m) const noexcept
{ return (m_mac <= m.m_mac); }
ZT_INLINE bool operator<=(const MAC& m) const noexcept
{
return (m_mac <= m.m_mac);
}
ZT_INLINE bool operator>(const MAC &m) const noexcept
{ return (m_mac > m.m_mac); }
ZT_INLINE bool operator>(const MAC& m) const noexcept
{
return (m_mac > m.m_mac);
}
ZT_INLINE bool operator>=(const MAC &m) const noexcept
{ return (m_mac >= m.m_mac); }
ZT_INLINE bool operator>=(const MAC& m) const noexcept
{
return (m_mac >= m.m_mac);
}
ZT_INLINE bool operator==(const uint64_t m) const noexcept
{ return (m_mac == m); }
ZT_INLINE bool operator==(const uint64_t m) const noexcept
{
return (m_mac == m);
}
ZT_INLINE bool operator!=(const uint64_t m) const noexcept
{ return (m_mac != m); }
ZT_INLINE bool operator!=(const uint64_t m) const noexcept
{
return (m_mac != m);
}
ZT_INLINE bool operator<(const uint64_t m) const noexcept
{ return (m_mac < m); }
ZT_INLINE bool operator<(const uint64_t m) const noexcept
{
return (m_mac < m);
}
ZT_INLINE bool operator<=(const uint64_t m) const noexcept
{ return (m_mac <= m); }
ZT_INLINE bool operator<=(const uint64_t m) const noexcept
{
return (m_mac <= m);
}
ZT_INLINE bool operator>(const uint64_t m) const noexcept
{ return (m_mac > m); }
ZT_INLINE bool operator>(const uint64_t m) const noexcept
{
return (m_mac > m);
}
ZT_INLINE bool operator>=(const uint64_t m) const noexcept
{ return (m_mac >= m); }
ZT_INLINE bool operator>=(const uint64_t m) const noexcept
{
return (m_mac >= m);
}
private:
uint64_t m_mac;
private:
uint64_t m_mac;
};
static_assert(sizeof(MAC) == sizeof(uint64_t), "MAC contains unnecessary padding");
} // namespace ZeroTier
} // namespace ZeroTier
#endif

View file

@@ -11,37 +11,131 @@
*/
/****/
#include "Constants.hpp"
#include "AES.hpp"
#include "MIMC52.hpp"
#include "AES.hpp"
#include "Constants.hpp"
namespace {
// Largest 1024 primes of form 6k + 5 and less than 2^52. Only the least significant 32
// bits need to be here, as the most significant bits are all 1.
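Per that comment, each stored value is the low 32 bits of a 52-bit prime whose top 20 bits are all ones, so the full prime can be recovered by OR-ing with 0xFFFFF00000000; a minimal sketch using the table's first entry (the choice of entry is arbitrary):

#include <cstdint>
#include <cstdio>

int main()
{
    const uint32_t low32 = 4294895267U;                            // first entry of the table
    const uint64_t prime = 0xFFFFF00000000ULL | (uint64_t)low32;   // restore the implicit high bits
    printf("%llu (0x%llx)\n", (unsigned long long)prime, (unsigned long long)prime);
    return 0;
}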
const uint32_t ZT_MIMC52_PRIMES[1024] = {4294895267, 4294895477, 4294895513, 4294895519, 4294895543, 4294895567, 4294895657, 4294895711, 4294895777, 4294895861, 4294895909, 4294895921, 4294895969, 4294896011, 4294896149, 4294896227, 4294896401, 4294896473, 4294896527, 4294896563, 4294896653, 4294896731, 4294896863, 4294896899, 4294896983, 4294897037, 4294897103, 4294897331, 4294897349, 4294897451, 4294897571, 4294897661, 4294897703, 4294897757, 4294897793, 4294897811, 4294897817, 4294897829, 4294897877, 4294897919, 4294897991, 4294898027, 4294898129, 4294898153, 4294898231, 4294898273,
4294898279, 4294898291, 4294898363, 4294898369, 4294898417, 4294898423, 4294898453, 4294898489, 4294898573, 4294898579, 4294898639, 4294898693, 4294898747, 4294898759, 4294898867, 4294898879, 4294898909, 4294898921, 4294898933, 4294899011, 4294899041, 4294899047, 4294899203, 4294899221, 4294899227, 4294899287, 4294899341, 4294899431, 4294899509, 4294899533, 4294899539, 4294899551, 4294899629, 4294899791, 4294899809, 4294899971, 4294900001, 4294900007, 4294900013, 4294900307, 4294900331, 4294900427, 4294900469, 4294900481, 4294900541, 4294900583,
4294900781, 4294900853, 4294900931, 4294900991, 4294901033, 4294901087, 4294901159, 4294901267, 4294901393, 4294901411, 4294901489, 4294901657, 4294902011, 4294902071, 4294902101, 4294902107, 4294902353, 4294902377, 4294902599, 4294902647, 4294902743, 4294902869, 4294902977, 4294903067, 4294903103, 4294903259, 4294903289, 4294903397, 4294903421, 4294903493, 4294903577, 4294903631, 4294903637, 4294903733, 4294903799, 4294903823, 4294904003, 4294904033, 4294904081, 4294904129, 4294904279, 4294904297, 4294904303, 4294904333, 4294904351, 4294904381,
4294904453, 4294904519, 4294904561, 4294904639, 4294904657, 4294904747, 4294904807, 4294904843, 4294905089, 4294905149, 4294905293, 4294905299, 4294905311, 4294905443, 4294905479, 4294905539, 4294905623, 4294905641, 4294905671, 4294905707, 4294905887, 4294905977, 4294906091, 4294906103, 4294906139, 4294906157, 4294906223, 4294906259, 4294906487, 4294906493, 4294906523, 4294906547, 4294906553, 4294906571, 4294906577, 4294906589, 4294906703, 4294906733, 4294906763, 4294906841, 4294906859, 4294906937, 4294907057, 4294907063, 4294907141, 4294907231,
4294907249, 4294907261, 4294907267, 4294907387, 4294907417, 4294907567, 4294907603, 4294907699, 4294907789, 4294907849, 4294907873, 4294907879, 4294908023, 4294908071, 4294908119, 4294908209, 4294908227, 4294908329, 4294908491, 4294908503, 4294908569, 4294908653, 4294908713, 4294908719, 4294908791, 4294908839, 4294908869, 4294908989, 4294909031, 4294909067, 4294909109, 4294909253, 4294909529, 4294909589, 4294909643, 4294909739, 4294909799, 4294909811, 4294909853, 4294910003, 4294910039, 4294910189, 4294910201, 4294910219, 4294910273, 4294910333,
4294910369, 4294910393, 4294910471, 4294910549, 4294910651, 4294910669, 4294910681, 4294910711, 4294910753, 4294910801, 4294910981, 4294911053, 4294911143, 4294911227, 4294911239, 4294911359, 4294911383, 4294911407, 4294911521, 4294911551, 4294911611, 4294911641, 4294911689, 4294911719, 4294911869, 4294912109, 4294912133, 4294912151, 4294912187, 4294912223, 4294912331, 4294912439, 4294912607, 4294912703, 4294912859, 4294912871, 4294912907, 4294912961, 4294913003, 4294913111, 4294913309, 4294913333, 4294913357, 4294913399, 4294913411, 4294913459,
4294913501, 4294913531, 4294913591, 4294913609, 4294913663, 4294913783, 4294913819, 4294913903, 4294914137, 4294914413, 4294914473, 4294914497, 4294914527, 4294914551, 4294914593, 4294914611, 4294914659, 4294914671, 4294914743, 4294914863, 4294914917, 4294915061, 4294915103, 4294915139, 4294915217, 4294915223, 4294915253, 4294915283, 4294915373, 4294915433, 4294915607, 4294916069, 4294916213, 4294916267, 4294916303, 4294916393, 4294916441, 4294916477, 4294916507, 4294916573, 4294916633, 4294916687, 4294916783, 4294916837, 4294916897, 4294916921,
4294917029, 4294917047, 4294917101, 4294917203, 4294917287, 4294917299, 4294917389, 4294917437, 4294917527, 4294917557, 4294917611, 4294917617, 4294917689, 4294917821, 4294917857, 4294917917, 4294917941, 4294918169, 4294918187, 4294918307, 4294918409, 4294918433, 4294918481, 4294918703, 4294918709, 4294918733, 4294918799, 4294918871, 4294919009, 4294919249, 4294919279, 4294919291, 4294919363, 4294919381, 4294919441, 4294919447, 4294919549, 4294919579, 4294919633, 4294919657, 4294919669, 4294919693, 4294919711, 4294920029, 4294920059, 4294920089,
4294920197, 4294920239, 4294920257, 4294920263, 4294920269, 4294920341, 4294920353, 4294920407, 4294920503, 4294920599, 4294920647, 4294920743, 4294920803, 4294920809, 4294920881, 4294920899, 4294920983, 4294921043, 4294921139, 4294921151, 4294921181, 4294921229, 4294921289, 4294921331, 4294921343, 4294921391, 4294921469, 4294921709, 4294921721, 4294921823, 4294921847, 4294921889, 4294922057, 4294922171, 4294922201, 4294922237, 4294922309, 4294922399, 4294922447, 4294922507, 4294922513, 4294922549, 4294922609, 4294922663, 4294922861, 4294922933,
4294923101, 4294923191, 4294923209, 4294923221, 4294923251, 4294923263, 4294923359, 4294923371, 4294923377, 4294923461, 4294923521, 4294923953, 4294924001, 4294924091, 4294924121, 4294924319, 4294924397, 4294924571, 4294924583, 4294924751, 4294924817, 4294924823, 4294924847, 4294924877, 4294925003, 4294925027, 4294925117, 4294925237, 4294925243, 4294925297, 4294925369, 4294925627, 4294925639, 4294925729, 4294925747, 4294925873, 4294925891, 4294925933, 4294926047, 4294926059, 4294926209, 4294926221, 4294926233, 4294926257, 4294926329, 4294926371,
4294926401, 4294926413, 4294926437, 4294926563, 4294926569, 4294926917, 4294926923, 4294926947, 4294926971, 4294927067, 4294927073, 4294927151, 4294927349, 4294927367, 4294927403, 4294927481, 4294927523, 4294927553, 4294927589, 4294927649, 4294927673, 4294927727, 4294927739, 4294927763, 4294927889, 4294928183, 4294928207, 4294928249, 4294928327, 4294928351, 4294928399, 4294928483, 4294928489, 4294928543, 4294928597, 4294928951, 4294928963, 4294928981, 4294929017, 4294929059, 4294929161, 4294929197, 4294929233, 4294929269, 4294929311, 4294929323,
4294929341, 4294929383, 4294929401, 4294929497, 4294929509, 4294929581, 4294929707, 4294929743, 4294930043, 4294930121, 4294930193, 4294930223, 4294930349, 4294930403, 4294930571, 4294930613, 4294930721, 4294930751, 4294930877, 4294930931, 4294930961, 4294930967, 4294930973, 4294931021, 4294931051, 4294931057, 4294931063, 4294931219, 4294931273, 4294931339, 4294931423, 4294931441, 4294931453, 4294931567, 4294931639, 4294931717, 4294931897, 4294931969, 4294932023, 4294932053, 4294932239, 4294932299, 4294932443, 4294932671, 4294932677, 4294932731,
4294932743, 4294932767, 4294932773, 4294932779, 4294932881, 4294932899, 4294932929, 4294933067, 4294933277, 4294933307, 4294933343, 4294933451, 4294933523, 4294933763, 4294933793, 4294933829, 4294933847, 4294933871, 4294933997, 4294934033, 4294934111, 4294934207, 4294934243, 4294934267, 4294934279, 4294934291, 4294934327, 4294934363, 4294934423, 4294934489, 4294934561, 4294934867, 4294934921, 4294934969, 4294935137, 4294935239, 4294935299, 4294935431, 4294935539, 4294935629, 4294935701, 4294935791, 4294935797, 4294935803, 4294935959, 4294936001,
4294936007, 4294936037, 4294936079, 4294936127, 4294936163, 4294936247, 4294936307, 4294936331, 4294936409, 4294936451, 4294936601, 4294936607, 4294936619, 4294936667, 4294936709, 4294936733, 4294936751, 4294936763, 4294936829, 4294936937, 4294936997, 4294937027, 4294937051, 4294937093, 4294937177, 4294937213, 4294937291, 4294937381, 4294937417, 4294937429, 4294937681, 4294937693, 4294937753, 4294937771, 4294937813, 4294937837, 4294937891, 4294937969, 4294938071, 4294938101, 4294938323, 4294938371, 4294938401, 4294938467, 4294938473, 4294938521,
4294938599, 4294938731, 4294938779, 4294938833, 4294938899, 4294938977, 4294938983, 4294939067, 4294939127, 4294939223, 4294939277, 4294939331, 4294939337, 4294939391, 4294939457, 4294939559, 4294939673, 4294939691, 4294939901, 4294939991, 4294940087, 4294940093, 4294940189, 4294940213, 4294940417, 4294940657, 4294940699, 4294940753, 4294940801, 4294940873, 4294940951, 4294941047, 4294941143, 4294941161, 4294941227, 4294941281, 4294941377, 4294941509, 4294941551, 4294941701, 4294941731, 4294941767, 4294941911, 4294941923, 4294942043, 4294942139,
4294942313, 4294942343, 4294942373, 4294942427, 4294942529, 4294942601, 4294942649, 4294942673, 4294942679, 4294942733, 4294942769, 4294942811, 4294942961, 4294943129, 4294943141, 4294943219, 4294943369, 4294943423, 4294943471, 4294943651, 4294943687, 4294943717, 4294943729, 4294943747, 4294943759, 4294943813, 4294943819, 4294943891, 4294944077, 4294944191, 4294944233, 4294944239, 4294944353, 4294944389, 4294944581, 4294944623, 4294944629, 4294944659, 4294944821, 4294945031, 4294945157, 4294945211, 4294945229, 4294945301, 4294945337, 4294945343,
4294945511, 4294945547, 4294945667, 4294945709, 4294945757, 4294945841, 4294945991, 4294946033, 4294946099, 4294946153, 4294946477, 4294946687, 4294946747, 4294946957, 4294946993, 4294947023, 4294947131, 4294947167, 4294947287, 4294947311, 4294947413, 4294947581, 4294947599, 4294947671, 4294947851, 4294947959, 4294948067, 4294948073, 4294948193, 4294948259, 4294948421, 4294948451, 4294948613, 4294948673, 4294948883, 4294949027, 4294949057, 4294949069, 4294949519, 4294949531, 4294949603, 4294949609, 4294949627, 4294949693, 4294949729, 4294949741,
4294949807, 4294949921, 4294949939, 4294949981, 4294949993, 4294950083, 4294950173, 4294950197, 4294950251, 4294950287, 4294950317, 4294950323, 4294950329, 4294950581, 4294950593, 4294950617, 4294950629, 4294950713, 4294950929, 4294951151, 4294951163, 4294951169, 4294951379, 4294951583, 4294951613, 4294951853, 4294951907, 4294951913, 4294951937, 4294951961, 4294952063, 4294952183, 4294952393, 4294952543, 4294952549, 4294952597, 4294952627, 4294952687, 4294952723, 4294952729, 4294952789, 4294952819, 4294952873, 4294952891, 4294952903, 4294952969,
4294952999, 4294953023, 4294953107, 4294953173, 4294953281, 4294953341, 4294953431, 4294953599, 4294953689, 4294953719, 4294953827, 4294953887, 4294953977, 4294954073, 4294954079, 4294954157, 4294954217, 4294954283, 4294954607, 4294954667, 4294954859, 4294954901, 4294954973, 4294955081, 4294955237, 4294955273, 4294955327, 4294955441, 4294955507, 4294955591, 4294955789, 4294955831, 4294955837, 4294955927, 4294955963, 4294955969, 4294955987, 4294956041, 4294956047, 4294956197, 4294956323, 4294956359, 4294956551, 4294956593, 4294956623, 4294956629,
4294956641, 4294956719, 4294956761, 4294956767, 4294956797, 4294956821, 4294956833, 4294957037, 4294957079, 4294957103, 4294957181, 4294957349, 4294957379, 4294957433, 4294957463, 4294957511, 4294957577, 4294957727, 4294957859, 4294957877, 4294958039, 4294958153, 4294958309, 4294958417, 4294958441, 4294958693, 4294958717, 4294958753, 4294958903, 4294958909, 4294959017, 4294959071, 4294959107, 4294959161, 4294959257, 4294959299, 4294959329, 4294959431, 4294959593, 4294959599, 4294959659, 4294959893, 4294959917, 4294959983, 4294960001, 4294960031,
4294960061, 4294960079, 4294960097, 4294960271, 4294960283, 4294960349, 4294960367, 4294960421, 4294960529, 4294960541, 4294960583, 4294960613, 4294960673, 4294960691, 4294960697, 4294960787, 4294960919, 4294961003, 4294961039, 4294961153, 4294961159, 4294961171, 4294961321, 4294961411, 4294961471, 4294961507, 4294961537, 4294961669, 4294961717, 4294961741, 4294961873, 4294962059, 4294962137, 4294962167, 4294962263, 4294962281, 4294962311, 4294962341, 4294962413, 4294962521, 4294962563, 4294962761, 4294962893, 4294963103, 4294963163, 4294963223,
4294963313, 4294963349, 4294963427, 4294963547, 4294963559, 4294963721, 4294963799, 4294963817, 4294963901, 4294963919, 4294964021, 4294964279, 4294964297, 4294964363, 4294964387, 4294964411, 4294964567, 4294964603, 4294964687, 4294964777, 4294965041, 4294965071, 4294965119, 4294965221, 4294965251, 4294965287, 4294965413, 4294965569, 4294965647, 4294965671, 4294965689, 4294965779, 4294965839, 4294965893, 4294966091, 4294966109, 4294966127, 4294966157, 4294966187, 4294966199, 4294966211, 4294966403, 4294966457, 4294966499, 4294966541, 4294966637,
4294966661, 4294966739, 4294966823, 4294966883, 4294966901, 4294966961, 4294967027, 4294967087, 4294967099, 4294967123, 4294967153, 4294967249};
const uint32_t ZT_MIMC52_PRIMES[1024] = {
4294895267, 4294895477, 4294895513, 4294895519, 4294895543, 4294895567, 4294895657, 4294895711, 4294895777,
4294895861, 4294895909, 4294895921, 4294895969, 4294896011, 4294896149, 4294896227, 4294896401, 4294896473,
4294896527, 4294896563, 4294896653, 4294896731, 4294896863, 4294896899, 4294896983, 4294897037, 4294897103,
4294897331, 4294897349, 4294897451, 4294897571, 4294897661, 4294897703, 4294897757, 4294897793, 4294897811,
4294897817, 4294897829, 4294897877, 4294897919, 4294897991, 4294898027, 4294898129, 4294898153, 4294898231,
4294898273, 4294898279, 4294898291, 4294898363, 4294898369, 4294898417, 4294898423, 4294898453, 4294898489,
4294898573, 4294898579, 4294898639, 4294898693, 4294898747, 4294898759, 4294898867, 4294898879, 4294898909,
4294898921, 4294898933, 4294899011, 4294899041, 4294899047, 4294899203, 4294899221, 4294899227, 4294899287,
4294899341, 4294899431, 4294899509, 4294899533, 4294899539, 4294899551, 4294899629, 4294899791, 4294899809,
4294899971, 4294900001, 4294900007, 4294900013, 4294900307, 4294900331, 4294900427, 4294900469, 4294900481,
4294900541, 4294900583, 4294900781, 4294900853, 4294900931, 4294900991, 4294901033, 4294901087, 4294901159,
4294901267, 4294901393, 4294901411, 4294901489, 4294901657, 4294902011, 4294902071, 4294902101, 4294902107,
4294902353, 4294902377, 4294902599, 4294902647, 4294902743, 4294902869, 4294902977, 4294903067, 4294903103,
4294903259, 4294903289, 4294903397, 4294903421, 4294903493, 4294903577, 4294903631, 4294903637, 4294903733,
4294903799, 4294903823, 4294904003, 4294904033, 4294904081, 4294904129, 4294904279, 4294904297, 4294904303,
4294904333, 4294904351, 4294904381, 4294904453, 4294904519, 4294904561, 4294904639, 4294904657, 4294904747,
4294904807, 4294904843, 4294905089, 4294905149, 4294905293, 4294905299, 4294905311, 4294905443, 4294905479,
4294905539, 4294905623, 4294905641, 4294905671, 4294905707, 4294905887, 4294905977, 4294906091, 4294906103,
4294906139, 4294906157, 4294906223, 4294906259, 4294906487, 4294906493, 4294906523, 4294906547, 4294906553,
4294906571, 4294906577, 4294906589, 4294906703, 4294906733, 4294906763, 4294906841, 4294906859, 4294906937,
4294907057, 4294907063, 4294907141, 4294907231, 4294907249, 4294907261, 4294907267, 4294907387, 4294907417,
4294907567, 4294907603, 4294907699, 4294907789, 4294907849, 4294907873, 4294907879, 4294908023, 4294908071,
4294908119, 4294908209, 4294908227, 4294908329, 4294908491, 4294908503, 4294908569, 4294908653, 4294908713,
4294908719, 4294908791, 4294908839, 4294908869, 4294908989, 4294909031, 4294909067, 4294909109, 4294909253,
4294909529, 4294909589, 4294909643, 4294909739, 4294909799, 4294909811, 4294909853, 4294910003, 4294910039,
4294910189, 4294910201, 4294910219, 4294910273, 4294910333, 4294910369, 4294910393, 4294910471, 4294910549,
4294910651, 4294910669, 4294910681, 4294910711, 4294910753, 4294910801, 4294910981, 4294911053, 4294911143,
4294911227, 4294911239, 4294911359, 4294911383, 4294911407, 4294911521, 4294911551, 4294911611, 4294911641,
4294911689, 4294911719, 4294911869, 4294912109, 4294912133, 4294912151, 4294912187, 4294912223, 4294912331,
4294912439, 4294912607, 4294912703, 4294912859, 4294912871, 4294912907, 4294912961, 4294913003, 4294913111,
4294913309, 4294913333, 4294913357, 4294913399, 4294913411, 4294913459, 4294913501, 4294913531, 4294913591,
4294913609, 4294913663, 4294913783, 4294913819, 4294913903, 4294914137, 4294914413, 4294914473, 4294914497,
4294914527, 4294914551, 4294914593, 4294914611, 4294914659, 4294914671, 4294914743, 4294914863, 4294914917,
4294915061, 4294915103, 4294915139, 4294915217, 4294915223, 4294915253, 4294915283, 4294915373, 4294915433,
4294915607, 4294916069, 4294916213, 4294916267, 4294916303, 4294916393, 4294916441, 4294916477, 4294916507,
4294916573, 4294916633, 4294916687, 4294916783, 4294916837, 4294916897, 4294916921, 4294917029, 4294917047,
4294917101, 4294917203, 4294917287, 4294917299, 4294917389, 4294917437, 4294917527, 4294917557, 4294917611,
4294917617, 4294917689, 4294917821, 4294917857, 4294917917, 4294917941, 4294918169, 4294918187, 4294918307,
4294918409, 4294918433, 4294918481, 4294918703, 4294918709, 4294918733, 4294918799, 4294918871, 4294919009,
4294919249, 4294919279, 4294919291, 4294919363, 4294919381, 4294919441, 4294919447, 4294919549, 4294919579,
4294919633, 4294919657, 4294919669, 4294919693, 4294919711, 4294920029, 4294920059, 4294920089, 4294920197,
4294920239, 4294920257, 4294920263, 4294920269, 4294920341, 4294920353, 4294920407, 4294920503, 4294920599,
4294920647, 4294920743, 4294920803, 4294920809, 4294920881, 4294920899, 4294920983, 4294921043, 4294921139,
4294921151, 4294921181, 4294921229, 4294921289, 4294921331, 4294921343, 4294921391, 4294921469, 4294921709,
4294921721, 4294921823, 4294921847, 4294921889, 4294922057, 4294922171, 4294922201, 4294922237, 4294922309,
4294922399, 4294922447, 4294922507, 4294922513, 4294922549, 4294922609, 4294922663, 4294922861, 4294922933,
4294923101, 4294923191, 4294923209, 4294923221, 4294923251, 4294923263, 4294923359, 4294923371, 4294923377,
4294923461, 4294923521, 4294923953, 4294924001, 4294924091, 4294924121, 4294924319, 4294924397, 4294924571,
4294924583, 4294924751, 4294924817, 4294924823, 4294924847, 4294924877, 4294925003, 4294925027, 4294925117,
4294925237, 4294925243, 4294925297, 4294925369, 4294925627, 4294925639, 4294925729, 4294925747, 4294925873,
4294925891, 4294925933, 4294926047, 4294926059, 4294926209, 4294926221, 4294926233, 4294926257, 4294926329,
4294926371, 4294926401, 4294926413, 4294926437, 4294926563, 4294926569, 4294926917, 4294926923, 4294926947,
4294926971, 4294927067, 4294927073, 4294927151, 4294927349, 4294927367, 4294927403, 4294927481, 4294927523,
4294927553, 4294927589, 4294927649, 4294927673, 4294927727, 4294927739, 4294927763, 4294927889, 4294928183,
4294928207, 4294928249, 4294928327, 4294928351, 4294928399, 4294928483, 4294928489, 4294928543, 4294928597,
4294928951, 4294928963, 4294928981, 4294929017, 4294929059, 4294929161, 4294929197, 4294929233, 4294929269,
4294929311, 4294929323, 4294929341, 4294929383, 4294929401, 4294929497, 4294929509, 4294929581, 4294929707,
4294929743, 4294930043, 4294930121, 4294930193, 4294930223, 4294930349, 4294930403, 4294930571, 4294930613,
4294930721, 4294930751, 4294930877, 4294930931, 4294930961, 4294930967, 4294930973, 4294931021, 4294931051,
4294931057, 4294931063, 4294931219, 4294931273, 4294931339, 4294931423, 4294931441, 4294931453, 4294931567,
4294931639, 4294931717, 4294931897, 4294931969, 4294932023, 4294932053, 4294932239, 4294932299, 4294932443,
4294932671, 4294932677, 4294932731, 4294932743, 4294932767, 4294932773, 4294932779, 4294932881, 4294932899,
4294932929, 4294933067, 4294933277, 4294933307, 4294933343, 4294933451, 4294933523, 4294933763, 4294933793,
4294933829, 4294933847, 4294933871, 4294933997, 4294934033, 4294934111, 4294934207, 4294934243, 4294934267,
4294934279, 4294934291, 4294934327, 4294934363, 4294934423, 4294934489, 4294934561, 4294934867, 4294934921,
4294934969, 4294935137, 4294935239, 4294935299, 4294935431, 4294935539, 4294935629, 4294935701, 4294935791,
4294935797, 4294935803, 4294935959, 4294936001, 4294936007, 4294936037, 4294936079, 4294936127, 4294936163,
4294936247, 4294936307, 4294936331, 4294936409, 4294936451, 4294936601, 4294936607, 4294936619, 4294936667,
4294936709, 4294936733, 4294936751, 4294936763, 4294936829, 4294936937, 4294936997, 4294937027, 4294937051,
4294937093, 4294937177, 4294937213, 4294937291, 4294937381, 4294937417, 4294937429, 4294937681, 4294937693,
4294937753, 4294937771, 4294937813, 4294937837, 4294937891, 4294937969, 4294938071, 4294938101, 4294938323,
4294938371, 4294938401, 4294938467, 4294938473, 4294938521, 4294938599, 4294938731, 4294938779, 4294938833,
4294938899, 4294938977, 4294938983, 4294939067, 4294939127, 4294939223, 4294939277, 4294939331, 4294939337,
4294939391, 4294939457, 4294939559, 4294939673, 4294939691, 4294939901, 4294939991, 4294940087, 4294940093,
4294940189, 4294940213, 4294940417, 4294940657, 4294940699, 4294940753, 4294940801, 4294940873, 4294940951,
4294941047, 4294941143, 4294941161, 4294941227, 4294941281, 4294941377, 4294941509, 4294941551, 4294941701,
4294941731, 4294941767, 4294941911, 4294941923, 4294942043, 4294942139, 4294942313, 4294942343, 4294942373,
4294942427, 4294942529, 4294942601, 4294942649, 4294942673, 4294942679, 4294942733, 4294942769, 4294942811,
4294942961, 4294943129, 4294943141, 4294943219, 4294943369, 4294943423, 4294943471, 4294943651, 4294943687,
4294943717, 4294943729, 4294943747, 4294943759, 4294943813, 4294943819, 4294943891, 4294944077, 4294944191,
4294944233, 4294944239, 4294944353, 4294944389, 4294944581, 4294944623, 4294944629, 4294944659, 4294944821,
4294945031, 4294945157, 4294945211, 4294945229, 4294945301, 4294945337, 4294945343, 4294945511, 4294945547,
4294945667, 4294945709, 4294945757, 4294945841, 4294945991, 4294946033, 4294946099, 4294946153, 4294946477,
4294946687, 4294946747, 4294946957, 4294946993, 4294947023, 4294947131, 4294947167, 4294947287, 4294947311,
4294947413, 4294947581, 4294947599, 4294947671, 4294947851, 4294947959, 4294948067, 4294948073, 4294948193,
4294948259, 4294948421, 4294948451, 4294948613, 4294948673, 4294948883, 4294949027, 4294949057, 4294949069,
4294949519, 4294949531, 4294949603, 4294949609, 4294949627, 4294949693, 4294949729, 4294949741, 4294949807,
4294949921, 4294949939, 4294949981, 4294949993, 4294950083, 4294950173, 4294950197, 4294950251, 4294950287,
4294950317, 4294950323, 4294950329, 4294950581, 4294950593, 4294950617, 4294950629, 4294950713, 4294950929,
4294951151, 4294951163, 4294951169, 4294951379, 4294951583, 4294951613, 4294951853, 4294951907, 4294951913,
4294951937, 4294951961, 4294952063, 4294952183, 4294952393, 4294952543, 4294952549, 4294952597, 4294952627,
4294952687, 4294952723, 4294952729, 4294952789, 4294952819, 4294952873, 4294952891, 4294952903, 4294952969,
4294952999, 4294953023, 4294953107, 4294953173, 4294953281, 4294953341, 4294953431, 4294953599, 4294953689,
4294953719, 4294953827, 4294953887, 4294953977, 4294954073, 4294954079, 4294954157, 4294954217, 4294954283,
4294954607, 4294954667, 4294954859, 4294954901, 4294954973, 4294955081, 4294955237, 4294955273, 4294955327,
4294955441, 4294955507, 4294955591, 4294955789, 4294955831, 4294955837, 4294955927, 4294955963, 4294955969,
4294955987, 4294956041, 4294956047, 4294956197, 4294956323, 4294956359, 4294956551, 4294956593, 4294956623,
4294956629, 4294956641, 4294956719, 4294956761, 4294956767, 4294956797, 4294956821, 4294956833, 4294957037,
4294957079, 4294957103, 4294957181, 4294957349, 4294957379, 4294957433, 4294957463, 4294957511, 4294957577,
4294957727, 4294957859, 4294957877, 4294958039, 4294958153, 4294958309, 4294958417, 4294958441, 4294958693,
4294958717, 4294958753, 4294958903, 4294958909, 4294959017, 4294959071, 4294959107, 4294959161, 4294959257,
4294959299, 4294959329, 4294959431, 4294959593, 4294959599, 4294959659, 4294959893, 4294959917, 4294959983,
4294960001, 4294960031, 4294960061, 4294960079, 4294960097, 4294960271, 4294960283, 4294960349, 4294960367,
4294960421, 4294960529, 4294960541, 4294960583, 4294960613, 4294960673, 4294960691, 4294960697, 4294960787,
4294960919, 4294961003, 4294961039, 4294961153, 4294961159, 4294961171, 4294961321, 4294961411, 4294961471,
4294961507, 4294961537, 4294961669, 4294961717, 4294961741, 4294961873, 4294962059, 4294962137, 4294962167,
4294962263, 4294962281, 4294962311, 4294962341, 4294962413, 4294962521, 4294962563, 4294962761, 4294962893,
4294963103, 4294963163, 4294963223, 4294963313, 4294963349, 4294963427, 4294963547, 4294963559, 4294963721,
4294963799, 4294963817, 4294963901, 4294963919, 4294964021, 4294964279, 4294964297, 4294964363, 4294964387,
4294964411, 4294964567, 4294964603, 4294964687, 4294964777, 4294965041, 4294965071, 4294965119, 4294965221,
4294965251, 4294965287, 4294965413, 4294965569, 4294965647, 4294965671, 4294965689, 4294965779, 4294965839,
4294965893, 4294966091, 4294966109, 4294966127, 4294966157, 4294966187, 4294966199, 4294966211, 4294966403,
4294966457, 4294966499, 4294966541, 4294966637, 4294966661, 4294966739, 4294966823, 4294966883, 4294966901,
4294966961, 4294967027, 4294967087, 4294967099, 4294967123, 4294967153, 4294967249
};
#ifdef ZT_NO_IEEE_DOUBLE
@ -49,14 +143,14 @@ const uint32_t ZT_MIMC52_PRIMES[1024] = {4294895267, 4294895477, 4294895513, 429
* much slower on systems that can use the FPU hack. */
static uint64_t mulmod64(uint64_t a, uint64_t b, const uint64_t m)
{
uint64_t res = 0;
while ((a)) {
if ((a << 63U))
res = (res + b) % m;
a >>= 1U;
b = (b << 1U) % m;
}
return res;
uint64_t res = 0;
while ((a)) {
if ((a << 63U))
res = (res + b) % m;
a >>= 1U;
b = (b << 1U) % m;
}
return res;
}
#define mulmod52(a, b, m, mf) mulmod64((a), (b), (m))
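The loop above is ordinary double-and-add (Russian peasant) modular multiplication: the low bit of a is tested on each pass and b is doubled modulo m, so no 128-bit intermediate is ever needed. A minimal standalone sketch of the same idea, written with the more conventional bit test and runnable outside the tree:

// Standalone sketch (not part of the commit): the same double-and-add idea,
// compiled and run as an ordinary program.
#include <cstdint>
#include <cstdio>

static uint64_t mulmod_sketch(uint64_t a, uint64_t b, const uint64_t m)
{
    uint64_t res = 0;
    b %= m;
    while (a != 0) {
        if (a & 1U)               // same truth test as (a << 63U) for a 64-bit operand
            res = (res + b) % m;  // accumulate b for every set bit of a
        a >>= 1U;
        b = (b << 1U) % m;        // double b modulo m each step
    }
    return res;
}

int main()
{
    // 123456789 * 987654321 mod 1000000007 == 259106859
    std::printf("%llu\n", (unsigned long long)mulmod_sketch(123456789ULL, 987654321ULL, 1000000007ULL));
    return 0;
}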
@ -69,10 +163,10 @@ static uint64_t mulmod64(uint64_t a, uint64_t b, const uint64_t m)
* performs fairly equally across CPUs. */
ZT_INLINE uint64_t mulmod52(uint64_t a, const uint64_t b, const uint64_t m, const double mf)
{
a = ( ( a * b ) - ( ((uint64_t)(((double)a * (double)b) / mf) - 1) * m ) );
//a -= m * (uint64_t)(a > m); // faster on some systems, but slower on newer cores
a %= m;
return a;
a = ((a * b) - (((uint64_t)(((double)a * (double)b) / mf) - 1) * m));
// a -= m * (uint64_t)(a > m); // faster on some systems, but slower on newer cores
a %= m;
return a;
}
#endif
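The FPU variant above relies on the double-precision quotient estimate: after the explicit -1 bias it can only undershoot floor(a*b/m), and the leftover multiples of m are removed by the single final %= m, which is cheap because the remainder stays small for 52-bit operands. A standalone cross-check against exact 128-bit arithmetic (assumption: a compiler providing unsigned __int128, such as GCC or Clang; the modulus below merely has the same 52-bit shape and is not claimed to be one of the table primes):

// Standalone check comparing the floating-point quotient trick against exact
// 128-bit arithmetic for random 52-bit operands.
#include <cstdint>
#include <cstdio>
#include <random>

static uint64_t mulmod52_fpu(uint64_t a, const uint64_t b, const uint64_t m, const double mf)
{
    // Estimate q = floor(a*b/m) in double precision; subtracting (q-1)*m keeps the
    // estimate on the low side, and the final %= m removes the surplus.
    a = ((a * b) - (((uint64_t)(((double)a * (double)b) / mf) - 1) * m));
    a %= m;
    return a;
}

int main()
{
    std::mt19937_64 rng(12345);
    const uint64_t m = 0x000fffff00000000ULL | 4294967291ULL;   // a modulus of the same 52-bit shape
    const double mf = (double)m;
    for (int i = 0; i < 1000000; ++i) {
        const uint64_t a = rng() % m, b = rng() % m;
        const uint64_t exact = (uint64_t)(((unsigned __int128)a * b) % m);
        if (mulmod52_fpu(a, b, m, mf) != exact) {
            std::printf("mismatch at a=%llu b=%llu\n", (unsigned long long)a, (unsigned long long)b);
            return 1;
        }
    }
    std::puts("all ok");
    return 0;
}

The surrounding #ifdef selects this path only where IEEE doubles are usable, falling back to the shift-and-add version above otherwise.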
@ -80,18 +174,19 @@ ZT_INLINE uint64_t mulmod52(uint64_t a, const uint64_t b, const uint64_t m, cons
// Compute a^e%m (mf is m in floating point form to avoid repeated conversion)
ZT_INLINE uint64_t modpow52(uint64_t a, uint64_t e, const uint64_t m, const double mf)
{
uint64_t res = 1ULL;
for (;;) {
if ((e << 63U)) {
res = mulmod52(res, a, m, mf);
}
if (likely((e >>= 1U) != 0)) {
a = mulmod52(a, a, m, mf);
} else {
break;
}
}
return res;
uint64_t res = 1ULL;
for (;;) {
if ((e << 63U)) {
res = mulmod52(res, a, m, mf);
}
if (likely((e >>= 1U) != 0)) {
a = mulmod52(a, a, m, mf);
}
else {
break;
}
}
return res;
}
static const ZeroTier::AES s_mimc52AES("abcdefghijklmnopqrstuvwxyz012345");
@ -100,60 +195,60 @@ static const ZeroTier::AES s_mimc52AES("abcdefghijklmnopqrstuvwxyz012345");
// This doesn't have to be non-reversible or secure, just strongly random.
ZT_INLINE void fillK(uint64_t k[34], const uint8_t challenge[32])
{
s_mimc52AES.encrypt(challenge, k);
s_mimc52AES.encrypt(challenge + 16, k + 2);
k[2] ^= k[0];
k[3] ^= k[1];
for (unsigned int i = 2, j = 4; j < 34; i += 2, j += 2)
s_mimc52AES.encrypt(k + i, k + j);
s_mimc52AES.encrypt(challenge, k);
s_mimc52AES.encrypt(challenge + 16, k + 2);
k[2] ^= k[0];
k[3] ^= k[1];
for (unsigned int i = 2, j = 4; j < 34; i += 2, j += 2)
s_mimc52AES.encrypt(k + i, k + j);
#if __BYTE_ORDER == __BIG_ENDIAN
for (unsigned int i = 0; i < 34; ++i)
k[i] = Utils::swapBytes(k[i]);
for (unsigned int i = 0; i < 34; ++i)
k[i] = Utils::swapBytes(k[i]);
#endif
}
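fillK() only needs deterministic, well-mixed round keys, so it chains a block cipher over the challenge: two blocks from the two challenge halves, an XOR to couple them, then each new pair of words derived from the previous pair. A toy standalone sketch of that expansion structure only; toyEncrypt below is a made-up stand-in for the fixed-key AES used above and is not cryptographic:

// Toy illustration of the key-expansion chaining only.
#include <cstdint>
#include <cstdio>

// Stand-in for a fixed-key 128-bit block encryption; NOT cryptographic.
static void toyEncrypt(const uint64_t in[2], uint64_t out[2])
{
    out[0] = (in[0] ^ 0x9e3779b97f4a7c15ULL) * 0xbf58476d1ce4e5b9ULL;
    out[1] = (in[1] + 0xd6e8feb86659fd93ULL) ^ (out[0] >> 31U);
}

static void fillKSketch(uint64_t k[34], const uint64_t challenge[4])
{
    toyEncrypt(challenge, k);          // k[0..1] from the first half of the challenge
    toyEncrypt(challenge + 2, k + 2);  // k[2..3] from the second half
    k[2] ^= k[0];
    k[3] ^= k[1];
    for (unsigned int i = 2, j = 4; j < 34; i += 2, j += 2)
        toyEncrypt(k + i, k + j);      // chain the remaining 15 pairs
}

int main()
{
    const uint64_t challenge[4] = { 1, 2, 3, 4 };
    uint64_t k[34];
    fillKSketch(k, challenge);
    std::printf("k[32]=%016llx k[33]=%016llx\n", (unsigned long long)k[32], (unsigned long long)k[33]);
    return 0;
}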
} // anonymous namespace
} // anonymous namespace
namespace ZeroTier {
namespace MIMC52 {
uint64_t delay(const uint8_t challenge[32], const unsigned long rounds)
{
uint64_t k[34];
fillK(k, challenge);
uint64_t k[34];
fillK(k, challenge);
const uint64_t p = 0x000fffff00000000ULL | (uint64_t)ZT_MIMC52_PRIMES[((unsigned long)k[32]) & 1023];
const uint64_t e = ((p * 2ULL) - 1ULL) / 3ULL;
const uint64_t m52 = 0xfffffffffffffULL;
const double pf = (double)p;
const uint64_t p = 0x000fffff00000000ULL | (uint64_t)ZT_MIMC52_PRIMES[((unsigned long)k[32]) & 1023];
const uint64_t e = ((p * 2ULL) - 1ULL) / 3ULL;
const uint64_t m52 = 0xfffffffffffffULL;
const double pf = (double)p;
uint64_t x = k[33] % p;
for (unsigned long r = 0, kn = rounds; r < rounds; ++r) {
x = (x - k[--kn & 31]) & m52;
x = modpow52(x, e, p, pf);
}
uint64_t x = k[33] % p;
for (unsigned long r = 0, kn = rounds; r < rounds; ++r) {
x = (x - k[--kn & 31]) & m52;
x = modpow52(x, e, p, pf);
}
return x;
return x;
}
bool verify(const uint8_t challenge[32], const unsigned long rounds, uint64_t proof)
{
uint64_t k[34];
fillK(k, challenge);
uint64_t k[34];
fillK(k, challenge);
const uint64_t p = 0x000fffff00000000ULL | (uint64_t)ZT_MIMC52_PRIMES[((unsigned long)k[32]) & 1023];
const uint64_t m52 = 0xfffffffffffffULL;
const double pf = (double)p;
const uint64_t p = 0x000fffff00000000ULL | (uint64_t)ZT_MIMC52_PRIMES[((unsigned long)k[32]) & 1023];
const uint64_t m52 = 0xfffffffffffffULL;
const double pf = (double)p;
for (unsigned long r = 0; r < rounds; ++r) {
const uint64_t kk = k[r & 31];
proof = mulmod52(mulmod52(proof, proof, p, pf), proof, p, pf); // y = y ^ 3
proof = (proof + kk) & m52;
}
for (unsigned long r = 0; r < rounds; ++r) {
const uint64_t kk = k[r & 31];
proof = mulmod52(mulmod52(proof, proof, p, pf), proof, p, pf); // y = y ^ 3
proof = (proof + kk) & m52;
}
return ((proof % p) == (k[33] % p));
return ((proof % p) == (k[33] % p));
}
} // namespace MIMC52
} // namespace ZeroTier
} // namespace MIMC52
} // namespace ZeroTier
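delay() above repeatedly subtracts a round key and takes a modular cube root (raising to e = (2p-1)/3), while verify() replays the rounds in reverse by cubing, which is far cheaper. The construction works because, for p ≡ 2 (mod 3), 3e = 2(p-1) + 1 ≡ 1 (mod p-1), so cubing and raising to e are inverse permutations of the nonzero residues. A tiny standalone demonstration of that inverse relationship over an 11-element field (nothing ZeroTier-specific is assumed):

// Standalone number-theory demo: x -> x^e and y -> y^3 undo each other mod p
// when p ≡ 2 (mod 3) and e = (2p-1)/3.
#include <cstdint>
#include <cstdio>

static uint64_t powmod(uint64_t a, uint64_t e, uint64_t m)
{
    uint64_t r = 1;
    a %= m;
    while (e) {
        if (e & 1U)
            r = (r * a) % m;   // multiply in the current square when the low bit is set
        e >>= 1U;
        a = (a * a) % m;       // square for the next bit
    }
    return r;
}

int main()
{
    const uint64_t p = 11;              // 11 ≡ 2 (mod 3)
    const uint64_t e = (2 * p - 1) / 3; // 7, the "cube root" exponent
    for (uint64_t x = 1; x < p; ++x) {
        const uint64_t root = powmod(x, e, p);    // slow direction (what delay() iterates)
        const uint64_t back = powmod(root, 3, p); // fast direction (what verify() iterates)
        std::printf("x=%llu  x^e=%llu  (x^e)^3=%llu\n",
                    (unsigned long long)x, (unsigned long long)root, (unsigned long long)back);
    }
    return 0;
}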

View file

@ -56,7 +56,7 @@ uint64_t delay(const uint8_t challenge[32], unsigned long rounds);
*/
bool verify(const uint8_t challenge[32], unsigned long rounds, uint64_t proof);
} // namespace MIMC52
} // namespcae ZeroTier
} // namespace MIMC52
} // namespace ZeroTier
#endif

View file

@ -11,27 +11,32 @@
*/
/****/
#include <algorithm>
#include "Member.hpp"
#include "Context.hpp"
#include "Peer.hpp"
#include "Topology.hpp"
#include <algorithm>
namespace ZeroTier {
Member::Member() :
m_comRevocationThreshold(0),
m_lastPushedCredentials(0),
m_comAgreementLocalTimestamp(0),
m_comAgreementRemoteTimestamp(0)
Member::Member()
: m_comRevocationThreshold(0)
, m_lastPushedCredentials(0)
, m_comAgreementLocalTimestamp(0)
, m_comAgreementRemoteTimestamp(0)
{
}
void Member::pushCredentials(const Context &ctx, const CallContext &cc, const SharedPtr< Peer > &to, const NetworkConfig &nconf)
void Member::pushCredentials(
const Context& ctx,
const CallContext& cc,
const SharedPtr<Peer>& to,
const NetworkConfig& nconf)
{
if (!nconf.com) // sanity check
return;
if (! nconf.com) // sanity check
return;
#if 0
SharedPtr<Buf> outp(new Buf());
@ -112,147 +117,242 @@ void Member::pushCredentials(const Context &ctx, const CallContext &cc, const Sh
}
#endif
m_lastPushedCredentials = cc.ticks;
m_lastPushedCredentials = cc.ticks;
}
void Member::clean(const NetworkConfig &nconf)
void Member::clean(const NetworkConfig& nconf)
{
m_cleanCredImpl< TagCredential >(nconf, m_remoteTags);
m_cleanCredImpl< CapabilityCredential >(nconf, m_remoteCaps);
m_cleanCredImpl< OwnershipCredential >(nconf, m_remoteCoos);
m_cleanCredImpl<TagCredential>(nconf, m_remoteTags);
m_cleanCredImpl<CapabilityCredential>(nconf, m_remoteCaps);
m_cleanCredImpl<OwnershipCredential>(nconf, m_remoteCoos);
}
Member::AddCredentialResult Member::addCredential(
const Context &ctx,
const CallContext &cc,
const Identity &sourcePeerIdentity,
const NetworkConfig &nconf,
const MembershipCredential &com)
const Context& ctx,
const CallContext& cc,
const Identity& sourcePeerIdentity,
const NetworkConfig& nconf,
const MembershipCredential& com)
{
const int64_t newts = com.timestamp();
if (newts <= m_comRevocationThreshold) {
ctx.t->credentialRejected(cc, 0xd9992121, com.networkId(), sourcePeerIdentity, com.id(), com.timestamp(), ZT_CREDENTIAL_TYPE_COM, ZT_TRACE_CREDENTIAL_REJECTION_REASON_REVOKED);
return ADD_REJECTED;
}
const int64_t newts = com.timestamp();
if (newts <= m_comRevocationThreshold) {
ctx.t->credentialRejected(
cc,
0xd9992121,
com.networkId(),
sourcePeerIdentity,
com.id(),
com.timestamp(),
ZT_CREDENTIAL_TYPE_COM,
ZT_TRACE_CREDENTIAL_REJECTION_REASON_REVOKED);
return ADD_REJECTED;
}
const int64_t oldts = m_com.timestamp();
if (newts < oldts) {
ctx.t->credentialRejected(cc, 0xd9928192, com.networkId(), sourcePeerIdentity, com.id(), com.timestamp(), ZT_CREDENTIAL_TYPE_COM, ZT_TRACE_CREDENTIAL_REJECTION_REASON_OLDER_THAN_LATEST);
return ADD_REJECTED;
}
if ((newts == oldts) && (m_com == com))
return ADD_ACCEPTED_REDUNDANT;
const int64_t oldts = m_com.timestamp();
if (newts < oldts) {
ctx.t->credentialRejected(
cc,
0xd9928192,
com.networkId(),
sourcePeerIdentity,
com.id(),
com.timestamp(),
ZT_CREDENTIAL_TYPE_COM,
ZT_TRACE_CREDENTIAL_REJECTION_REASON_OLDER_THAN_LATEST);
return ADD_REJECTED;
}
if ((newts == oldts) && (m_com == com))
return ADD_ACCEPTED_REDUNDANT;
switch (com.verify(ctx, cc)) {
default:
ctx.t->credentialRejected(cc, 0x0f198241, com.networkId(), sourcePeerIdentity, com.id(), com.timestamp(), ZT_CREDENTIAL_TYPE_COM, ZT_TRACE_CREDENTIAL_REJECTION_REASON_INVALID);
return Member::ADD_REJECTED;
case Credential::VERIFY_OK:
m_com = com;
return ADD_ACCEPTED_NEW;
case Credential::VERIFY_BAD_SIGNATURE:
ctx.t->credentialRejected(cc, 0xbaf0aaaa, com.networkId(), sourcePeerIdentity, com.id(), com.timestamp(), ZT_CREDENTIAL_TYPE_COM, ZT_TRACE_CREDENTIAL_REJECTION_REASON_SIGNATURE_VERIFICATION_FAILED);
return ADD_REJECTED;
case Credential::VERIFY_NEED_IDENTITY:
return ADD_DEFERRED_FOR_WHOIS;
}
switch (com.verify(ctx, cc)) {
default:
ctx.t->credentialRejected(
cc,
0x0f198241,
com.networkId(),
sourcePeerIdentity,
com.id(),
com.timestamp(),
ZT_CREDENTIAL_TYPE_COM,
ZT_TRACE_CREDENTIAL_REJECTION_REASON_INVALID);
return Member::ADD_REJECTED;
case Credential::VERIFY_OK:
m_com = com;
return ADD_ACCEPTED_NEW;
case Credential::VERIFY_BAD_SIGNATURE:
ctx.t->credentialRejected(
cc,
0xbaf0aaaa,
com.networkId(),
sourcePeerIdentity,
com.id(),
com.timestamp(),
ZT_CREDENTIAL_TYPE_COM,
ZT_TRACE_CREDENTIAL_REJECTION_REASON_SIGNATURE_VERIFICATION_FAILED);
return ADD_REJECTED;
case Credential::VERIFY_NEED_IDENTITY:
return ADD_DEFERRED_FOR_WHOIS;
}
}
// 3/5 of the credential types have identical addCredential() code
template< typename C >
template <typename C>
static ZT_INLINE Member::AddCredentialResult _addCredImpl(
Map< uint32_t, C > &remoteCreds,
const Map< uint64_t, int64_t > &revocations,
const Context &ctx,
const CallContext &cc,
const Identity &sourcePeerIdentity,
const NetworkConfig &nconf,
const C &cred)
Map<uint32_t, C>& remoteCreds,
const Map<uint64_t, int64_t>& revocations,
const Context& ctx,
const CallContext& cc,
const Identity& sourcePeerIdentity,
const NetworkConfig& nconf,
const C& cred)
{
typename Map< uint32_t, C >::const_iterator rc(remoteCreds.find(cred.id()));
if (rc != remoteCreds.end()) {
if (rc->second.revision() > cred.revision()) {
ctx.t->credentialRejected(cc, 0x40000001, nconf.networkId, sourcePeerIdentity, cred.id(), cred.revision(), C::credentialType(), ZT_TRACE_CREDENTIAL_REJECTION_REASON_OLDER_THAN_LATEST);
return Member::ADD_REJECTED;
}
if (rc->second == cred)
return Member::ADD_ACCEPTED_REDUNDANT;
}
typename Map<uint32_t, C>::const_iterator rc(remoteCreds.find(cred.id()));
if (rc != remoteCreds.end()) {
if (rc->second.revision() > cred.revision()) {
ctx.t->credentialRejected(
cc,
0x40000001,
nconf.networkId,
sourcePeerIdentity,
cred.id(),
cred.revision(),
C::credentialType(),
ZT_TRACE_CREDENTIAL_REJECTION_REASON_OLDER_THAN_LATEST);
return Member::ADD_REJECTED;
}
if (rc->second == cred)
return Member::ADD_ACCEPTED_REDUNDANT;
}
typename Map< uint64_t, int64_t >::const_iterator rt(revocations.find(Member::credentialKey(C::credentialType(), cred.id())));
if ((rt != revocations.end()) && (rt->second >= cred.revision())) {
ctx.t->credentialRejected(cc, 0x24248124, nconf.networkId, sourcePeerIdentity, cred.id(), cred.revision(), C::credentialType(), ZT_TRACE_CREDENTIAL_REJECTION_REASON_REVOKED);
return Member::ADD_REJECTED;
}
typename Map<uint64_t, int64_t>::const_iterator rt(
revocations.find(Member::credentialKey(C::credentialType(), cred.id())));
if ((rt != revocations.end()) && (rt->second >= cred.revision())) {
ctx.t->credentialRejected(
cc,
0x24248124,
nconf.networkId,
sourcePeerIdentity,
cred.id(),
cred.revision(),
C::credentialType(),
ZT_TRACE_CREDENTIAL_REJECTION_REASON_REVOKED);
return Member::ADD_REJECTED;
}
switch (cred.verify(ctx, cc)) {
default:
ctx.t->credentialRejected(cc, 0x01feba012, nconf.networkId, sourcePeerIdentity, cred.id(), cred.revision(), C::credentialType(), ZT_TRACE_CREDENTIAL_REJECTION_REASON_INVALID);
return Member::ADD_REJECTED;
case 0:
if (rc == remoteCreds.end())
remoteCreds[cred.id()] = cred;
return Member::ADD_ACCEPTED_NEW;
case 1:
return Member::ADD_DEFERRED_FOR_WHOIS;
}
switch (cred.verify(ctx, cc)) {
default:
ctx.t->credentialRejected(
cc,
0x01feba012,
nconf.networkId,
sourcePeerIdentity,
cred.id(),
cred.revision(),
C::credentialType(),
ZT_TRACE_CREDENTIAL_REJECTION_REASON_INVALID);
return Member::ADD_REJECTED;
case 0:
if (rc == remoteCreds.end())
remoteCreds[cred.id()] = cred;
return Member::ADD_ACCEPTED_NEW;
case 1:
return Member::ADD_DEFERRED_FOR_WHOIS;
}
}
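_addCredImpl() folds the shared policy (reject anything older than what is already held, reject anything at or below a recorded revocation threshold, then verify the signature) into one template so the tag, capability and ownership wrappers each stay a one-line call. A simplified standalone sketch of that shape; the names and types here are illustrative, and the signature verification and trace reporting of the real code are omitted:

// Illustrative "one template implementation, thin per-type wrappers" pattern.
#include <cstdint>
#include <cstdio>
#include <map>

enum class AddResult { Rejected, AcceptedNew, AcceptedRedundant };

struct ToyCred {
    uint32_t id;
    int64_t revision;
};

template <typename C>
static AddResult addCredGeneric(std::map<uint32_t, C>& store, const std::map<uint64_t, int64_t>& revocations,
                                uint64_t typeTag, const C& cred)
{
    auto existing = store.find(cred.id);
    if (existing != store.end() && existing->second.revision > cred.revision)
        return AddResult::Rejected;                       // older than what we already hold
    auto rt = revocations.find((typeTag << 32U) | cred.id);
    if (rt != revocations.end() && rt->second >= cred.revision)
        return AddResult::Rejected;                       // revoked at or beyond this revision
    const bool isNew = (existing == store.end() || existing->second.revision < cred.revision);
    store[cred.id] = cred;
    return isNew ? AddResult::AcceptedNew : AddResult::AcceptedRedundant;
}

int main()
{
    std::map<uint32_t, ToyCred> tags;
    std::map<uint64_t, int64_t> revocations;
    std::printf("%d\n", (int)addCredGeneric(tags, revocations, 1, ToyCred{ 7, 100 })); // 1: accepted as new
    std::printf("%d\n", (int)addCredGeneric(tags, revocations, 1, ToyCred{ 7, 50 }));  // 0: rejected as older
    return 0;
}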
Member::AddCredentialResult Member::addCredential(const Context &ctx, const CallContext &cc, const Identity &sourcePeerIdentity, const NetworkConfig &nconf, const TagCredential &tag)
{ return _addCredImpl< TagCredential >(m_remoteTags, m_revocations, ctx, cc, sourcePeerIdentity, nconf, tag); }
Member::AddCredentialResult Member::addCredential(const Context &ctx, const CallContext &cc, const Identity &sourcePeerIdentity, const NetworkConfig &nconf, const CapabilityCredential &cap)
{ return _addCredImpl< CapabilityCredential >(m_remoteCaps, m_revocations, ctx, cc, sourcePeerIdentity, nconf, cap); }
Member::AddCredentialResult Member::addCredential(const Context &ctx, const CallContext &cc, const Identity &sourcePeerIdentity, const NetworkConfig &nconf, const OwnershipCredential &coo)
{ return _addCredImpl< OwnershipCredential >(m_remoteCoos, m_revocations, ctx, cc, sourcePeerIdentity, nconf, coo); }
Member::AddCredentialResult Member::addCredential(const Context &ctx, const CallContext &cc, const Identity &sourcePeerIdentity, const NetworkConfig &nconf, const RevocationCredential &rev)
Member::AddCredentialResult Member::addCredential(
const Context& ctx,
const CallContext& cc,
const Identity& sourcePeerIdentity,
const NetworkConfig& nconf,
const TagCredential& tag)
{
int64_t *rt;
switch (rev.verify(ctx, cc)) {
default:
ctx.t->credentialRejected(cc, 0x938ff009, nconf.networkId, sourcePeerIdentity, rev.id(), 0, ZT_CREDENTIAL_TYPE_REVOCATION, ZT_TRACE_CREDENTIAL_REJECTION_REASON_INVALID);
return ADD_REJECTED;
case 0: {
const ZT_CredentialType ct = rev.typeBeingRevoked();
switch (ct) {
case ZT_CREDENTIAL_TYPE_COM:
if (rev.threshold() > m_comRevocationThreshold) {
m_comRevocationThreshold = rev.threshold();
return ADD_ACCEPTED_NEW;
}
return ADD_ACCEPTED_REDUNDANT;
case ZT_CREDENTIAL_TYPE_CAPABILITY:
case ZT_CREDENTIAL_TYPE_TAG:
case ZT_CREDENTIAL_TYPE_COO:
rt = &(m_revocations[credentialKey(ct, rev.credentialId())]);
if (*rt < rev.threshold()) {
*rt = rev.threshold();
m_comRevocationThreshold = rev.threshold();
return ADD_ACCEPTED_NEW;
}
return ADD_ACCEPTED_REDUNDANT;
default:
ctx.t->credentialRejected(cc, 0x0bbbb1a4, nconf.networkId, sourcePeerIdentity, rev.id(), 0, ZT_CREDENTIAL_TYPE_REVOCATION, ZT_TRACE_CREDENTIAL_REJECTION_REASON_INVALID);
return ADD_REJECTED;
}
}
case 1:
return ADD_DEFERRED_FOR_WHOIS;
}
return _addCredImpl<TagCredential>(m_remoteTags, m_revocations, ctx, cc, sourcePeerIdentity, nconf, tag);
}
bool Member::m_isUnspoofableAddress(const NetworkConfig &nconf, const InetAddress &ip) const noexcept
Member::AddCredentialResult Member::addCredential(
const Context& ctx,
const CallContext& cc,
const Identity& sourcePeerIdentity,
const NetworkConfig& nconf,
const CapabilityCredential& cap)
{
return (
ip.isV6() &&
nconf.ndpEmulation() &&
(
(ip == InetAddress::makeIpv66plane(nconf.networkId, m_com.issuedTo().address)) ||
(ip == InetAddress::makeIpv6rfc4193(nconf.networkId, m_com.issuedTo().address))
)
);
return _addCredImpl<CapabilityCredential>(m_remoteCaps, m_revocations, ctx, cc, sourcePeerIdentity, nconf, cap);
}
} // namespace ZeroTier
Member::AddCredentialResult Member::addCredential(
const Context& ctx,
const CallContext& cc,
const Identity& sourcePeerIdentity,
const NetworkConfig& nconf,
const OwnershipCredential& coo)
{
return _addCredImpl<OwnershipCredential>(m_remoteCoos, m_revocations, ctx, cc, sourcePeerIdentity, nconf, coo);
}
Member::AddCredentialResult Member::addCredential(
const Context& ctx,
const CallContext& cc,
const Identity& sourcePeerIdentity,
const NetworkConfig& nconf,
const RevocationCredential& rev)
{
int64_t* rt;
switch (rev.verify(ctx, cc)) {
default:
ctx.t->credentialRejected(
cc,
0x938ff009,
nconf.networkId,
sourcePeerIdentity,
rev.id(),
0,
ZT_CREDENTIAL_TYPE_REVOCATION,
ZT_TRACE_CREDENTIAL_REJECTION_REASON_INVALID);
return ADD_REJECTED;
case 0: {
const ZT_CredentialType ct = rev.typeBeingRevoked();
switch (ct) {
case ZT_CREDENTIAL_TYPE_COM:
if (rev.threshold() > m_comRevocationThreshold) {
m_comRevocationThreshold = rev.threshold();
return ADD_ACCEPTED_NEW;
}
return ADD_ACCEPTED_REDUNDANT;
case ZT_CREDENTIAL_TYPE_CAPABILITY:
case ZT_CREDENTIAL_TYPE_TAG:
case ZT_CREDENTIAL_TYPE_COO:
rt = &(m_revocations[credentialKey(ct, rev.credentialId())]);
if (*rt < rev.threshold()) {
*rt = rev.threshold();
m_comRevocationThreshold = rev.threshold();
return ADD_ACCEPTED_NEW;
}
return ADD_ACCEPTED_REDUNDANT;
default:
ctx.t->credentialRejected(
cc,
0x0bbbb1a4,
nconf.networkId,
sourcePeerIdentity,
rev.id(),
0,
ZT_CREDENTIAL_TYPE_REVOCATION,
ZT_TRACE_CREDENTIAL_REJECTION_REASON_INVALID);
return ADD_REJECTED;
}
}
case 1:
return ADD_DEFERRED_FOR_WHOIS;
}
}
bool Member::m_isUnspoofableAddress(const NetworkConfig& nconf, const InetAddress& ip) const noexcept
{
return (
ip.isV6() && nconf.ndpEmulation()
&& ((ip == InetAddress::makeIpv66plane(nconf.networkId, m_com.issuedTo().address))
|| (ip == InetAddress::makeIpv6rfc4193(nconf.networkId, m_com.issuedTo().address))));
}
} // namespace ZeroTier

View file

@ -14,14 +14,14 @@
#ifndef ZT_MEMBERSHIP_HPP
#define ZT_MEMBERSHIP_HPP
#include "Constants.hpp"
#include "Credential.hpp"
#include "Containers.hpp"
#include "MembershipCredential.hpp"
#include "CapabilityCredential.hpp"
#include "TagCredential.hpp"
#include "RevocationCredential.hpp"
#include "Constants.hpp"
#include "Containers.hpp"
#include "Credential.hpp"
#include "MembershipCredential.hpp"
#include "NetworkConfig.hpp"
#include "RevocationCredential.hpp"
#include "TagCredential.hpp"
namespace ZeroTier {
@ -36,200 +36,230 @@ class Network;
*
* This class is not thread safe. It must be locked externally.
*/
class Member
{
public:
enum AddCredentialResult
{
ADD_REJECTED,
ADD_ACCEPTED_NEW,
ADD_ACCEPTED_REDUNDANT,
ADD_DEFERRED_FOR_WHOIS
};
class Member {
public:
enum AddCredentialResult { ADD_REJECTED, ADD_ACCEPTED_NEW, ADD_ACCEPTED_REDUNDANT, ADD_DEFERRED_FOR_WHOIS };
Member();
Member();
/**
* Send COM and other credentials to this peer
*
* @param to Peer identity
* @param nconf My network config
*/
void pushCredentials(const Context &ctx, const CallContext &cc, const SharedPtr< Peer > &to, const NetworkConfig &nconf);
/**
* Send COM and other credentials to this peer
*
* @param to Peer identity
* @param nconf My network config
*/
void
pushCredentials(const Context& ctx, const CallContext& cc, const SharedPtr<Peer>& to, const NetworkConfig& nconf);
/**
* @return Time we last pushed credentials to this member
*/
ZT_INLINE int64_t lastPushedCredentials() const noexcept
{ return m_lastPushedCredentials; }
/**
* @return Time we last pushed credentials to this member
*/
ZT_INLINE int64_t lastPushedCredentials() const noexcept
{
return m_lastPushedCredentials;
}
/**
* Get a remote member's tag (if we have it)
*
* @param nconf Network configuration
* @param id Tag ID
* @return Pointer to tag or NULL if not found
*/
ZT_INLINE const TagCredential *getTag(const NetworkConfig &nconf, const uint32_t id) const noexcept
{
Map< uint32_t, TagCredential >::const_iterator t(m_remoteTags.find(id));
return (((t != m_remoteTags.end()) && (m_isCredentialTimestampValid(nconf, t->second))) ? &(t->second) : (TagCredential *)0);
}
/**
* Get a remote member's tag (if we have it)
*
* @param nconf Network configuration
* @param id Tag ID
* @return Pointer to tag or NULL if not found
*/
ZT_INLINE const TagCredential* getTag(const NetworkConfig& nconf, const uint32_t id) const noexcept
{
Map<uint32_t, TagCredential>::const_iterator t(m_remoteTags.find(id));
return (
((t != m_remoteTags.end()) && (m_isCredentialTimestampValid(nconf, t->second))) ? &(t->second)
: (TagCredential*)0);
}
/**
* Clean internal databases of stale entries
*
* @param nconf Current network configuration
*/
void clean(const NetworkConfig &nconf);
/**
* Clean internal databases of stale entries
*
* @param nconf Current network configuration
*/
void clean(const NetworkConfig& nconf);
/**
* Generates a key for internal use in indexing credentials by type and credential ID
*/
static ZT_INLINE uint64_t credentialKey(const ZT_CredentialType &t, const uint32_t i) noexcept
{ return (((uint64_t)t << 32U) | (uint64_t)i); }
/**
* Generates a key for internal use in indexing credentials by type and credential ID
*/
static ZT_INLINE uint64_t credentialKey(const ZT_CredentialType& t, const uint32_t i) noexcept
{
return (((uint64_t)t << 32U) | (uint64_t)i);
}
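Because credential IDs are 32-bit and the type enum is small, the packed key cannot collide: the type occupies the upper half of the 64-bit key and the ID the lower half. A quick standalone check of the packing (credentialKeySketch is just a local copy for illustration):

// Standalone check of the (type, id) -> 64-bit key packing.
#include <cstdint>
#include <cstdio>

static constexpr uint64_t credentialKeySketch(uint32_t type, uint32_t id) noexcept
{
    return (((uint64_t)type << 32U) | (uint64_t)id);
}

static_assert(credentialKeySketch(1, 0x00000007U) == 0x0000000100000007ULL, "packing check");

int main()
{
    std::printf("0x%016llx\n", (unsigned long long)credentialKeySketch(3, 42)); // 0x000000030000002a
    return 0;
}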
/**
* Check whether the peer represented by this Membership owns a given address
*
* @tparam Type of resource: InetAddress or MAC
* @param nconf Our network config
* @param r Resource to check
* @return True if this peer has a certificate of ownership for the given resource
*/
template< typename T >
ZT_INLINE bool peerOwnsAddress(const NetworkConfig &nconf, const T &r) const noexcept
{
if (m_isUnspoofableAddress(nconf, r))
return true;
for (Map< uint32_t, OwnershipCredential >::const_iterator i(m_remoteCoos.begin()); i != m_remoteCoos.end(); ++i) {
if (m_isCredentialTimestampValid(nconf, i->second) && (i->second.owns(r)))
return true;
}
return false;
}
/**
* Check whether the peer represented by this Membership owns a given address
*
* @tparam Type of resource: InetAddress or MAC
* @param nconf Our network config
* @param r Resource to check
* @return True if this peer has a certificate of ownership for the given resource
*/
template <typename T> ZT_INLINE bool peerOwnsAddress(const NetworkConfig& nconf, const T& r) const noexcept
{
if (m_isUnspoofableAddress(nconf, r))
return true;
for (Map<uint32_t, OwnershipCredential>::const_iterator i(m_remoteCoos.begin()); i != m_remoteCoos.end(); ++i) {
if (m_isCredentialTimestampValid(nconf, i->second) && (i->second.owns(r)))
return true;
}
return false;
}
/**
* Check if our local COM agrees with theirs, with possible memo-ization.
*
* @param localCom
*/
ZT_INLINE bool certificateOfMembershipAgress(const MembershipCredential &localCom, const Identity &remoteIdentity)
{
if ((m_comAgreementLocalTimestamp == localCom.timestamp()) && (m_comAgreementRemoteTimestamp == m_com.timestamp()))
return true;
if (m_com.agreesWith(localCom)) {
// SECURITY: newer network controllers embed the full fingerprint into the COM. If we are
// joined to a network managed by one of these, our COM will contain one. If it's present
// we compare vs the other and require them to match. If our COM does not contain a full
// identity fingerprint we compare by address only, which is a legacy mode supported for
// old network controllers. Note that this is safe because the controller issues us our COM
// and in so doing indicates if it's new or old. However this will go away after a while
// once we can be pretty sure there are no ancient controllers around.
if (localCom.issuedTo().haveHash()) {
if (localCom.issuedTo() != m_com.issuedTo())
return false;
} else {
// LEGACY: support networks run by old controllers.
if (localCom.issuedTo().address != m_com.issuedTo().address)
return false;
}
/**
* Check if our local COM agrees with theirs, with possible memo-ization.
*
* @param localCom
*/
ZT_INLINE bool certificateOfMembershipAgress(const MembershipCredential& localCom, const Identity& remoteIdentity)
{
if ((m_comAgreementLocalTimestamp == localCom.timestamp())
&& (m_comAgreementRemoteTimestamp == m_com.timestamp()))
return true;
if (m_com.agreesWith(localCom)) {
// SECURITY: newer network controllers embed the full fingerprint into the COM. If we are
// joined to a network managed by one of these, our COM will contain one. If it's present
// we compare vs the other and require them to match. If our COM does not contain a full
// identity fingerprint we compare by address only, which is a legacy mode supported for
// old network controllers. Note that this is safe because the controller issues us our COM
// and in so doing indicates if it's new or old. However this will go away after a while
// once we can be pretty sure there are no ancient controllers around.
if (localCom.issuedTo().haveHash()) {
if (localCom.issuedTo() != m_com.issuedTo())
return false;
}
else {
// LEGACY: support networks run by old controllers.
if (localCom.issuedTo().address != m_com.issuedTo().address)
return false;
}
// Remember that these two COMs agreed. If any are updated this is invalidated and a full
// agreement check will be done again.
m_comAgreementLocalTimestamp = localCom.timestamp();
m_comAgreementRemoteTimestamp = m_com.timestamp();
// Remember that these two COMs agreed. If any are updated this is invalidated and a full
// agreement check will be done again.
m_comAgreementLocalTimestamp = localCom.timestamp();
m_comAgreementRemoteTimestamp = m_com.timestamp();
return true;
}
return false;
}
return true;
}
return false;
}
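The memo-ization above caches only the pair of COM timestamps that last agreed; as long as neither side's COM is replaced, the expensive qualifier comparison is skipped entirely. A standalone sketch of that caching shape with simplified names (the stand-in check below is not the real qualifier comparison):

// Illustrative timestamp-pair memoization around an expensive agreement check.
#include <cstdint>
#include <cstdio>

struct AgreementMemo {
    int64_t localTs = 0;
    int64_t remoteTs = 0;

    bool check(int64_t localComTs, int64_t remoteComTs, bool (*fullCheck)(int64_t, int64_t))
    {
        if (localComTs == localTs && remoteComTs == remoteTs)
            return true;                       // same COMs as last time: skip the full check
        if (fullCheck(localComTs, remoteComTs)) {
            localTs = localComTs;              // cache the pair that agreed
            remoteTs = remoteComTs;
            return true;
        }
        return false;
    }
};

static bool slowAgreementCheck(int64_t a, int64_t b)
{
    std::puts("full check ran");
    return (a > b ? a - b : b - a) <= 100;     // stand-in for the real qualifier comparison
}

int main()
{
    AgreementMemo memo;
    memo.check(1000, 1050, slowAgreementCheck);   // runs the full check and caches 1000/1050
    memo.check(1000, 1050, slowAgreementCheck);   // memo hit: nothing printed
    return 0;
}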
AddCredentialResult addCredential(const Context &ctx, const CallContext &cc, const Identity &sourcePeerIdentity, const NetworkConfig &nconf, const MembershipCredential &com);
AddCredentialResult addCredential(const Context &ctx, const CallContext &cc, const Identity &sourcePeerIdentity, const NetworkConfig &nconf, const TagCredential &tag);
AddCredentialResult addCredential(const Context &ctx, const CallContext &cc, const Identity &sourcePeerIdentity, const NetworkConfig &nconf, const CapabilityCredential &cap);
AddCredentialResult addCredential(const Context &ctx, const CallContext &cc, const Identity &sourcePeerIdentity, const NetworkConfig &nconf, const OwnershipCredential &coo);
AddCredentialResult addCredential(const Context &ctx, const CallContext &cc, const Identity &sourcePeerIdentity, const NetworkConfig &nconf, const RevocationCredential &rev);
AddCredentialResult addCredential(
const Context& ctx,
const CallContext& cc,
const Identity& sourcePeerIdentity,
const NetworkConfig& nconf,
const MembershipCredential& com);
AddCredentialResult addCredential(
const Context& ctx,
const CallContext& cc,
const Identity& sourcePeerIdentity,
const NetworkConfig& nconf,
const TagCredential& tag);
AddCredentialResult addCredential(
const Context& ctx,
const CallContext& cc,
const Identity& sourcePeerIdentity,
const NetworkConfig& nconf,
const CapabilityCredential& cap);
AddCredentialResult addCredential(
const Context& ctx,
const CallContext& cc,
const Identity& sourcePeerIdentity,
const NetworkConfig& nconf,
const OwnershipCredential& coo);
AddCredentialResult addCredential(
const Context& ctx,
const CallContext& cc,
const Identity& sourcePeerIdentity,
const NetworkConfig& nconf,
const RevocationCredential& rev);
private:
// This returns true if a resource is an IPv6 NDP-emulated address. These embed the ZT
// address of the peer and therefore cannot be spoofed, causing peerOwnsAddress() to
// always return true for them. A certificate is not required for these.
ZT_INLINE bool m_isUnspoofableAddress(const NetworkConfig &nconf, const MAC &m) const noexcept
{ return false; }
private:
// This returns true if a resource is an IPv6 NDP-emulated address. These embed the ZT
// address of the peer and therefore cannot be spoofed, causing peerOwnsAddress() to
// always return true for them. A certificate is not required for these.
ZT_INLINE bool m_isUnspoofableAddress(const NetworkConfig& nconf, const MAC& m) const noexcept
{
return false;
}
bool m_isUnspoofableAddress(const NetworkConfig &nconf, const InetAddress &ip) const noexcept;
bool m_isUnspoofableAddress(const NetworkConfig& nconf, const InetAddress& ip) const noexcept;
// This compares the remote credential's timestamp to the timestamp in our network config
// plus or minus the permitted maximum timestamp delta.
template< typename C >
ZT_INLINE bool m_isCredentialTimestampValid(const NetworkConfig &nconf, const C &remoteCredential) const noexcept
{
const int64_t ts = remoteCredential.revision();
if (((ts >= nconf.timestamp) ? (ts - nconf.timestamp) : (nconf.timestamp - ts)) <= nconf.credentialTimeMaxDelta) {
Map< uint64_t, int64_t >::const_iterator threshold(m_revocations.find(credentialKey(C::credentialType(), remoteCredential.id())));
return ((threshold == m_revocations.end()) || (ts > threshold->second));
}
return false;
}
// This compares the remote credential's timestamp to the timestamp in our network config
// plus or minus the permitted maximum timestamp delta.
template <typename C>
ZT_INLINE bool m_isCredentialTimestampValid(const NetworkConfig& nconf, const C& remoteCredential) const noexcept
{
const int64_t ts = remoteCredential.revision();
if (((ts >= nconf.timestamp) ? (ts - nconf.timestamp) : (nconf.timestamp - ts))
<= nconf.credentialTimeMaxDelta) {
Map<uint64_t, int64_t>::const_iterator threshold(
m_revocations.find(credentialKey(C::credentialType(), remoteCredential.id())));
return ((threshold == m_revocations.end()) || (ts > threshold->second));
}
return false;
}
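The validity test is a window check: the credential's revision must lie within credentialTimeMaxDelta of the network config's timestamp, with the absolute difference taken by an explicit branch rather than llabs(), and any recorded revocation threshold for that credential must be below it. A minimal standalone sketch of just the window part:

// Standalone sketch of the timestamp window test.
#include <cstdint>
#include <cstdio>

static bool withinDelta(int64_t credentialTs, int64_t configTs, int64_t maxDelta) noexcept
{
    const int64_t diff = (credentialTs >= configTs) ? (credentialTs - configTs) : (configTs - credentialTs);
    return diff <= maxDelta;
}

int main()
{
    std::printf("%d\n", (int)withinDelta(1000, 1500, 600));  // 1: 500 <= 600
    std::printf("%d\n", (int)withinDelta(1000, 1701, 600));  // 0: 701 >  600
    return 0;
}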
template< typename C >
ZT_INLINE void m_cleanCredImpl(const NetworkConfig &nconf, Map< uint32_t, C > &remoteCreds)
{
for (typename Map< uint32_t, C >::iterator i(remoteCreds.begin()); i != remoteCreds.end();) {
if (!m_isCredentialTimestampValid(nconf, i->second))
remoteCreds.erase(i++);
else ++i;
}
}
template <typename C> ZT_INLINE void m_cleanCredImpl(const NetworkConfig& nconf, Map<uint32_t, C>& remoteCreds)
{
for (typename Map<uint32_t, C>::iterator i(remoteCreds.begin()); i != remoteCreds.end();) {
if (! m_isCredentialTimestampValid(nconf, i->second))
remoteCreds.erase(i++);
else
++i;
}
}
// Revocation threshold for COM or 0 if none
int64_t m_comRevocationThreshold;
// Revocation threshold for COM or 0 if none
int64_t m_comRevocationThreshold;
// Time we last pushed credentials
int64_t m_lastPushedCredentials;
// Time we last pushed credentials
int64_t m_lastPushedCredentials;
// COM timestamps at which we last agreed-- used to memo-ize agreement and avoid having to recompute constantly.
int64_t m_comAgreementLocalTimestamp, m_comAgreementRemoteTimestamp;
// COM timestamps at which we last agreed-- used to memo-ize agreement and avoid having to recompute constantly.
int64_t m_comAgreementLocalTimestamp, m_comAgreementRemoteTimestamp;
// Remote member's latest network COM
MembershipCredential m_com;
// Remote member's latest network COM
MembershipCredential m_com;
// Revocations by credentialKey()
Map< uint64_t, int64_t > m_revocations;
// Revocations by credentialKey()
Map<uint64_t, int64_t> m_revocations;
// Remote credentials that we have received from this member (and that are valid)
Map< uint32_t, TagCredential > m_remoteTags;
Map< uint32_t, CapabilityCredential > m_remoteCaps;
Map< uint32_t, OwnershipCredential > m_remoteCoos;
// Remote credentials that we have received from this member (and that are valid)
Map<uint32_t, TagCredential> m_remoteTags;
Map<uint32_t, CapabilityCredential> m_remoteCaps;
Map<uint32_t, OwnershipCredential> m_remoteCoos;
public:
class CapabilityIterator
{
public:
ZT_INLINE CapabilityIterator(Member &m, const NetworkConfig &nconf) noexcept:
m_hti(m.m_remoteCaps.begin()),
m_parent(m),
m_nconf(nconf)
{}
public:
class CapabilityIterator {
public:
ZT_INLINE CapabilityIterator(Member& m, const NetworkConfig& nconf) noexcept
: m_hti(m.m_remoteCaps.begin())
, m_parent(m)
, m_nconf(nconf)
{
}
ZT_INLINE CapabilityCredential *next() noexcept
{
while (m_hti != m_parent.m_remoteCaps.end()) {
Map< uint32_t, CapabilityCredential >::iterator i(m_hti++);
if (m_parent.m_isCredentialTimestampValid(m_nconf, i->second))
return &(i->second);
}
return nullptr;
}
ZT_INLINE CapabilityCredential* next() noexcept
{
while (m_hti != m_parent.m_remoteCaps.end()) {
Map<uint32_t, CapabilityCredential>::iterator i(m_hti++);
if (m_parent.m_isCredentialTimestampValid(m_nconf, i->second))
return &(i->second);
}
return nullptr;
}
private:
Map< uint32_t, CapabilityCredential >::iterator m_hti;
Member &m_parent;
const NetworkConfig &m_nconf;
};
private:
Map<uint32_t, CapabilityCredential>::iterator m_hti;
Member& m_parent;
const NetworkConfig& m_nconf;
};
};
} // namespace ZeroTier
} // namespace ZeroTier
#endif

View file

@ -15,275 +15,304 @@
namespace ZeroTier {
MembershipCredential::MembershipCredential(const int64_t timestamp, const int64_t timestampMaxDelta, const uint64_t nwid, const Identity &issuedTo) noexcept: // NOLINT(cppcoreguidelines-pro-type-member-init,hicpp-member-init)
m_timestamp(timestamp),
m_timestampMaxDelta(timestampMaxDelta),
m_networkId(nwid),
m_issuedTo(issuedTo.fingerprint()),
m_signatureLength(0)
{}
bool MembershipCredential::agreesWith(const MembershipCredential &other) const noexcept
MembershipCredential::MembershipCredential(
const int64_t timestamp,
const int64_t timestampMaxDelta,
const uint64_t nwid,
const Identity& issuedTo) noexcept
: // NOLINT(cppcoreguidelines-pro-type-member-init,hicpp-member-init)
m_timestamp(timestamp)
, m_timestampMaxDelta(timestampMaxDelta)
, m_networkId(nwid)
, m_issuedTo(issuedTo.fingerprint())
, m_signatureLength(0)
{
// NOTE: we always do explicit absolute value with an if() since llabs() can have overflow
// conditions that could introduce a vulnerability.
if (other.m_timestamp > m_timestamp) {
if ((other.m_timestamp - m_timestamp) > std::min(m_timestampMaxDelta, other.m_timestampMaxDelta))
return false;
} else {
if ((m_timestamp - other.m_timestamp) > std::min(m_timestampMaxDelta, other.m_timestampMaxDelta))
return false;
}
// us <> them
for (FCV<p_Qualifier, ZT_MEMBERSHIP_CREDENTIAL_MAX_ADDITIONAL_QUALIFIERS>::const_iterator i(m_additionalQualifiers.begin()); i != m_additionalQualifiers.end(); ++i) {
if (i->delta != 0xffffffffffffffffULL) {
const uint64_t *v2 = nullptr;
for (FCV<p_Qualifier, ZT_MEMBERSHIP_CREDENTIAL_MAX_ADDITIONAL_QUALIFIERS>::const_iterator j(other.m_additionalQualifiers.begin()); j != other.m_additionalQualifiers.end(); ++i) {
if (j->id == i->id) {
v2 = &(j->value);
break;
}
}
if (!v2)
return false;
if (*v2 > i->value) {
if ((*v2 - i->value) > i->delta)
return false;
} else {
if ((i->value - *v2) > i->delta)
return false;
}
}
}
// them <> us (we need a second pass in case they have qualifiers we don't or vice versa)
for (FCV<p_Qualifier, ZT_MEMBERSHIP_CREDENTIAL_MAX_ADDITIONAL_QUALIFIERS>::const_iterator i(other.m_additionalQualifiers.begin()); i != other.m_additionalQualifiers.end(); ++i) {
if (i->delta != 0xffffffffffffffffULL) {
const uint64_t *v2 = nullptr;
for (FCV<p_Qualifier, ZT_MEMBERSHIP_CREDENTIAL_MAX_ADDITIONAL_QUALIFIERS>::const_iterator j(m_additionalQualifiers.begin()); j != m_additionalQualifiers.end(); ++i) {
if (j->id == i->id) {
v2 = &(j->value);
break;
}
}
if (!v2)
return false;
if (*v2 > i->value) {
if ((*v2 - i->value) > i->delta)
return false;
} else {
if ((i->value - *v2) > i->delta)
return false;
}
}
}
// SECURITY: check for issued-to inequality is a sanity check. This should be impossible since elsewhere
// in the code COMs are checked to ensure that they do in fact belong to their issued-to identities.
return (other.m_networkId == m_networkId) && (m_networkId != 0) && (other.m_issuedTo.address != m_issuedTo.address);
}
bool MembershipCredential::sign(const Identity &with) noexcept
bool MembershipCredential::agreesWith(const MembershipCredential& other) const noexcept
{
m_signedBy = with.address();
uint64_t buf[ZT_MEMBERSHIP_CREDENTIAL_MARSHAL_SIZE_MAX / 8];
const unsigned int bufSize = m_fillSigningBuf(buf);
m_signatureLength = with.sign(buf, bufSize, m_signature, sizeof(m_signature));
return m_signatureLength > 0;
// NOTE: we always do explicit absolute value with an if() since llabs() can have overflow
// conditions that could introduce a vulnerability.
if (other.m_timestamp > m_timestamp) {
if ((other.m_timestamp - m_timestamp) > std::min(m_timestampMaxDelta, other.m_timestampMaxDelta))
return false;
}
else {
if ((m_timestamp - other.m_timestamp) > std::min(m_timestampMaxDelta, other.m_timestampMaxDelta))
return false;
}
// us <> them
for (FCV<p_Qualifier, ZT_MEMBERSHIP_CREDENTIAL_MAX_ADDITIONAL_QUALIFIERS>::const_iterator i(
m_additionalQualifiers.begin());
i != m_additionalQualifiers.end();
++i) {
if (i->delta != 0xffffffffffffffffULL) {
const uint64_t* v2 = nullptr;
for (FCV<p_Qualifier, ZT_MEMBERSHIP_CREDENTIAL_MAX_ADDITIONAL_QUALIFIERS>::const_iterator j(
other.m_additionalQualifiers.begin());
j != other.m_additionalQualifiers.end();
++i) {
if (j->id == i->id) {
v2 = &(j->value);
break;
}
}
if (! v2)
return false;
if (*v2 > i->value) {
if ((*v2 - i->value) > i->delta)
return false;
}
else {
if ((i->value - *v2) > i->delta)
return false;
}
}
}
// them <> us (we need a second pass in case they have qualifiers we don't or vice versa)
for (FCV<p_Qualifier, ZT_MEMBERSHIP_CREDENTIAL_MAX_ADDITIONAL_QUALIFIERS>::const_iterator i(
other.m_additionalQualifiers.begin());
i != other.m_additionalQualifiers.end();
++i) {
if (i->delta != 0xffffffffffffffffULL) {
const uint64_t* v2 = nullptr;
for (FCV<p_Qualifier, ZT_MEMBERSHIP_CREDENTIAL_MAX_ADDITIONAL_QUALIFIERS>::const_iterator j(
m_additionalQualifiers.begin());
j != m_additionalQualifiers.end();
++i) {
if (j->id == i->id) {
v2 = &(j->value);
break;
}
}
if (! v2)
return false;
if (*v2 > i->value) {
if ((*v2 - i->value) > i->delta)
return false;
}
else {
if ((i->value - *v2) > i->delta)
return false;
}
}
}
// SECURITY: check for issued-to inequality is a sanity check. This should be impossible since elsewhere
// in the code COMs are checked to ensure that they do in fact belong to their issued-to identities.
return (other.m_networkId == m_networkId) && (m_networkId != 0) && (other.m_issuedTo.address != m_issuedTo.address);
}
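agreesWith() above applies the same rule in both directions: every qualifier tuple carrying a finite delta must have a counterpart with the same ID on the other side whose value differs by no more than that delta, while informational tuples (delta of all ones) are skipped. A simplified standalone version of that rule with illustrative types:

// Illustrative one-direction tuple-agreement check.
#include <cstdint>
#include <cstdio>
#include <vector>

struct Qualifier {
    uint64_t id;
    uint64_t value;
    uint64_t delta;   // 0xffffffffffffffff marks an informational tuple (never compared)
};

static bool qualifiersAgree(const std::vector<Qualifier>& mine, const std::vector<Qualifier>& theirs)
{
    for (const Qualifier& q : mine) {
        if (q.delta == 0xffffffffffffffffULL)
            continue;                               // informational, skip
        const uint64_t* other = nullptr;
        for (const Qualifier& t : theirs) {
            if (t.id == q.id) {
                other = &t.value;
                break;
            }
        }
        if (! other)
            return false;                           // tuple missing on their side
        const uint64_t diff = (*other > q.value) ? (*other - q.value) : (q.value - *other);
        if (diff > q.delta)
            return false;                           // outside the permitted window
    }
    return true;
}

int main()
{
    std::vector<Qualifier> mine = { { 0, 1000, 50 } };
    std::vector<Qualifier> theirs = { { 0, 1040, 50 } };
    std::printf("%d\n", (int)qualifiersAgree(mine, theirs));   // 1: difference 40 <= 50
    theirs[0].value = 1100;
    std::printf("%d\n", (int)qualifiersAgree(mine, theirs));   // 0: difference 100 > 50
    return 0;
}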
bool MembershipCredential::sign(const Identity& with) noexcept
{
m_signedBy = with.address();
uint64_t buf[ZT_MEMBERSHIP_CREDENTIAL_MARSHAL_SIZE_MAX / 8];
const unsigned int bufSize = m_fillSigningBuf(buf);
m_signatureLength = with.sign(buf, bufSize, m_signature, sizeof(m_signature));
return m_signatureLength > 0;
}
int MembershipCredential::marshal(uint8_t data[ZT_MEMBERSHIP_CREDENTIAL_MARSHAL_SIZE_MAX], const bool v2) const noexcept
{
data[0] = v2 ? 2 : 1;
data[0] = v2 ? 2 : 1;
// All formats start with the standard three qualifiers: timestamp with delta, network ID as a strict
// equality compare, and the address of the issued-to node as an informational tuple.
int p = 3;
Utils::storeBigEndian<uint64_t>(data + p, 0);
Utils::storeBigEndian<uint64_t>(data + p + 8, (uint64_t) m_timestamp);
Utils::storeBigEndian<uint64_t>(data + p + 16, (uint64_t) m_timestampMaxDelta);
Utils::storeBigEndian<uint64_t>(data + p + 24, 1);
Utils::storeBigEndian<uint64_t>(data + p + 32, m_networkId);
Utils::storeBigEndian<uint64_t>(data + p + 40, 0);
Utils::storeBigEndian<uint64_t>(data + p + 48, 2);
Utils::storeBigEndian<uint64_t>(data + p + 56, m_issuedTo.address);
Utils::storeMachineEndian< uint64_t >(data + p + 64, 0xffffffffffffffffULL);
p += 72;
// All formats start with the standard three qualifiers: timestamp with delta, network ID as a strict
// equality compare, and the address of the issued-to node as an informational tuple.
int p = 3;
Utils::storeBigEndian<uint64_t>(data + p, 0);
Utils::storeBigEndian<uint64_t>(data + p + 8, (uint64_t)m_timestamp);
Utils::storeBigEndian<uint64_t>(data + p + 16, (uint64_t)m_timestampMaxDelta);
Utils::storeBigEndian<uint64_t>(data + p + 24, 1);
Utils::storeBigEndian<uint64_t>(data + p + 32, m_networkId);
Utils::storeBigEndian<uint64_t>(data + p + 40, 0);
Utils::storeBigEndian<uint64_t>(data + p + 48, 2);
Utils::storeBigEndian<uint64_t>(data + p + 56, m_issuedTo.address);
Utils::storeMachineEndian<uint64_t>(data + p + 64, 0xffffffffffffffffULL);
p += 72;
if (v2) {
// V2 marshal format will have three tuples followed by the fingerprint hash.
Utils::storeBigEndian<uint16_t>(data + 1, 3);
Utils::copy<ZT_FINGERPRINT_HASH_SIZE>(data + p, m_issuedTo.hash);
p += 48;
} else {
// V1 marshal format must shove everything into tuples, resulting in nine.
Utils::storeBigEndian<uint16_t>(data + 1, 9);
for (int k = 0;k < 6;++k) {
Utils::storeBigEndian<uint64_t>(data + p, (uint64_t) k + 3);
Utils::storeMachineEndian< uint64_t >(data + p + 8, Utils::loadMachineEndian< uint64_t >(m_issuedTo.hash + (k * 8)));
Utils::storeMachineEndian< uint64_t >(data + p + 16, 0xffffffffffffffffULL);
p += 24;
}
}
if (v2) {
// V2 marshal format will have three tuples followed by the fingerprint hash.
Utils::storeBigEndian<uint16_t>(data + 1, 3);
Utils::copy<ZT_FINGERPRINT_HASH_SIZE>(data + p, m_issuedTo.hash);
p += 48;
}
else {
// V1 marshal format must shove everything into tuples, resulting in nine.
Utils::storeBigEndian<uint16_t>(data + 1, 9);
for (int k = 0; k < 6; ++k) {
Utils::storeBigEndian<uint64_t>(data + p, (uint64_t)k + 3);
Utils::storeMachineEndian<uint64_t>(
data + p + 8,
Utils::loadMachineEndian<uint64_t>(m_issuedTo.hash + (k * 8)));
Utils::storeMachineEndian<uint64_t>(data + p + 16, 0xffffffffffffffffULL);
p += 24;
}
}
m_signedBy.copyTo(data + p);
p += 5;
m_signedBy.copyTo(data + p);
p += 5;
if (v2) {
// V2 marshal format prefixes signatures with a 16-bit length to support future signature types.
Utils::storeBigEndian<uint16_t>(data + p, (uint16_t) m_signatureLength);
p += 2;
Utils::copy(data + p, m_signature, m_signatureLength);
p += (int)m_signatureLength;
} else {
// V1 only supports 96-byte signature fields.
Utils::copy<96>(data + p, m_signature);
p += 96;
}
if (v2) {
// V2 marshal format prefixes signatures with a 16-bit length to support future signature types.
Utils::storeBigEndian<uint16_t>(data + p, (uint16_t)m_signatureLength);
p += 2;
Utils::copy(data + p, m_signature, m_signatureLength);
p += (int)m_signatureLength;
}
else {
// V1 only supports 96-byte signature fields.
Utils::copy<96>(data + p, m_signature);
p += 96;
}
return p;
return p;
}
int MembershipCredential::unmarshal(const uint8_t *data, int len) noexcept
int MembershipCredential::unmarshal(const uint8_t* data, int len) noexcept
{
if (len < (1 + 2 + 72))
return -1;
if (len < (1 + 2 + 72))
return -1;
TriviallyCopyable::memoryZero(this);
TriviallyCopyable::memoryZero(this);
const unsigned int numq = Utils::loadBigEndian<uint16_t>(data + 1);
if ((numq < 3) || (numq > (ZT_MEMBERSHIP_CREDENTIAL_MAX_ADDITIONAL_QUALIFIERS + 3)))
return -1;
int p = 3;
for (unsigned int q = 0;q < numq;++q) {
if ((p + 24) > len)
return -1;
const uint64_t id = Utils::loadBigEndian<uint64_t>(data + p);
p += 8; // NOLINT(hicpp-use-auto,modernize-use-auto)
const uint64_t value = Utils::loadBigEndian<uint64_t>(data + p);
p += 8; // NOLINT(hicpp-use-auto,modernize-use-auto)
const uint64_t delta = Utils::loadBigEndian<uint64_t>(data + p);
p += 8; // NOLINT(hicpp-use-auto,modernize-use-auto)
switch (id) {
case 0:
m_timestamp = (int64_t) value;
m_timestampMaxDelta = (int64_t) delta;
break;
case 1:
m_networkId = value;
break;
case 2:
m_issuedTo.address = value;
break;
const unsigned int numq = Utils::loadBigEndian<uint16_t>(data + 1);
if ((numq < 3) || (numq > (ZT_MEMBERSHIP_CREDENTIAL_MAX_ADDITIONAL_QUALIFIERS + 3)))
return -1;
int p = 3;
for (unsigned int q = 0; q < numq; ++q) {
if ((p + 24) > len)
return -1;
const uint64_t id = Utils::loadBigEndian<uint64_t>(data + p);
p += 8; // NOLINT(hicpp-use-auto,modernize-use-auto)
const uint64_t value = Utils::loadBigEndian<uint64_t>(data + p);
p += 8; // NOLINT(hicpp-use-auto,modernize-use-auto)
const uint64_t delta = Utils::loadBigEndian<uint64_t>(data + p);
p += 8; // NOLINT(hicpp-use-auto,modernize-use-auto)
switch (id) {
case 0:
m_timestamp = (int64_t)value;
m_timestampMaxDelta = (int64_t)delta;
break;
case 1:
m_networkId = value;
break;
case 2:
m_issuedTo.address = value;
break;
// V1 nodes will pack the hash into qualifier tuples.
case 3:
Utils::storeBigEndian<uint64_t>(m_issuedTo.hash, value);
break;
case 4:
Utils::storeBigEndian<uint64_t>(m_issuedTo.hash + 8, value);
break;
case 5:
Utils::storeBigEndian<uint64_t>(m_issuedTo.hash + 16, value);
break;
case 6:
Utils::storeBigEndian<uint64_t>(m_issuedTo.hash + 24, value);
break;
case 7:
Utils::storeBigEndian<uint64_t>(m_issuedTo.hash + 32, value);
break;
case 8:
Utils::storeBigEndian<uint64_t>(m_issuedTo.hash + 40, value);
break;
// V1 nodes will pack the hash into qualifier tuples.
case 3:
Utils::storeBigEndian<uint64_t>(m_issuedTo.hash, value);
break;
case 4:
Utils::storeBigEndian<uint64_t>(m_issuedTo.hash + 8, value);
break;
case 5:
Utils::storeBigEndian<uint64_t>(m_issuedTo.hash + 16, value);
break;
case 6:
Utils::storeBigEndian<uint64_t>(m_issuedTo.hash + 24, value);
break;
case 7:
Utils::storeBigEndian<uint64_t>(m_issuedTo.hash + 32, value);
break;
case 8:
Utils::storeBigEndian<uint64_t>(m_issuedTo.hash + 40, value);
break;
default:
if (m_additionalQualifiers.size() >= ZT_MEMBERSHIP_CREDENTIAL_MAX_ADDITIONAL_QUALIFIERS)
return -1;
m_additionalQualifiers.push_back(p_Qualifier(id, value, delta));
break;
}
}
default:
if (m_additionalQualifiers.size() >= ZT_MEMBERSHIP_CREDENTIAL_MAX_ADDITIONAL_QUALIFIERS)
return -1;
m_additionalQualifiers.push_back(p_Qualifier(id, value, delta));
break;
}
}
std::sort(m_additionalQualifiers.begin(), m_additionalQualifiers.end());
std::sort(m_additionalQualifiers.begin(), m_additionalQualifiers.end());
if (data[0] == 1) {
if ((p + 96) > len)
return -1;
m_signatureLength = 96;
Utils::copy<96>(m_signature, data + p);
return p + 96;
} else if (data[0] == 2) {
if ((p + 48) > len)
return -1;
Utils::copy<48>(m_issuedTo.hash, data + p);
p += 48;
if ((p + 2) > len)
return -1;
m_signatureLength = Utils::loadBigEndian<uint16_t>(data + p);
if ((m_signatureLength > (unsigned int) sizeof(m_signature)) || ((p + (int) m_signatureLength) > len))
return -1;
Utils::copy(m_signature, data + p, m_signatureLength);
return p + (int) m_signatureLength;
}
if (data[0] == 1) {
if ((p + 96) > len)
return -1;
m_signatureLength = 96;
Utils::copy<96>(m_signature, data + p);
return p + 96;
}
else if (data[0] == 2) {
if ((p + 48) > len)
return -1;
Utils::copy<48>(m_issuedTo.hash, data + p);
p += 48;
if ((p + 2) > len)
return -1;
m_signatureLength = Utils::loadBigEndian<uint16_t>(data + p);
if ((m_signatureLength > (unsigned int)sizeof(m_signature)) || ((p + (int)m_signatureLength) > len))
return -1;
Utils::copy(m_signature, data + p, m_signatureLength);
return p + (int)m_signatureLength;
}
return -1;
return -1;
}
unsigned int MembershipCredential::m_fillSigningBuf(uint64_t *buf) const noexcept
unsigned int MembershipCredential::m_fillSigningBuf(uint64_t* buf) const noexcept
{
const uint64_t informational = 0xffffffffffffffffULL;
const uint64_t informational = 0xffffffffffffffffULL;
/*
* Signing always embeds all data to be signed in qualifier tuple format for
* backward compatibility with V1 nodes, since otherwise we'd need a signature
* for v1 nodes to verify and another for v2 nodes to verify.
*/
/*
* Signing always embeds all data to be signed in qualifier tuple format for
* backward compatibility with V1 nodes, since otherwise we'd need a signature
* for v1 nodes to verify and another for v2 nodes to verify.
*/
// The standard three tuples that must begin every COM.
buf[0] = 0;
buf[1] = Utils::hton((uint64_t) m_timestamp);
buf[2] = Utils::hton((uint64_t) m_timestampMaxDelta);
buf[3] = ZT_CONST_TO_BE_UINT64(1);
buf[4] = Utils::hton(m_networkId);
buf[5] = 0;
buf[6] = ZT_CONST_TO_BE_UINT64(2);
buf[7] = Utils::hton(m_issuedTo.address);
buf[8] = informational;
// The standard three tuples that must begin every COM.
buf[0] = 0;
buf[1] = Utils::hton((uint64_t)m_timestamp);
buf[2] = Utils::hton((uint64_t)m_timestampMaxDelta);
buf[3] = ZT_CONST_TO_BE_UINT64(1);
buf[4] = Utils::hton(m_networkId);
buf[5] = 0;
buf[6] = ZT_CONST_TO_BE_UINT64(2);
buf[7] = Utils::hton(m_issuedTo.address);
buf[8] = informational;
unsigned int p = 9;
unsigned int p = 9;
// The full identity fingerprint of the peer to whom the COM was issued,
// embedded as a series of informational tuples.
if (m_issuedTo.haveHash()) {
buf[p++] = ZT_CONST_TO_BE_UINT64(3);
buf[p++] = Utils::loadMachineEndian< uint64_t >(m_issuedTo.hash);
buf[p++] = informational;
buf[p++] = ZT_CONST_TO_BE_UINT64(4);
buf[p++] = Utils::loadMachineEndian< uint64_t >(m_issuedTo.hash + 8);
buf[p++] = informational;
buf[p++] = ZT_CONST_TO_BE_UINT64(5);
buf[p++] = Utils::loadMachineEndian< uint64_t >(m_issuedTo.hash + 16);
buf[p++] = informational;
buf[p++] = ZT_CONST_TO_BE_UINT64(6);
buf[p++] = Utils::loadMachineEndian< uint64_t >(m_issuedTo.hash + 24);
buf[p++] = informational;
buf[p++] = ZT_CONST_TO_BE_UINT64(7);
buf[p++] = Utils::loadMachineEndian< uint64_t >(m_issuedTo.hash + 32);
buf[p++] = informational;
buf[p++] = ZT_CONST_TO_BE_UINT64(8);
buf[p++] = Utils::loadMachineEndian< uint64_t >(m_issuedTo.hash + 40);
buf[p++] = informational;
}
// The full identity fingerprint of the peer to whom the COM was issued,
// embedded as a series of informational tuples.
if (m_issuedTo.haveHash()) {
buf[p++] = ZT_CONST_TO_BE_UINT64(3);
buf[p++] = Utils::loadMachineEndian<uint64_t>(m_issuedTo.hash);
buf[p++] = informational;
buf[p++] = ZT_CONST_TO_BE_UINT64(4);
buf[p++] = Utils::loadMachineEndian<uint64_t>(m_issuedTo.hash + 8);
buf[p++] = informational;
buf[p++] = ZT_CONST_TO_BE_UINT64(5);
buf[p++] = Utils::loadMachineEndian<uint64_t>(m_issuedTo.hash + 16);
buf[p++] = informational;
buf[p++] = ZT_CONST_TO_BE_UINT64(6);
buf[p++] = Utils::loadMachineEndian<uint64_t>(m_issuedTo.hash + 24);
buf[p++] = informational;
buf[p++] = ZT_CONST_TO_BE_UINT64(7);
buf[p++] = Utils::loadMachineEndian<uint64_t>(m_issuedTo.hash + 32);
buf[p++] = informational;
buf[p++] = ZT_CONST_TO_BE_UINT64(8);
buf[p++] = Utils::loadMachineEndian<uint64_t>(m_issuedTo.hash + 40);
buf[p++] = informational;
}
for (FCV<p_Qualifier, ZT_MEMBERSHIP_CREDENTIAL_MAX_ADDITIONAL_QUALIFIERS>::const_iterator i(m_additionalQualifiers.begin()); i != m_additionalQualifiers.end(); ++i) { // NOLINT(modernize-loop-convert)
buf[p++] = Utils::hton(i->id);
buf[p++] = Utils::hton(i->value);
buf[p++] = Utils::hton(i->delta);
}
for (FCV<p_Qualifier, ZT_MEMBERSHIP_CREDENTIAL_MAX_ADDITIONAL_QUALIFIERS>::const_iterator i(
m_additionalQualifiers.begin());
i != m_additionalQualifiers.end();
++i) { // NOLINT(modernize-loop-convert)
buf[p++] = Utils::hton(i->id);
buf[p++] = Utils::hton(i->value);
buf[p++] = Utils::hton(i->delta);
}
return p * 8;
return p * 8;
}
} // namespace ZeroTier
} // namespace ZeroTier

View file

@ -14,23 +14,24 @@
#ifndef ZT_CERTIFICATEOFMEMBERSHIP_HPP
#define ZT_CERTIFICATEOFMEMBERSHIP_HPP
#include <string>
#include <stdexcept>
#include <algorithm>
#include "Constants.hpp"
#include "Credential.hpp"
#include "Address.hpp"
#include "C25519.hpp"
#include "Constants.hpp"
#include "Credential.hpp"
#include "FCV.hpp"
#include "Identity.hpp"
#include "Utils.hpp"
#include "FCV.hpp"
#include <algorithm>
#include <stdexcept>
#include <string>
// Maximum number of additional tuples beyond the standard always-present three.
#define ZT_MEMBERSHIP_CREDENTIAL_MAX_ADDITIONAL_QUALIFIERS 8
// version + qualifier count + three required qualifiers + additional qualifiers +
#define ZT_MEMBERSHIP_CREDENTIAL_MARSHAL_SIZE_MAX (1 + 2 + (3 * 3 * 8) + (ZT_MEMBERSHIP_CREDENTIAL_MAX_ADDITIONAL_QUALIFIERS * 3 * 8) + 144 + 5 + 2 + 96)
#define ZT_MEMBERSHIP_CREDENTIAL_MARSHAL_SIZE_MAX \
(1 + 2 + (3 * 3 * 8) + (ZT_MEMBERSHIP_CREDENTIAL_MAX_ADDITIONAL_QUALIFIERS * 3 * 8) + 144 + 5 + 2 + 96)
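For reference, the macro's worst case sums to 514 bytes (1 + 2 + 72 + 192 + 144 + 5 + 2 + 96); a standalone static_assert makes the arithmetic explicit:

// Mirrors the ZT_MEMBERSHIP_CREDENTIAL_MARSHAL_SIZE_MAX terms with the
// maximum of 8 additional qualifier tuples substituted in.
static_assert(1 + 2 + (3 * 3 * 8) + (8 * 3 * 8) + 144 + 5 + 2 + 96 == 514, "COM marshal worst case");

int main() { return 0; }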
namespace ZeroTier {
@ -96,135 +97,166 @@ class Context;
* order with the fingerprint hash being packed into tuple IDs 3-8 and this buffer is
* then signed.
*/
class MembershipCredential : public Credential
{
friend class Credential;
class MembershipCredential : public Credential {
friend class Credential;
public:
static constexpr ZT_CredentialType credentialType() noexcept
{ return ZT_CREDENTIAL_TYPE_COM; }
public:
static constexpr ZT_CredentialType credentialType() noexcept
{
return ZT_CREDENTIAL_TYPE_COM;
}
/**
* Create an empty certificate of membership
*/
ZT_INLINE MembershipCredential() noexcept
{ memoryZero(this); }
/**
* Create an empty certificate of membership
*/
ZT_INLINE MembershipCredential() noexcept
{
memoryZero(this);
}
/**
* Create from required fields common to all networks
*
* @param timestamp Timestamp of certificate
* @param timestampMaxDelta Maximum variation between timestamps on this net
* @param nwid Network ID
* @param issuedTo Certificate recipient
*/
MembershipCredential(int64_t timestamp, int64_t timestampMaxDelta, uint64_t nwid, const Identity &issuedTo) noexcept;
/**
* Create from required fields common to all networks
*
* @param timestamp Timestamp of certificate
* @param timestampMaxDelta Maximum variation between timestamps on this net
* @param nwid Network ID
* @param issuedTo Certificate recipient
*/
MembershipCredential(int64_t timestamp, int64_t timestampMaxDelta, uint64_t nwid, const Identity& issuedTo)
noexcept;
/**
* @return True if there's something here
*/
ZT_INLINE operator bool() const noexcept
{ return (m_networkId != 0); }
/**
* @return True if there's something here
*/
ZT_INLINE operator bool() const noexcept
{
return (m_networkId != 0);
}
/**
* @return Credential ID, always 0 for COMs
*/
ZT_INLINE uint32_t id() const noexcept
{ return 0; }
/**
* @return Credential ID, always 0 for COMs
*/
ZT_INLINE uint32_t id() const noexcept
{
return 0;
}
/**
* @return Timestamp for this cert and maximum delta for timestamp
*/
ZT_INLINE int64_t timestamp() const noexcept
{ return m_timestamp; }
/**
* @return Timestamp for this cert and maximum delta for timestamp
*/
ZT_INLINE int64_t timestamp() const noexcept
{
return m_timestamp;
}
ZT_INLINE int64_t revision() const noexcept
{ return m_timestamp; }
ZT_INLINE int64_t revision() const noexcept
{
return m_timestamp;
}
/**
* @return Maximum allowed difference between timestamps
*/
ZT_INLINE int64_t timestampMaxDelta() const noexcept
{ return m_timestampMaxDelta; }
/**
* @return Maximum allowed difference between timestamps
*/
ZT_INLINE int64_t timestampMaxDelta() const noexcept
{
return m_timestampMaxDelta;
}
/**
* @return Fingerprint of identity to which this cert was issued
*/
ZT_INLINE const Fingerprint &issuedTo() const noexcept
{ return m_issuedTo; }
/**
* @return Fingerprint of identity to which this cert was issued
*/
ZT_INLINE const Fingerprint& issuedTo() const noexcept
{
return m_issuedTo;
}
/**
* @return Network ID for which this cert was issued
*/
ZT_INLINE uint64_t networkId() const noexcept
{ return m_networkId; }
/**
* @return Network ID for which this cert was issued
*/
ZT_INLINE uint64_t networkId() const noexcept
{
return m_networkId;
}
/**
* Compare two certificates for parameter agreement
*
* This compares this certificate with the other and returns true if all
* parameters in this cert are present in the other and if they agree to
* within this cert's max delta value for each given parameter.
*
* Tuples present in other but not in this cert are ignored, but any
* tuples present in this cert but not in other result in 'false'.
*
* @param other Cert to compare with
* @return True if certs agree and 'other' may be communicated with
*/
bool agreesWith(const MembershipCredential &other) const noexcept;
/**
* Compare two certificates for parameter agreement
*
* This compares this certificate with the other and returns true if all
* parameters in this cert are present in the other and if they agree to
* within this cert's max delta value for each given parameter.
*
* Tuples present in other but not in this cert are ignored, but any
* tuples present in this cert but not in other result in 'false'.
*
* @param other Cert to compare with
* @return True if certs agree and 'other' may be communicated with
*/
bool agreesWith(const MembershipCredential& other) const noexcept;
/**
* Sign this certificate
*
* @param with Identity to sign with, must include private key
* @return True if signature was successful
*/
bool sign(const Identity &with) noexcept;
/**
* Sign this certificate
*
* @param with Identity to sign with, must include private key
* @return True if signature was successful
*/
bool sign(const Identity& with) noexcept;
/**
* Verify this COM and its signature
*
* @param ctx Runtime context for looking up peers
* @param cc Call context handed through to any callbacks called as a result of this call
*/
ZT_INLINE Credential::VerifyResult verify(const Context &ctx, const CallContext &cc) const
{ return s_verify(ctx, cc, *this); }
/**
* Verify this COM and its signature
*
* @param ctx Runtime context for looking up peers
* @param cc Call context handed through to any callbacks called as a result of this call
*/
ZT_INLINE Credential::VerifyResult verify(const Context& ctx, const CallContext& cc) const
{
return s_verify(ctx, cc, *this);
}
static constexpr int marshalSizeMax() noexcept
{ return ZT_MEMBERSHIP_CREDENTIAL_MARSHAL_SIZE_MAX; }
static constexpr int marshalSizeMax() noexcept
{
return ZT_MEMBERSHIP_CREDENTIAL_MARSHAL_SIZE_MAX;
}
int marshal(uint8_t data[ZT_MEMBERSHIP_CREDENTIAL_MARSHAL_SIZE_MAX], bool v2 = false) const noexcept;
int unmarshal(const uint8_t *data, int len) noexcept;
int marshal(uint8_t data[ZT_MEMBERSHIP_CREDENTIAL_MARSHAL_SIZE_MAX], bool v2 = false) const noexcept;
int unmarshal(const uint8_t* data, int len) noexcept;
private:
unsigned int m_fillSigningBuf(uint64_t *buf) const noexcept;
private:
unsigned int m_fillSigningBuf(uint64_t* buf) const noexcept;
struct p_Qualifier
{
ZT_INLINE p_Qualifier() noexcept: id(0), value(0), delta(0)
{}
struct p_Qualifier {
ZT_INLINE p_Qualifier() noexcept
: id(0)
, value(0)
, delta(0)
{
}
ZT_INLINE p_Qualifier(const uint64_t id_, const uint64_t value_, const uint64_t delta_) noexcept: id(id_), value(value_), delta(delta_)
{}
ZT_INLINE p_Qualifier(const uint64_t id_, const uint64_t value_, const uint64_t delta_) noexcept
: id(id_)
, value(value_)
, delta(delta_)
{
}
uint64_t id;
uint64_t value;
uint64_t delta;
ZT_INLINE bool operator<(const p_Qualifier &q) const noexcept
{ return (id < q.id); } // sort order
};
uint64_t id;
uint64_t value;
uint64_t delta;
ZT_INLINE bool operator<(const p_Qualifier& q) const noexcept
{
return (id < q.id);
} // sort order
};
FCV< p_Qualifier, ZT_MEMBERSHIP_CREDENTIAL_MAX_ADDITIONAL_QUALIFIERS > m_additionalQualifiers;
int64_t m_timestamp;
int64_t m_timestampMaxDelta;
uint64_t m_networkId;
Fingerprint m_issuedTo;
Address m_signedBy;
unsigned int m_signatureLength;
uint8_t m_signature[ZT_SIGNATURE_BUFFER_SIZE];
FCV<p_Qualifier, ZT_MEMBERSHIP_CREDENTIAL_MAX_ADDITIONAL_QUALIFIERS> m_additionalQualifiers;
int64_t m_timestamp;
int64_t m_timestampMaxDelta;
uint64_t m_networkId;
Fingerprint m_issuedTo;
Address m_signedBy;
unsigned int m_signatureLength;
uint8_t m_signature[ZT_SIGNATURE_BUFFER_SIZE];
};
} // namespace ZeroTier
} // namespace ZeroTier
#endif
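A minimal round-trip sketch of the marshal/unmarshal/agreesWith API declared above (not part of this commit; the helper name and the assumption that unmarshal() returns a positive byte count on success are illustrative):
#include "MembershipCredential.hpp"
using namespace ZeroTier;
// Hypothetical check: serialize a COM into a maximum-size stack buffer,
// parse it back, and confirm the copy still agrees with the original.
static bool comRoundTrip(const MembershipCredential& com)
{
    uint8_t buf[ZT_MEMBERSHIP_CREDENTIAL_MARSHAL_SIZE_MAX];
    const int len = com.marshal(buf);   // v1 wire format (v2 = false by default)
    if (len <= 0)
        return false;
    MembershipCredential copy;
    return (copy.unmarshal(buf, len) > 0) && copy.agreesWith(com);
}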

View file

@@ -34,59 +34,61 @@
* @tparam TUNIT Unit of time in milliseconds (default: 1000 for one second)
* @tparam LSIZE Log size in units of time (default: 10 for 10s worth of data)
*/
template<int64_t TUNIT = 1000, unsigned long LSIZE = 10>
class Meter
{
public:
/**
* Create and initialize a new meter
*
* @param now Start time
*/
ZT_INLINE Meter() noexcept
{}
template <int64_t TUNIT = 1000, unsigned long LSIZE = 10> class Meter {
public:
/**
* Create and initialize a new meter
*
* @param now Start time
*/
ZT_INLINE Meter() noexcept
{
}
/**
* Add a measurement
*
* @param ts Timestamp for measurement
* @param count Count of items (usually bytes)
*/
ZT_INLINE void log(const int64_t ts, const uint64_t count) noexcept
{
// We log by choosing a log bucket based on the current time in units modulo
// the log size and then if it's a new bucket setting it or otherwise adding
// to it.
const unsigned long bucket = ((unsigned long)(ts / TUNIT)) % LSIZE;
if (unlikely(m_bucket.exchange(bucket, std::memory_order_relaxed) != bucket)) {
m_totalExclCounts.fetch_add(m_counts[bucket].exchange(count, std::memory_order_relaxed), std::memory_order_relaxed);
} else {
m_counts[bucket].fetch_add(count, std::memory_order_relaxed);
}
}
/**
* Add a measurement
*
* @param ts Timestamp for measurement
* @param count Count of items (usually bytes)
*/
ZT_INLINE void log(const int64_t ts, const uint64_t count) noexcept
{
// We log by choosing a log bucket based on the current time in units modulo
// the log size and then if it's a new bucket setting it or otherwise adding
// to it.
const unsigned long bucket = ((unsigned long)(ts / TUNIT)) % LSIZE;
if (unlikely(m_bucket.exchange(bucket, std::memory_order_relaxed) != bucket)) {
m_totalExclCounts.fetch_add(
m_counts[bucket].exchange(count, std::memory_order_relaxed),
std::memory_order_relaxed);
}
else {
m_counts[bucket].fetch_add(count, std::memory_order_relaxed);
}
}
/**
* Get rate per TUNIT time
*
* @param now Current time
* @param rate Result parameter: rate in count/TUNIT
* @param total Total count for life of object
*/
ZT_INLINE void rate(double &rate, uint64_t &total) const noexcept
{
total = 0;
for (unsigned long i = 0;i < LSIZE;++i)
total += m_counts[i].load(std::memory_order_relaxed);
rate = (double) total / (double) LSIZE;
total += m_totalExclCounts.load(std::memory_order_relaxed);
}
/**
* Get rate per TUNIT time
*
* @param now Current time
* @param rate Result parameter: rate in count/TUNIT
* @param total Total count for life of object
*/
ZT_INLINE void rate(double& rate, uint64_t& total) const noexcept
{
total = 0;
for (unsigned long i = 0; i < LSIZE; ++i)
total += m_counts[i].load(std::memory_order_relaxed);
rate = (double)total / (double)LSIZE;
total += m_totalExclCounts.load(std::memory_order_relaxed);
}
private:
std::atomic<uint64_t> m_counts[LSIZE];
std::atomic<uint64_t> m_totalExclCounts;
std::atomic<unsigned long> m_bucket;
private:
std::atomic<uint64_t> m_counts[LSIZE];
std::atomic<uint64_t> m_totalExclCounts;
std::atomic<unsigned long> m_bucket;
};
} // namespace ZeroTier
} // namespace ZeroTier
#endif
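A short usage sketch of the bucket-based meter above (not from this commit; the receive-byte counter and helper names are illustrative):
#include "Meter.hpp"
using namespace ZeroTier;
// Hypothetical receive-byte meter: ten one-second buckets.
static Meter<1000, 10> s_rxBytes;
// Called from a packet path with the current time and payload size.
static void onPacket(const int64_t now, const unsigned int len)
{
    s_rxBytes.log(now, len);
}
// Samples the average bytes per second over the ten-bucket window plus the lifetime total.
static void sampleRx(double& bytesPerSecond, uint64_t& totalBytes)
{
    s_rxBytes.rate(bytesPerSecond, totalBytes);
}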

View file

@@ -15,10 +15,10 @@
#define ZT_MULTICASTGROUP_HPP
#include "Constants.hpp"
#include "MAC.hpp"
#include "InetAddress.hpp"
#include "Utils.hpp"
#include "MAC.hpp"
#include "TriviallyCopyable.hpp"
#include "Utils.hpp"
namespace ZeroTier {
@@ -37,84 +37,108 @@ namespace ZeroTier {
*
* MulticastGroup behaves as an immutable value object.
*/
class MulticastGroup : public TriviallyCopyable
{
public:
ZT_INLINE MulticastGroup() noexcept: m_mac(), m_adi(0)
{}
class MulticastGroup : public TriviallyCopyable {
public:
ZT_INLINE MulticastGroup() noexcept
: m_mac()
, m_adi(0)
{
}
ZT_INLINE MulticastGroup(const MAC &m, uint32_t a) noexcept: m_mac(m), m_adi(a)
{}
ZT_INLINE MulticastGroup(const MAC& m, uint32_t a) noexcept
: m_mac(m)
, m_adi(a)
{
}
/**
* Derive the multicast group used for address resolution (ARP/NDP) for an IP
*
* @param ip IP address (port field is ignored)
* @return Multicast group for ARP/NDP
*/
static ZT_INLINE MulticastGroup deriveMulticastGroupForAddressResolution(const InetAddress &ip) noexcept
{
if (ip.isV4()) {
// IPv4 wants broadcast MACs, so we shove the V4 address itself into
// the Multicast Group ADI field. Making V4 ARP work is basically why
// ADI was added, as well as handling other things that want mindless
// Ethernet broadcast to all.
return MulticastGroup(MAC(0xffffffffffffULL), Utils::ntoh(*((const uint32_t *)ip.rawIpData())));
} else if (ip.isV6()) {
// IPv6 is better designed in this respect. We can compute the IPv6
// multicast address directly from the IP address, and it gives us
// 24 bits of uniqueness. Collisions aren't likely to be common enough
// to care about.
const uint8_t *const a = reinterpret_cast<const uint8_t *>(ip.rawIpData()); // NOLINT(hicpp-use-auto,modernize-use-auto)
return MulticastGroup(MAC(0x33, 0x33, 0xff, a[13], a[14], a[15]), 0);
}
return MulticastGroup(); // NOLINT(modernize-return-braced-init-list)
}
/**
* Derive the multicast group used for address resolution (ARP/NDP) for an IP
*
* @param ip IP address (port field is ignored)
* @return Multicast group for ARP/NDP
*/
static ZT_INLINE MulticastGroup deriveMulticastGroupForAddressResolution(const InetAddress& ip) noexcept
{
if (ip.isV4()) {
// IPv4 wants broadcast MACs, so we shove the V4 address itself into
// the Multicast Group ADI field. Making V4 ARP work is basically why
// ADI was added, as well as handling other things that want mindless
// Ethernet broadcast to all.
return MulticastGroup(MAC(0xffffffffffffULL), Utils::ntoh(*((const uint32_t*)ip.rawIpData())));
}
else if (ip.isV6()) {
// IPv6 is better designed in this respect. We can compute the IPv6
// multicast address directly from the IP address, and it gives us
// 24 bits of uniqueness. Collisions aren't likely to be common enough
// to care about.
const uint8_t* const a =
reinterpret_cast<const uint8_t*>(ip.rawIpData()); // NOLINT(hicpp-use-auto,modernize-use-auto)
return MulticastGroup(MAC(0x33, 0x33, 0xff, a[13], a[14], a[15]), 0);
}
return MulticastGroup(); // NOLINT(modernize-return-braced-init-list)
}
/**
* @return Ethernet MAC portion of multicast group
*/
ZT_INLINE const MAC &mac() const noexcept
{ return m_mac; }
/**
* @return Ethernet MAC portion of multicast group
*/
ZT_INLINE const MAC& mac() const noexcept
{
return m_mac;
}
/**
* @return Additional distinguishing information, which is normally zero except for IPv4 ARP where it's the IPv4 address
*/
ZT_INLINE uint32_t adi() const
{ return m_adi; }
/**
* @return Additional distinguishing information, which is normally zero except for IPv4 ARP where it's the IPv4
* address
*/
ZT_INLINE uint32_t adi() const
{
return m_adi;
}
ZT_INLINE bool operator==(const MulticastGroup &g) const noexcept
{ return ((m_mac == g.m_mac) && (m_adi == g.m_adi)); }
ZT_INLINE bool operator==(const MulticastGroup& g) const noexcept
{
return ((m_mac == g.m_mac) && (m_adi == g.m_adi));
}
ZT_INLINE bool operator!=(const MulticastGroup &g) const noexcept
{ return ((m_mac != g.m_mac) || (m_adi != g.m_adi)); }
ZT_INLINE bool operator!=(const MulticastGroup& g) const noexcept
{
return ((m_mac != g.m_mac) || (m_adi != g.m_adi));
}
ZT_INLINE bool operator<(const MulticastGroup &g) const noexcept
{
if (m_mac < g.m_mac)
return true;
else if (m_mac == g.m_mac)
return (m_adi < g.m_adi);
return false;
}
ZT_INLINE bool operator<(const MulticastGroup& g) const noexcept
{
if (m_mac < g.m_mac)
return true;
else if (m_mac == g.m_mac)
return (m_adi < g.m_adi);
return false;
}
ZT_INLINE bool operator>(const MulticastGroup &g) const noexcept
{ return (g < *this); }
ZT_INLINE bool operator>(const MulticastGroup& g) const noexcept
{
return (g < *this);
}
ZT_INLINE bool operator<=(const MulticastGroup &g) const noexcept
{ return !(g < *this); }
ZT_INLINE bool operator<=(const MulticastGroup& g) const noexcept
{
return ! (g < *this);
}
ZT_INLINE bool operator>=(const MulticastGroup &g) const noexcept
{ return !(*this < g); }
ZT_INLINE bool operator>=(const MulticastGroup& g) const noexcept
{
return ! (*this < g);
}
ZT_INLINE unsigned long hashCode() const noexcept
{ return (m_mac.hashCode() + (unsigned long)m_adi); }
ZT_INLINE unsigned long hashCode() const noexcept
{
return (m_mac.hashCode() + (unsigned long)m_adi);
}
private:
MAC m_mac;
uint32_t m_adi;
private:
MAC m_mac;
uint32_t m_adi;
};
} // namespace ZeroTier
} // namespace ZeroTier
#endif
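A small sketch of how deriveMulticastGroupForAddressResolution() might be used (not part of this commit; the helper is hypothetical and assumes only the API above plus InetAddress):
#include "InetAddress.hpp"
#include "MulticastGroup.hpp"
using namespace ZeroTier;
// Hypothetical check: does an ARP/NDP query for 'ip' land in a group we are
// subscribed to? IPv4 maps to the broadcast MAC with the address in the ADI
// field; IPv6 maps to a 33:33:ff:xx:xx:xx style group.
static bool resolutionGroupMatches(const InetAddress& ip, const MulticastGroup& subscribed)
{
    return MulticastGroup::deriveMulticastGroupForAddressResolution(ip) == subscribed;
}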

View file

@@ -36,122 +36,242 @@ namespace ZeroTier {
/**
* A simple mutual exclusion lock.
*/
class Mutex
{
public:
class Mutex {
public:
#ifdef ZT_USE_PTHREADS
ZT_INLINE Mutex() noexcept { pthread_mutex_init(&_mh,nullptr); }
ZT_INLINE ~Mutex() noexcept { pthread_mutex_destroy(&_mh); }
ZT_INLINE void lock() const noexcept { pthread_mutex_lock(&((const_cast <Mutex *> (this))->_mh)); }
ZT_INLINE void unlock() const noexcept { pthread_mutex_unlock(&((const_cast <Mutex *> (this))->_mh)); }
ZT_INLINE Mutex() noexcept
{
pthread_mutex_init(&_mh, nullptr);
}
ZT_INLINE ~Mutex() noexcept
{
pthread_mutex_destroy(&_mh);
}
ZT_INLINE void lock() const noexcept
{
pthread_mutex_lock(&((const_cast<Mutex*>(this))->_mh));
}
ZT_INLINE void unlock() const noexcept
{
pthread_mutex_unlock(&((const_cast<Mutex*>(this))->_mh));
}
#else
ZT_INLINE Mutex() noexcept : _m() {}
ZT_INLINE void lock() const noexcept { const_cast<Mutex *>(this)->_m.lock(); }
ZT_INLINE void unlock() const noexcept { const_cast<Mutex *>(this)->_m.unlock(); }
ZT_INLINE Mutex() noexcept : _m()
{
}
ZT_INLINE void lock() const noexcept
{
const_cast<Mutex*>(this)->_m.lock();
}
ZT_INLINE void unlock() const noexcept
{
const_cast<Mutex*>(this)->_m.unlock();
}
#endif
class Lock
{
public:
explicit ZT_INLINE Lock(Mutex &m) noexcept : _m(&m) { m.lock(); }
explicit ZT_INLINE Lock(const Mutex &m) noexcept : _m(const_cast<Mutex *>(&m)) { _m->lock(); }
ZT_INLINE ~Lock() { _m->unlock(); }
private:
Mutex *const _m;
};
class Lock {
public:
explicit ZT_INLINE Lock(Mutex& m) noexcept : _m(&m)
{
m.lock();
}
explicit ZT_INLINE Lock(const Mutex& m) noexcept : _m(const_cast<Mutex*>(&m))
{
_m->lock();
}
ZT_INLINE ~Lock()
{
_m->unlock();
}
private:
ZT_INLINE Mutex(const Mutex &) noexcept {}
ZT_INLINE const Mutex &operator=(const Mutex &) noexcept { return *this; }
private:
Mutex* const _m;
};
private:
ZT_INLINE Mutex(const Mutex&) noexcept
{
}
ZT_INLINE const Mutex& operator=(const Mutex&) noexcept
{
return *this;
}
#ifdef ZT_USE_PTHREADS
pthread_mutex_t _mh;
pthread_mutex_t _mh;
#else
std::mutex _m;
std::mutex _m;
#endif
};
/**
* A lock allowing multiple threads to read but making all wait on any writing thread.
*/
class RWMutex
{
public:
class RWMutex {
public:
#ifdef ZT_USE_PTHREADS
ZT_INLINE RWMutex() noexcept { pthread_rwlock_init(&_mh,nullptr); }
ZT_INLINE ~RWMutex() noexcept { pthread_rwlock_destroy(&_mh); }
ZT_INLINE void lock() const noexcept { pthread_rwlock_wrlock(&((const_cast <RWMutex *> (this))->_mh)); }
ZT_INLINE void rlock() const noexcept { pthread_rwlock_rdlock(&((const_cast <RWMutex *> (this))->_mh)); }
ZT_INLINE void unlock() const noexcept { pthread_rwlock_unlock(&((const_cast <RWMutex *> (this))->_mh)); }
ZT_INLINE void runlock() const noexcept { pthread_rwlock_unlock(&((const_cast <RWMutex *> (this))->_mh)); }
ZT_INLINE RWMutex() noexcept
{
pthread_rwlock_init(&_mh, nullptr);
}
ZT_INLINE ~RWMutex() noexcept
{
pthread_rwlock_destroy(&_mh);
}
ZT_INLINE void lock() const noexcept
{
pthread_rwlock_wrlock(&((const_cast<RWMutex*>(this))->_mh));
}
ZT_INLINE void rlock() const noexcept
{
pthread_rwlock_rdlock(&((const_cast<RWMutex*>(this))->_mh));
}
ZT_INLINE void unlock() const noexcept
{
pthread_rwlock_unlock(&((const_cast<RWMutex*>(this))->_mh));
}
ZT_INLINE void runlock() const noexcept
{
pthread_rwlock_unlock(&((const_cast<RWMutex*>(this))->_mh));
}
#else
ZT_INLINE RWMutex() noexcept : _m() {}
ZT_INLINE void lock() const noexcept { const_cast<RWMutex *>(this)->_m.lock(); }
ZT_INLINE void rlock() const noexcept { const_cast<RWMutex *>(this)->_m.lock_shared(); }
ZT_INLINE void unlock() const noexcept { const_cast<RWMutex *>(this)->_m.unlock(); }
ZT_INLINE void runlock() const noexcept { const_cast<RWMutex *>(this)->_m.unlock_shared(); }
ZT_INLINE RWMutex() noexcept : _m()
{
}
ZT_INLINE void lock() const noexcept
{
const_cast<RWMutex*>(this)->_m.lock();
}
ZT_INLINE void rlock() const noexcept
{
const_cast<RWMutex*>(this)->_m.lock_shared();
}
ZT_INLINE void unlock() const noexcept
{
const_cast<RWMutex*>(this)->_m.unlock();
}
ZT_INLINE void runlock() const noexcept
{
const_cast<RWMutex*>(this)->_m.unlock_shared();
}
#endif
/**
* RAII locker that acquires only the read lock (shared read)
*/
class RLock
{
public:
explicit ZT_INLINE RLock(RWMutex &m) noexcept : _m(&m) { m.rlock(); }
explicit ZT_INLINE RLock(const RWMutex &m) noexcept : _m(const_cast<RWMutex *>(&m)) { _m->rlock(); }
ZT_INLINE ~RLock() { _m->runlock(); }
private:
RWMutex *const _m;
};
/**
* RAII locker that acquires only the read lock (shared read)
*/
class RLock {
public:
explicit ZT_INLINE RLock(RWMutex& m) noexcept : _m(&m)
{
m.rlock();
}
explicit ZT_INLINE RLock(const RWMutex& m) noexcept : _m(const_cast<RWMutex*>(&m))
{
_m->rlock();
}
ZT_INLINE ~RLock()
{
_m->runlock();
}
/**
* RAII locker that acquires the write lock (exclusive write, no readers)
*/
class Lock
{
public:
explicit ZT_INLINE Lock(RWMutex &m) noexcept : _m(&m) { m.lock(); }
explicit ZT_INLINE Lock(const RWMutex &m) noexcept : _m(const_cast<RWMutex *>(&m)) { _m->lock(); }
ZT_INLINE ~Lock() { _m->unlock(); }
private:
RWMutex *const _m;
};
private:
RWMutex* const _m;
};
/**
* RAII locker that acquires the read lock first and can switch to writing.
*
* Use writing() to acquire the write lock if not already acquired. Use reading() to
* let go of the write lock and go back to only holding the read lock. Note that on
* most platforms there's a brief moment where the lock is unlocked during the
* transition, meaning protected variable states can change. Code must not assume
* that the lock is held constantly if writing() is used to change mode.
*/
class RMaybeWLock
{
public:
explicit ZT_INLINE RMaybeWLock(RWMutex &m) noexcept : _m(&m),_w(false) { m.rlock(); }
explicit ZT_INLINE RMaybeWLock(const RWMutex &m) noexcept : _m(const_cast<RWMutex *>(&m)),_w(false) { _m->rlock(); }
ZT_INLINE void writing() noexcept { if (!_w) { _w = true; _m->runlock(); _m->lock(); } }
ZT_INLINE void reading() noexcept { if (_w) { _w = false; _m->unlock(); _m->rlock(); } }
ZT_INLINE bool isWriting() const noexcept { return _w; }
ZT_INLINE ~RMaybeWLock() { if (_w) _m->unlock(); else _m->runlock(); }
private:
RWMutex *const _m;
bool _w;
};
/**
* RAII locker that acquires the write lock (exclusive write, no readers)
*/
class Lock {
public:
explicit ZT_INLINE Lock(RWMutex& m) noexcept : _m(&m)
{
m.lock();
}
explicit ZT_INLINE Lock(const RWMutex& m) noexcept : _m(const_cast<RWMutex*>(&m))
{
_m->lock();
}
ZT_INLINE ~Lock()
{
_m->unlock();
}
private:
ZT_INLINE RWMutex(const RWMutex &) noexcept {}
ZT_INLINE const RWMutex &operator=(const RWMutex &) noexcept { return *this; }
private:
RWMutex* const _m;
};
/**
* RAII locker that acquires the read lock first and can switch to writing.
*
* Use writing() to acquire the write lock if not already acquired. Use reading() to
* let go of the write lock and go back to only holding the read lock. Note that on
* most platforms there's a brief moment where the lock is unlocked during the
* transition, meaning protected variable states can change. Code must not assume
* that the lock is held constantly if writing() is used to change mode.
*/
class RMaybeWLock {
public:
explicit ZT_INLINE RMaybeWLock(RWMutex& m) noexcept
: _m(&m)
, _w(false)
{
m.rlock();
}
explicit ZT_INLINE RMaybeWLock(const RWMutex& m) noexcept
: _m(const_cast<RWMutex*>(&m))
, _w(false)
{
_m->rlock();
}
ZT_INLINE void writing() noexcept
{
if (! _w) {
_w = true;
_m->runlock();
_m->lock();
}
}
ZT_INLINE void reading() noexcept
{
if (_w) {
_w = false;
_m->unlock();
_m->rlock();
}
}
ZT_INLINE bool isWriting() const noexcept
{
return _w;
}
ZT_INLINE ~RMaybeWLock()
{
if (_w)
_m->unlock();
else
_m->runlock();
}
private:
RWMutex* const _m;
bool _w;
};
private:
ZT_INLINE RWMutex(const RWMutex&) noexcept
{
}
ZT_INLINE const RWMutex& operator=(const RWMutex&) noexcept
{
return *this;
}
#ifdef ZT_USE_PTHREADS
pthread_rwlock_t _mh;
pthread_rwlock_t _mh;
#else
std::shared_mutex _m;
std::shared_mutex _m;
#endif
};
} // namespace ZeroTier
} // namespace ZeroTier
#endif
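A sketch of the read-then-maybe-write pattern RMaybeWLock is documented for (not part of this commit; the map and key are illustrative). Note the re-check after writing(), since the lock is briefly released during the upgrade:
#include "Mutex.hpp"
#include <map>
using namespace ZeroTier;
static RWMutex s_tableLock;
static std::map<int, int> s_table;
// Hypothetical lookup-or-insert: start with the shared read lock and only
// upgrade to the exclusive lock if the key is missing.
static int getOrInsert(const int k)
{
    RWMutex::RMaybeWLock l(s_tableLock);
    std::map<int, int>::const_iterator i(s_table.find(k));
    if (i != s_table.end())
        return i->second;
    l.writing();   // upgrade; state may have changed while the lock was dropped
    return s_table.insert(std::pair<int, int>(k, 0)).first->second;
}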

File diff suppressed because it is too large Load diff

View file

@@ -14,19 +14,19 @@
#ifndef ZT_NETWORK_HPP
#define ZT_NETWORK_HPP
#include "Constants.hpp"
#include "Address.hpp"
#include "Mutex.hpp"
#include "SharedPtr.hpp"
#include "MulticastGroup.hpp"
#include "MAC.hpp"
#include "Buf.hpp"
#include "Dictionary.hpp"
#include "Member.hpp"
#include "NetworkConfig.hpp"
#include "MembershipCredential.hpp"
#include "Containers.hpp"
#include "CallContext.hpp"
#include "Constants.hpp"
#include "Containers.hpp"
#include "Dictionary.hpp"
#include "MAC.hpp"
#include "Member.hpp"
#include "MembershipCredential.hpp"
#include "MulticastGroup.hpp"
#include "Mutex.hpp"
#include "NetworkConfig.hpp"
#include "SharedPtr.hpp"
#define ZT_NETWORK_MAX_INCOMING_UPDATES 3
@@ -39,349 +39,375 @@ class Peer;
/**
* A virtual LAN
*/
class Network
{
friend class SharedPtr< Network >;
class Network {
friend class SharedPtr<Network>;
public:
/**
* Broadcast multicast group: ff:ff:ff:ff:ff:ff / 0
*/
static const MulticastGroup BROADCAST;
public:
/**
* Broadcast multicast group: ff:ff:ff:ff:ff:ff / 0
*/
static const MulticastGroup BROADCAST;
/**
* Compute primary controller device ID from network ID
*/
static ZT_INLINE Address controllerFor(uint64_t nwid) noexcept
{ return Address(nwid >> 24U); }
/**
* Compute primary controller device ID from network ID
*/
static ZT_INLINE Address controllerFor(uint64_t nwid) noexcept
{
return Address(nwid >> 24U);
}
/**
* Construct a new network
*
* Note that init() should be called immediately after the network is
* constructed to actually configure the port.
*
* @param nwid Network ID
* @param controllerFingerprint Initial controller fingerprint if non-NULL
* @param uptr Arbitrary pointer used by externally-facing API (for user use)
* @param nconf Network config, if known
*/
Network(
const Context &ctx,
const CallContext &cc,
uint64_t nwid,
const Fingerprint &controllerFingerprint,
void *uptr,
const NetworkConfig *nconf);
/**
* Construct a new network
*
* Note that init() should be called immediately after the network is
* constructed to actually configure the port.
*
* @param nwid Network ID
* @param controllerFingerprint Initial controller fingerprint if non-NULL
* @param uptr Arbitrary pointer used by externally-facing API (for user use)
* @param nconf Network config, if known
*/
Network(
const Context& ctx,
const CallContext& cc,
uint64_t nwid,
const Fingerprint& controllerFingerprint,
void* uptr,
const NetworkConfig* nconf);
~Network();
~Network();
ZT_INLINE uint64_t id() const noexcept
{ return m_id; }
ZT_INLINE uint64_t id() const noexcept
{
return m_id;
}
ZT_INLINE Address controller() const noexcept
{ return Address(m_id >> 24U); }
ZT_INLINE Address controller() const noexcept
{
return Address(m_id >> 24U);
}
ZT_INLINE bool multicastEnabled() const noexcept
{ return (m_config.multicastLimit > 0); }
ZT_INLINE bool multicastEnabled() const noexcept
{
return (m_config.multicastLimit > 0);
}
ZT_INLINE bool hasConfig() const noexcept
{ return (m_config); }
ZT_INLINE bool hasConfig() const noexcept
{
return (m_config);
}
ZT_INLINE uint64_t lastConfigUpdate() const noexcept
{ return m_lastConfigUpdate; }
ZT_INLINE uint64_t lastConfigUpdate() const noexcept
{
return m_lastConfigUpdate;
}
ZT_INLINE ZT_VirtualNetworkStatus status() const noexcept
{ return m_status(); }
ZT_INLINE ZT_VirtualNetworkStatus status() const noexcept
{
return m_status();
}
ZT_INLINE const NetworkConfig &config() const noexcept
{ return m_config; }
ZT_INLINE const NetworkConfig& config() const noexcept
{
return m_config;
}
ZT_INLINE const MAC &mac() const noexcept
{ return m_mac; }
ZT_INLINE const MAC& mac() const noexcept
{
return m_mac;
}
/**
* Apply filters to an outgoing packet
*
* This applies filters from our network config and, if that doesn't match,
* our capabilities in ascending order of capability ID. Additional actions
* such as TEE may be taken, and credentials may be pushed, so this is not
* side-effect-free. It's basically step one in sending something over VL2.
*
* @param noTee If true, do not TEE anything anywhere (for two-pass filtering as done with multicast and bridging)
* @param ztSource Source ZeroTier address
* @param ztDest Destination ZeroTier address
* @param macSource Ethernet layer source address
* @param macDest Ethernet layer destination address
* @param frameData Ethernet frame data
* @param frameLen Ethernet frame payload length
* @param etherType 16-bit ethernet type ID
* @param vlanId 16-bit VLAN ID
* @return True if packet should be sent, false if dropped or redirected
*/
bool filterOutgoingPacket(
const CallContext &cc,
bool noTee,
const Address &ztSource,
const Address &ztDest,
const MAC &macSource,
const MAC &macDest,
const uint8_t *frameData,
unsigned int frameLen,
unsigned int etherType,
unsigned int vlanId,
uint8_t &qosBucket);
/**
* Apply filters to an outgoing packet
*
* This applies filters from our network config and, if that doesn't match,
* our capabilities in ascending order of capability ID. Additional actions
* such as TEE may be taken, and credentials may be pushed, so this is not
* side-effect-free. It's basically step one in sending something over VL2.
*
* @param noTee If true, do not TEE anything anywhere (for two-pass filtering as done with multicast and bridging)
* @param ztSource Source ZeroTier address
* @param ztDest Destination ZeroTier address
* @param macSource Ethernet layer source address
* @param macDest Ethernet layer destination address
* @param frameData Ethernet frame data
* @param frameLen Ethernet frame payload length
* @param etherType 16-bit ethernet type ID
* @param vlanId 16-bit VLAN ID
* @return True if packet should be sent, false if dropped or redirected
*/
bool filterOutgoingPacket(
const CallContext& cc,
bool noTee,
const Address& ztSource,
const Address& ztDest,
const MAC& macSource,
const MAC& macDest,
const uint8_t* frameData,
unsigned int frameLen,
unsigned int etherType,
unsigned int vlanId,
uint8_t& qosBucket);
/**
* Apply filters to an incoming packet
*
* This applies filters from our network config and, if that doesn't match,
* the peer's capabilities in ascending order of capability ID. If there is
* a match certain actions may be taken such as sending a copy of the packet
* to a TEE or REDIRECT target.
*
* @param sourcePeer Source Peer
* @param ztDest Destination ZeroTier address
* @param macSource Ethernet layer source address
* @param macDest Ethernet layer destination address
* @param frameData Ethernet frame data
* @param frameLen Ethernet frame payload length
* @param etherType 16-bit ethernet type ID
* @param vlanId 16-bit VLAN ID
* @return 0 == drop, 1 == accept, 2 == accept even if bridged
*/
int filterIncomingPacket(
const CallContext &cc,
const SharedPtr< Peer > &sourcePeer,
const Address &ztDest,
const MAC &macSource,
const MAC &macDest,
const uint8_t *frameData,
unsigned int frameLen,
unsigned int etherType,
unsigned int vlanId);
/**
* Apply filters to an incoming packet
*
* This applies filters from our network config and, if that doesn't match,
* the peer's capabilities in ascending order of capability ID. If there is
* a match certain actions may be taken such as sending a copy of the packet
* to a TEE or REDIRECT target.
*
* @param sourcePeer Source Peer
* @param ztDest Destination ZeroTier address
* @param macSource Ethernet layer source address
* @param macDest Ethernet layer destination address
* @param frameData Ethernet frame data
* @param frameLen Ethernet frame payload length
* @param etherType 16-bit ethernet type ID
* @param vlanId 16-bit VLAN ID
* @return 0 == drop, 1 == accept, 2 == accept even if bridged
*/
int filterIncomingPacket(
const CallContext& cc,
const SharedPtr<Peer>& sourcePeer,
const Address& ztDest,
const MAC& macSource,
const MAC& macDest,
const uint8_t* frameData,
unsigned int frameLen,
unsigned int etherType,
unsigned int vlanId);
/**
* Subscribe to a multicast group
*
* @param mg New multicast group
*/
void multicastSubscribe(const CallContext &cc, const MulticastGroup &mg);
/**
* Subscribe to a multicast group
*
* @param mg New multicast group
*/
void multicastSubscribe(const CallContext& cc, const MulticastGroup& mg);
/**
* Unsubscribe from a multicast group
*
* @param mg Multicast group
*/
void multicastUnsubscribe(const MulticastGroup &mg);
/**
* Unsubscribe from a multicast group
*
* @param mg Multicast group
*/
void multicastUnsubscribe(const MulticastGroup& mg);
/**
* Parse, verify, and handle an inbound network config chunk
*
* This is called from IncomingPacket to handle incoming network config
* chunks via OK(NETWORK_CONFIG_REQUEST) or NETWORK_CONFIG. It's a common
* bit of packet parsing code that also verifies chunks and replicates
* them (via rumor mill flooding) if their fast propagate flag is set.
*
* @param packetId Packet ID or 0 if none (e.g. via cluster path)
* @param source Peer that actually sent this chunk (probably controller)
* @param chunk Buffer containing chunk
* @param ptr Index of chunk and related fields in packet (starting with network ID)
* @param size Size of data in chunk buffer (total, not relative to ptr)
* @return Update ID if update was fully assembled and accepted or 0 otherwise
*/
uint64_t handleConfigChunk(
const CallContext &cc,
uint64_t packetId,
const SharedPtr< Peer > &source,
const Buf &chunk,
int ptr,
int size);
/**
* Parse, verify, and handle an inbound network config chunk
*
* This is called from IncomingPacket to handle incoming network config
* chunks via OK(NETWORK_CONFIG_REQUEST) or NETWORK_CONFIG. It's a common
* bit of packet parsing code that also verifies chunks and replicates
* them (via rumor mill flooding) if their fast propagate flag is set.
*
* @param packetId Packet ID or 0 if none (e.g. via cluster path)
* @param source Peer that actually sent this chunk (probably controller)
* @param chunk Buffer containing chunk
* @param ptr Index of chunk and related fields in packet (starting with network ID)
* @param size Size of data in chunk buffer (total, not relative to ptr)
* @return Update ID if update was fully assembled and accepted or 0 otherwise
*/
uint64_t handleConfigChunk(
const CallContext& cc,
uint64_t packetId,
const SharedPtr<Peer>& source,
const Buf& chunk,
int ptr,
int size);
/**
* Set network configuration
*
* This is normally called internally when a configuration is received
* and fully assembled, but it can also be called on Node startup when
* cached configurations are re-read from the data store.
*
* @param nconf Network configuration
* @param saveToDisk Save to disk? Used during loading, should usually be true otherwise.
* @return 0 == bad, 1 == accepted but duplicate/unchanged, 2 == accepted and new
*/
int setConfiguration(
const CallContext &cc,
const NetworkConfig &nconf,
bool saveToDisk);
/**
* Set network configuration
*
* This is normally called internally when a configuration is received
* and fully assembled, but it can also be called on Node startup when
* cached configurations are re-read from the data store.
*
* @param nconf Network configuration
* @param saveToDisk Save to disk? Used during loading, should usually be true otherwise.
* @return 0 == bad, 1 == accepted but duplicate/unchanged, 2 == accepted and new
*/
int setConfiguration(const CallContext& cc, const NetworkConfig& nconf, bool saveToDisk);
/**
* Set netconf failure to 'access denied' -- called in IncomingPacket when controller reports this
*/
ZT_INLINE void setAccessDenied() noexcept
{ _netconfFailure = NETCONF_FAILURE_ACCESS_DENIED; }
/**
* Set netconf failure to 'access denied' -- called in IncomingPacket when controller reports this
*/
ZT_INLINE void setAccessDenied() noexcept
{
_netconfFailure = NETCONF_FAILURE_ACCESS_DENIED;
}
/**
* Set netconf failure to 'not found' -- called by IncomingPacket when controller reports this
*/
ZT_INLINE void setNotFound() noexcept
{ _netconfFailure = NETCONF_FAILURE_NOT_FOUND; }
/**
* Set netconf failure to 'not found' -- called by IncomingPacket when controller reports this
*/
ZT_INLINE void setNotFound() noexcept
{
_netconfFailure = NETCONF_FAILURE_NOT_FOUND;
}
/**
* Determine whether this peer is permitted to communicate on this network
*
* @param tPtr Thread pointer to be handed through to any callbacks called as a result of this call
* @param peer Peer to check
*/
bool gate(void *tPtr, const SharedPtr< Peer > &peer) noexcept;
/**
* Determine whether this peer is permitted to communicate on this network
*
* @param tPtr Thread pointer to be handed through to any callbacks called as a result of this call
* @param peer Peer to check
*/
bool gate(void* tPtr, const SharedPtr<Peer>& peer) noexcept;
/**
* Do periodic cleanup and housekeeping tasks
*/
void doPeriodicTasks(const CallContext &cc);
/**
* Do periodic cleanup and housekeeping tasks
*/
void doPeriodicTasks(const CallContext& cc);
/**
* Find the node on this network that has this MAC behind it (if any)
*
* @param mac MAC address
* @return ZeroTier address of bridge to this MAC
*/
ZT_INLINE Address findBridgeTo(const MAC &mac) const
{
Mutex::Lock _l(m_remoteBridgeRoutes_l);
Map< MAC, Address >::const_iterator br(m_remoteBridgeRoutes.find(mac));
return ((br == m_remoteBridgeRoutes.end()) ? Address() : br->second);
}
/**
* Find the node on this network that has this MAC behind it (if any)
*
* @param mac MAC address
* @return ZeroTier address of bridge to this MAC
*/
ZT_INLINE Address findBridgeTo(const MAC& mac) const
{
Mutex::Lock _l(m_remoteBridgeRoutes_l);
Map<MAC, Address>::const_iterator br(m_remoteBridgeRoutes.find(mac));
return ((br == m_remoteBridgeRoutes.end()) ? Address() : br->second);
}
/**
* Set a bridge route
*
* @param mac MAC address of destination
* @param addr Bridge this MAC is reachable behind
*/
void learnBridgeRoute(const MAC &mac, const Address &addr);
/**
* Set a bridge route
*
* @param mac MAC address of destination
* @param addr Bridge this MAC is reachable behind
*/
void learnBridgeRoute(const MAC& mac, const Address& addr);
/**
* Learn a multicast group that is bridged to our tap device
*
* @param tPtr Thread pointer to be handed through to any callbacks called as a result of this call
* @param mg Multicast group
* @param now Current time
*/
ZT_INLINE void learnBridgedMulticastGroup(const MulticastGroup &mg, int64_t now)
{
Mutex::Lock l(m_myMulticastGroups_l);
m_multicastGroupsBehindMe[mg] = now;
}
/**
* Learn a multicast group that is bridged to our tap device
*
* @param tPtr Thread pointer to be handed through to any callbacks called as a result of this call
* @param mg Multicast group
* @param now Current time
*/
ZT_INLINE void learnBridgedMulticastGroup(const MulticastGroup& mg, int64_t now)
{
Mutex::Lock l(m_myMulticastGroups_l);
m_multicastGroupsBehindMe[mg] = now;
}
/**
* Validate a credential and learn it if it passes certificate and other checks
*/
Member::AddCredentialResult addCredential(const CallContext &cc, const Identity &sourcePeerIdentity, const MembershipCredential &com);
/**
* Validate a credential and learn it if it passes certificate and other checks
*/
Member::AddCredentialResult
addCredential(const CallContext& cc, const Identity& sourcePeerIdentity, const MembershipCredential& com);
/**
* Validate a credential and learn it if it passes certificate and other checks
*/
Member::AddCredentialResult addCredential(const CallContext &cc, const Identity &sourcePeerIdentity, const CapabilityCredential &cap);
/**
* Validate a credential and learn it if it passes certificate and other checks
*/
Member::AddCredentialResult
addCredential(const CallContext& cc, const Identity& sourcePeerIdentity, const CapabilityCredential& cap);
/**
* Validate a credential and learn it if it passes certificate and other checks
*/
Member::AddCredentialResult addCredential(const CallContext &cc, const Identity &sourcePeerIdentity, const TagCredential &tag);
/**
* Validate a credential and learn it if it passes certificate and other checks
*/
Member::AddCredentialResult
addCredential(const CallContext& cc, const Identity& sourcePeerIdentity, const TagCredential& tag);
/**
* Validate a credential and learn it if it passes certificate and other checks
*/
Member::AddCredentialResult addCredential(const CallContext &cc, const Identity &sourcePeerIdentity, const RevocationCredential &rev);
/**
* Validate a credential and learn it if it passes certificate and other checks
*/
Member::AddCredentialResult
addCredential(const CallContext& cc, const Identity& sourcePeerIdentity, const RevocationCredential& rev);
/**
* Validate a credential and learn it if it passes certificate and other checks
*/
Member::AddCredentialResult addCredential(const CallContext &cc, const Identity &sourcePeerIdentity, const OwnershipCredential &coo);
/**
* Validate a credential and learn it if it passes certificate and other checks
*/
Member::AddCredentialResult
addCredential(const CallContext& cc, const Identity& sourcePeerIdentity, const OwnershipCredential& coo);
/**
* Push credentials to a peer if timeouts indicate that we should do so
*
* @param to Destination peer
*/
void pushCredentials(const CallContext &cc, const SharedPtr< Peer > &to);
/**
* Push credentials to a peer if timeouts indicate that we should do so
*
* @param to Destination peer
*/
void pushCredentials(const CallContext& cc, const SharedPtr<Peer>& to);
/**
* Destroy this network
*
* This sets the network to completely remove itself on delete. This also prevents the
* call of the normal port shutdown event on delete.
*/
void destroy();
/**
* Destroy this network
*
* This sets the network to completely remove itself on delete. This also prevents the
* call of the normal port shutdown event on delete.
*/
void destroy();
/**
* Get this network's config for export via the ZT core API
*
* @param ec Buffer to fill with externally-visible network configuration
*/
void externalConfig(ZT_VirtualNetworkConfig *ec) const;
/**
* Get this network's config for export via the ZT core API
*
* @param ec Buffer to fill with externally-visible network configuration
*/
void externalConfig(ZT_VirtualNetworkConfig* ec) const;
/**
* Iterate through memberships
*
* @param f Function of (const Address,const Membership)
*/
template< typename F >
ZT_INLINE void eachMember(F f)
{
Mutex::Lock ml(m_memberships_l);
for (Map< Address, Member >::iterator i(m_memberships.begin()); i != m_memberships.end(); ++i) { // NOLINT(modernize-loop-convert,hicpp-use-auto,modernize-use-auto)
if (!f(i->first, i->second))
break;
}
}
/**
* Iterate through memberships
*
* @param f Function of (const Address,const Membership)
*/
template <typename F> ZT_INLINE void eachMember(F f)
{
Mutex::Lock ml(m_memberships_l);
for (Map<Address, Member>::iterator i(m_memberships.begin()); i != m_memberships.end();
++i) { // NOLINT(modernize-loop-convert,hicpp-use-auto,modernize-use-auto)
if (! f(i->first, i->second))
break;
}
}
/**
* @return Externally usable pointer-to-pointer exported via the core API
*/
ZT_INLINE void **userPtr() noexcept
{ return &m_uPtr; }
/**
* @return Externally usable pointer-to-pointer exported via the core API
*/
ZT_INLINE void** userPtr() noexcept
{
return &m_uPtr;
}
private:
void m_requestConfiguration(const CallContext &cc);
ZT_VirtualNetworkStatus m_status() const;
void m_externalConfig(ZT_VirtualNetworkConfig *ec) const; // assumes _lock is locked
void m_announceMulticastGroups(void *tPtr, bool force);
void m_announceMulticastGroupsTo(void *tPtr, const Address &peer, const Vector< MulticastGroup > &allMulticastGroups);
Vector< MulticastGroup > m_allMulticastGroups() const;
private:
void m_requestConfiguration(const CallContext& cc);
ZT_VirtualNetworkStatus m_status() const;
void m_externalConfig(ZT_VirtualNetworkConfig* ec) const; // assumes _lock is locked
void m_announceMulticastGroups(void* tPtr, bool force);
void m_announceMulticastGroupsTo(void* tPtr, const Address& peer, const Vector<MulticastGroup>& allMulticastGroups);
Vector<MulticastGroup> m_allMulticastGroups() const;
const Context &m_ctx;
void *m_uPtr;
const uint64_t m_id;
Fingerprint m_controllerFingerprint;
MAC m_mac; // local MAC address
bool m_portInitialized;
std::atomic< bool > m_destroyed;
const Context& m_ctx;
void* m_uPtr;
const uint64_t m_id;
Fingerprint m_controllerFingerprint;
MAC m_mac; // local MAC address
bool m_portInitialized;
std::atomic<bool> m_destroyed;
Vector< MulticastGroup > m_myMulticastGroups; // multicast groups that we belong to (according to tap)
Map< MulticastGroup, int64_t > m_multicastGroupsBehindMe; // multicast groups that seem to be behind us and when we last saw them (if we are a bridge)
Map< MAC, Address > m_remoteBridgeRoutes; // remote addresses where given MACs are reachable (for tracking devices behind remote bridges)
Vector<MulticastGroup> m_myMulticastGroups; // multicast groups that we belong to (according to tap)
Map<MulticastGroup, int64_t> m_multicastGroupsBehindMe; // multicast groups that seem to be behind us and when we
// last saw them (if we are a bridge)
Map<MAC, Address> m_remoteBridgeRoutes; // remote addresses where given MACs are reachable (for tracking devices
// behind remote bridges)
NetworkConfig m_config;
std::atomic< int64_t > m_lastConfigUpdate;
NetworkConfig m_config;
std::atomic<int64_t> m_lastConfigUpdate;
volatile enum
{
NETCONF_FAILURE_NONE,
NETCONF_FAILURE_ACCESS_DENIED,
NETCONF_FAILURE_NOT_FOUND,
NETCONF_FAILURE_INIT_FAILED
} _netconfFailure;
volatile enum {
NETCONF_FAILURE_NONE,
NETCONF_FAILURE_ACCESS_DENIED,
NETCONF_FAILURE_NOT_FOUND,
NETCONF_FAILURE_INIT_FAILED
} _netconfFailure;
Map< Address, Member > m_memberships;
Map<Address, Member> m_memberships;
Mutex m_myMulticastGroups_l;
Mutex m_remoteBridgeRoutes_l;
Mutex m_config_l;
Mutex m_memberships_l;
Mutex m_myMulticastGroups_l;
Mutex m_remoteBridgeRoutes_l;
Mutex m_config_l;
Mutex m_memberships_l;
std::atomic< int > __refCount;
std::atomic<int> __refCount;
};
} // namespace ZeroTier
} // namespace ZeroTier
#endif
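A short sketch of the eachMember() callback contract above (not from this commit; the counting helper is hypothetical). The callback keeps iterating while it returns true:
#include "Network.hpp"
using namespace ZeroTier;
// Hypothetical helper: count memberships under the network's internal lock.
static unsigned long countMembers(Network& nw)
{
    unsigned long n = 0;
    nw.eachMember([&n](const Address&, const Member&) {
        ++n;
        return true;   // returning false would stop the iteration early
    });
    return n;
}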

View file

@@ -11,265 +11,293 @@
*/
/****/
#include <cstdint>
#include "NetworkConfig.hpp"
#include "Buf.hpp"
#include "ScopedPtr.hpp"
#include <algorithm>
#include "NetworkConfig.hpp"
#include "ScopedPtr.hpp"
#include "Buf.hpp"
#include <cstdint>
namespace ZeroTier {
bool NetworkConfig::toDictionary(Dictionary &d) const
bool NetworkConfig::toDictionary(Dictionary& d) const
{
uint8_t tmp[ZT_BUF_MEM_SIZE];
try {
d.clear();
uint8_t tmp[ZT_BUF_MEM_SIZE];
try {
d.clear();
d.add(ZT_NETWORKCONFIG_DICT_KEY_NETWORK_ID,this->networkId);
d.add(ZT_NETWORKCONFIG_DICT_KEY_TIMESTAMP,this->timestamp);
d.add(ZT_NETWORKCONFIG_DICT_KEY_CREDENTIAL_TIME_MAX_DELTA,this->credentialTimeMaxDelta);
d.add(ZT_NETWORKCONFIG_DICT_KEY_REVISION,this->revision);
d.add(ZT_NETWORKCONFIG_DICT_KEY_ISSUED_TO,this->issuedTo.toString((char *)tmp));
d.add(ZT_NETWORKCONFIG_DICT_KEY_ISSUED_TO_IDENTITY_HASH,this->issuedToFingerprintHash,ZT_FINGERPRINT_HASH_SIZE);
d.add(ZT_NETWORKCONFIG_DICT_KEY_FLAGS,this->flags);
d.add(ZT_NETWORKCONFIG_DICT_KEY_MULTICAST_LIMIT,(uint64_t)this->multicastLimit);
d.add(ZT_NETWORKCONFIG_DICT_KEY_TYPE,(uint64_t)this->type);
d.add(ZT_NETWORKCONFIG_DICT_KEY_NAME,this->name);
d.add(ZT_NETWORKCONFIG_DICT_KEY_MTU,(uint64_t)this->mtu);
d.add(ZT_NETWORKCONFIG_DICT_KEY_NETWORK_ID, this->networkId);
d.add(ZT_NETWORKCONFIG_DICT_KEY_TIMESTAMP, this->timestamp);
d.add(ZT_NETWORKCONFIG_DICT_KEY_CREDENTIAL_TIME_MAX_DELTA, this->credentialTimeMaxDelta);
d.add(ZT_NETWORKCONFIG_DICT_KEY_REVISION, this->revision);
d.add(ZT_NETWORKCONFIG_DICT_KEY_ISSUED_TO, this->issuedTo.toString((char*)tmp));
d.add(
ZT_NETWORKCONFIG_DICT_KEY_ISSUED_TO_IDENTITY_HASH,
this->issuedToFingerprintHash,
ZT_FINGERPRINT_HASH_SIZE);
d.add(ZT_NETWORKCONFIG_DICT_KEY_FLAGS, this->flags);
d.add(ZT_NETWORKCONFIG_DICT_KEY_MULTICAST_LIMIT, (uint64_t)this->multicastLimit);
d.add(ZT_NETWORKCONFIG_DICT_KEY_TYPE, (uint64_t)this->type);
d.add(ZT_NETWORKCONFIG_DICT_KEY_NAME, this->name);
d.add(ZT_NETWORKCONFIG_DICT_KEY_MTU, (uint64_t)this->mtu);
if (this->com) {
d.add(ZT_NETWORKCONFIG_DICT_KEY_COM,tmp,this->com.marshal(tmp));
}
if (this->com) {
d.add(ZT_NETWORKCONFIG_DICT_KEY_COM, tmp, this->com.marshal(tmp));
}
Vector<uint8_t> *blob = &(d[ZT_NETWORKCONFIG_DICT_KEY_CAPABILITIES]);
for (unsigned int i = 0; i < this->capabilityCount; ++i) {
int l = this->capabilities[i].marshal(tmp);
if (l < 0)
return false;
blob->insert(blob->end(),tmp,tmp + l);
}
Vector<uint8_t>* blob = &(d[ZT_NETWORKCONFIG_DICT_KEY_CAPABILITIES]);
for (unsigned int i = 0; i < this->capabilityCount; ++i) {
int l = this->capabilities[i].marshal(tmp);
if (l < 0)
return false;
blob->insert(blob->end(), tmp, tmp + l);
}
blob = &(d[ZT_NETWORKCONFIG_DICT_KEY_TAGS]);
for (unsigned int i = 0; i < this->tagCount; ++i) {
int l = this->tags[i].marshal(tmp);
if (l < 0)
return false;
blob->insert(blob->end(),tmp,tmp + l);
}
blob = &(d[ZT_NETWORKCONFIG_DICT_KEY_TAGS]);
for (unsigned int i = 0; i < this->tagCount; ++i) {
int l = this->tags[i].marshal(tmp);
if (l < 0)
return false;
blob->insert(blob->end(), tmp, tmp + l);
}
blob = &(d[ZT_NETWORKCONFIG_DICT_KEY_CERTIFICATES_OF_OWNERSHIP]);
for (unsigned int i = 0; i < this->certificateOfOwnershipCount; ++i) {
int l = this->certificatesOfOwnership[i].marshal(tmp);
if (l < 0)
return false;
blob->insert(blob->end(),tmp,tmp + l);
}
blob = &(d[ZT_NETWORKCONFIG_DICT_KEY_CERTIFICATES_OF_OWNERSHIP]);
for (unsigned int i = 0; i < this->certificateOfOwnershipCount; ++i) {
int l = this->certificatesOfOwnership[i].marshal(tmp);
if (l < 0)
return false;
blob->insert(blob->end(), tmp, tmp + l);
}
blob = &(d[ZT_NETWORKCONFIG_DICT_KEY_SPECIALISTS]);
for (unsigned int i = 0; i < this->specialistCount; ++i) {
Utils::storeBigEndian<uint64_t>(tmp,this->specialists[i]);
blob->insert(blob->end(),tmp,tmp + 8);
}
blob = &(d[ZT_NETWORKCONFIG_DICT_KEY_SPECIALISTS]);
for (unsigned int i = 0; i < this->specialistCount; ++i) {
Utils::storeBigEndian<uint64_t>(tmp, this->specialists[i]);
blob->insert(blob->end(), tmp, tmp + 8);
}
blob = &(d[ZT_NETWORKCONFIG_DICT_KEY_ROUTES]);
for (unsigned int i = 0; i < this->routeCount; ++i) {
int l = asInetAddress(this->routes[i].target).marshal(tmp);
if (l < 0)
return false;
blob->insert(blob->end(),tmp,tmp + l);
l = asInetAddress(this->routes[i].via).marshal(tmp);
if (l < 0)
return false;
blob->insert(blob->end(),tmp,tmp + l);
}
blob = &(d[ZT_NETWORKCONFIG_DICT_KEY_ROUTES]);
for (unsigned int i = 0; i < this->routeCount; ++i) {
int l = asInetAddress(this->routes[i].target).marshal(tmp);
if (l < 0)
return false;
blob->insert(blob->end(), tmp, tmp + l);
l = asInetAddress(this->routes[i].via).marshal(tmp);
if (l < 0)
return false;
blob->insert(blob->end(), tmp, tmp + l);
}
blob = &(d[ZT_NETWORKCONFIG_DICT_KEY_STATIC_IPS]);
for (unsigned int i = 0; i < this->staticIpCount; ++i) {
int l = this->staticIps[i].marshal(tmp);
if (l < 0)
return false;
blob->insert(blob->end(),tmp,tmp + l);
}
blob = &(d[ZT_NETWORKCONFIG_DICT_KEY_STATIC_IPS]);
for (unsigned int i = 0; i < this->staticIpCount; ++i) {
int l = this->staticIps[i].marshal(tmp);
if (l < 0)
return false;
blob->insert(blob->end(), tmp, tmp + l);
}
blob = &(d[ZT_NETWORKCONFIG_DICT_KEY_RULES]);
if (this->ruleCount) {
blob->resize(ruleCount * ZT_VIRTUALNETWORKRULE_MARSHAL_SIZE_MAX);
int l = CapabilityCredential::marshalVirtualNetworkRules(blob->data(), rules, ruleCount);
if (l > 0)
blob->resize(l);
}
blob = &(d[ZT_NETWORKCONFIG_DICT_KEY_RULES]);
if (this->ruleCount) {
blob->resize(ruleCount * ZT_VIRTUALNETWORKRULE_MARSHAL_SIZE_MAX);
int l = CapabilityCredential::marshalVirtualNetworkRules(blob->data(), rules, ruleCount);
if (l > 0)
blob->resize(l);
}
return true;
} catch ( ... ) {}
return false;
return true;
}
catch (...) {
}
return false;
}
bool NetworkConfig::fromDictionary(const Dictionary &d)
bool NetworkConfig::fromDictionary(const Dictionary& d)
{
static const NetworkConfig NIL_NC;
try {
*this = NIL_NC;
static const NetworkConfig NIL_NC;
try {
*this = NIL_NC;
this->networkId = d.getUI(ZT_NETWORKCONFIG_DICT_KEY_NETWORK_ID,0);
if (!this->networkId)
return false;
this->timestamp = d.getUI(ZT_NETWORKCONFIG_DICT_KEY_TIMESTAMP,0);
if (this->timestamp <= 0)
return false;
this->credentialTimeMaxDelta = d.getUI(ZT_NETWORKCONFIG_DICT_KEY_CREDENTIAL_TIME_MAX_DELTA,0);
this->revision = d.getUI(ZT_NETWORKCONFIG_DICT_KEY_REVISION,0);
this->issuedTo = d.getUI(ZT_NETWORKCONFIG_DICT_KEY_ISSUED_TO,0);
const Vector<uint8_t> *blob = &(d[ZT_NETWORKCONFIG_DICT_KEY_ISSUED_TO_IDENTITY_HASH]);
if (blob->size() == ZT_FINGERPRINT_HASH_SIZE) {
Utils::copy<ZT_FINGERPRINT_HASH_SIZE>(this->issuedToFingerprintHash,blob->data());
} else {
Utils::zero<ZT_FINGERPRINT_HASH_SIZE>(this->issuedToFingerprintHash);
}
if (!this->issuedTo)
return false;
this->multicastLimit = (unsigned int)d.getUI(ZT_NETWORKCONFIG_DICT_KEY_MULTICAST_LIMIT,0);
d.getS(ZT_NETWORKCONFIG_DICT_KEY_NAME,this->name,sizeof(this->name));
this->mtu = (unsigned int)d.getUI(ZT_NETWORKCONFIG_DICT_KEY_MTU,ZT_DEFAULT_MTU);
if (this->mtu < 1280)
this->mtu = 1280; // minimum MTU allowed by IPv6 standard and others
else if (this->mtu > ZT_MAX_MTU)
this->mtu = ZT_MAX_MTU;
this->networkId = d.getUI(ZT_NETWORKCONFIG_DICT_KEY_NETWORK_ID, 0);
if (! this->networkId)
return false;
this->timestamp = d.getUI(ZT_NETWORKCONFIG_DICT_KEY_TIMESTAMP, 0);
if (this->timestamp <= 0)
return false;
this->credentialTimeMaxDelta = d.getUI(ZT_NETWORKCONFIG_DICT_KEY_CREDENTIAL_TIME_MAX_DELTA, 0);
this->revision = d.getUI(ZT_NETWORKCONFIG_DICT_KEY_REVISION, 0);
this->issuedTo = d.getUI(ZT_NETWORKCONFIG_DICT_KEY_ISSUED_TO, 0);
const Vector<uint8_t>* blob = &(d[ZT_NETWORKCONFIG_DICT_KEY_ISSUED_TO_IDENTITY_HASH]);
if (blob->size() == ZT_FINGERPRINT_HASH_SIZE) {
Utils::copy<ZT_FINGERPRINT_HASH_SIZE>(this->issuedToFingerprintHash, blob->data());
}
else {
Utils::zero<ZT_FINGERPRINT_HASH_SIZE>(this->issuedToFingerprintHash);
}
if (! this->issuedTo)
return false;
this->multicastLimit = (unsigned int)d.getUI(ZT_NETWORKCONFIG_DICT_KEY_MULTICAST_LIMIT, 0);
d.getS(ZT_NETWORKCONFIG_DICT_KEY_NAME, this->name, sizeof(this->name));
this->mtu = (unsigned int)d.getUI(ZT_NETWORKCONFIG_DICT_KEY_MTU, ZT_DEFAULT_MTU);
if (this->mtu < 1280)
this->mtu = 1280; // minimum MTU allowed by IPv6 standard and others
else if (this->mtu > ZT_MAX_MTU)
this->mtu = ZT_MAX_MTU;
if (d.getUI(ZT_NETWORKCONFIG_DICT_KEY_VERSION,0) < 6) {
return false;
} else {
this->flags = d.getUI(ZT_NETWORKCONFIG_DICT_KEY_FLAGS,0);
this->type = (ZT_VirtualNetworkType)d.getUI(ZT_NETWORKCONFIG_DICT_KEY_TYPE,(uint64_t)ZT_NETWORK_TYPE_PRIVATE);
if (d.getUI(ZT_NETWORKCONFIG_DICT_KEY_VERSION, 0) < 6) {
return false;
}
else {
this->flags = d.getUI(ZT_NETWORKCONFIG_DICT_KEY_FLAGS, 0);
this->type =
(ZT_VirtualNetworkType)d.getUI(ZT_NETWORKCONFIG_DICT_KEY_TYPE, (uint64_t)ZT_NETWORK_TYPE_PRIVATE);
blob = &(d[ZT_NETWORKCONFIG_DICT_KEY_COM]);
if (!blob->empty()) {
if (this->com.unmarshal(blob->data(),(int)(blob->size())) < 0)
return false;
}
blob = &(d[ZT_NETWORKCONFIG_DICT_KEY_COM]);
if (! blob->empty()) {
if (this->com.unmarshal(blob->data(), (int)(blob->size())) < 0)
return false;
}
blob = &(d[ZT_NETWORKCONFIG_DICT_KEY_CAPABILITIES]);
if (!blob->empty()) {
try {
unsigned int p = 0;
while (p < blob->size()) {
CapabilityCredential cap;
int l = cap.unmarshal(blob->data() + p,(int)(blob->size() - p));
if (l < 0)
return false;
p += l;
if (this->capabilityCount < ZT_MAX_NETWORK_CAPABILITIES)
this->capabilities[this->capabilityCount++] = cap;
}
} catch ( ... ) {}
std::sort(&(this->capabilities[0]),&(this->capabilities[this->capabilityCount]));
}
blob = &(d[ZT_NETWORKCONFIG_DICT_KEY_CAPABILITIES]);
if (! blob->empty()) {
try {
unsigned int p = 0;
while (p < blob->size()) {
CapabilityCredential cap;
int l = cap.unmarshal(blob->data() + p, (int)(blob->size() - p));
if (l < 0)
return false;
p += l;
if (this->capabilityCount < ZT_MAX_NETWORK_CAPABILITIES)
this->capabilities[this->capabilityCount++] = cap;
}
}
catch (...) {
}
std::sort(&(this->capabilities[0]), &(this->capabilities[this->capabilityCount]));
}
blob = &(d[ZT_NETWORKCONFIG_DICT_KEY_TAGS]);
if (!blob->empty()) {
try {
unsigned int p = 0;
while (p < blob->size()) {
TagCredential tag;
int l = tag.unmarshal(blob->data() + p,(int)(blob->size() - p));
if (l < 0)
return false;
p += l;
if (this->tagCount < ZT_MAX_NETWORK_TAGS)
this->tags[this->tagCount++] = tag;
}
} catch ( ... ) {}
std::sort(&(this->tags[0]),&(this->tags[this->tagCount]));
}
blob = &(d[ZT_NETWORKCONFIG_DICT_KEY_TAGS]);
if (! blob->empty()) {
try {
unsigned int p = 0;
while (p < blob->size()) {
TagCredential tag;
int l = tag.unmarshal(blob->data() + p, (int)(blob->size() - p));
if (l < 0)
return false;
p += l;
if (this->tagCount < ZT_MAX_NETWORK_TAGS)
this->tags[this->tagCount++] = tag;
}
}
catch (...) {
}
std::sort(&(this->tags[0]), &(this->tags[this->tagCount]));
}
blob = &(d[ZT_NETWORKCONFIG_DICT_KEY_CERTIFICATES_OF_OWNERSHIP]);
if (!blob->empty()) {
try {
unsigned int p = 0;
while (p < blob->size()) {
OwnershipCredential coo;
int l = coo.unmarshal(blob->data() + p,(int)(blob->size() - p));
if (l < 0)
return false;
p += l;
if (this->certificateOfOwnershipCount < ZT_MAX_CERTIFICATES_OF_OWNERSHIP)
this->certificatesOfOwnership[certificateOfOwnershipCount++] = coo;
}
} catch ( ... ) {}
std::sort(&(this->certificatesOfOwnership[0]),&(this->certificatesOfOwnership[this->certificateOfOwnershipCount]));
}
blob = &(d[ZT_NETWORKCONFIG_DICT_KEY_CERTIFICATES_OF_OWNERSHIP]);
if (! blob->empty()) {
try {
unsigned int p = 0;
while (p < blob->size()) {
OwnershipCredential coo;
int l = coo.unmarshal(blob->data() + p, (int)(blob->size() - p));
if (l < 0)
return false;
p += l;
if (this->certificateOfOwnershipCount < ZT_MAX_CERTIFICATES_OF_OWNERSHIP)
this->certificatesOfOwnership[certificateOfOwnershipCount++] = coo;
}
}
catch (...) {
}
std::sort(
&(this->certificatesOfOwnership[0]),
&(this->certificatesOfOwnership[this->certificateOfOwnershipCount]));
}
blob = &(d[ZT_NETWORKCONFIG_DICT_KEY_SPECIALISTS]);
if (!blob->empty()) {
unsigned int p = 0;
while (((p + 8) <= blob->size())&&(specialistCount < ZT_MAX_NETWORK_SPECIALISTS)) {
this->specialists[this->specialistCount++] = Utils::loadBigEndian<uint64_t>(blob->data() + p);
p += 8;
}
}
blob = &(d[ZT_NETWORKCONFIG_DICT_KEY_SPECIALISTS]);
if (! blob->empty()) {
unsigned int p = 0;
while (((p + 8) <= blob->size()) && (specialistCount < ZT_MAX_NETWORK_SPECIALISTS)) {
this->specialists[this->specialistCount++] = Utils::loadBigEndian<uint64_t>(blob->data() + p);
p += 8;
}
}
blob = &(d[ZT_NETWORKCONFIG_DICT_KEY_ROUTES]);
if (!blob->empty()) {
unsigned int p = 0;
while ((p < blob->size())&&(routeCount < ZT_MAX_NETWORK_ROUTES)) {
int l = asInetAddress(this->routes[this->routeCount].target).unmarshal(blob->data(),(int)(blob->size() - p));
if (l < 0)
return false;
p += l;
if (p >= blob->size())
return false;
l = asInetAddress(this->routes[this->routeCount].via).unmarshal(blob->data(),(int)(blob->size() - p));
if (l < 0)
return false;
p += l;
if ((p + 4) > blob->size())
return false;
this->routes[this->routeCount].flags = Utils::loadBigEndian<uint16_t>(blob->data() + p); p += 2;
this->routes[this->routeCount].metric = Utils::loadBigEndian<uint16_t>(blob->data() + p); p += 2;
++this->routeCount;
}
}
blob = &(d[ZT_NETWORKCONFIG_DICT_KEY_ROUTES]);
if (! blob->empty()) {
unsigned int p = 0;
while ((p < blob->size()) && (routeCount < ZT_MAX_NETWORK_ROUTES)) {
int l = asInetAddress(this->routes[this->routeCount].target)
.unmarshal(blob->data(), (int)(blob->size() - p));
if (l < 0)
return false;
p += l;
if (p >= blob->size())
return false;
l = asInetAddress(this->routes[this->routeCount].via)
.unmarshal(blob->data(), (int)(blob->size() - p));
if (l < 0)
return false;
p += l;
if ((p + 4) > blob->size())
return false;
this->routes[this->routeCount].flags = Utils::loadBigEndian<uint16_t>(blob->data() + p);
p += 2;
this->routes[this->routeCount].metric = Utils::loadBigEndian<uint16_t>(blob->data() + p);
p += 2;
++this->routeCount;
}
}
blob = &(d[ZT_NETWORKCONFIG_DICT_KEY_STATIC_IPS]);
if (!blob->empty()) {
unsigned int p = 0;
while ((p < blob->size())&&(staticIpCount < ZT_MAX_ZT_ASSIGNED_ADDRESSES)) {
int l = this->staticIps[this->staticIpCount].unmarshal(blob->data() + p,(int)(blob->size() - p));
if (l < 0)
return false;
p += l;
++this->staticIpCount;
}
}
blob = &(d[ZT_NETWORKCONFIG_DICT_KEY_STATIC_IPS]);
if (! blob->empty()) {
unsigned int p = 0;
while ((p < blob->size()) && (staticIpCount < ZT_MAX_ZT_ASSIGNED_ADDRESSES)) {
int l = this->staticIps[this->staticIpCount].unmarshal(blob->data() + p, (int)(blob->size() - p));
if (l < 0)
return false;
p += l;
++this->staticIpCount;
}
}
blob = &(d[ZT_NETWORKCONFIG_DICT_KEY_RULES]);
if (!blob->empty()) {
this->ruleCount = 0;
if (CapabilityCredential::unmarshalVirtualNetworkRules(blob->data(), (int)blob->size(), this->rules, this->ruleCount, ZT_MAX_NETWORK_RULES) < 0)
return false;
}
}
blob = &(d[ZT_NETWORKCONFIG_DICT_KEY_RULES]);
if (! blob->empty()) {
this->ruleCount = 0;
if (CapabilityCredential::unmarshalVirtualNetworkRules(
blob->data(),
(int)blob->size(),
this->rules,
this->ruleCount,
ZT_MAX_NETWORK_RULES)
< 0)
return false;
}
}
return true;
} catch ( ... ) {}
return false;
return true;
}
catch (...) {
}
return false;
}
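The parser above is one half of the toDictionary()/fromDictionary() pair declared in NetworkConfig.hpp later in this diff. For orientation only, a caller-side round trip might look like the following sketch; it is not part of this commit, the variables nc and parsed are hypothetical, and Dictionary is assumed to be default-constructible.

    // Hypothetical round-trip sketch: serialize a populated NetworkConfig into a
    // Dictionary on the controller side, then parse it back on the member side.
    Dictionary d;
    if (nc.toDictionary(d)) {
        NetworkConfig parsed;
        if (parsed.fromDictionary(d)) {
            // parsed now mirrors nc; credential arrays were sorted by ID during parsing.
        }
    }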
bool NetworkConfig::addSpecialist(const Address &a,const uint64_t f) noexcept
bool NetworkConfig::addSpecialist(const Address& a, const uint64_t f) noexcept
{
const uint64_t aint = a.toInt();
for(unsigned int i=0;i<specialistCount;++i) {
if ((specialists[i] & 0xffffffffffULL) == aint) {
specialists[i] |= f;
return true;
}
}
if (specialistCount < ZT_MAX_NETWORK_SPECIALISTS) {
specialists[specialistCount++] = f | aint;
return true;
}
return false;
const uint64_t aint = a.toInt();
for (unsigned int i = 0; i < specialistCount; ++i) {
if ((specialists[i] & 0xffffffffffULL) == aint) {
specialists[i] |= f;
return true;
}
}
if (specialistCount < ZT_MAX_NETWORK_SPECIALISTS) {
specialists[specialistCount++] = f | aint;
return true;
}
return false;
}
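For reference, addSpecialist() packs each entry exactly as described by the specialists[] field documentation in NetworkConfig.hpp later in this diff: the low 40 bits hold the member's ZeroTier address and the upper 24 bits hold role flags. The decoding sketch below is illustrative only and not part of this commit; Address's integer constructor is assumed here.

    // Illustrative decode of one packed specialist entry.
    const uint64_t entry = specialists[0];
    const Address member(entry & 0xffffffffffULL);   // low 40 bits: ZeroTier address
    const bool isActiveBridge =                      // upper 24 bits: role flags
        (entry & ZT_NETWORKCONFIG_SPECIALIST_TYPE_ACTIVE_BRIDGE) != 0;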
} // namespace ZeroTier
} // namespace ZeroTier


@ -14,23 +14,23 @@
#ifndef ZT_NETWORKCONFIG_HPP
#define ZT_NETWORKCONFIG_HPP
#include "Constants.hpp"
#include "InetAddress.hpp"
#include "MulticastGroup.hpp"
#include "Address.hpp"
#include "MembershipCredential.hpp"
#include "OwnershipCredential.hpp"
#include "CapabilityCredential.hpp"
#include "TagCredential.hpp"
#include "Constants.hpp"
#include "Containers.hpp"
#include "Dictionary.hpp"
#include "Identity.hpp"
#include "Utils.hpp"
#include "InetAddress.hpp"
#include "MembershipCredential.hpp"
#include "MulticastGroup.hpp"
#include "OwnershipCredential.hpp"
#include "TagCredential.hpp"
#include "Trace.hpp"
#include "TriviallyCopyable.hpp"
#include "Containers.hpp"
#include "Utils.hpp"
#include <stdexcept>
#include <algorithm>
#include <stdexcept>
namespace ZeroTier {
@ -144,239 +144,255 @@ namespace ZeroTier {
/**
* Network configuration received from network controller nodes
*/
struct NetworkConfig : TriviallyCopyable
{
ZT_INLINE NetworkConfig() noexcept
{ memoryZero(this); } // NOLINT(cppcoreguidelines-pro-type-member-init,hicpp-member-init)
struct NetworkConfig : TriviallyCopyable {
ZT_INLINE NetworkConfig() noexcept
{
memoryZero(this);
} // NOLINT(cppcoreguidelines-pro-type-member-init,hicpp-member-init)
/**
* Write this network config to a dictionary for transport
*
* @param d Dictionary
* @return True if dictionary was successfully created, false if e.g. overflow
*/
bool toDictionary(Dictionary &d) const;
/**
* Write this network config to a dictionary for transport
*
* @param d Dictionary
* @return True if dictionary was successfully created, false if e.g. overflow
*/
bool toDictionary(Dictionary& d) const;
/**
* Read this network config from a dictionary
*
* @param d Dictionary (non-const since it might be modified during parse, should not be used after call)
* @return True if dictionary was valid and network config successfully initialized
*/
bool fromDictionary(const Dictionary &d);
/**
* Read this network config from a dictionary
*
* @param d Dictionary (non-const since it might be modified during parse, should not be used after call)
* @return True if dictionary was valid and network config successfully initialized
*/
bool fromDictionary(const Dictionary& d);
/**
* @return True if broadcast (ff:ff:ff:ff:ff:ff) address should work on this network
*/
ZT_INLINE bool enableBroadcast() const noexcept
{ return ((this->flags & ZT_NETWORKCONFIG_FLAG_ENABLE_BROADCAST) != 0); }
/**
* @return True if broadcast (ff:ff:ff:ff:ff:ff) address should work on this network
*/
ZT_INLINE bool enableBroadcast() const noexcept
{
return ((this->flags & ZT_NETWORKCONFIG_FLAG_ENABLE_BROADCAST) != 0);
}
/**
* @return True if IPv6 NDP emulation should be allowed for certain "magic" IPv6 address patterns
*/
ZT_INLINE bool ndpEmulation() const noexcept
{ return ((this->flags & ZT_NETWORKCONFIG_FLAG_ENABLE_IPV6_NDP_EMULATION) != 0); }
/**
* @return True if IPv6 NDP emulation should be allowed for certain "magic" IPv6 address patterns
*/
ZT_INLINE bool ndpEmulation() const noexcept
{
return ((this->flags & ZT_NETWORKCONFIG_FLAG_ENABLE_IPV6_NDP_EMULATION) != 0);
}
/**
* @return Network type is public (no access control)
*/
ZT_INLINE bool isPublic() const noexcept
{ return (this->type == ZT_NETWORK_TYPE_PUBLIC); }
/**
* @return Network type is public (no access control)
*/
ZT_INLINE bool isPublic() const noexcept
{
return (this->type == ZT_NETWORK_TYPE_PUBLIC);
}
/**
* @return Network type is private (certificate access control)
*/
ZT_INLINE bool isPrivate() const noexcept
{ return (this->type == ZT_NETWORK_TYPE_PRIVATE); }
/**
* @return Network type is private (certificate access control)
*/
ZT_INLINE bool isPrivate() const noexcept
{
return (this->type == ZT_NETWORK_TYPE_PRIVATE);
}
/**
* @param fromPeer Peer attempting to bridge other Ethernet peers onto network
* @return True if this network allows bridging
*/
ZT_INLINE bool permitsBridging(const Address &fromPeer) const noexcept
{
for (unsigned int i = 0;i < specialistCount;++i) {
if ((fromPeer.toInt() == (specialists[i] & ZT_ADDRESS_MASK)) && ((specialists[i] & ZT_NETWORKCONFIG_SPECIALIST_TYPE_ACTIVE_BRIDGE) != 0))
return true;
}
return false;
}
/**
* @param fromPeer Peer attempting to bridge other Ethernet peers onto network
* @return True if this network allows bridging
*/
ZT_INLINE bool permitsBridging(const Address& fromPeer) const noexcept
{
for (unsigned int i = 0; i < specialistCount; ++i) {
if ((fromPeer.toInt() == (specialists[i] & ZT_ADDRESS_MASK))
&& ((specialists[i] & ZT_NETWORKCONFIG_SPECIALIST_TYPE_ACTIVE_BRIDGE) != 0))
return true;
}
return false;
}
ZT_INLINE operator bool() const noexcept
{ return (networkId != 0); } // NOLINT(google-explicit-constructor,hicpp-explicit-conversions)
ZT_INLINE bool operator==(const NetworkConfig &nc) const noexcept
{ return (memcmp(this, &nc, sizeof(NetworkConfig)) == 0); }
ZT_INLINE operator bool() const noexcept
{
return (networkId != 0);
} // NOLINT(google-explicit-constructor,hicpp-explicit-conversions)
ZT_INLINE bool operator==(const NetworkConfig& nc) const noexcept
{
return (memcmp(this, &nc, sizeof(NetworkConfig)) == 0);
}
ZT_INLINE bool operator!=(const NetworkConfig &nc) const noexcept
{ return (!(*this == nc)); }
ZT_INLINE bool operator!=(const NetworkConfig& nc) const noexcept
{
return (! (*this == nc));
}
/**
* Add a specialist or mask flags if already present
*
* This masks the existing flags if the specialist is already here or adds
* it otherwise.
*
* @param a Address of specialist
* @param f Flags (OR of specialist role/type flags)
* @return True if successfully masked or added
*/
bool addSpecialist(const Address &a, uint64_t f) noexcept;
/**
* Add a specialist or mask flags if already present
*
* This masks the existing flags if the specialist is already here or adds
* it otherwise.
*
* @param a Address of specialist
* @param f Flags (OR of specialist role/type flags)
* @return True if successfully masked or added
*/
bool addSpecialist(const Address& a, uint64_t f) noexcept;
ZT_INLINE const CapabilityCredential *capability(const uint32_t id) const
{
for (unsigned int i = 0;i < capabilityCount;++i) {
if (capabilities[i].id() == id)
return &(capabilities[i]);
}
return nullptr;
}
ZT_INLINE const CapabilityCredential* capability(const uint32_t id) const
{
for (unsigned int i = 0; i < capabilityCount; ++i) {
if (capabilities[i].id() == id)
return &(capabilities[i]);
}
return nullptr;
}
ZT_INLINE const TagCredential *tag(const uint32_t id) const
{
for (unsigned int i = 0;i < tagCount;++i) {
if (tags[i].id() == id)
return &(tags[i]);
}
return nullptr;
}
ZT_INLINE const TagCredential* tag(const uint32_t id) const
{
for (unsigned int i = 0; i < tagCount; ++i) {
if (tags[i].id() == id)
return &(tags[i]);
}
return nullptr;
}
/**
* Network ID that this configuration applies to
*/
uint64_t networkId;
/**
* Network ID that this configuration applies to
*/
uint64_t networkId;
/**
* Controller-side time of config generation/issue
*/
int64_t timestamp;
/**
* Controller-side time of config generation/issue
*/
int64_t timestamp;
/**
* Max difference between timestamp and tag/capability timestamp
*/
int64_t credentialTimeMaxDelta;
/**
* Max difference between timestamp and tag/capability timestamp
*/
int64_t credentialTimeMaxDelta;
/**
* Controller-side revision counter for this configuration
*/
uint64_t revision;
/**
* Controller-side revision counter for this configuration
*/
uint64_t revision;
/**
* Address of device to which this config is issued
*/
Address issuedTo;
/**
* Address of device to which this config is issued
*/
Address issuedTo;
/**
* Hash of identity public key(s) of node to whom this is issued
*
* If this field is all zero it is treated as undefined since old controllers
* do not set it.
*/
uint8_t issuedToFingerprintHash[ZT_FINGERPRINT_HASH_SIZE];
/**
* Hash of identity public key(s) of node to whom this is issued
*
* If this field is all zero it is treated as undefined since old controllers
* do not set it.
*/
uint8_t issuedToFingerprintHash[ZT_FINGERPRINT_HASH_SIZE];
/**
* Flags (64-bit)
*/
uint64_t flags;
/**
* Flags (64-bit)
*/
uint64_t flags;
/**
* Network MTU
*/
unsigned int mtu;
/**
* Network MTU
*/
unsigned int mtu;
/**
* Maximum number of recipients per multicast (not including active bridges)
*/
unsigned int multicastLimit;
/**
* Maximum number of recipients per multicast (not including active bridges)
*/
unsigned int multicastLimit;
/**
* Number of specialists
*/
unsigned int specialistCount;
/**
* Number of specialists
*/
unsigned int specialistCount;
/**
* Number of routes
*/
unsigned int routeCount;
/**
* Number of routes
*/
unsigned int routeCount;
/**
* Number of ZT-managed static IP assignments
*/
unsigned int staticIpCount;
/**
* Number of ZT-managed static IP assignments
*/
unsigned int staticIpCount;
/**
* Number of rule table entries
*/
unsigned int ruleCount;
/**
* Number of rule table entries
*/
unsigned int ruleCount;
/**
* Number of capabilities
*/
unsigned int capabilityCount;
/**
* Number of capabilities
*/
unsigned int capabilityCount;
/**
* Number of tags
*/
unsigned int tagCount;
/**
* Number of tags
*/
unsigned int tagCount;
/**
* Number of certificates of ownership
*/
unsigned int certificateOfOwnershipCount;
/**
* Number of certificates of ownership
*/
unsigned int certificateOfOwnershipCount;
/**
* Specialist devices
*
* For each entry the least significant 40 bits are the device's ZeroTier
* address and the most significant 24 bits are flags indicating its role.
*/
uint64_t specialists[ZT_MAX_NETWORK_SPECIALISTS];
/**
* Specialist devices
*
* For each entry the least significant 40 bits are the device's ZeroTier
* address and the most significant 24 bits are flags indicating its role.
*/
uint64_t specialists[ZT_MAX_NETWORK_SPECIALISTS];
/**
* Statically defined "pushed" routes (including default gateways)
*/
ZT_VirtualNetworkRoute routes[ZT_MAX_NETWORK_ROUTES];
/**
* Statically defined "pushed" routes (including default gateways)
*/
ZT_VirtualNetworkRoute routes[ZT_MAX_NETWORK_ROUTES];
/**
* Static IP assignments
*/
InetAddress staticIps[ZT_MAX_ZT_ASSIGNED_ADDRESSES];
/**
* Static IP assignments
*/
InetAddress staticIps[ZT_MAX_ZT_ASSIGNED_ADDRESSES];
/**
* Base network rules
*/
ZT_VirtualNetworkRule rules[ZT_MAX_NETWORK_RULES];
/**
* Base network rules
*/
ZT_VirtualNetworkRule rules[ZT_MAX_NETWORK_RULES];
/**
* Capabilities for this node on this network, in ascending order of capability ID
*/
CapabilityCredential capabilities[ZT_MAX_NETWORK_CAPABILITIES];
/**
* Capabilities for this node on this network, in ascending order of capability ID
*/
CapabilityCredential capabilities[ZT_MAX_NETWORK_CAPABILITIES];
/**
* Tags for this node on this network, in ascending order of tag ID
*/
TagCredential tags[ZT_MAX_NETWORK_TAGS];
/**
* Tags for this node on this network, in ascending order of tag ID
*/
TagCredential tags[ZT_MAX_NETWORK_TAGS];
/**
* Certificates of ownership for this network member
*/
OwnershipCredential certificatesOfOwnership[ZT_MAX_CERTIFICATES_OF_OWNERSHIP];
/**
* Certificates of ownership for this network member
*/
OwnershipCredential certificatesOfOwnership[ZT_MAX_CERTIFICATES_OF_OWNERSHIP];
/**
* Network type (currently just public or private)
*/
ZT_VirtualNetworkType type;
/**
* Network type (currently just public or private)
*/
ZT_VirtualNetworkType type;
/**
* Network short name or empty string if not defined
*/
char name[ZT_MAX_NETWORK_SHORT_NAME_LENGTH + 1];
/**
* Network short name or empty string if not defined
*/
char name[ZT_MAX_NETWORK_SHORT_NAME_LENGTH + 1];
/**
* Certificate of membership (for private networks)
*/
MembershipCredential com;
/**
* Certificate of membership (for private networks)
*/
MembershipCredential com;
};
} // namespace ZeroTier
} // namespace ZeroTier
#endif


@ -14,11 +14,11 @@
#ifndef ZT_NETWORKCONFIGMASTER_HPP
#define ZT_NETWORKCONFIGMASTER_HPP
#include "Address.hpp"
#include "Constants.hpp"
#include "Dictionary.hpp"
#include "NetworkConfig.hpp"
#include "RevocationCredential.hpp"
#include "Address.hpp"
namespace ZeroTier {
@ -28,82 +28,104 @@ struct InetAddress;
/**
* Interface for network controller implementations
*/
class NetworkController
{
public:
enum ErrorCode
{
NC_ERROR_NONE = 0,
NC_ERROR_OBJECT_NOT_FOUND = 1,
NC_ERROR_ACCESS_DENIED = 2,
NC_ERROR_INTERNAL_SERVER_ERROR = 3
};
class NetworkController {
public:
enum ErrorCode {
NC_ERROR_NONE = 0,
NC_ERROR_OBJECT_NOT_FOUND = 1,
NC_ERROR_ACCESS_DENIED = 2,
NC_ERROR_INTERNAL_SERVER_ERROR = 3
};
/**
* Interface for sender used to send pushes and replies
*/
class Sender
{
public:
/**
* Send a configuration to a remote peer
*
* @param nwid Network ID
* @param requestPacketId Request packet ID to send OK(NETWORK_CONFIG_REQUEST) or 0 to send NETWORK_CONFIG (push)
* @param destination Destination peer Address
* @param nc Network configuration to send
* @param sendLegacyFormatConfig If true, send an old-format network config
*/
virtual void ncSendConfig(void *tPtr, int64_t clock, int64_t ticks, uint64_t nwid, uint64_t requestPacketId, const Address &destination, const NetworkConfig &nc, bool sendLegacyFormatConfig) = 0;
/**
* Interface for sender used to send pushes and replies
*/
class Sender {
public:
/**
* Send a configuration to a remote peer
*
* @param nwid Network ID
* @param requestPacketId Request packet ID to send OK(NETWORK_CONFIG_REQUEST) or 0 to send NETWORK_CONFIG
* (push)
* @param destination Destination peer Address
* @param nc Network configuration to send
* @param sendLegacyFormatConfig If true, send an old-format network config
*/
virtual void ncSendConfig(
void* tPtr,
int64_t clock,
int64_t ticks,
uint64_t nwid,
uint64_t requestPacketId,
const Address& destination,
const NetworkConfig& nc,
bool sendLegacyFormatConfig) = 0;
/**
* Send revocation to a node
*
* @param destination Destination node address
* @param rev Revocation to send
*/
virtual void ncSendRevocation(void *tPtr, int64_t clock, int64_t ticks, const Address &destination, const RevocationCredential &rev) = 0;
/**
* Send revocation to a node
*
* @param destination Destination node address
* @param rev Revocation to send
*/
virtual void ncSendRevocation(
void* tPtr,
int64_t clock,
int64_t ticks,
const Address& destination,
const RevocationCredential& rev) = 0;
/**
* Send a network configuration request error
*
* @param nwid Network ID
* @param requestPacketId Request packet ID or 0 if none
* @param destination Destination peer Address
* @param errorCode Error code
*/
virtual void ncSendError(void *tPtr, int64_t clock, int64_t ticks, uint64_t nwid, uint64_t requestPacketId, const Address &destination, NetworkController::ErrorCode errorCode) = 0;
};
/**
* Send a network configuration request error
*
* @param nwid Network ID
* @param requestPacketId Request packet ID or 0 if none
* @param destination Destination peer Address
* @param errorCode Error code
*/
virtual void ncSendError(
void* tPtr,
int64_t clock,
int64_t ticks,
uint64_t nwid,
uint64_t requestPacketId,
const Address& destination,
NetworkController::ErrorCode errorCode) = 0;
};
NetworkController() {}
virtual ~NetworkController() {}
NetworkController()
{
}
virtual ~NetworkController()
{
}
/**
* Called when this is added to a Node to initialize and supply info
*
* @param signingId Identity for signing of network configurations, certs, etc.
* @param sender Sender implementation for sending replies or config pushes
*/
virtual void init(const Identity &signingId, Sender *sender) = 0;
/**
* Called when this is added to a Node to initialize and supply info
*
* @param signingId Identity for signing of network configurations, certs, etc.
* @param sender Sender implementation for sending replies or config pushes
*/
virtual void init(const Identity& signingId, Sender* sender) = 0;
/**
* Handle a network configuration request
*
* @param nwid 64-bit network ID
* @param fromAddr Originating wire address or null address if packet is not direct (or from self)
* @param requestPacketId Packet ID of request packet or 0 if not initiated by remote request
* @param identity ZeroTier identity of originating peer
* @param metaData Meta-data bundled with request (if any)
* @return Returns NETCONF_QUERY_OK if result 'nc' is valid, or an error code on error
*/
virtual void request(
uint64_t nwid,
const InetAddress &fromAddr,
uint64_t requestPacketId,
const Identity &identity,
const Dictionary &metaData) = 0;
/**
* Handle a network configuration request
*
* @param nwid 64-bit network ID
* @param fromAddr Originating wire address or null address if packet is not direct (or from self)
* @param requestPacketId Packet ID of request packet or 0 if not initiated by remote request
* @param identity ZeroTier identity of originating peer
* @param metaData Meta-data bundled with request (if any)
* @return Returns NETCONF_QUERY_OK if result 'nc' is valid, or an error code on error
*/
virtual void request(
uint64_t nwid,
const InetAddress& fromAddr,
uint64_t requestPacketId,
const Identity& identity,
const Dictionary& metaData) = 0;
};
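As a rough sketch of how this interface is meant to be implemented (illustrative only, not part of this commit), a minimal controller that rejects every request could look like the following. DenyAllController and m_sender are hypothetical names, and the null tPtr and zero clock/ticks passed to ncSendError() are placeholders since request() does not receive a CallContext.

    class DenyAllController : public NetworkController {
    public:
        void init(const Identity& signingId, Sender* sender) override { m_sender = sender; }

        void request(
            uint64_t nwid,
            const InetAddress& fromAddr,
            uint64_t requestPacketId,
            const Identity& identity,
            const Dictionary& metaData) override
        {
            // A real controller would authenticate the member and reply with ncSendConfig().
            if (m_sender)
                m_sender->ncSendError(nullptr, 0, 0, nwid, requestPacketId, identity.address(), NC_ERROR_ACCESS_DENIED);
        }

    private:
        Sender* m_sender = nullptr;
    };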
} // namespace ZeroTier
} // namespace ZeroTier
#endif

File diff suppressed because it is too large


@ -14,19 +14,19 @@
#ifndef ZT_NODE_HPP
#define ZT_NODE_HPP
#include "Buf.hpp"
#include "CallContext.hpp"
#include "Constants.hpp"
#include "Containers.hpp"
#include "Context.hpp"
#include "InetAddress.hpp"
#include "Mutex.hpp"
#include "MAC.hpp"
#include "Mutex.hpp"
#include "Network.hpp"
#include "NetworkController.hpp"
#include "Path.hpp"
#include "Salsa20.hpp"
#include "NetworkController.hpp"
#include "Buf.hpp"
#include "Containers.hpp"
#include "Store.hpp"
#include "CallContext.hpp"
namespace ZeroTier {
@ -35,167 +35,170 @@ namespace ZeroTier {
*
* The pointer returned by ZT_Node_new() is an instance of this class.
*/
class Node : public NetworkController::Sender
{
public:
// Get rid of alignment warnings on 32-bit Windows
class Node : public NetworkController::Sender {
public:
// Get rid of alignment warnings on 32-bit Windows
#ifdef __WINDOWS__
void * operator new(size_t i) { return _mm_malloc(i,16); }
void operator delete(void* p) { _mm_free(p); }
void* operator new(size_t i)
{
return _mm_malloc(i, 16);
}
void operator delete(void* p)
{
_mm_free(p);
}
#endif
Node(void *uPtr, const struct ZT_Node_Callbacks *callbacks, const CallContext &cc);
Node(void* uPtr, const struct ZT_Node_Callbacks* callbacks, const CallContext& cc);
virtual ~Node();
virtual ~Node();
void shutdown(const CallContext &cc);
void shutdown(const CallContext& cc);
ZT_ResultCode processBackgroundTasks(
const CallContext &cc,
volatile int64_t *nextBackgroundTaskDeadline);
ZT_ResultCode processBackgroundTasks(const CallContext& cc, volatile int64_t* nextBackgroundTaskDeadline);
ZT_ResultCode join(
uint64_t nwid,
const ZT_Fingerprint *controllerFingerprint,
void *uptr,
const CallContext &cc);
ZT_ResultCode join(uint64_t nwid, const ZT_Fingerprint* controllerFingerprint, void* uptr, const CallContext& cc);
ZT_ResultCode leave(
uint64_t nwid,
void **uptr,
const CallContext &cc);
ZT_ResultCode leave(uint64_t nwid, void** uptr, const CallContext& cc);
ZT_ResultCode multicastSubscribe(
const CallContext &cc,
uint64_t nwid,
uint64_t multicastGroup,
unsigned long multicastAdi);
ZT_ResultCode
multicastSubscribe(const CallContext& cc, uint64_t nwid, uint64_t multicastGroup, unsigned long multicastAdi);
ZT_ResultCode multicastUnsubscribe(
const CallContext &cc,
uint64_t nwid,
uint64_t multicastGroup,
unsigned long multicastAdi);
ZT_ResultCode
multicastUnsubscribe(const CallContext& cc, uint64_t nwid, uint64_t multicastGroup, unsigned long multicastAdi);
void status(
ZT_NodeStatus *status) const;
void status(ZT_NodeStatus* status) const;
ZT_PeerList *peers(
const CallContext &cc) const;
ZT_PeerList* peers(const CallContext& cc) const;
ZT_VirtualNetworkConfig *networkConfig(
uint64_t nwid) const;
ZT_VirtualNetworkConfig* networkConfig(uint64_t nwid) const;
ZT_VirtualNetworkList *networks() const;
ZT_VirtualNetworkList* networks() const;
void setNetworkUserPtr(
uint64_t nwid,
void *ptr);
void setNetworkUserPtr(uint64_t nwid, void* ptr);
void setInterfaceAddresses(
const ZT_InterfaceAddress *addrs,
unsigned int addrCount);
void setInterfaceAddresses(const ZT_InterfaceAddress* addrs, unsigned int addrCount);
ZT_CertificateError addCertificate(
const CallContext &cc,
unsigned int localTrust,
const ZT_Certificate *cert,
const void *certData,
unsigned int certSize);
ZT_CertificateError addCertificate(
const CallContext& cc,
unsigned int localTrust,
const ZT_Certificate* cert,
const void* certData,
unsigned int certSize);
ZT_ResultCode deleteCertificate(
const CallContext &cc,
const void *serialNo);
ZT_ResultCode deleteCertificate(const CallContext& cc, const void* serialNo);
ZT_CertificateList *listCertificates();
ZT_CertificateList* listCertificates();
int sendUserMessage(
const CallContext &cc,
uint64_t dest,
uint64_t typeId,
const void *data,
unsigned int len);
int sendUserMessage(const CallContext& cc, uint64_t dest, uint64_t typeId, const void* data, unsigned int len);
void setController(
void *networkControllerInstance);
void setController(void* networkControllerInstance);
/**
* Post an event via external callback
*
* @param tPtr Thread pointer
* @param ev Event object
* @param md Event data or NULL if none
* @param mdSize Size of event data
*/
ZT_INLINE void postEvent(void *const tPtr, const ZT_Event ev, const void *const md = nullptr, const unsigned int mdSize = 0) noexcept
{ m_ctx.cb.eventCallback(reinterpret_cast<ZT_Node *>(this), m_ctx.uPtr, tPtr, ev, md, mdSize); }
/**
* Post an event via external callback
*
* @param tPtr Thread pointer
* @param ev Event object
* @param md Event data or NULL if none
* @param mdSize Size of event data
*/
ZT_INLINE void
postEvent(void* const tPtr, const ZT_Event ev, const void* const md = nullptr, const unsigned int mdSize = 0)
noexcept
{
m_ctx.cb.eventCallback(reinterpret_cast<ZT_Node*>(this), m_ctx.uPtr, tPtr, ev, md, mdSize);
}
/**
* Check whether a path should be used for ZeroTier traffic
*
* This performs internal checks and also calls out to an external callback if one is defined.
*
* @param tPtr Thread pointer
* @param id Identity of peer
* @param localSocket Local socket or -1 if unknown
* @param remoteAddress Remote address
* @return True if path should be used
*/
bool filterPotentialPath(void *tPtr, const Identity &id, int64_t localSocket, const InetAddress &remoteAddress);
/**
* Check whether a path should be used for ZeroTier traffic
*
* This performs internal checks and also calls out to an external callback if one is defined.
*
* @param tPtr Thread pointer
* @param id Identity of peer
* @param localSocket Local socket or -1 if unknown
* @param remoteAddress Remote address
* @return True if path should be used
*/
bool filterPotentialPath(void* tPtr, const Identity& id, int64_t localSocket, const InetAddress& remoteAddress);
/**
* Query callback for a physical address for a peer
*
* @param tPtr Thread pointer
* @param id Full identity of ZeroTier node
* @param family Desired address family or -1 for any
* @param addr Buffer to store address (result parameter)
* @return True if addr was filled with something
*/
bool externalPathLookup(void *tPtr, const Identity &id, int family, InetAddress &addr);
/**
* Query callback for a physical address for a peer
*
* @param tPtr Thread pointer
* @param id Full identity of ZeroTier node
* @param family Desired address family or -1 for any
* @param addr Buffer to store address (result parameter)
* @return True if addr was filled with something
*/
bool externalPathLookup(void* tPtr, const Identity& id, int family, InetAddress& addr);
ZT_INLINE const Identity &identity() const noexcept
{ return m_ctx.identity; }
ZT_INLINE const Identity& identity() const noexcept
{
return m_ctx.identity;
}
ZT_INLINE const Context &context() const noexcept
{ return m_ctx; }
ZT_INLINE const Context& context() const noexcept
{
return m_ctx;
}
// Implementation of NetworkController::Sender interface
virtual void ncSendConfig(void *tPtr, int64_t clock, int64_t ticks, uint64_t nwid, uint64_t requestPacketId, const Address &destination, const NetworkConfig &nc, bool sendLegacyFormatConfig);
virtual void ncSendRevocation(void *tPtr, int64_t clock, int64_t ticks, const Address &destination, const RevocationCredential &rev);
virtual void ncSendError(void *tPtr, int64_t clock, int64_t ticks, uint64_t nwid, uint64_t requestPacketId, const Address &destination, NetworkController::ErrorCode errorCode);
// Implementation of NetworkController::Sender interface
virtual void ncSendConfig(
void* tPtr,
int64_t clock,
int64_t ticks,
uint64_t nwid,
uint64_t requestPacketId,
const Address& destination,
const NetworkConfig& nc,
bool sendLegacyFormatConfig);
virtual void ncSendRevocation(
void* tPtr,
int64_t clock,
int64_t ticks,
const Address& destination,
const RevocationCredential& rev);
virtual void ncSendError(
void* tPtr,
int64_t clock,
int64_t ticks,
uint64_t nwid,
uint64_t requestPacketId,
const Address& destination,
NetworkController::ErrorCode errorCode);
private:
Context m_ctx;
private:
Context m_ctx;
// Data store wrapper
Store m_store;
// Data store wrapper
Store m_store;
// Pointer to a struct defined in Node that holds instances of core objects.
void *m_objects;
// Pointer to a struct defined in Node that holds instances of core objects.
void* m_objects;
// This stores networks for rapid iteration, while RR->networks is the primary lookup.
Vector< SharedPtr< Network > > m_allNetworks;
Mutex m_allNetworks_l;
// This stores networks for rapid iteration, while RR->networks is the primary lookup.
Vector<SharedPtr<Network> > m_allNetworks;
Mutex m_allNetworks_l;
// These are local interface addresses that have been configured via the API
// and can be pushed to other nodes.
Vector< ZT_InterfaceAddress > m_localInterfaceAddresses;
Mutex m_localInterfaceAddresses_m;
// These are local interface addresses that have been configured via the API
// and can be pushed to other nodes.
Vector<ZT_InterfaceAddress> m_localInterfaceAddresses;
Mutex m_localInterfaceAddresses_m;
// This is locked while running processBackgroundTasks().
Mutex m_backgroundTasksLock;
// This is locked while running processBackgroundTasks().
Mutex m_backgroundTasksLock;
// These are locked via _backgroundTasksLock as they're only checked and modified in processBackgroundTasks().
int64_t m_lastPeerPulse;
int64_t m_lastHousekeepingRun;
int64_t m_lastNetworkHousekeepingRun;
int64_t m_lastTrustStoreUpdate;
// These are locked via _backgroundTasksLock as they're only checked and modified in processBackgroundTasks().
int64_t m_lastPeerPulse;
int64_t m_lastHousekeepingRun;
int64_t m_lastNetworkHousekeepingRun;
int64_t m_lastTrustStoreUpdate;
// True if at least one root appears reachable.
std::atomic< bool > m_online;
// True if at least one root appears reachable.
std::atomic<bool> m_online;
};
} // namespace ZeroTier
} // namespace ZeroTier
#endif
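For orientation, the embedding host drives Node roughly as sketched below. This is illustrative only and not part of this commit; uPtr, callbacks, cc and nwid are assumed to be prepared by the host, cc would be refreshed with current clock/ticks on each iteration, and a null controller fingerprint is assumed to mean no pinning.

    Node node(uPtr, &callbacks, cc);
    node.join(nwid, nullptr, nullptr, cc);               // nullptr: no controller fingerprint pinning
    volatile int64_t nextDeadline = 0;
    for (;;) {
        node.processBackgroundTasks(cc, &nextDeadline);  // wire/virtual packet input happens elsewhere
        // ... wait until nextDeadline or until new packets arrive ...
    }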


@ -17,7 +17,9 @@
/* Uncomment this to force a whole lot of debug output. */
#define ZT_DEBUG_SPEW
#if !defined(__GNUC__) && (defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1) || defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2) || defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) || defined(__INTEL_COMPILER) || defined(__clang__))
#if ! defined(__GNUC__) \
&& (defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1) || defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2) \
|| defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) || defined(__INTEL_COMPILER) || defined(__clang__))
#define __GNUC__ 3
#endif
@ -48,46 +50,50 @@
#undef __BSD__
#endif
#include <Shlobj.h>
#include <WinSock2.h>
#include <ws2tcpip.h>
#include <Windows.h>
#include <memoryapi.h>
#include <shlwapi.h>
#include <Shlobj.h>
#include <sys/param.h>
#include <ws2tcpip.h>
#endif /* Microsoft Windows */
#ifndef __WINDOWS__
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/types.h>
#endif /* NOT Microsoft Windows */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#if (defined(__amd64) || defined(__amd64__) || defined(__x86_64) || defined(__x86_64__) || defined(__AMD64) || defined(__AMD64__) || defined(_M_X64))
#if ( \
defined(__amd64) || defined(__amd64__) || defined(__x86_64) || defined(__x86_64__) || defined(__AMD64) \
|| defined(__AMD64__) || defined(_M_X64))
#define ZT_ARCH_X64 1
#include <xmmintrin.h>
#include <emmintrin.h>
#include <immintrin.h>
#include <xmmintrin.h>
#endif
#if defined(ZT_ARCH_X64) || defined(i386) || defined(__i386) || defined(__i386__) || defined(__i486__) || defined(__i586__) || defined(__i686__) || defined(_M_IX86) || defined(__X86__) || defined(_X86_) || defined(__I86__) || defined(__INTEL__) || defined(__386)
#if defined(ZT_ARCH_X64) || defined(i386) || defined(__i386) || defined(__i386__) || defined(__i486__) \
|| defined(__i586__) || defined(__i686__) || defined(_M_IX86) || defined(__X86__) || defined(_X86_) \
|| defined(__I86__) || defined(__INTEL__) || defined(__386)
#define ZT_ARCH_X86 1
#endif
#if !defined(ZT_ARCH_X86)
#if ! defined(ZT_ARCH_X86)
#ifndef ZT_NO_UNALIGNED_ACCESS
#define ZT_NO_UNALIGNED_ACCESS 1
#endif
#endif
#if defined(__ARM_NEON) || defined(__ARM_NEON__) || defined(ZT_ARCH_ARM_HAS_NEON)
#if (defined(__APPLE__) && !defined(__LP64__)) || (defined(__ANDROID__) && defined(__arm__))
#if (defined(__APPLE__) && ! defined(__LP64__)) || (defined(__ANDROID__) && defined(__arm__))
#ifdef ZT_ARCH_ARM_HAS_NEON
#undef ZT_ARCH_ARM_HAS_NEON
#endif
@ -110,8 +116,8 @@
#define __BSD__ 1
#endif
#ifndef __BYTE_ORDER
#define __BYTE_ORDER __DARWIN_BYTE_ORDER
#define __BIG_ENDIAN __DARWIN_BIG_ENDIAN
#define __BYTE_ORDER __DARWIN_BYTE_ORDER
#define __BIG_ENDIAN __DARWIN_BIG_ENDIAN
#define __LITTLE_ENDIAN __DARWIN_LITTLE_ENDIAN
#endif
#endif
@ -140,13 +146,13 @@
#endif
#ifdef __WINDOWS__
#define ZT_PATH_SEPARATOR '\\'
#define ZT_PATH_SEPARATOR '\\'
#define ZT_PATH_SEPARATOR_S "\\"
#define ZT_EOL_S "\r\n"
#define ZT_EOL_S "\r\n"
#else
#define ZT_PATH_SEPARATOR '/'
#define ZT_PATH_SEPARATOR '/'
#define ZT_PATH_SEPARATOR_S "/"
#define ZT_EOL_S "\n"
#define ZT_EOL_S "\n"
#endif
#ifdef SOCKET
@ -208,9 +214,9 @@
* if a shim for <atomic> were included. */
#ifndef __CPP11__
#error TODO: to build on pre-c++11 compilers we will need to make a subset of std::atomic for integers
#define nullptr (0)
#define nullptr (0)
#define constexpr ZT_INLINE
#define noexcept throw()
#define noexcept throw()
#define explicit
#endif
#endif
@ -225,7 +231,7 @@
#ifndef likely
#if defined(__GNUC__) || defined(__clang__)
#define likely(x) __builtin_expect((x),1)
#define likely(x) __builtin_expect((x), 1)
#else
#define likely(x) x
#endif
@ -233,7 +239,7 @@
#ifndef unlikely
#if defined(__GNUC__) || defined(__clang__)
#define unlikely(x) __builtin_expect((x),0)
#define unlikely(x) __builtin_expect((x), 0)
#else
#define unlikely(x) x
#endif
@ -249,28 +255,29 @@ typedef unsigned uint128_t __attribute__((mode(TI)));
#endif
#endif
#if !defined(__BYTE_ORDER) && defined(__BYTE_ORDER__)
#define __BYTE_ORDER __BYTE_ORDER__
#if ! defined(__BYTE_ORDER) && defined(__BYTE_ORDER__)
#define __BYTE_ORDER __BYTE_ORDER__
#define __LITTLE_ENDIAN __ORDER_LITTLE_ENDIAN__
#define __BIG_ENDIAN __ORDER_BIG_ENDIAN__
#define __BIG_ENDIAN __ORDER_BIG_ENDIAN__
#endif
#if !defined(__BYTE_ORDER) && defined(BYTE_ORDER)
#define __BYTE_ORDER BYTE_ORDER
#if ! defined(__BYTE_ORDER) && defined(BYTE_ORDER)
#define __BYTE_ORDER BYTE_ORDER
#define __LITTLE_ENDIAN LITTLE_ENDIAN
#define __BIG_ENDIAN BIG_ENDIAN
#define __BIG_ENDIAN BIG_ENDIAN
#endif
#if !defined(__BYTE_ORDER) && defined(_BYTE_ORDER)
#define __BYTE_ORDER _BYTE_ORDER
#if ! defined(__BYTE_ORDER) && defined(_BYTE_ORDER)
#define __BYTE_ORDER _BYTE_ORDER
#define __LITTLE_ENDIAN _LITTLE_ENDIAN
#define __BIG_ENDIAN _BIG_ENDIAN
#define __BIG_ENDIAN _BIG_ENDIAN
#endif
#define ZT_VA_ARGS(...) , ##__VA_ARGS__
#ifdef ZT_DEBUG_SPEW
#define ZT_SPEW(f,...) fprintf(stderr,"%s:%d(%s): " f ZT_EOL_S,__FILE__,__LINE__,__FUNCTION__ ZT_VA_ARGS(__VA_ARGS__))
#define ZT_SPEW(f, ...) \
fprintf(stderr, "%s:%d(%s): " f ZT_EOL_S, __FILE__, __LINE__, __FUNCTION__ ZT_VA_ARGS(__VA_ARGS__))
#else
#define ZT_SPEW(f,...)
#define ZT_SPEW(f, ...)
#endif
#endif
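When ZT_DEBUG_SPEW is defined, ZT_SPEW() expands to an fprintf to stderr prefixed with file, line and function; otherwise it compiles away entirely. A usage illustration (not from this commit; addr is a hypothetical variable):

    ZT_SPEW("handshake ok for %llx", (unsigned long long)addr);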


@ -15,112 +15,117 @@
namespace ZeroTier {
void OwnershipCredential::addThing(const InetAddress &ip)
void OwnershipCredential::addThing(const InetAddress& ip)
{
if (m_thingCount >= ZT_CERTIFICATEOFOWNERSHIP_MAX_THINGS)
return;
if (ip.as.sa.sa_family == AF_INET) {
m_thingTypes[m_thingCount] = THING_IPV4_ADDRESS;
Utils::copy<4>(m_thingValues[m_thingCount], &(reinterpret_cast<const struct sockaddr_in *>(&ip)->sin_addr.s_addr));
++m_thingCount;
} else if (ip.as.sa.sa_family == AF_INET6) {
m_thingTypes[m_thingCount] = THING_IPV6_ADDRESS;
Utils::copy<16>(m_thingValues[m_thingCount], reinterpret_cast<const struct sockaddr_in6 *>(&ip)->sin6_addr.s6_addr);
++m_thingCount;
}
if (m_thingCount >= ZT_CERTIFICATEOFOWNERSHIP_MAX_THINGS)
return;
if (ip.as.sa.sa_family == AF_INET) {
m_thingTypes[m_thingCount] = THING_IPV4_ADDRESS;
Utils::copy<4>(
m_thingValues[m_thingCount],
&(reinterpret_cast<const struct sockaddr_in*>(&ip)->sin_addr.s_addr));
++m_thingCount;
}
else if (ip.as.sa.sa_family == AF_INET6) {
m_thingTypes[m_thingCount] = THING_IPV6_ADDRESS;
Utils::copy<16>(
m_thingValues[m_thingCount],
reinterpret_cast<const struct sockaddr_in6*>(&ip)->sin6_addr.s6_addr);
++m_thingCount;
}
}
void OwnershipCredential::addThing(const MAC &mac)
void OwnershipCredential::addThing(const MAC& mac)
{
if (m_thingCount >= ZT_CERTIFICATEOFOWNERSHIP_MAX_THINGS)
return;
m_thingTypes[m_thingCount] = THING_MAC_ADDRESS;
mac.copyTo(m_thingValues[m_thingCount]);
++m_thingCount;
if (m_thingCount >= ZT_CERTIFICATEOFOWNERSHIP_MAX_THINGS)
return;
m_thingTypes[m_thingCount] = THING_MAC_ADDRESS;
mac.copyTo(m_thingValues[m_thingCount]);
++m_thingCount;
}
bool OwnershipCredential::sign(const Identity &signer)
bool OwnershipCredential::sign(const Identity& signer)
{
uint8_t buf[ZT_CERTIFICATEOFOWNERSHIP_MARSHAL_SIZE_MAX + 16];
if (signer.hasPrivate()) {
m_signedBy = signer.address();
m_signatureLength = signer.sign(buf, (unsigned int) marshal(buf, true), m_signature, sizeof(m_signature));
return true;
}
return false;
uint8_t buf[ZT_CERTIFICATEOFOWNERSHIP_MARSHAL_SIZE_MAX + 16];
if (signer.hasPrivate()) {
m_signedBy = signer.address();
m_signatureLength = signer.sign(buf, (unsigned int)marshal(buf, true), m_signature, sizeof(m_signature));
return true;
}
return false;
}
int OwnershipCredential::marshal(uint8_t data[ZT_CERTIFICATEOFOWNERSHIP_MARSHAL_SIZE_MAX], bool forSign) const noexcept
{
int p = 0;
if (forSign) {
for (int k = 0;k < 16;++k)
data[p++] = 0x7f;
}
Utils::storeBigEndian<uint64_t>(data + p, m_networkId);
Utils::storeBigEndian<uint64_t>(data + p + 8, (uint64_t) m_ts);
Utils::storeBigEndian<uint64_t>(data + p + 16, m_flags);
Utils::storeBigEndian<uint32_t>(data + p + 24, m_id);
Utils::storeBigEndian<uint16_t>(data + p + 28, (uint16_t) m_thingCount);
p += 30;
for (unsigned int i = 0, j = m_thingCount;i < j;++i) {
data[p++] = m_thingTypes[i];
Utils::copy<ZT_CERTIFICATEOFOWNERSHIP_MAX_THING_VALUE_SIZE>(data + p, m_thingValues[i]);
p += ZT_CERTIFICATEOFOWNERSHIP_MAX_THING_VALUE_SIZE;
}
m_issuedTo.copyTo(data + p);
p += ZT_ADDRESS_LENGTH;
m_signedBy.copyTo(data + p);
p += ZT_ADDRESS_LENGTH;
if (!forSign) {
data[p++] = 1;
Utils::storeBigEndian<uint16_t>(data + p, (uint16_t) m_signatureLength);
p += 2;
Utils::copy(data + p, m_signature, m_signatureLength);
p += (int) m_signatureLength;
}
data[p++] = 0;
data[p++] = 0;
if (forSign) {
for (int k = 0;k < 16;++k)
data[p++] = 0x7f;
}
return p;
int p = 0;
if (forSign) {
for (int k = 0; k < 16; ++k)
data[p++] = 0x7f;
}
Utils::storeBigEndian<uint64_t>(data + p, m_networkId);
Utils::storeBigEndian<uint64_t>(data + p + 8, (uint64_t)m_ts);
Utils::storeBigEndian<uint64_t>(data + p + 16, m_flags);
Utils::storeBigEndian<uint32_t>(data + p + 24, m_id);
Utils::storeBigEndian<uint16_t>(data + p + 28, (uint16_t)m_thingCount);
p += 30;
for (unsigned int i = 0, j = m_thingCount; i < j; ++i) {
data[p++] = m_thingTypes[i];
Utils::copy<ZT_CERTIFICATEOFOWNERSHIP_MAX_THING_VALUE_SIZE>(data + p, m_thingValues[i]);
p += ZT_CERTIFICATEOFOWNERSHIP_MAX_THING_VALUE_SIZE;
}
m_issuedTo.copyTo(data + p);
p += ZT_ADDRESS_LENGTH;
m_signedBy.copyTo(data + p);
p += ZT_ADDRESS_LENGTH;
if (! forSign) {
data[p++] = 1;
Utils::storeBigEndian<uint16_t>(data + p, (uint16_t)m_signatureLength);
p += 2;
Utils::copy(data + p, m_signature, m_signatureLength);
p += (int)m_signatureLength;
}
data[p++] = 0;
data[p++] = 0;
if (forSign) {
for (int k = 0; k < 16; ++k)
data[p++] = 0x7f;
}
return p;
}
int OwnershipCredential::unmarshal(const uint8_t *data, int len) noexcept
int OwnershipCredential::unmarshal(const uint8_t* data, int len) noexcept
{
if (len < 30)
return -1;
if (len < 30)
return -1;
m_networkId = Utils::loadBigEndian<uint64_t>(data);
m_ts = (int64_t) Utils::loadBigEndian<uint64_t>(data + 8);
m_flags = Utils::loadBigEndian<uint64_t>(data + 16);
m_id = Utils::loadBigEndian<uint32_t>(data + 24);
m_thingCount = Utils::loadBigEndian<uint16_t>(data + 28);
if (m_thingCount > ZT_CERTIFICATEOFOWNERSHIP_MAX_THINGS)
return -1;
int p = 30;
m_networkId = Utils::loadBigEndian<uint64_t>(data);
m_ts = (int64_t)Utils::loadBigEndian<uint64_t>(data + 8);
m_flags = Utils::loadBigEndian<uint64_t>(data + 16);
m_id = Utils::loadBigEndian<uint32_t>(data + 24);
m_thingCount = Utils::loadBigEndian<uint16_t>(data + 28);
if (m_thingCount > ZT_CERTIFICATEOFOWNERSHIP_MAX_THINGS)
return -1;
int p = 30;
for (unsigned int i = 0, j = m_thingCount;i < j;++i) {
if ((p + 1 + ZT_CERTIFICATEOFOWNERSHIP_MAX_THING_VALUE_SIZE) > len)
return -1;
m_thingTypes[i] = data[p++];
Utils::copy<ZT_CERTIFICATEOFOWNERSHIP_MAX_THING_VALUE_SIZE>(m_thingValues[i], data + p);
p += ZT_CERTIFICATEOFOWNERSHIP_MAX_THING_VALUE_SIZE;
}
for (unsigned int i = 0, j = m_thingCount; i < j; ++i) {
if ((p + 1 + ZT_CERTIFICATEOFOWNERSHIP_MAX_THING_VALUE_SIZE) > len)
return -1;
m_thingTypes[i] = data[p++];
Utils::copy<ZT_CERTIFICATEOFOWNERSHIP_MAX_THING_VALUE_SIZE>(m_thingValues[i], data + p);
p += ZT_CERTIFICATEOFOWNERSHIP_MAX_THING_VALUE_SIZE;
}
if ((p + ZT_ADDRESS_LENGTH + ZT_ADDRESS_LENGTH + 1 + 2) > len)
return -1;
m_issuedTo.setTo(data + p);
p += ZT_ADDRESS_LENGTH;
m_signedBy.setTo(data + p);
p += ZT_ADDRESS_LENGTH + 1;
if ((p + ZT_ADDRESS_LENGTH + ZT_ADDRESS_LENGTH + 1 + 2) > len)
return -1;
m_issuedTo.setTo(data + p);
p += ZT_ADDRESS_LENGTH;
m_signedBy.setTo(data + p);
p += ZT_ADDRESS_LENGTH + 1;
p += 2 + Utils::loadBigEndian<uint16_t>(data + p);
if (p > len)
return -1;
return p;
p += 2 + Utils::loadBigEndian<uint16_t>(data + p);
if (p > len)
return -1;
return p;
}
} // namespace ZeroTier
} // namespace ZeroTier


@ -14,10 +14,10 @@
#ifndef ZT_CERTIFICATEOFOWNERSHIP_HPP
#define ZT_CERTIFICATEOFOWNERSHIP_HPP
#include "Address.hpp"
#include "C25519.hpp"
#include "Constants.hpp"
#include "Credential.hpp"
#include "C25519.hpp"
#include "Address.hpp"
#include "Identity.hpp"
#include "InetAddress.hpp"
#include "MAC.hpp"
@ -28,7 +28,9 @@
// Maximum size of a thing's value field in bytes
#define ZT_CERTIFICATEOFOWNERSHIP_MAX_THING_VALUE_SIZE 16
#define ZT_CERTIFICATEOFOWNERSHIP_MARSHAL_SIZE_MAX (8 + 8 + 8 + 4 + 2 + ((1 + ZT_CERTIFICATEOFOWNERSHIP_MAX_THING_VALUE_SIZE) * ZT_CERTIFICATEOFOWNERSHIP_MAX_THINGS) + 5 + 5 + 1 + 2 + ZT_SIGNATURE_BUFFER_SIZE + 2)
#define ZT_CERTIFICATEOFOWNERSHIP_MARSHAL_SIZE_MAX \
(8 + 8 + 8 + 4 + 2 + ((1 + ZT_CERTIFICATEOFOWNERSHIP_MAX_THING_VALUE_SIZE) * ZT_CERTIFICATEOFOWNERSHIP_MAX_THINGS) \
+ 5 + 5 + 1 + 2 + ZT_SIGNATURE_BUFFER_SIZE + 2)
namespace ZeroTier {
@ -40,164 +42,201 @@ class Context;
* These are used in conjunction with the rules engine to make IP addresses and
* other identifiers un-spoofable.
*/
class OwnershipCredential : public Credential
{
friend class Credential;
class OwnershipCredential : public Credential {
friend class Credential;
public:
static constexpr ZT_CredentialType credentialType() noexcept
{ return ZT_CREDENTIAL_TYPE_COO; }
public:
static constexpr ZT_CredentialType credentialType() noexcept
{
return ZT_CREDENTIAL_TYPE_COO;
}
enum Thing
{
THING_NULL = 0,
THING_MAC_ADDRESS = 1,
THING_IPV4_ADDRESS = 2,
THING_IPV6_ADDRESS = 3
};
enum Thing { THING_NULL = 0, THING_MAC_ADDRESS = 1, THING_IPV4_ADDRESS = 2, THING_IPV6_ADDRESS = 3 };
ZT_INLINE OwnershipCredential() noexcept
{ memoryZero(this); }
ZT_INLINE OwnershipCredential() noexcept
{
memoryZero(this);
}
ZT_INLINE OwnershipCredential(const uint64_t nwid, const int64_t ts, const Address &issuedTo, const uint32_t id) noexcept
{
memoryZero(this);
m_networkId = nwid;
m_ts = ts;
m_id = id;
m_issuedTo = issuedTo;
}
ZT_INLINE
OwnershipCredential(const uint64_t nwid, const int64_t ts, const Address& issuedTo, const uint32_t id) noexcept
{
memoryZero(this);
m_networkId = nwid;
m_ts = ts;
m_id = id;
m_issuedTo = issuedTo;
}
ZT_INLINE uint64_t networkId() const noexcept
{ return m_networkId; }
ZT_INLINE uint64_t networkId() const noexcept
{
return m_networkId;
}
ZT_INLINE int64_t timestamp() const noexcept
{ return m_ts; }
ZT_INLINE int64_t timestamp() const noexcept
{
return m_ts;
}
ZT_INLINE int64_t revision() const noexcept
{ return m_ts; }
ZT_INLINE int64_t revision() const noexcept
{
return m_ts;
}
ZT_INLINE uint32_t id() const noexcept
{ return m_id; }
ZT_INLINE uint32_t id() const noexcept
{
return m_id;
}
ZT_INLINE const Address &issuedTo() const noexcept
{ return m_issuedTo; }
ZT_INLINE const Address& issuedTo() const noexcept
{
return m_issuedTo;
}
ZT_INLINE const Address &signer() const noexcept
{ return m_signedBy; }
ZT_INLINE const Address& signer() const noexcept
{
return m_signedBy;
}
ZT_INLINE const uint8_t *signature() const noexcept
{ return m_signature; }
ZT_INLINE const uint8_t* signature() const noexcept
{
return m_signature;
}
ZT_INLINE unsigned int signatureLength() const noexcept
{ return m_signatureLength; }
ZT_INLINE unsigned int signatureLength() const noexcept
{
return m_signatureLength;
}
ZT_INLINE unsigned int thingCount() const noexcept
{ return (unsigned int)m_thingCount; }
ZT_INLINE unsigned int thingCount() const noexcept
{
return (unsigned int)m_thingCount;
}
ZT_INLINE Thing thingType(const unsigned int i) const noexcept
{ return (Thing)m_thingTypes[i]; }
ZT_INLINE Thing thingType(const unsigned int i) const noexcept
{
return (Thing)m_thingTypes[i];
}
ZT_INLINE const uint8_t *thingValue(const unsigned int i) const noexcept
{ return m_thingValues[i]; }
ZT_INLINE const uint8_t* thingValue(const unsigned int i) const noexcept
{
return m_thingValues[i];
}
ZT_INLINE bool owns(const InetAddress &ip) const noexcept
{
if (ip.as.sa.sa_family == AF_INET)
return this->_owns(THING_IPV4_ADDRESS, &(reinterpret_cast<const struct sockaddr_in *>(&ip)->sin_addr.s_addr), 4);
else if (ip.as.sa.sa_family == AF_INET6)
return this->_owns(THING_IPV6_ADDRESS, reinterpret_cast<const struct sockaddr_in6 *>(&ip)->sin6_addr.s6_addr, 16);
else return false;
}
ZT_INLINE bool owns(const InetAddress& ip) const noexcept
{
if (ip.as.sa.sa_family == AF_INET)
return this->_owns(
THING_IPV4_ADDRESS,
&(reinterpret_cast<const struct sockaddr_in*>(&ip)->sin_addr.s_addr),
4);
else if (ip.as.sa.sa_family == AF_INET6)
return this->_owns(
THING_IPV6_ADDRESS,
reinterpret_cast<const struct sockaddr_in6*>(&ip)->sin6_addr.s6_addr,
16);
else
return false;
}
ZT_INLINE bool owns(const MAC &mac) const noexcept
{
uint8_t tmp[6];
mac.copyTo(tmp);
return this->_owns(THING_MAC_ADDRESS, tmp, 6);
}
ZT_INLINE bool owns(const MAC& mac) const noexcept
{
uint8_t tmp[6];
mac.copyTo(tmp);
return this->_owns(THING_MAC_ADDRESS, tmp, 6);
}
/**
* Add an IP address to this certificate
*
* @param ip IPv4 or IPv6 address
*/
void addThing(const InetAddress &ip);
/**
* Add an IP address to this certificate
*
* @param ip IPv4 or IPv6 address
*/
void addThing(const InetAddress& ip);
/**
* Add an Ethernet MAC address
*
* ZeroTier MAC addresses are always un-spoofable. This could in theory be
* used to make bridged MAC addresses un-spoofable as well, but it's not
* currently implemented.
*
* @param mac 48-bit MAC address
*/
void addThing(const MAC &mac);
/**
* Add an Ethernet MAC address
*
* ZeroTier MAC addresses are always un-spoofable. This could in theory be
* used to make bridged MAC addresses un-spoofable as well, but it's not
* currently implemented.
*
* @param mac 48-bit MAC address
*/
void addThing(const MAC& mac);
/**
* Sign this certificate
*
* @param signer Signing identity, must have private key
* @return True if signature was successful
*/
bool sign(const Identity &signer);
/**
* Sign this certificate
*
* @param signer Signing identity, must have private key
* @return True if signature was successful
*/
bool sign(const Identity& signer);
/**
* Verify certificate signature
*
* @return Credential verification result: OK, bad signature, or identity needed
*/
ZT_INLINE Credential::VerifyResult verify(const Context &ctx, const CallContext &cc) const
{ return s_verify(ctx, cc, *this); }
/**
* Verify certificate signature
*
* @return Credential verification result: OK, bad signature, or identity needed
*/
ZT_INLINE Credential::VerifyResult verify(const Context& ctx, const CallContext& cc) const
{
return s_verify(ctx, cc, *this);
}
static constexpr int marshalSizeMax() noexcept
{ return ZT_CERTIFICATEOFOWNERSHIP_MARSHAL_SIZE_MAX; }
static constexpr int marshalSizeMax() noexcept
{
return ZT_CERTIFICATEOFOWNERSHIP_MARSHAL_SIZE_MAX;
}
int marshal(uint8_t data[ZT_CERTIFICATEOFOWNERSHIP_MARSHAL_SIZE_MAX], bool forSign = false) const noexcept;
int unmarshal(const uint8_t *data, int len) noexcept;
int marshal(uint8_t data[ZT_CERTIFICATEOFOWNERSHIP_MARSHAL_SIZE_MAX], bool forSign = false) const noexcept;
int unmarshal(const uint8_t* data, int len) noexcept;
// Provides natural sort order by ID
ZT_INLINE bool operator<(const OwnershipCredential &coo) const noexcept
{ return (m_id < coo.m_id); }
// Provides natural sort order by ID
ZT_INLINE bool operator<(const OwnershipCredential& coo) const noexcept
{
return (m_id < coo.m_id);
}
ZT_INLINE bool operator==(const OwnershipCredential &coo) const noexcept
{ return (memcmp(this, &coo, sizeof(OwnershipCredential)) == 0); }
ZT_INLINE bool operator==(const OwnershipCredential& coo) const noexcept
{
return (memcmp(this, &coo, sizeof(OwnershipCredential)) == 0);
}
ZT_INLINE bool operator!=(const OwnershipCredential &coo) const noexcept
{ return (memcmp(this, &coo, sizeof(OwnershipCredential)) != 0); }
ZT_INLINE bool operator!=(const OwnershipCredential& coo) const noexcept
{
return (memcmp(this, &coo, sizeof(OwnershipCredential)) != 0);
}
private:
ZT_INLINE bool _owns(const Thing &t, const void *v, unsigned int l) const noexcept
{
for (unsigned int i = 0, j = m_thingCount; i < j; ++i) {
if (m_thingTypes[i] == (uint8_t)t) {
unsigned int k = 0;
while (k < l) {
if (reinterpret_cast<const uint8_t *>(v)[k] != m_thingValues[i][k])
break;
++k;
}
if (k == l)
return true;
}
}
return false;
}
private:
ZT_INLINE bool _owns(const Thing& t, const void* v, unsigned int l) const noexcept
{
for (unsigned int i = 0, j = m_thingCount; i < j; ++i) {
if (m_thingTypes[i] == (uint8_t)t) {
unsigned int k = 0;
while (k < l) {
if (reinterpret_cast<const uint8_t*>(v)[k] != m_thingValues[i][k])
break;
++k;
}
if (k == l)
return true;
}
}
return false;
}
uint64_t m_networkId;
int64_t m_ts;
uint64_t m_flags;
uint32_t m_id;
uint16_t m_thingCount;
uint8_t m_thingTypes[ZT_CERTIFICATEOFOWNERSHIP_MAX_THINGS];
uint8_t m_thingValues[ZT_CERTIFICATEOFOWNERSHIP_MAX_THINGS][ZT_CERTIFICATEOFOWNERSHIP_MAX_THING_VALUE_SIZE];
Address m_issuedTo;
Address m_signedBy;
unsigned int m_signatureLength;
uint8_t m_signature[ZT_SIGNATURE_BUFFER_SIZE];
uint64_t m_networkId;
int64_t m_ts;
uint64_t m_flags;
uint32_t m_id;
uint16_t m_thingCount;
uint8_t m_thingTypes[ZT_CERTIFICATEOFOWNERSHIP_MAX_THINGS];
uint8_t m_thingValues[ZT_CERTIFICATEOFOWNERSHIP_MAX_THINGS][ZT_CERTIFICATEOFOWNERSHIP_MAX_THING_VALUE_SIZE];
Address m_issuedTo;
Address m_signedBy;
unsigned int m_signatureLength;
uint8_t m_signature[ZT_SIGNATURE_BUFFER_SIZE];
};
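A hypothetical usage sketch (not part of this commit) tying the pieces above together: issue a certificate of ownership for one managed IP, sign it with the controller identity, and serialize it. nwid, now, memberAddr, memberIp and controllerId are assumed to be populated elsewhere.

    OwnershipCredential coo(nwid, now, memberAddr, 1 /* credential ID */);
    coo.addThing(memberIp);                       // memberIp: InetAddress (IPv4 or IPv6)
    if (coo.sign(controllerId)) {                 // controllerId must include a private key
        uint8_t buf[ZT_CERTIFICATEOFOWNERSHIP_MARSHAL_SIZE_MAX];
        const int len = coo.marshal(buf);         // buf[0..len) is the wire encoding produced by marshal() above
    }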
} // namespace ZeroTier
} // namespace ZeroTier
#endif


@ -12,19 +12,30 @@
/****/
#include "Path.hpp"
#include "Context.hpp"
#include "Node.hpp"
namespace ZeroTier {
bool Path::send(const Context &ctx, const CallContext &cc, const void *const data, const unsigned int len) noexcept
bool Path::send(const Context& ctx, const CallContext& cc, const void* const data, const unsigned int len) noexcept
{
if (likely(ctx.cb.wirePacketSendFunction(reinterpret_cast<ZT_Node *>(ctx.node), ctx.uPtr, cc.tPtr, m_localSocket, reinterpret_cast<const ZT_InetAddress *>(&m_addr), data, len, 0) == 0)) {
m_lastOut = cc.ticks;
m_outMeter.log(cc.ticks, len);
return true;
}
return false;
if (likely(
ctx.cb.wirePacketSendFunction(
reinterpret_cast<ZT_Node*>(ctx.node),
ctx.uPtr,
cc.tPtr,
m_localSocket,
reinterpret_cast<const ZT_InetAddress*>(&m_addr),
data,
len,
0)
== 0)) {
m_lastOut = cc.ticks;
m_outMeter.log(cc.ticks, len);
return true;
}
return false;
}
} // namespace ZeroTier
} // namespace ZeroTier


@ -14,219 +14,249 @@
#ifndef ZT_PATH_HPP
#define ZT_PATH_HPP
#include "CallContext.hpp"
#include "Constants.hpp"
#include "Containers.hpp"
#include "InetAddress.hpp"
#include "Meter.hpp"
#include "Mutex.hpp"
#include "SharedPtr.hpp"
#include "Utils.hpp"
#include "Mutex.hpp"
#include "Meter.hpp"
#include "Containers.hpp"
#include "CallContext.hpp"
namespace ZeroTier {
class Context;
template< unsigned int MF, unsigned int MFP, unsigned int GCT, unsigned int GCS, typename P >
class Defragmenter;
template <unsigned int MF, unsigned int MFP, unsigned int GCT, unsigned int GCS, typename P> class Defragmenter;
/**
* A path across the physical network
*/
class Path
{
friend class SharedPtr< Path >;
class Path {
friend class SharedPtr<Path>;
// Allow defragmenter to access fragment-in-flight info stored in Path for performance reasons.
template< unsigned int MF, unsigned int MFP, unsigned int GCT, unsigned int GCS, typename P >
friend
class Defragmenter;
// Allow defragmenter to access fragment-in-flight info stored in Path for performance reasons.
template <unsigned int MF, unsigned int MFP, unsigned int GCT, unsigned int GCS, typename P>
friend class Defragmenter;
public:
/**
* Map key for paths designed for very fast lookup
*/
class Key
{
public:
ZT_INLINE Key() noexcept
{}
public:
/**
* Map key for paths designed for very fast lookup
*/
class Key {
public:
ZT_INLINE Key() noexcept
{
}
ZT_INLINE Key(const InetAddress &ip) noexcept
{
const unsigned int family = ip.as.sa.sa_family;
if (family == AF_INET) {
const uint16_t p = (uint16_t)ip.as.sa_in.sin_port;
m_hashCode = Utils::hash64((((uint64_t)ip.as.sa_in.sin_addr.s_addr) << 16U) ^ ((uint64_t)p) ^ Utils::s_mapNonce);
m_ipv6Net64 = 0; // 0 for IPv4, never 0 for IPv6
m_port = p;
} else {
if (likely(family == AF_INET6)) {
const uint64_t a = Utils::loadMachineEndian< uint64_t >(reinterpret_cast<const uint8_t *>(ip.as.sa_in6.sin6_addr.s6_addr));
const uint64_t b = Utils::loadMachineEndian< uint64_t >(reinterpret_cast<const uint8_t *>(ip.as.sa_in6.sin6_addr.s6_addr) + 8);
const uint16_t p = ip.as.sa_in6.sin6_port;
m_hashCode = Utils::hash64(a ^ b ^ ((uint64_t)p) ^ Utils::s_mapNonce);
m_ipv6Net64 = a; // IPv6 /64
m_port = p;
} else {
// This is not reachable since InetAddress can only be AF_INET or AF_INET6, but implement something.
m_hashCode = Utils::fnv1a32(&ip, sizeof(InetAddress));
m_ipv6Net64 = 0;
m_port = (uint16_t)family;
}
}
}
ZT_INLINE Key(const InetAddress& ip) noexcept
{
const unsigned int family = ip.as.sa.sa_family;
if (family == AF_INET) {
const uint16_t p = (uint16_t)ip.as.sa_in.sin_port;
m_hashCode =
Utils::hash64((((uint64_t)ip.as.sa_in.sin_addr.s_addr) << 16U) ^ ((uint64_t)p) ^ Utils::s_mapNonce);
m_ipv6Net64 = 0; // 0 for IPv4, never 0 for IPv6
m_port = p;
}
else {
if (likely(family == AF_INET6)) {
const uint64_t a = Utils::loadMachineEndian<uint64_t>(
reinterpret_cast<const uint8_t*>(ip.as.sa_in6.sin6_addr.s6_addr));
const uint64_t b = Utils::loadMachineEndian<uint64_t>(
reinterpret_cast<const uint8_t*>(ip.as.sa_in6.sin6_addr.s6_addr) + 8);
const uint16_t p = ip.as.sa_in6.sin6_port;
m_hashCode = Utils::hash64(a ^ b ^ ((uint64_t)p) ^ Utils::s_mapNonce);
m_ipv6Net64 = a; // IPv6 /64
m_port = p;
}
else {
// This is not reachable since InetAddress can only be AF_INET or AF_INET6, but implement something.
m_hashCode = Utils::fnv1a32(&ip, sizeof(InetAddress));
m_ipv6Net64 = 0;
m_port = (uint16_t)family;
}
}
}
ZT_INLINE unsigned long hashCode() const noexcept
{ return (unsigned long)m_hashCode; }
ZT_INLINE unsigned long hashCode() const noexcept
{
return (unsigned long)m_hashCode;
}
ZT_INLINE bool operator==(const Key &k) const noexcept
{ return (m_hashCode == k.m_hashCode) && (m_ipv6Net64 == k.m_ipv6Net64) && (m_port == k.m_port); }
ZT_INLINE bool operator==(const Key& k) const noexcept
{
return (m_hashCode == k.m_hashCode) && (m_ipv6Net64 == k.m_ipv6Net64) && (m_port == k.m_port);
}
ZT_INLINE bool operator!=(const Key &k) const noexcept
{ return (!(*this == k)); }
ZT_INLINE bool operator!=(const Key& k) const noexcept
{
return (! (*this == k));
}
ZT_INLINE bool operator<(const Key &k) const noexcept
{
if (m_hashCode < k.m_hashCode) {
return true;
} else if (m_hashCode == k.m_hashCode) {
if (m_ipv6Net64 < k.m_ipv6Net64) {
return true;
} else if (m_ipv6Net64 == k.m_ipv6Net64) {
return (m_port < k.m_port);
}
}
return false;
}
ZT_INLINE bool operator<(const Key& k) const noexcept
{
if (m_hashCode < k.m_hashCode) {
return true;
}
else if (m_hashCode == k.m_hashCode) {
if (m_ipv6Net64 < k.m_ipv6Net64) {
return true;
}
else if (m_ipv6Net64 == k.m_ipv6Net64) {
return (m_port < k.m_port);
}
}
return false;
}
ZT_INLINE bool operator>(const Key &k) const noexcept
{ return (k < *this); }
ZT_INLINE bool operator>(const Key& k) const noexcept
{
return (k < *this);
}
ZT_INLINE bool operator<=(const Key &k) const noexcept
{ return !(k < *this); }
ZT_INLINE bool operator<=(const Key& k) const noexcept
{
return ! (k < *this);
}
ZT_INLINE bool operator>=(const Key &k) const noexcept
{ return !(*this < k); }
ZT_INLINE bool operator>=(const Key& k) const noexcept
{
return ! (*this < k);
}
private:
uint64_t m_hashCode;
uint64_t m_ipv6Net64;
uint16_t m_port;
};
private:
uint64_t m_hashCode;
uint64_t m_ipv6Net64;
uint16_t m_port;
};
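The Key above precomputes a single 64-bit hash over address family, address and port, so a map lookup only needs to compare three integers. A minimal, hedged sketch of how such a key could drive an endpoint lookup table follows; it is illustrative only (the real core uses its own Map and SharedPtr containers rather than the std ones shown here), and assumes Path.hpp is included.

#include <memory>
#include <unordered_map>

struct PathKeyHasher {
    // The hash is precomputed inside the Key, so hashing here is just a load.
    std::size_t operator()(const ZeroTier::Path::Key& k) const noexcept { return (std::size_t)k.hashCode(); }
};

// Hypothetical lookup table from remote endpoint key to path object.
using PathTable = std::unordered_map<ZeroTier::Path::Key, std::shared_ptr<ZeroTier::Path>, PathKeyHasher>;

// Lookup by remote wire address (addr is an InetAddress):
//   PathTable paths;
//   auto it = paths.find(ZeroTier::Path::Key(addr));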
ZT_INLINE Path(const int64_t l, const InetAddress &r) noexcept:
m_localSocket(l),
m_lastIn(0),
m_lastOut(0),
m_latency(-1),
m_addr(r)
{}
ZT_INLINE Path(const int64_t l, const InetAddress& r) noexcept
: m_localSocket(l)
, m_lastIn(0)
, m_lastOut(0)
, m_latency(-1)
, m_addr(r)
{
}
/**
* Send a packet via this path (last out time is also updated)
*
* @param data Packet data
* @param len Packet length
* @return True if transport reported success
*/
bool send(const Context &ctx, const CallContext &cc, const void *data, unsigned int len) noexcept;
/**
* Send a packet via this path (last out time is also updated)
*
* @param data Packet data
* @param len Packet length
* @return True if transport reported success
*/
bool send(const Context& ctx, const CallContext& cc, const void* data, unsigned int len) noexcept;
/**
* Explicitly update last sent time
*
* @param now Time of send
* @param bytes Bytes sent
*/
ZT_INLINE void sent(const CallContext &cc, const unsigned int bytes) noexcept
{
m_lastOut.store(cc.ticks, std::memory_order_relaxed);
m_outMeter.log(cc.ticks, bytes);
}
/**
* Explicitly update last sent time
*
* @param now Time of send
* @param bytes Bytes sent
*/
ZT_INLINE void sent(const CallContext& cc, const unsigned int bytes) noexcept
{
m_lastOut.store(cc.ticks, std::memory_order_relaxed);
m_outMeter.log(cc.ticks, bytes);
}
/**
* Called when a packet is received from this remote path, regardless of content
*
* @param now Time of receive
* @param bytes Bytes received
*/
ZT_INLINE void received(const CallContext &cc, const unsigned int bytes) noexcept
{
m_lastIn.store(cc.ticks, std::memory_order_relaxed);
m_inMeter.log(cc.ticks, bytes);
}
/**
* Called when a packet is received from this remote path, regardless of content
*
* @param now Time of receive
* @param bytes Bytes received
*/
ZT_INLINE void received(const CallContext& cc, const unsigned int bytes) noexcept
{
m_lastIn.store(cc.ticks, std::memory_order_relaxed);
m_inMeter.log(cc.ticks, bytes);
}
/**
* Update latency with a new measurement
*
* @param newMeasurement New latency measurement in milliseconds
*/
ZT_INLINE void updateLatency(const unsigned int newMeasurement) noexcept
{
const int lat = m_latency.load(std::memory_order_relaxed);
if (likely(lat > 0)) {
m_latency.store((lat + (int)newMeasurement) >> 1U, std::memory_order_relaxed);
} else {
m_latency.store((int)newMeasurement, std::memory_order_relaxed);
}
}
/**
* Update latency with a new measurement
*
* @param newMeasurement New latency measurement in milliseconds
*/
ZT_INLINE void updateLatency(const unsigned int newMeasurement) noexcept
{
const int lat = m_latency.load(std::memory_order_relaxed);
if (likely(lat > 0)) {
m_latency.store((lat + (int)newMeasurement) >> 1U, std::memory_order_relaxed);
}
else {
m_latency.store((int)newMeasurement, std::memory_order_relaxed);
}
}
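For example, with a stored latency of 100 ms and a new measurement of 60 ms the stored value becomes (100 + 60) >> 1 = 80 ms; the very first measurement is simply stored unmodified.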
/**
* @return Latency in milliseconds or -1 if unknown
*/
ZT_INLINE int latency() const noexcept
{ return m_latency.load(std::memory_order_relaxed); }
/**
* @return Latency in milliseconds or -1 if unknown
*/
ZT_INLINE int latency() const noexcept
{
return m_latency.load(std::memory_order_relaxed);
}
/**
* Check path aliveness
*
* @param now Current time
*/
ZT_INLINE bool alive(const CallContext &cc) const noexcept
{ return ((cc.ticks - m_lastIn.load(std::memory_order_relaxed)) < ZT_PATH_ALIVE_TIMEOUT); }
/**
* Check path aliveness
*
* @param now Current time
*/
ZT_INLINE bool alive(const CallContext& cc) const noexcept
{
return ((cc.ticks - m_lastIn.load(std::memory_order_relaxed)) < ZT_PATH_ALIVE_TIMEOUT);
}
/**
* @return Physical address
*/
ZT_INLINE const InetAddress &address() const noexcept
{ return m_addr; }
/**
* @return Physical address
*/
ZT_INLINE const InetAddress& address() const noexcept
{
return m_addr;
}
/**
* @return Local socket as specified by external code
*/
ZT_INLINE int64_t localSocket() const noexcept
{ return m_localSocket; }
/**
* @return Local socket as specified by external code
*/
ZT_INLINE int64_t localSocket() const noexcept
{
return m_localSocket;
}
/**
* @return Last time we received anything
*/
ZT_INLINE int64_t lastIn() const noexcept
{ return m_lastIn.load(std::memory_order_relaxed); }
/**
* @return Last time we received anything
*/
ZT_INLINE int64_t lastIn() const noexcept
{
return m_lastIn.load(std::memory_order_relaxed);
}
/**
* @return Last time we sent something
*/
ZT_INLINE int64_t lastOut() const noexcept
{ return m_lastOut.load(std::memory_order_relaxed); }
/**
* @return Last time we sent something
*/
ZT_INLINE int64_t lastOut() const noexcept
{
return m_lastOut.load(std::memory_order_relaxed);
}
private:
const int64_t m_localSocket;
std::atomic< int64_t > m_lastIn;
std::atomic< int64_t > m_lastOut;
std::atomic< int > m_latency;
const InetAddress m_addr;
Meter<> m_inMeter;
Meter<> m_outMeter;
private:
const int64_t m_localSocket;
std::atomic<int64_t> m_lastIn;
std::atomic<int64_t> m_lastOut;
std::atomic<int> m_latency;
const InetAddress m_addr;
Meter<> m_inMeter;
Meter<> m_outMeter;
// These fields belong to Defragmenter but are kept in Path for performance
// as it's much faster this way than having Defragmenter maintain another
// mapping from paths to inbound message IDs.
Set< uint64_t > m_inboundFragmentedMessages;
Mutex m_inboundFragmentedMessages_l;
// These fields belong to Defragmenter but are kept in Path for performance
// as it's much faster this way than having Defragmenter maintain another
// mapping from paths to inbound message IDs.
Set<uint64_t> m_inboundFragmentedMessages;
Mutex m_inboundFragmentedMessages_l;
std::atomic< int > __refCount;
std::atomic<int> __refCount;
};
} // namespace ZeroTier
} // namespace ZeroTier
#endif
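Taken together, the accessors above describe the per-path bookkeeping pattern expected of callers: log traffic as it flows and consult alive() and latency() when choosing among paths. A hedged, caller-side sketch (function and variable names are illustrative, not from the core; Path.hpp is assumed to be included and CallContext carries the current ticks as in the code above):

// Record an inbound packet against its path.
static void onWireReceive(ZeroTier::Path& path, const ZeroTier::CallContext& cc, unsigned int bytes)
{
    path.received(cc, bytes);   // updates last-in time and the inbound meter
}

// Illustrative preference check: favor a live path with lower measured latency (-1 means unknown).
static bool preferPath(const ZeroTier::Path& a, const ZeroTier::Path& b, const ZeroTier::CallContext& cc)
{
    if (! a.alive(cc))
        return false;
    const int la = a.latency(), lb = b.latency();
    return (lb < 0) || ((la >= 0) && (la < lb));
}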

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

View file

@@ -6,439 +6,524 @@ Public domain.
// Small modifications have been made for ZeroTier, but this code remains in the public domain.
#include "Constants.hpp"
#include "Poly1305.hpp"
#include "Constants.hpp"
#include "Utils.hpp"
#include <cstring>
#ifdef __WINDOWS__
#pragma warning(disable: 4146)
#pragma warning(disable : 4146)
#endif
#define U8TO64(p) Utils::loadLittleEndian<uint64_t>(p)
#define U64TO8(p,v) Utils::storeLittleEndian<uint64_t>(p,v)
#define U8TO32(p) Utils::loadLittleEndian<uint32_t>(p)
#define U32TO8(p,v) Utils::storeLittleEndian<uint32_t>(p,v)
#define U8TO64(p) Utils::loadLittleEndian<uint64_t>(p)
#define U64TO8(p, v) Utils::storeLittleEndian<uint64_t>(p, v)
#define U8TO32(p) Utils::loadLittleEndian<uint32_t>(p)
#define U32TO8(p, v) Utils::storeLittleEndian<uint32_t>(p, v)
namespace ZeroTier {
namespace {
typedef struct poly1305_context {
size_t aligner;
unsigned char opaque[136];
size_t aligner;
unsigned char opaque[136];
} poly1305_context;
#ifdef ZT_HAVE_UINT128
#define MUL(out, x, y) out = ((uint128_t)x * y)
#define ADD(out, in) out += in
#define ADD(out, in) out += in
#define ADDLO(out, in) out += in
#define SHR(in, shift) (unsigned long long)(in >> (shift))
#define LO(in) (unsigned long long)(in)
#define LO(in) (unsigned long long)(in)
#define poly1305_block_size 16
typedef struct poly1305_state_internal_t {
unsigned long long r[3];
unsigned long long h[3];
unsigned long long pad[2];
size_t leftover;
unsigned char buffer[poly1305_block_size];
unsigned char final;
unsigned long long r[3];
unsigned long long h[3];
unsigned long long pad[2];
size_t leftover;
unsigned char buffer[poly1305_block_size];
unsigned char final;
} poly1305_state_internal_t;
ZT_INLINE void poly1305_init(poly1305_context *ctx,const unsigned char key[32])
ZT_INLINE void poly1305_init(poly1305_context* ctx, const unsigned char key[32])
{
poly1305_state_internal_t *st = (poly1305_state_internal_t *)ctx;
unsigned long long t0,t1;
poly1305_state_internal_t* st = (poly1305_state_internal_t*)ctx;
unsigned long long t0, t1;
/* r &= 0xffffffc0ffffffc0ffffffc0fffffff */
t0 = U8TO64(&key[0]);
t1 = U8TO64(&key[8]);
/* r &= 0xffffffc0ffffffc0ffffffc0fffffff */
t0 = U8TO64(&key[0]);
t1 = U8TO64(&key[8]);
st->r[0] = ( t0 ) & 0xffc0fffffff;
st->r[1] = ((t0 >> 44) | (t1 << 20)) & 0xfffffc0ffff;
st->r[2] = ((t1 >> 24) ) & 0x00ffffffc0f;
st->r[0] = (t0)&0xffc0fffffff;
st->r[1] = ((t0 >> 44) | (t1 << 20)) & 0xfffffc0ffff;
st->r[2] = ((t1 >> 24)) & 0x00ffffffc0f;
/* h = 0 */
st->h[0] = 0;
st->h[1] = 0;
st->h[2] = 0;
/* h = 0 */
st->h[0] = 0;
st->h[1] = 0;
st->h[2] = 0;
/* save pad for later */
st->pad[0] = U8TO64(&key[16]);
st->pad[1] = U8TO64(&key[24]);
/* save pad for later */
st->pad[0] = U8TO64(&key[16]);
st->pad[1] = U8TO64(&key[24]);
st->leftover = 0;
st->final = 0;
st->leftover = 0;
st->final = 0;
}
void poly1305_blocks(poly1305_state_internal_t *st, const unsigned char *m, size_t bytes)
void poly1305_blocks(poly1305_state_internal_t* st, const unsigned char* m, size_t bytes)
{
const unsigned long long hibit = (st->final) ? 0 : ((unsigned long long)1 << 40); /* 1 << 128 */
unsigned long long r0,r1,r2;
unsigned long long s1,s2;
unsigned long long h0,h1,h2;
uint128_t d0,d1,d2,d;
const unsigned long long hibit = (st->final) ? 0 : ((unsigned long long)1 << 40); /* 1 << 128 */
unsigned long long r0, r1, r2;
unsigned long long s1, s2;
unsigned long long h0, h1, h2;
uint128_t d0, d1, d2, d;
r0 = st->r[0];
r1 = st->r[1];
r2 = st->r[2];
r0 = st->r[0];
r1 = st->r[1];
r2 = st->r[2];
h0 = st->h[0];
h1 = st->h[1];
h2 = st->h[2];
h0 = st->h[0];
h1 = st->h[1];
h2 = st->h[2];
s1 = r1 * (5 << 2);
s2 = r2 * (5 << 2);
s1 = r1 * (5 << 2);
s2 = r2 * (5 << 2);
while (bytes >= poly1305_block_size) {
unsigned long long t0,t1;
while (bytes >= poly1305_block_size) {
unsigned long long t0, t1;
/* h += m[i] */
t0 = U8TO64(&m[0]);
t1 = U8TO64(&m[8]);
/* h += m[i] */
t0 = U8TO64(&m[0]);
t1 = U8TO64(&m[8]);
h0 += (( t0 ) & 0xfffffffffff);
h1 += (((t0 >> 44) | (t1 << 20)) & 0xfffffffffff);
h2 += (((t1 >> 24) ) & 0x3ffffffffff) | hibit;
h0 += ((t0)&0xfffffffffff);
h1 += (((t0 >> 44) | (t1 << 20)) & 0xfffffffffff);
h2 += (((t1 >> 24)) & 0x3ffffffffff) | hibit;
/* h *= r */
MUL(d0, h0, r0); MUL(d, h1, s2); ADD(d0, d); MUL(d, h2, s1); ADD(d0, d);
MUL(d1, h0, r1); MUL(d, h1, r0); ADD(d1, d); MUL(d, h2, s2); ADD(d1, d);
MUL(d2, h0, r2); MUL(d, h1, r1); ADD(d2, d); MUL(d, h2, r0); ADD(d2, d);
/* h *= r */
MUL(d0, h0, r0);
MUL(d, h1, s2);
ADD(d0, d);
MUL(d, h2, s1);
ADD(d0, d);
MUL(d1, h0, r1);
MUL(d, h1, r0);
ADD(d1, d);
MUL(d, h2, s2);
ADD(d1, d);
MUL(d2, h0, r2);
MUL(d, h1, r1);
ADD(d2, d);
MUL(d, h2, r0);
ADD(d2, d);
/* (partial) h %= p */
unsigned long long c = SHR(d0, 44); h0 = LO(d0) & 0xfffffffffff;
ADDLO(d1, c); c = SHR(d1, 44); h1 = LO(d1) & 0xfffffffffff;
ADDLO(d2, c); c = SHR(d2, 42); h2 = LO(d2) & 0x3ffffffffff;
h0 += c * 5; c = (h0 >> 44); h0 = h0 & 0xfffffffffff;
h1 += c;
/* (partial) h %= p */
unsigned long long c = SHR(d0, 44);
h0 = LO(d0) & 0xfffffffffff;
ADDLO(d1, c);
c = SHR(d1, 44);
h1 = LO(d1) & 0xfffffffffff;
ADDLO(d2, c);
c = SHR(d2, 42);
h2 = LO(d2) & 0x3ffffffffff;
h0 += c * 5;
c = (h0 >> 44);
h0 = h0 & 0xfffffffffff;
h1 += c;
m += poly1305_block_size;
bytes -= poly1305_block_size;
}
m += poly1305_block_size;
bytes -= poly1305_block_size;
}
st->h[0] = h0;
st->h[1] = h1;
st->h[2] = h2;
st->h[0] = h0;
st->h[1] = h1;
st->h[2] = h2;
}
ZT_INLINE void poly1305_finish(poly1305_context *ctx,unsigned char mac[16])
ZT_INLINE void poly1305_finish(poly1305_context* ctx, unsigned char mac[16])
{
poly1305_state_internal_t *st = (poly1305_state_internal_t *)ctx;
unsigned long long h0,h1,h2,c;
unsigned long long g0,g1,g2;
unsigned long long t0,t1;
poly1305_state_internal_t* st = (poly1305_state_internal_t*)ctx;
unsigned long long h0, h1, h2, c;
unsigned long long g0, g1, g2;
unsigned long long t0, t1;
/* process the remaining block */
if (st->leftover) {
size_t i = st->leftover;
st->buffer[i] = 1;
for (i = i + 1; i < poly1305_block_size; i++)
st->buffer[i] = 0;
st->final = 1;
poly1305_blocks(st, st->buffer, poly1305_block_size);
}
/* process the remaining block */
if (st->leftover) {
size_t i = st->leftover;
st->buffer[i] = 1;
for (i = i + 1; i < poly1305_block_size; i++)
st->buffer[i] = 0;
st->final = 1;
poly1305_blocks(st, st->buffer, poly1305_block_size);
}
/* fully carry h */
h0 = st->h[0];
h1 = st->h[1];
h2 = st->h[2];
/* fully carry h */
h0 = st->h[0];
h1 = st->h[1];
h2 = st->h[2];
c = (h1 >> 44); h1 &= 0xfffffffffff;
h2 += c; c = (h2 >> 42); h2 &= 0x3ffffffffff;
h0 += c * 5; c = (h0 >> 44); h0 &= 0xfffffffffff;
h1 += c; c = (h1 >> 44); h1 &= 0xfffffffffff;
h2 += c; c = (h2 >> 42); h2 &= 0x3ffffffffff;
h0 += c * 5; c = (h0 >> 44); h0 &= 0xfffffffffff;
h1 += c;
/* compute h + -p */
g0 = h0 + 5; c = (g0 >> 44); g0 &= 0xfffffffffff;
g1 = h1 + c; c = (g1 >> 44); g1 &= 0xfffffffffff;
g2 = h2 + c - ((unsigned long long)1 << 42);
/* select h if h < p, or h + -p if h >= p */
c = (g2 >> ((sizeof(unsigned long long) * 8) - 1)) - 1;
g0 &= c;
g1 &= c;
g2 &= c;
c = ~c;
h0 = (h0 & c) | g0;
h1 = (h1 & c) | g1;
h2 = (h2 & c) | g2;
/* h = (h + pad) */
t0 = st->pad[0];
t1 = st->pad[1];
h0 += (( t0 ) & 0xfffffffffff) ; c = (h0 >> 44); h0 &= 0xfffffffffff;
h1 += (((t0 >> 44) | (t1 << 20)) & 0xfffffffffff) + c; c = (h1 >> 44); h1 &= 0xfffffffffff;
h2 += (((t1 >> 24) ) & 0x3ffffffffff) + c; h2 &= 0x3ffffffffff;
/* mac = h % (2^128) */
h0 = ((h0 ) | (h1 << 44));
h1 = ((h1 >> 20) | (h2 << 24));
U64TO8(&mac[0], h0);
U64TO8(&mac[8], h1);
/* zero out the state */
st->h[0] = 0;
st->h[1] = 0;
st->h[2] = 0;
st->r[0] = 0;
st->r[1] = 0;
st->r[2] = 0;
st->pad[0] = 0;
st->pad[1] = 0;
}
#else // no uint128_t
#define poly1305_block_size 16
typedef struct poly1305_state_internal_t {
unsigned long r[5];
unsigned long h[5];
unsigned long pad[4];
size_t leftover;
unsigned char buffer[poly1305_block_size];
unsigned char final;
} poly1305_state_internal_t;
ZT_INLINE void poly1305_init(poly1305_context *ctx, const unsigned char key[32])
{
poly1305_state_internal_t *st = (poly1305_state_internal_t *)ctx;
/* r &= 0xffffffc0ffffffc0ffffffc0fffffff */
st->r[0] = (U8TO32(&key[ 0]) ) & 0x3ffffff;
st->r[1] = (U8TO32(&key[ 3]) >> 2) & 0x3ffff03;
st->r[2] = (U8TO32(&key[ 6]) >> 4) & 0x3ffc0ff;
st->r[3] = (U8TO32(&key[ 9]) >> 6) & 0x3f03fff;
st->r[4] = (U8TO32(&key[12]) >> 8) & 0x00fffff;
/* h = 0 */
st->h[0] = 0;
st->h[1] = 0;
st->h[2] = 0;
st->h[3] = 0;
st->h[4] = 0;
/* save pad for later */
st->pad[0] = U8TO32(&key[16]);
st->pad[1] = U8TO32(&key[20]);
st->pad[2] = U8TO32(&key[24]);
st->pad[3] = U8TO32(&key[28]);
st->leftover = 0;
st->final = 0;
}
void poly1305_blocks(poly1305_state_internal_t *st, const unsigned char *m, size_t bytes)
{
const unsigned long hibit = (st->final) ? 0 : (1 << 24); /* 1 << 128 */
unsigned long r0,r1,r2,r3,r4;
unsigned long s1,s2,s3,s4;
unsigned long h0,h1,h2,h3,h4;
r0 = st->r[0];
r1 = st->r[1];
r2 = st->r[2];
r3 = st->r[3];
r4 = st->r[4];
s1 = r1 * 5;
s2 = r2 * 5;
s3 = r3 * 5;
s4 = r4 * 5;
h0 = st->h[0];
h1 = st->h[1];
h2 = st->h[2];
h3 = st->h[3];
h4 = st->h[4];
while (bytes >= poly1305_block_size) {
/* h += m[i] */
h0 += (U8TO32(m+ 0) ) & 0x3ffffff;
h1 += (U8TO32(m+ 3) >> 2) & 0x3ffffff;
h2 += (U8TO32(m+ 6) >> 4) & 0x3ffffff;
h3 += (U8TO32(m+ 9) >> 6) & 0x3ffffff;
h4 += (U8TO32(m+12) >> 8) | hibit;
/* h *= r */
unsigned long long d0 = ((unsigned long long)h0 * r0) + ((unsigned long long)h1 * s4) + ((unsigned long long)h2 * s3) + ((unsigned long long)h3 * s2) + ((unsigned long long)h4 * s1);
unsigned long long d1 = ((unsigned long long)h0 * r1) + ((unsigned long long)h1 * r0) + ((unsigned long long)h2 * s4) + ((unsigned long long)h3 * s3) + ((unsigned long long)h4 * s2);
unsigned long long d2 = ((unsigned long long)h0 * r2) + ((unsigned long long)h1 * r1) + ((unsigned long long)h2 * r0) + ((unsigned long long)h3 * s4) + ((unsigned long long)h4 * s3);
unsigned long long d3 = ((unsigned long long)h0 * r3) + ((unsigned long long)h1 * r2) + ((unsigned long long)h2 * r1) + ((unsigned long long)h3 * r0) + ((unsigned long long)h4 * s4);
unsigned long long d4 = ((unsigned long long)h0 * r4) + ((unsigned long long)h1 * r3) + ((unsigned long long)h2 * r2) + ((unsigned long long)h3 * r1) + ((unsigned long long)h4 * r0);
/* (partial) h %= p */
unsigned long c = (unsigned long)(d0 >> 26); h0 = (unsigned long)d0 & 0x3ffffff;
d1 += c; c = (unsigned long)(d1 >> 26); h1 = (unsigned long)d1 & 0x3ffffff;
d2 += c; c = (unsigned long)(d2 >> 26); h2 = (unsigned long)d2 & 0x3ffffff;
d3 += c; c = (unsigned long)(d3 >> 26); h3 = (unsigned long)d3 & 0x3ffffff;
d4 += c; c = (unsigned long)(d4 >> 26); h4 = (unsigned long)d4 & 0x3ffffff;
h0 += c * 5; c = (h0 >> 26); h0 = h0 & 0x3ffffff;
c = (h1 >> 44);
h1 &= 0xfffffffffff;
h2 += c;
c = (h2 >> 42);
h2 &= 0x3ffffffffff;
h0 += c * 5;
c = (h0 >> 44);
h0 &= 0xfffffffffff;
h1 += c;
c = (h1 >> 44);
h1 &= 0xfffffffffff;
h2 += c;
c = (h2 >> 42);
h2 &= 0x3ffffffffff;
h0 += c * 5;
c = (h0 >> 44);
h0 &= 0xfffffffffff;
h1 += c;
m += poly1305_block_size;
bytes -= poly1305_block_size;
}
/* compute h + -p */
g0 = h0 + 5;
c = (g0 >> 44);
g0 &= 0xfffffffffff;
g1 = h1 + c;
c = (g1 >> 44);
g1 &= 0xfffffffffff;
g2 = h2 + c - ((unsigned long long)1 << 42);
st->h[0] = h0;
st->h[1] = h1;
st->h[2] = h2;
st->h[3] = h3;
st->h[4] = h4;
/* select h if h < p, or h + -p if h >= p */
c = (g2 >> ((sizeof(unsigned long long) * 8) - 1)) - 1;
g0 &= c;
g1 &= c;
g2 &= c;
c = ~c;
h0 = (h0 & c) | g0;
h1 = (h1 & c) | g1;
h2 = (h2 & c) | g2;
/* h = (h + pad) */
t0 = st->pad[0];
t1 = st->pad[1];
h0 += ((t0)&0xfffffffffff);
c = (h0 >> 44);
h0 &= 0xfffffffffff;
h1 += (((t0 >> 44) | (t1 << 20)) & 0xfffffffffff) + c;
c = (h1 >> 44);
h1 &= 0xfffffffffff;
h2 += (((t1 >> 24)) & 0x3ffffffffff) + c;
h2 &= 0x3ffffffffff;
/* mac = h % (2^128) */
h0 = ((h0) | (h1 << 44));
h1 = ((h1 >> 20) | (h2 << 24));
U64TO8(&mac[0], h0);
U64TO8(&mac[8], h1);
/* zero out the state */
st->h[0] = 0;
st->h[1] = 0;
st->h[2] = 0;
st->r[0] = 0;
st->r[1] = 0;
st->r[2] = 0;
st->pad[0] = 0;
st->pad[1] = 0;
}
ZT_INLINE void poly1305_finish(poly1305_context *ctx, unsigned char mac[16])
#else // no uint128_t
#define poly1305_block_size 16
typedef struct poly1305_state_internal_t {
unsigned long r[5];
unsigned long h[5];
unsigned long pad[4];
size_t leftover;
unsigned char buffer[poly1305_block_size];
unsigned char final;
} poly1305_state_internal_t;
ZT_INLINE void poly1305_init(poly1305_context* ctx, const unsigned char key[32])
{
poly1305_state_internal_t *st = (poly1305_state_internal_t *)ctx;
unsigned long h0,h1,h2,h3,h4,c;
unsigned long g0,g1,g2,g3,g4;
unsigned long long f;
unsigned long mask;
poly1305_state_internal_t* st = (poly1305_state_internal_t*)ctx;
/* process the remaining block */
if (st->leftover) {
size_t i = st->leftover;
st->buffer[i++] = 1;
for (; i < poly1305_block_size; i++)
st->buffer[i] = 0;
st->final = 1;
poly1305_blocks(st, st->buffer, poly1305_block_size);
}
/* r &= 0xffffffc0ffffffc0ffffffc0fffffff */
st->r[0] = (U8TO32(&key[0])) & 0x3ffffff;
st->r[1] = (U8TO32(&key[3]) >> 2) & 0x3ffff03;
st->r[2] = (U8TO32(&key[6]) >> 4) & 0x3ffc0ff;
st->r[3] = (U8TO32(&key[9]) >> 6) & 0x3f03fff;
st->r[4] = (U8TO32(&key[12]) >> 8) & 0x00fffff;
/* fully carry h */
h0 = st->h[0];
h1 = st->h[1];
h2 = st->h[2];
h3 = st->h[3];
h4 = st->h[4];
/* h = 0 */
st->h[0] = 0;
st->h[1] = 0;
st->h[2] = 0;
st->h[3] = 0;
st->h[4] = 0;
c = h1 >> 26; h1 = h1 & 0x3ffffff;
h2 += c; c = h2 >> 26; h2 = h2 & 0x3ffffff;
h3 += c; c = h3 >> 26; h3 = h3 & 0x3ffffff;
h4 += c; c = h4 >> 26; h4 = h4 & 0x3ffffff;
h0 += c * 5; c = h0 >> 26; h0 = h0 & 0x3ffffff;
h1 += c;
/* save pad for later */
st->pad[0] = U8TO32(&key[16]);
st->pad[1] = U8TO32(&key[20]);
st->pad[2] = U8TO32(&key[24]);
st->pad[3] = U8TO32(&key[28]);
/* compute h + -p */
g0 = h0 + 5; c = g0 >> 26; g0 &= 0x3ffffff;
g1 = h1 + c; c = g1 >> 26; g1 &= 0x3ffffff;
g2 = h2 + c; c = g2 >> 26; g2 &= 0x3ffffff;
g3 = h3 + c; c = g3 >> 26; g3 &= 0x3ffffff;
g4 = h4 + c - (1 << 26);
/* select h if h < p, or h + -p if h >= p */
mask = (g4 >> ((sizeof(unsigned long) * 8) - 1)) - 1;
g0 &= mask;
g1 &= mask;
g2 &= mask;
g3 &= mask;
g4 &= mask;
mask = ~mask;
h0 = (h0 & mask) | g0;
h1 = (h1 & mask) | g1;
h2 = (h2 & mask) | g2;
h3 = (h3 & mask) | g3;
h4 = (h4 & mask) | g4;
/* h = h % (2^128) */
h0 = ((h0 ) | (h1 << 26)) & 0xffffffff;
h1 = ((h1 >> 6) | (h2 << 20)) & 0xffffffff;
h2 = ((h2 >> 12) | (h3 << 14)) & 0xffffffff;
h3 = ((h3 >> 18) | (h4 << 8)) & 0xffffffff;
/* mac = (h + pad) % (2^128) */
f = (unsigned long long)h0 + st->pad[0] ; h0 = (unsigned long)f;
f = (unsigned long long)h1 + st->pad[1] + (f >> 32); h1 = (unsigned long)f;
f = (unsigned long long)h2 + st->pad[2] + (f >> 32); h2 = (unsigned long)f;
f = (unsigned long long)h3 + st->pad[3] + (f >> 32); h3 = (unsigned long)f;
U32TO8(mac + 0, h0);
U32TO8(mac + 4, h1);
U32TO8(mac + 8, h2);
U32TO8(mac + 12, h3);
/* zero out the state */
st->h[0] = 0;
st->h[1] = 0;
st->h[2] = 0;
st->h[3] = 0;
st->h[4] = 0;
st->r[0] = 0;
st->r[1] = 0;
st->r[2] = 0;
st->r[3] = 0;
st->r[4] = 0;
st->pad[0] = 0;
st->pad[1] = 0;
st->pad[2] = 0;
st->pad[3] = 0;
}
#endif // uint128_t or portable version?
ZT_INLINE void poly1305_update(poly1305_context *ctx,const unsigned char *m,size_t bytes) noexcept
{
poly1305_state_internal_t *st = (poly1305_state_internal_t *)ctx;
size_t i;
/* handle leftover */
if (st->leftover) {
size_t want = (poly1305_block_size - st->leftover);
if (want > bytes)
want = bytes;
for (i = 0; i < want; i++)
st->buffer[st->leftover + i] = m[i];
bytes -= want;
m += want;
st->leftover += want;
if (st->leftover < poly1305_block_size)
return;
poly1305_blocks(st, st->buffer, poly1305_block_size);
st->leftover = 0;
}
/* process full blocks */
if (bytes >= poly1305_block_size) {
size_t want = (bytes & ~(poly1305_block_size - 1));
poly1305_blocks(st, m, want);
m += want;
bytes -= want;
}
/* store leftover */
if (bytes) {
for (i = 0; i < bytes; i++)
st->buffer[st->leftover + i] = m[i];
st->leftover += bytes;
}
st->final = 0;
}
} // anonymous namespace
void Poly1305::init(const void *key) noexcept
void poly1305_blocks(poly1305_state_internal_t* st, const unsigned char* m, size_t bytes)
{
static_assert(sizeof(ctx) >= sizeof(poly1305_context),"buffer in class smaller than required structure size");
poly1305_init(reinterpret_cast<poly1305_context *>(&ctx),reinterpret_cast<const unsigned char *>(key));
const unsigned long hibit = (st->final) ? 0 : (1 << 24); /* 1 << 128 */
unsigned long r0, r1, r2, r3, r4;
unsigned long s1, s2, s3, s4;
unsigned long h0, h1, h2, h3, h4;
r0 = st->r[0];
r1 = st->r[1];
r2 = st->r[2];
r3 = st->r[3];
r4 = st->r[4];
s1 = r1 * 5;
s2 = r2 * 5;
s3 = r3 * 5;
s4 = r4 * 5;
h0 = st->h[0];
h1 = st->h[1];
h2 = st->h[2];
h3 = st->h[3];
h4 = st->h[4];
while (bytes >= poly1305_block_size) {
/* h += m[i] */
h0 += (U8TO32(m + 0)) & 0x3ffffff;
h1 += (U8TO32(m + 3) >> 2) & 0x3ffffff;
h2 += (U8TO32(m + 6) >> 4) & 0x3ffffff;
h3 += (U8TO32(m + 9) >> 6) & 0x3ffffff;
h4 += (U8TO32(m + 12) >> 8) | hibit;
/* h *= r */
unsigned long long d0 = ((unsigned long long)h0 * r0) + ((unsigned long long)h1 * s4)
+ ((unsigned long long)h2 * s3) + ((unsigned long long)h3 * s2)
+ ((unsigned long long)h4 * s1);
unsigned long long d1 = ((unsigned long long)h0 * r1) + ((unsigned long long)h1 * r0)
+ ((unsigned long long)h2 * s4) + ((unsigned long long)h3 * s3)
+ ((unsigned long long)h4 * s2);
unsigned long long d2 = ((unsigned long long)h0 * r2) + ((unsigned long long)h1 * r1)
+ ((unsigned long long)h2 * r0) + ((unsigned long long)h3 * s4)
+ ((unsigned long long)h4 * s3);
unsigned long long d3 = ((unsigned long long)h0 * r3) + ((unsigned long long)h1 * r2)
+ ((unsigned long long)h2 * r1) + ((unsigned long long)h3 * r0)
+ ((unsigned long long)h4 * s4);
unsigned long long d4 = ((unsigned long long)h0 * r4) + ((unsigned long long)h1 * r3)
+ ((unsigned long long)h2 * r2) + ((unsigned long long)h3 * r1)
+ ((unsigned long long)h4 * r0);
/* (partial) h %= p */
unsigned long c = (unsigned long)(d0 >> 26);
h0 = (unsigned long)d0 & 0x3ffffff;
d1 += c;
c = (unsigned long)(d1 >> 26);
h1 = (unsigned long)d1 & 0x3ffffff;
d2 += c;
c = (unsigned long)(d2 >> 26);
h2 = (unsigned long)d2 & 0x3ffffff;
d3 += c;
c = (unsigned long)(d3 >> 26);
h3 = (unsigned long)d3 & 0x3ffffff;
d4 += c;
c = (unsigned long)(d4 >> 26);
h4 = (unsigned long)d4 & 0x3ffffff;
h0 += c * 5;
c = (h0 >> 26);
h0 = h0 & 0x3ffffff;
h1 += c;
m += poly1305_block_size;
bytes -= poly1305_block_size;
}
st->h[0] = h0;
st->h[1] = h1;
st->h[2] = h2;
st->h[3] = h3;
st->h[4] = h4;
}
void Poly1305::update(const void *data,unsigned int len) noexcept
ZT_INLINE void poly1305_finish(poly1305_context* ctx, unsigned char mac[16])
{
poly1305_update(reinterpret_cast<poly1305_context *>(&ctx),reinterpret_cast<const unsigned char *>(data),(size_t)len);
poly1305_state_internal_t* st = (poly1305_state_internal_t*)ctx;
unsigned long h0, h1, h2, h3, h4, c;
unsigned long g0, g1, g2, g3, g4;
unsigned long long f;
unsigned long mask;
/* process the remaining block */
if (st->leftover) {
size_t i = st->leftover;
st->buffer[i++] = 1;
for (; i < poly1305_block_size; i++)
st->buffer[i] = 0;
st->final = 1;
poly1305_blocks(st, st->buffer, poly1305_block_size);
}
/* fully carry h */
h0 = st->h[0];
h1 = st->h[1];
h2 = st->h[2];
h3 = st->h[3];
h4 = st->h[4];
c = h1 >> 26;
h1 = h1 & 0x3ffffff;
h2 += c;
c = h2 >> 26;
h2 = h2 & 0x3ffffff;
h3 += c;
c = h3 >> 26;
h3 = h3 & 0x3ffffff;
h4 += c;
c = h4 >> 26;
h4 = h4 & 0x3ffffff;
h0 += c * 5;
c = h0 >> 26;
h0 = h0 & 0x3ffffff;
h1 += c;
/* compute h + -p */
g0 = h0 + 5;
c = g0 >> 26;
g0 &= 0x3ffffff;
g1 = h1 + c;
c = g1 >> 26;
g1 &= 0x3ffffff;
g2 = h2 + c;
c = g2 >> 26;
g2 &= 0x3ffffff;
g3 = h3 + c;
c = g3 >> 26;
g3 &= 0x3ffffff;
g4 = h4 + c - (1 << 26);
/* select h if h < p, or h + -p if h >= p */
mask = (g4 >> ((sizeof(unsigned long) * 8) - 1)) - 1;
g0 &= mask;
g1 &= mask;
g2 &= mask;
g3 &= mask;
g4 &= mask;
mask = ~mask;
h0 = (h0 & mask) | g0;
h1 = (h1 & mask) | g1;
h2 = (h2 & mask) | g2;
h3 = (h3 & mask) | g3;
h4 = (h4 & mask) | g4;
/* h = h % (2^128) */
h0 = ((h0) | (h1 << 26)) & 0xffffffff;
h1 = ((h1 >> 6) | (h2 << 20)) & 0xffffffff;
h2 = ((h2 >> 12) | (h3 << 14)) & 0xffffffff;
h3 = ((h3 >> 18) | (h4 << 8)) & 0xffffffff;
/* mac = (h + pad) % (2^128) */
f = (unsigned long long)h0 + st->pad[0];
h0 = (unsigned long)f;
f = (unsigned long long)h1 + st->pad[1] + (f >> 32);
h1 = (unsigned long)f;
f = (unsigned long long)h2 + st->pad[2] + (f >> 32);
h2 = (unsigned long)f;
f = (unsigned long long)h3 + st->pad[3] + (f >> 32);
h3 = (unsigned long)f;
U32TO8(mac + 0, h0);
U32TO8(mac + 4, h1);
U32TO8(mac + 8, h2);
U32TO8(mac + 12, h3);
/* zero out the state */
st->h[0] = 0;
st->h[1] = 0;
st->h[2] = 0;
st->h[3] = 0;
st->h[4] = 0;
st->r[0] = 0;
st->r[1] = 0;
st->r[2] = 0;
st->r[3] = 0;
st->r[4] = 0;
st->pad[0] = 0;
st->pad[1] = 0;
st->pad[2] = 0;
st->pad[3] = 0;
}
void Poly1305::finish(void *auth) noexcept
#endif // uint128_t or portable version?
ZT_INLINE void poly1305_update(poly1305_context* ctx, const unsigned char* m, size_t bytes) noexcept
{
poly1305_finish(reinterpret_cast<poly1305_context *>(&ctx),reinterpret_cast<unsigned char *>(auth));
poly1305_state_internal_t* st = (poly1305_state_internal_t*)ctx;
size_t i;
/* handle leftover */
if (st->leftover) {
size_t want = (poly1305_block_size - st->leftover);
if (want > bytes)
want = bytes;
for (i = 0; i < want; i++)
st->buffer[st->leftover + i] = m[i];
bytes -= want;
m += want;
st->leftover += want;
if (st->leftover < poly1305_block_size)
return;
poly1305_blocks(st, st->buffer, poly1305_block_size);
st->leftover = 0;
}
/* process full blocks */
if (bytes >= poly1305_block_size) {
size_t want = (bytes & ~(poly1305_block_size - 1));
poly1305_blocks(st, m, want);
m += want;
bytes -= want;
}
/* store leftover */
if (bytes) {
for (i = 0; i < bytes; i++)
st->buffer[st->leftover + i] = m[i];
st->leftover += bytes;
}
}
} // namespace ZeroTier
} // anonymous namespace
void Poly1305::init(const void* key) noexcept
{
static_assert(sizeof(ctx) >= sizeof(poly1305_context), "buffer in class smaller than required structure size");
poly1305_init(reinterpret_cast<poly1305_context*>(&ctx), reinterpret_cast<const unsigned char*>(key));
}
void Poly1305::update(const void* data, unsigned int len) noexcept
{
poly1305_update(
reinterpret_cast<poly1305_context*>(&ctx),
reinterpret_cast<const unsigned char*>(data),
(size_t)len);
}
void Poly1305::finish(void* auth) noexcept
{
poly1305_finish(reinterpret_cast<poly1305_context*>(&ctx), reinterpret_cast<unsigned char*>(auth));
}
} // namespace ZeroTier

View file

@@ -22,34 +22,36 @@ namespace ZeroTier {
/**
* Poly1305 one-time MAC calculator
*/
class Poly1305
{
public:
ZT_INLINE Poly1305()
{}
class Poly1305 {
public:
ZT_INLINE Poly1305()
{
}
ZT_INLINE Poly1305(const void *key)
{ this->init(key); }
ZT_INLINE Poly1305(const void* key)
{
this->init(key);
}
void init(const void *key) noexcept;
void update(const void *data, unsigned int len) noexcept;
void finish(void *auth) noexcept;
void init(const void* key) noexcept;
void update(const void* data, unsigned int len) noexcept;
void finish(void* auth) noexcept;
static ZT_INLINE void compute(void *const auth, const void *const data, const unsigned int len, const void *const key) noexcept
{
Poly1305 p(key);
p.update(data, len);
p.finish(auth);
}
static ZT_INLINE void
compute(void* const auth, const void* const data, const unsigned int len, const void* const key) noexcept
{
Poly1305 p(key);
p.update(data, len);
p.finish(auth);
}
private:
struct
{
size_t aligner;
unsigned char opaque[136];
} ctx;
private:
struct {
size_t aligner;
unsigned char opaque[136];
} ctx;
};
} // namespace ZeroTier
} // namespace ZeroTier
#endif
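For reference, a minimal usage sketch of the class above, showing both the one-shot helper and the incremental interface. The key and message bytes are illustrative placeholders only; a real Poly1305 key is a one-time key and must never be reused.

#include "Poly1305.hpp"

#include <cstdint>

void poly1305Example()
{
    uint8_t key[32] = { 0 };          // 32-byte one-time key (placeholder)
    const uint8_t msg[] = { 'h', 'i' };
    uint8_t tag[16];                  // 16-byte authenticator

    // One-shot:
    ZeroTier::Poly1305::compute(tag, msg, sizeof(msg), key);

    // Incremental, equivalent result:
    ZeroTier::Poly1305 p(key);
    p.update(msg, sizeof(msg));
    p.finish(tag);
}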

File diff suppressed because it is too large Load diff

View file

@@ -7,7 +7,7 @@ Give it wire packets and it gives you Ethernet packets, and vice versa. The core
Code in here follows these guidelines:
- Keep it minimal, especially in terms of code footprint and memory use.
- There should be no OS-dependent code here unless absolutely necessary (e.g. getSecureRandom).
- If it's not part of the core virtual Ethernet switch it does not belong here.
- Minimize the use of complex C++ features since at some point we might end up "minus-minus'ing" this code if doing so proves necessary to port to tiny embedded systems.
- Keep it minimal, especially in terms of code footprint and memory use.
- There should be no OS-dependent code here unless absolutely necessary (e.g. getSecureRandom).
- If it's not part of the core virtual Ethernet switch it does not belong here.
- Minimize the use of complex C++ features since at some point we might end up "minus-minus'ing" this code if doing so proves necessary to port to tiny embedded systems.

View file

@@ -15,78 +15,78 @@
namespace ZeroTier {
bool RevocationCredential::sign(const Identity &signer) noexcept
bool RevocationCredential::sign(const Identity& signer) noexcept
{
uint8_t buf[ZT_REVOCATION_MARSHAL_SIZE_MAX + 32];
if (signer.hasPrivate()) {
m_signedBy = signer.address();
m_signatureLength = signer.sign(buf, (unsigned int)marshal(buf, true), m_signature, sizeof(m_signature));
return true;
}
return false;
uint8_t buf[ZT_REVOCATION_MARSHAL_SIZE_MAX + 32];
if (signer.hasPrivate()) {
m_signedBy = signer.address();
m_signatureLength = signer.sign(buf, (unsigned int)marshal(buf, true), m_signature, sizeof(m_signature));
return true;
}
return false;
}
int RevocationCredential::marshal(uint8_t data[ZT_REVOCATION_MARSHAL_SIZE_MAX], bool forSign) const noexcept
{
int p = 0;
if (forSign) {
for (int k = 0; k < 8; ++k)
data[p++] = 0x7f;
}
Utils::storeBigEndian< uint32_t >(data + p, 0);
Utils::storeBigEndian< uint32_t >(data + p + 4, m_id);
Utils::storeBigEndian< uint64_t >(data + p + 8, m_networkId);
Utils::storeBigEndian< uint32_t >(data + p + 16, 0);
Utils::storeBigEndian< uint32_t >(data + p + 20, m_credentialId);
Utils::storeBigEndian< uint64_t >(data + p + 24, (uint64_t)m_threshold);
Utils::storeBigEndian< uint64_t >(data + p + 32, m_flags);
p += 40;
m_target.copyTo(data + p);
p += ZT_ADDRESS_LENGTH;
m_signedBy.copyTo(data + p);
p += ZT_ADDRESS_LENGTH;
data[p++] = (uint8_t)m_type;
if (!forSign) {
data[p++] = 1;
Utils::storeBigEndian< uint16_t >(data + p, (uint16_t)m_signatureLength);
Utils::copy(data + p, m_signature, m_signatureLength);
p += (int)m_signatureLength;
}
data[p++] = 0;
data[p++] = 0;
if (forSign) {
for (int k = 0; k < 8; ++k)
data[p++] = 0x7f;
}
return p;
int p = 0;
if (forSign) {
for (int k = 0; k < 8; ++k)
data[p++] = 0x7f;
}
Utils::storeBigEndian<uint32_t>(data + p, 0);
Utils::storeBigEndian<uint32_t>(data + p + 4, m_id);
Utils::storeBigEndian<uint64_t>(data + p + 8, m_networkId);
Utils::storeBigEndian<uint32_t>(data + p + 16, 0);
Utils::storeBigEndian<uint32_t>(data + p + 20, m_credentialId);
Utils::storeBigEndian<uint64_t>(data + p + 24, (uint64_t)m_threshold);
Utils::storeBigEndian<uint64_t>(data + p + 32, m_flags);
p += 40;
m_target.copyTo(data + p);
p += ZT_ADDRESS_LENGTH;
m_signedBy.copyTo(data + p);
p += ZT_ADDRESS_LENGTH;
data[p++] = (uint8_t)m_type;
if (! forSign) {
data[p++] = 1;
Utils::storeBigEndian<uint16_t>(data + p, (uint16_t)m_signatureLength);
Utils::copy(data + p, m_signature, m_signatureLength);
p += (int)m_signatureLength;
}
data[p++] = 0;
data[p++] = 0;
if (forSign) {
for (int k = 0; k < 8; ++k)
data[p++] = 0x7f;
}
return p;
}
int RevocationCredential::unmarshal(const uint8_t *restrict data, const int len) noexcept
int RevocationCredential::unmarshal(const uint8_t* restrict data, const int len) noexcept
{
if (len < 54)
return -1;
// 4 bytes reserved
m_id = Utils::loadBigEndian< uint32_t >(data + 4);
m_networkId = Utils::loadBigEndian< uint64_t >(data + 8);
// 4 bytes reserved
m_credentialId = Utils::loadBigEndian< uint32_t >(data + 20);
m_threshold = (int64_t)Utils::loadBigEndian< uint64_t >(data + 24);
m_flags = Utils::loadBigEndian< uint64_t >(data + 32);
m_target.setTo(data + 40);
m_signedBy.setTo(data + 45);
m_type = (ZT_CredentialType)data[50];
// 1 byte reserved
m_signatureLength = Utils::loadBigEndian< uint16_t >(data + 52);
int p = 54 + (int)m_signatureLength;
if ((m_signatureLength > ZT_SIGNATURE_BUFFER_SIZE) || (p > len))
return -1;
Utils::copy(m_signature, data + 54, m_signatureLength);
if ((p + 2) > len)
return -1;
p += 2 + Utils::loadBigEndian< uint16_t >(data + p);
if (p > len)
return -1;
return p;
if (len < 54)
return -1;
// 4 bytes reserved
m_id = Utils::loadBigEndian<uint32_t>(data + 4);
m_networkId = Utils::loadBigEndian<uint64_t>(data + 8);
// 4 bytes reserved
m_credentialId = Utils::loadBigEndian<uint32_t>(data + 20);
m_threshold = (int64_t)Utils::loadBigEndian<uint64_t>(data + 24);
m_flags = Utils::loadBigEndian<uint64_t>(data + 32);
m_target.setTo(data + 40);
m_signedBy.setTo(data + 45);
m_type = (ZT_CredentialType)data[50];
// 1 byte reserved
m_signatureLength = Utils::loadBigEndian<uint16_t>(data + 52);
int p = 54 + (int)m_signatureLength;
if ((m_signatureLength > ZT_SIGNATURE_BUFFER_SIZE) || (p > len))
return -1;
Utils::copy(m_signature, data + 54, m_signatureLength);
if ((p + 2) > len)
return -1;
p += 2 + Utils::loadBigEndian<uint16_t>(data + p);
if (p > len)
return -1;
return p;
}
} // namespace ZeroTier
} // namespace ZeroTier

View file

@@ -14,12 +14,12 @@
#ifndef ZT_REVOCATION_HPP
#define ZT_REVOCATION_HPP
#include "Constants.hpp"
#include "Credential.hpp"
#include "Address.hpp"
#include "C25519.hpp"
#include "Utils.hpp"
#include "Constants.hpp"
#include "Credential.hpp"
#include "Identity.hpp"
#include "Utils.hpp"
/**
* Flag: fast propagation via rumor mill algorithm
@@ -35,102 +35,138 @@ class Context;
/**
* Revocation certificate to instantaneously revoke a COM, capability, or tag
*/
class RevocationCredential : public Credential
{
friend class Credential;
class RevocationCredential : public Credential {
friend class Credential;
public:
static constexpr ZT_CredentialType credentialType() noexcept
{ return ZT_CREDENTIAL_TYPE_REVOCATION; }
public:
static constexpr ZT_CredentialType credentialType() noexcept
{
return ZT_CREDENTIAL_TYPE_REVOCATION;
}
ZT_INLINE RevocationCredential() noexcept
{ memoryZero(this); }
ZT_INLINE RevocationCredential() noexcept
{
memoryZero(this);
}
/**
* @param i ID (arbitrary for revocations, currently random)
* @param nwid Network ID
* @param cid Credential ID being revoked (0 for all or for COMs, which lack IDs)
* @param thr Revocation time threshold before which credentials will be revoked
* @param fl Flags
* @param tgt Target node whose credential(s) are being revoked
* @param ct Credential type being revoked
*/
ZT_INLINE RevocationCredential(const uint32_t i, const uint64_t nwid, const uint32_t cid, const uint64_t thr, const uint64_t fl, const Address &tgt, const ZT_CredentialType ct) noexcept: // NOLINT(cppcoreguidelines-pro-type-member-init,hicpp-member-init)
m_id(i),
m_credentialId(cid),
m_networkId(nwid),
m_threshold(thr),
m_flags(fl),
m_target(tgt),
m_signedBy(),
m_type(ct),
m_signatureLength(0)
{}
/**
* @param i ID (arbitrary for revocations, currently random)
* @param nwid Network ID
* @param cid Credential ID being revoked (0 for all or for COMs, which lack IDs)
* @param thr Revocation time threshold before which credentials will be revoked
* @param fl Flags
* @param tgt Target node whose credential(s) are being revoked
* @param ct Credential type being revoked
*/
ZT_INLINE RevocationCredential(
const uint32_t i,
const uint64_t nwid,
const uint32_t cid,
const uint64_t thr,
const uint64_t fl,
const Address& tgt,
const ZT_CredentialType ct) noexcept
: // NOLINT(cppcoreguidelines-pro-type-member-init,hicpp-member-init)
m_id(i)
, m_credentialId(cid)
, m_networkId(nwid)
, m_threshold(thr)
, m_flags(fl)
, m_target(tgt)
, m_signedBy()
, m_type(ct)
, m_signatureLength(0)
{
}
ZT_INLINE uint32_t id() const noexcept
{ return m_id; }
ZT_INLINE uint32_t id() const noexcept
{
return m_id;
}
ZT_INLINE uint32_t credentialId() const noexcept
{ return m_credentialId; }
ZT_INLINE uint32_t credentialId() const noexcept
{
return m_credentialId;
}
ZT_INLINE uint64_t networkId() const noexcept
{ return m_networkId; }
ZT_INLINE uint64_t networkId() const noexcept
{
return m_networkId;
}
ZT_INLINE int64_t threshold() const noexcept
{ return m_threshold; }
ZT_INLINE int64_t threshold() const noexcept
{
return m_threshold;
}
ZT_INLINE const Address &target() const noexcept
{ return m_target; }
ZT_INLINE const Address& target() const noexcept
{
return m_target;
}
ZT_INLINE const Address &signer() const noexcept
{ return m_signedBy; }
ZT_INLINE const Address& signer() const noexcept
{
return m_signedBy;
}
ZT_INLINE ZT_CredentialType typeBeingRevoked() const noexcept
{ return m_type; }
ZT_INLINE ZT_CredentialType typeBeingRevoked() const noexcept
{
return m_type;
}
ZT_INLINE const uint8_t *signature() const noexcept
{ return m_signature; }
ZT_INLINE const uint8_t* signature() const noexcept
{
return m_signature;
}
ZT_INLINE unsigned int signatureLength() const noexcept
{ return m_signatureLength; }
ZT_INLINE unsigned int signatureLength() const noexcept
{
return m_signatureLength;
}
ZT_INLINE bool fastPropagate() const noexcept
{ return ((m_flags & ZT_REVOCATION_FLAG_FAST_PROPAGATE) != 0); }
ZT_INLINE bool fastPropagate() const noexcept
{
return ((m_flags & ZT_REVOCATION_FLAG_FAST_PROPAGATE) != 0);
}
/**
* @param signer Signing identity, must have private key
* @return True if signature was successful
*/
bool sign(const Identity &signer) noexcept;
/**
* @param signer Signing identity, must have private key
* @return True if signature was successful
*/
bool sign(const Identity& signer) noexcept;
/**
* Verify this revocation's signature
*
* @param RR Runtime environment to provide for peer lookup, etc.
* @param tPtr Thread pointer to be handed through to any callbacks called as a result of this call
*/
ZT_INLINE Credential::VerifyResult verify(const Context &ctx, const CallContext &cc) const noexcept
{ return s_verify(ctx, cc, *this); }
/**
* Verify this revocation's signature
*
* @param RR Runtime environment to provide for peer lookup, etc.
* @param tPtr Thread pointer to be handed through to any callbacks called as a result of this call
*/
ZT_INLINE Credential::VerifyResult verify(const Context& ctx, const CallContext& cc) const noexcept
{
return s_verify(ctx, cc, *this);
}
static constexpr int marshalSizeMax() noexcept
{ return ZT_REVOCATION_MARSHAL_SIZE_MAX; }
static constexpr int marshalSizeMax() noexcept
{
return ZT_REVOCATION_MARSHAL_SIZE_MAX;
}
int marshal(uint8_t data[ZT_REVOCATION_MARSHAL_SIZE_MAX], bool forSign = false) const noexcept;
int unmarshal(const uint8_t *restrict data, int len) noexcept;
int marshal(uint8_t data[ZT_REVOCATION_MARSHAL_SIZE_MAX], bool forSign = false) const noexcept;
int unmarshal(const uint8_t* restrict data, int len) noexcept;
private:
uint32_t m_id;
uint32_t m_credentialId;
uint64_t m_networkId;
int64_t m_threshold;
uint64_t m_flags;
Address m_target;
Address m_signedBy;
ZT_CredentialType m_type;
unsigned int m_signatureLength;
uint8_t m_signature[ZT_SIGNATURE_BUFFER_SIZE];
private:
uint32_t m_id;
uint32_t m_credentialId;
uint64_t m_networkId;
int64_t m_threshold;
uint64_t m_flags;
Address m_target;
Address m_signedBy;
ZT_CredentialType m_type;
unsigned int m_signatureLength;
uint8_t m_signature[ZT_SIGNATURE_BUFFER_SIZE];
};
} // namespace ZeroTier
} // namespace ZeroTier
#endif
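As a quick orientation for the wire format handled by marshal() and unmarshal(), a hedged round-trip sketch follows. The credential rc is assumed to be already populated and signed elsewhere; the helper name is illustrative.

#include "RevocationCredential.hpp"

#include <cstdint>

int revocationRoundTripExample(const ZeroTier::RevocationCredential& rc, ZeroTier::RevocationCredential& out)
{
    uint8_t buf[ZeroTier::RevocationCredential::marshalSizeMax()];
    const int len = rc.marshal(buf);   // full encoding including the signature
    if (len <= 0)
        return -1;
    return out.unmarshal(buf, len);    // a negative result indicates a malformed buffer
}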

View file

@@ -1,6 +1,7 @@
// This code is public domain, taken from a PD crypto source file on GitHub.
#include "SHA512.hpp"
#include "Utils.hpp"
namespace ZeroTier {
@@ -10,265 +11,267 @@ namespace ZeroTier {
namespace {
struct sha512_state {
uint64_t length,state[8];
unsigned long curlen;
uint8_t buf[128];
uint64_t length, state[8];
unsigned long curlen;
uint8_t buf[128];
};
static const uint64_t K[80] = {
0x428a2f98d728ae22ULL,0x7137449123ef65cdULL,0xb5c0fbcfec4d3b2fULL,0xe9b5dba58189dbbcULL,
0x3956c25bf348b538ULL,0x59f111f1b605d019ULL,0x923f82a4af194f9bULL,0xab1c5ed5da6d8118ULL,
0xd807aa98a3030242ULL,0x12835b0145706fbeULL,0x243185be4ee4b28cULL,0x550c7dc3d5ffb4e2ULL,
0x72be5d74f27b896fULL,0x80deb1fe3b1696b1ULL,0x9bdc06a725c71235ULL,0xc19bf174cf692694ULL,
0xe49b69c19ef14ad2ULL,0xefbe4786384f25e3ULL,0x0fc19dc68b8cd5b5ULL,0x240ca1cc77ac9c65ULL,
0x2de92c6f592b0275ULL,0x4a7484aa6ea6e483ULL,0x5cb0a9dcbd41fbd4ULL,0x76f988da831153b5ULL,
0x983e5152ee66dfabULL,0xa831c66d2db43210ULL,0xb00327c898fb213fULL,0xbf597fc7beef0ee4ULL,
0xc6e00bf33da88fc2ULL,0xd5a79147930aa725ULL,0x06ca6351e003826fULL,0x142929670a0e6e70ULL,
0x27b70a8546d22ffcULL,0x2e1b21385c26c926ULL,0x4d2c6dfc5ac42aedULL,0x53380d139d95b3dfULL,
0x650a73548baf63deULL,0x766a0abb3c77b2a8ULL,0x81c2c92e47edaee6ULL,0x92722c851482353bULL,
0xa2bfe8a14cf10364ULL,0xa81a664bbc423001ULL,0xc24b8b70d0f89791ULL,0xc76c51a30654be30ULL,
0xd192e819d6ef5218ULL,0xd69906245565a910ULL,0xf40e35855771202aULL,0x106aa07032bbd1b8ULL,
0x19a4c116b8d2d0c8ULL,0x1e376c085141ab53ULL,0x2748774cdf8eeb99ULL,0x34b0bcb5e19b48a8ULL,
0x391c0cb3c5c95a63ULL,0x4ed8aa4ae3418acbULL,0x5b9cca4f7763e373ULL,0x682e6ff3d6b2b8a3ULL,
0x748f82ee5defb2fcULL,0x78a5636f43172f60ULL,0x84c87814a1f0ab72ULL,0x8cc702081a6439ecULL,
0x90befffa23631e28ULL,0xa4506cebde82bde9ULL,0xbef9a3f7b2c67915ULL,0xc67178f2e372532bULL,
0xca273eceea26619cULL,0xd186b8c721c0c207ULL,0xeada7dd6cde0eb1eULL,0xf57d4f7fee6ed178ULL,
0x06f067aa72176fbaULL,0x0a637dc5a2c898a6ULL,0x113f9804bef90daeULL,0x1b710b35131c471bULL,
0x28db77f523047d84ULL,0x32caab7b40c72493ULL,0x3c9ebe0a15c9bebcULL,0x431d67c49c100d4cULL,
0x4cc5d4becb3e42b6ULL,0x597f299cfc657e2aULL,0x5fcb6fab3ad6faecULL,0x6c44198c4a475817ULL
0x428a2f98d728ae22ULL, 0x7137449123ef65cdULL, 0xb5c0fbcfec4d3b2fULL, 0xe9b5dba58189dbbcULL, 0x3956c25bf348b538ULL,
0x59f111f1b605d019ULL, 0x923f82a4af194f9bULL, 0xab1c5ed5da6d8118ULL, 0xd807aa98a3030242ULL, 0x12835b0145706fbeULL,
0x243185be4ee4b28cULL, 0x550c7dc3d5ffb4e2ULL, 0x72be5d74f27b896fULL, 0x80deb1fe3b1696b1ULL, 0x9bdc06a725c71235ULL,
0xc19bf174cf692694ULL, 0xe49b69c19ef14ad2ULL, 0xefbe4786384f25e3ULL, 0x0fc19dc68b8cd5b5ULL, 0x240ca1cc77ac9c65ULL,
0x2de92c6f592b0275ULL, 0x4a7484aa6ea6e483ULL, 0x5cb0a9dcbd41fbd4ULL, 0x76f988da831153b5ULL, 0x983e5152ee66dfabULL,
0xa831c66d2db43210ULL, 0xb00327c898fb213fULL, 0xbf597fc7beef0ee4ULL, 0xc6e00bf33da88fc2ULL, 0xd5a79147930aa725ULL,
0x06ca6351e003826fULL, 0x142929670a0e6e70ULL, 0x27b70a8546d22ffcULL, 0x2e1b21385c26c926ULL, 0x4d2c6dfc5ac42aedULL,
0x53380d139d95b3dfULL, 0x650a73548baf63deULL, 0x766a0abb3c77b2a8ULL, 0x81c2c92e47edaee6ULL, 0x92722c851482353bULL,
0xa2bfe8a14cf10364ULL, 0xa81a664bbc423001ULL, 0xc24b8b70d0f89791ULL, 0xc76c51a30654be30ULL, 0xd192e819d6ef5218ULL,
0xd69906245565a910ULL, 0xf40e35855771202aULL, 0x106aa07032bbd1b8ULL, 0x19a4c116b8d2d0c8ULL, 0x1e376c085141ab53ULL,
0x2748774cdf8eeb99ULL, 0x34b0bcb5e19b48a8ULL, 0x391c0cb3c5c95a63ULL, 0x4ed8aa4ae3418acbULL, 0x5b9cca4f7763e373ULL,
0x682e6ff3d6b2b8a3ULL, 0x748f82ee5defb2fcULL, 0x78a5636f43172f60ULL, 0x84c87814a1f0ab72ULL, 0x8cc702081a6439ecULL,
0x90befffa23631e28ULL, 0xa4506cebde82bde9ULL, 0xbef9a3f7b2c67915ULL, 0xc67178f2e372532bULL, 0xca273eceea26619cULL,
0xd186b8c721c0c207ULL, 0xeada7dd6cde0eb1eULL, 0xf57d4f7fee6ed178ULL, 0x06f067aa72176fbaULL, 0x0a637dc5a2c898a6ULL,
0x113f9804bef90daeULL, 0x1b710b35131c471bULL, 0x28db77f523047d84ULL, 0x32caab7b40c72493ULL, 0x3c9ebe0a15c9bebcULL,
0x431d67c49c100d4cULL, 0x4cc5d4becb3e42b6ULL, 0x597f299cfc657e2aULL, 0x5fcb6fab3ad6faecULL, 0x6c44198c4a475817ULL
};
#define STORE64H(x, y) Utils::storeBigEndian<uint64_t>(y,x)
#define LOAD64H(x, y) x = Utils::loadBigEndian<uint64_t>(y)
#define ROL64c(x,y) (((x)<<(y)) | ((x)>>(64-(y))))
#define ROR64c(x,y) (((x)>>(y)) | ((x)<<(64-(y))))
#define Ch(x,y,z) (z ^ (x & (y ^ z)))
#define Maj(x,y,z) (((x | y) & z) | (x & y))
#define S(x, n) ROR64c(x, n)
#define R(x, n) ((x)>>(n))
#define Sigma0(x) (S(x, 28) ^ S(x, 34) ^ S(x, 39))
#define Sigma1(x) (S(x, 14) ^ S(x, 18) ^ S(x, 41))
#define Gamma0(x) (S(x, 1) ^ S(x, 8) ^ R(x, 7))
#define Gamma1(x) (S(x, 19) ^ S(x, 61) ^ R(x, 6))
#define STORE64H(x, y) Utils::storeBigEndian<uint64_t>(y, x)
#define LOAD64H(x, y) x = Utils::loadBigEndian<uint64_t>(y)
#define ROL64c(x, y) (((x) << (y)) | ((x) >> (64 - (y))))
#define ROR64c(x, y) (((x) >> (y)) | ((x) << (64 - (y))))
#define Ch(x, y, z) (z ^ (x & (y ^ z)))
#define Maj(x, y, z) (((x | y) & z) | (x & y))
#define S(x, n) ROR64c(x, n)
#define R(x, n) ((x) >> (n))
#define Sigma0(x) (S(x, 28) ^ S(x, 34) ^ S(x, 39))
#define Sigma1(x) (S(x, 14) ^ S(x, 18) ^ S(x, 41))
#define Gamma0(x) (S(x, 1) ^ S(x, 8) ^ R(x, 7))
#define Gamma1(x) (S(x, 19) ^ S(x, 61) ^ R(x, 6))
static ZT_INLINE void sha512_compress(sha512_state *const md,uint8_t *const buf)
static ZT_INLINE void sha512_compress(sha512_state* const md, uint8_t* const buf)
{
uint64_t S[8], W[80], t0, t1;
int i;
uint64_t S[8], W[80], t0, t1;
int i;
for (i = 0; i < 8; i++)
S[i] = md->state[i];
for (i = 0; i < 16; i++)
LOAD64H(W[i], buf + (8*i));
for (i = 16; i < 80; i++)
W[i] = Gamma1(W[i - 2]) + W[i - 7] + Gamma0(W[i - 15]) + W[i - 16];
for (i = 0; i < 8; i++)
S[i] = md->state[i];
for (i = 0; i < 16; i++)
LOAD64H(W[i], buf + (8 * i));
for (i = 16; i < 80; i++)
W[i] = Gamma1(W[i - 2]) + W[i - 7] + Gamma0(W[i - 15]) + W[i - 16];
#define RND(a,b,c,d,e,f,g,h,i) \
t0 = h + Sigma1(e) + Ch(e, f, g) + K[i] + W[i]; \
t1 = Sigma0(a) + Maj(a, b, c); \
d += t0; \
h = t0 + t1;
#define RND(a, b, c, d, e, f, g, h, i) \
t0 = h + Sigma1(e) + Ch(e, f, g) + K[i] + W[i]; \
t1 = Sigma0(a) + Maj(a, b, c); \
d += t0; \
h = t0 + t1;
for (i = 0; i < 80; i += 8) {
RND(S[0],S[1],S[2],S[3],S[4],S[5],S[6],S[7],i+0);
RND(S[7],S[0],S[1],S[2],S[3],S[4],S[5],S[6],i+1);
RND(S[6],S[7],S[0],S[1],S[2],S[3],S[4],S[5],i+2);
RND(S[5],S[6],S[7],S[0],S[1],S[2],S[3],S[4],i+3);
RND(S[4],S[5],S[6],S[7],S[0],S[1],S[2],S[3],i+4);
RND(S[3],S[4],S[5],S[6],S[7],S[0],S[1],S[2],i+5);
RND(S[2],S[3],S[4],S[5],S[6],S[7],S[0],S[1],i+6);
RND(S[1],S[2],S[3],S[4],S[5],S[6],S[7],S[0],i+7);
}
for (i = 0; i < 80; i += 8) {
RND(S[0], S[1], S[2], S[3], S[4], S[5], S[6], S[7], i + 0);
RND(S[7], S[0], S[1], S[2], S[3], S[4], S[5], S[6], i + 1);
RND(S[6], S[7], S[0], S[1], S[2], S[3], S[4], S[5], i + 2);
RND(S[5], S[6], S[7], S[0], S[1], S[2], S[3], S[4], i + 3);
RND(S[4], S[5], S[6], S[7], S[0], S[1], S[2], S[3], i + 4);
RND(S[3], S[4], S[5], S[6], S[7], S[0], S[1], S[2], i + 5);
RND(S[2], S[3], S[4], S[5], S[6], S[7], S[0], S[1], i + 6);
RND(S[1], S[2], S[3], S[4], S[5], S[6], S[7], S[0], i + 7);
}
for (i = 0; i < 8; i++)
md->state[i] = md->state[i] + S[i];
for (i = 0; i < 8; i++)
md->state[i] = md->state[i] + S[i];
}
static ZT_INLINE void sha384_init(sha512_state *const md)
static ZT_INLINE void sha384_init(sha512_state* const md)
{
md->curlen = 0;
md->length = 0;
md->state[0] = 0xcbbb9d5dc1059ed8ULL;
md->state[1] = 0x629a292a367cd507ULL;
md->state[2] = 0x9159015a3070dd17ULL;
md->state[3] = 0x152fecd8f70e5939ULL;
md->state[4] = 0x67332667ffc00b31ULL;
md->state[5] = 0x8eb44a8768581511ULL;
md->state[6] = 0xdb0c2e0d64f98fa7ULL;
md->state[7] = 0x47b5481dbefa4fa4ULL;
md->curlen = 0;
md->length = 0;
md->state[0] = 0xcbbb9d5dc1059ed8ULL;
md->state[1] = 0x629a292a367cd507ULL;
md->state[2] = 0x9159015a3070dd17ULL;
md->state[3] = 0x152fecd8f70e5939ULL;
md->state[4] = 0x67332667ffc00b31ULL;
md->state[5] = 0x8eb44a8768581511ULL;
md->state[6] = 0xdb0c2e0d64f98fa7ULL;
md->state[7] = 0x47b5481dbefa4fa4ULL;
}
static ZT_INLINE void sha512_init(sha512_state *const md)
static ZT_INLINE void sha512_init(sha512_state* const md)
{
md->curlen = 0;
md->length = 0;
md->state[0] = 0x6a09e667f3bcc908ULL;
md->state[1] = 0xbb67ae8584caa73bULL;
md->state[2] = 0x3c6ef372fe94f82bULL;
md->state[3] = 0xa54ff53a5f1d36f1ULL;
md->state[4] = 0x510e527fade682d1ULL;
md->state[5] = 0x9b05688c2b3e6c1fULL;
md->state[6] = 0x1f83d9abfb41bd6bULL;
md->state[7] = 0x5be0cd19137e2179ULL;
md->curlen = 0;
md->length = 0;
md->state[0] = 0x6a09e667f3bcc908ULL;
md->state[1] = 0xbb67ae8584caa73bULL;
md->state[2] = 0x3c6ef372fe94f82bULL;
md->state[3] = 0xa54ff53a5f1d36f1ULL;
md->state[4] = 0x510e527fade682d1ULL;
md->state[5] = 0x9b05688c2b3e6c1fULL;
md->state[6] = 0x1f83d9abfb41bd6bULL;
md->state[7] = 0x5be0cd19137e2179ULL;
}
static void sha512_process(sha512_state *const md,const uint8_t *in,unsigned long inlen)
static void sha512_process(sha512_state* const md, const uint8_t* in, unsigned long inlen)
{
while (inlen > 0) {
if (md->curlen == 0 && inlen >= 128) {
sha512_compress(md,(uint8_t *)in);
md->length += 128 * 8;
in += 128;
inlen -= 128;
} else {
unsigned long n = std::min(inlen,(128 - md->curlen));
Utils::copy(md->buf + md->curlen,in,n);
md->curlen += n;
in += n;
inlen -= n;
if (md->curlen == 128) {
sha512_compress(md,md->buf);
md->length += 8*128;
md->curlen = 0;
}
}
}
while (inlen > 0) {
if (md->curlen == 0 && inlen >= 128) {
sha512_compress(md, (uint8_t*)in);
md->length += 128 * 8;
in += 128;
inlen -= 128;
}
else {
unsigned long n = std::min(inlen, (128 - md->curlen));
Utils::copy(md->buf + md->curlen, in, n);
md->curlen += n;
in += n;
inlen -= n;
if (md->curlen == 128) {
sha512_compress(md, md->buf);
md->length += 8 * 128;
md->curlen = 0;
}
}
}
}
static ZT_INLINE void sha512_done(sha512_state *const md,uint8_t *out)
static ZT_INLINE void sha512_done(sha512_state* const md, uint8_t* out)
{
int i;
int i;
md->length += md->curlen * 8ULL;
md->buf[md->curlen++] = (uint8_t)0x80;
md->length += md->curlen * 8ULL;
md->buf[md->curlen++] = (uint8_t)0x80;
if (md->curlen > 112) {
while (md->curlen < 128) {
md->buf[md->curlen++] = (uint8_t)0;
}
sha512_compress(md, md->buf);
md->curlen = 0;
}
if (md->curlen > 112) {
while (md->curlen < 128) {
md->buf[md->curlen++] = (uint8_t)0;
}
sha512_compress(md, md->buf);
md->curlen = 0;
}
while (md->curlen < 120) {
md->buf[md->curlen++] = (uint8_t)0;
}
while (md->curlen < 120) {
md->buf[md->curlen++] = (uint8_t)0;
}
STORE64H(md->length, md->buf+120);
sha512_compress(md, md->buf);
STORE64H(md->length, md->buf + 120);
sha512_compress(md, md->buf);
for (i = 0; i < 8; i++) {
STORE64H(md->state[i], out+(8*i));
}
for (i = 0; i < 8; i++) {
STORE64H(md->state[i], out + (8 * i));
}
}
} // anonymous namespace
} // anonymous namespace
void SHA512(void *digest,const void *data,unsigned int len)
void SHA512(void* digest, const void* data, unsigned int len)
{
sha512_state state;
sha512_init(&state);
sha512_process(&state,(uint8_t *)data,(unsigned long)len);
sha512_done(&state,(uint8_t *)digest);
sha512_state state;
sha512_init(&state);
sha512_process(&state, (uint8_t*)data, (unsigned long)len);
sha512_done(&state, (uint8_t*)digest);
}
void SHA384(void *digest,const void *data,unsigned int len)
void SHA384(void* digest, const void* data, unsigned int len)
{
uint8_t tmp[64];
sha512_state state;
sha384_init(&state);
sha512_process(&state,(uint8_t *)data,(unsigned long)len);
sha512_done(&state,tmp);
Utils::copy<48>(digest,tmp);
uint8_t tmp[64];
sha512_state state;
sha384_init(&state);
sha512_process(&state, (uint8_t*)data, (unsigned long)len);
sha512_done(&state, tmp);
Utils::copy<48>(digest, tmp);
}
void SHA384(void *digest,const void *data0,unsigned int len0,const void *data1,unsigned int len1)
void SHA384(void* digest, const void* data0, unsigned int len0, const void* data1, unsigned int len1)
{
uint8_t tmp[64];
sha512_state state;
sha384_init(&state);
sha512_process(&state,(uint8_t *)data0,(unsigned long)len0);
sha512_process(&state,(uint8_t *)data1,(unsigned long)len1);
sha512_done(&state,tmp);
Utils::copy<48>(digest,tmp);
uint8_t tmp[64];
sha512_state state;
sha384_init(&state);
sha512_process(&state, (uint8_t*)data0, (unsigned long)len0);
sha512_process(&state, (uint8_t*)data1, (unsigned long)len1);
sha512_done(&state, tmp);
Utils::copy<48>(digest, tmp);
}
#endif // !ZT_HAVE_NATIVE_SHA512
#endif // !ZT_HAVE_NATIVE_SHA512
void HMACSHA384(const uint8_t key[ZT_SYMMETRIC_KEY_SIZE],const void *msg,const unsigned int msglen,uint8_t mac[48])
void HMACSHA384(const uint8_t key[ZT_SYMMETRIC_KEY_SIZE], const void* msg, const unsigned int msglen, uint8_t mac[48])
{
uint64_t kInPadded[16]; // input padded key
uint64_t outer[22]; // output padded key | H(input padded key | msg)
uint64_t kInPadded[16]; // input padded key
uint64_t outer[22]; // output padded key | H(input padded key | msg)
const uint64_t k0 = Utils::loadMachineEndian< uint64_t >(key);
const uint64_t k1 = Utils::loadMachineEndian< uint64_t >(key + 8);
const uint64_t k2 = Utils::loadMachineEndian< uint64_t >(key + 16);
const uint64_t k3 = Utils::loadMachineEndian< uint64_t >(key + 24);
const uint64_t k4 = Utils::loadMachineEndian< uint64_t >(key + 32);
const uint64_t k5 = Utils::loadMachineEndian< uint64_t >(key + 40);
const uint64_t k0 = Utils::loadMachineEndian<uint64_t>(key);
const uint64_t k1 = Utils::loadMachineEndian<uint64_t>(key + 8);
const uint64_t k2 = Utils::loadMachineEndian<uint64_t>(key + 16);
const uint64_t k3 = Utils::loadMachineEndian<uint64_t>(key + 24);
const uint64_t k4 = Utils::loadMachineEndian<uint64_t>(key + 32);
const uint64_t k5 = Utils::loadMachineEndian<uint64_t>(key + 40);
const uint64_t ipad = 0x3636363636363636ULL;
kInPadded[0] = k0 ^ ipad;
kInPadded[1] = k1 ^ ipad;
kInPadded[2] = k2 ^ ipad;
kInPadded[3] = k3 ^ ipad;
kInPadded[4] = k4 ^ ipad;
kInPadded[5] = k5 ^ ipad;
kInPadded[6] = ipad;
kInPadded[7] = ipad;
kInPadded[8] = ipad;
kInPadded[9] = ipad;
kInPadded[10] = ipad;
kInPadded[11] = ipad;
kInPadded[12] = ipad;
kInPadded[13] = ipad;
kInPadded[14] = ipad;
kInPadded[15] = ipad;
const uint64_t ipad = 0x3636363636363636ULL;
kInPadded[0] = k0 ^ ipad;
kInPadded[1] = k1 ^ ipad;
kInPadded[2] = k2 ^ ipad;
kInPadded[3] = k3 ^ ipad;
kInPadded[4] = k4 ^ ipad;
kInPadded[5] = k5 ^ ipad;
kInPadded[6] = ipad;
kInPadded[7] = ipad;
kInPadded[8] = ipad;
kInPadded[9] = ipad;
kInPadded[10] = ipad;
kInPadded[11] = ipad;
kInPadded[12] = ipad;
kInPadded[13] = ipad;
kInPadded[14] = ipad;
kInPadded[15] = ipad;
const uint64_t opad = 0x5c5c5c5c5c5c5c5cULL;
outer[0] = k0 ^ opad;
outer[1] = k1 ^ opad;
outer[2] = k2 ^ opad;
outer[3] = k3 ^ opad;
outer[4] = k4 ^ opad;
outer[5] = k5 ^ opad;
outer[6] = opad;
outer[7] = opad;
outer[8] = opad;
outer[9] = opad;
outer[10] = opad;
outer[11] = opad;
outer[12] = opad;
outer[13] = opad;
outer[14] = opad;
outer[15] = opad;
const uint64_t opad = 0x5c5c5c5c5c5c5c5cULL;
outer[0] = k0 ^ opad;
outer[1] = k1 ^ opad;
outer[2] = k2 ^ opad;
outer[3] = k3 ^ opad;
outer[4] = k4 ^ opad;
outer[5] = k5 ^ opad;
outer[6] = opad;
outer[7] = opad;
outer[8] = opad;
outer[9] = opad;
outer[10] = opad;
outer[11] = opad;
outer[12] = opad;
outer[13] = opad;
outer[14] = opad;
outer[15] = opad;
// H(output padded key | H(input padded key | msg))
SHA384(reinterpret_cast<uint8_t *>(outer) + 128,kInPadded,128,msg,msglen);
SHA384(mac,outer,176);
// H(output padded key | H(input padded key | msg))
SHA384(reinterpret_cast<uint8_t*>(outer) + 128, kInPadded, 128, msg, msglen);
SHA384(mac, outer, 176);
}
void KBKDFHMACSHA384(const uint8_t key[ZT_SYMMETRIC_KEY_SIZE],const char label,const char context,const uint32_t iter,uint8_t out[ZT_SYMMETRIC_KEY_SIZE])
void KBKDFHMACSHA384(
const uint8_t key[ZT_SYMMETRIC_KEY_SIZE],
const char label,
const char context,
const uint32_t iter,
uint8_t out[ZT_SYMMETRIC_KEY_SIZE])
{
uint8_t kbkdfMsg[13];
uint8_t kbkdfMsg[13];
Utils::storeBigEndian<uint32_t>(kbkdfMsg,(uint32_t)iter);
Utils::storeBigEndian<uint32_t>(kbkdfMsg, (uint32_t)iter);
kbkdfMsg[4] = (uint8_t)'Z';
kbkdfMsg[5] = (uint8_t)'T'; // preface our labels with something ZT-specific
kbkdfMsg[6] = (uint8_t)label;
kbkdfMsg[7] = 0;
kbkdfMsg[4] = (uint8_t)'Z';
kbkdfMsg[5] = (uint8_t)'T'; // preface our labels with something ZT-specific
kbkdfMsg[6] = (uint8_t)label;
kbkdfMsg[7] = 0;
kbkdfMsg[8] = (uint8_t)context;
kbkdfMsg[8] = (uint8_t)context;
// Output key length: 384 bits (as 32-bit big-endian value)
kbkdfMsg[9] = 0;
kbkdfMsg[10] = 0;
kbkdfMsg[11] = 0x01;
kbkdfMsg[12] = 0x80;
// Output key length: 384 bits (as 32-bit big-endian value)
kbkdfMsg[9] = 0;
kbkdfMsg[10] = 0;
kbkdfMsg[11] = 0x01;
kbkdfMsg[12] = 0x80;
static_assert(ZT_SYMMETRIC_KEY_SIZE == ZT_SHA384_DIGEST_SIZE,"sizeof(out) != ZT_SHA384_DIGEST_SIZE");
HMACSHA384(key,&kbkdfMsg,sizeof(kbkdfMsg),out);
static_assert(ZT_SYMMETRIC_KEY_SIZE == ZT_SHA384_DIGEST_SIZE, "sizeof(out) != ZT_SHA384_DIGEST_SIZE");
HMACSHA384(key, &kbkdfMsg, sizeof(kbkdfMsg), out);
}
} // namespace ZeroTier
} // namespace ZeroTier
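For orientation, a minimal sketch of how these primitives compose once the reformatted file is in place. The include path, the 'A' label, and the zero context/iteration values are illustrative placeholders, not anything this commit defines:

#include "SHA512.hpp" // path is an assumption; use the project's actual layout

using namespace ZeroTier;

static void deriveAndAuthenticate(const uint8_t masterKey[ZT_SYMMETRIC_KEY_SIZE], const void* msg, unsigned int msglen, uint8_t mac[ZT_HMACSHA384_LEN])
{
    // Derive a purpose-specific sub-key from the master key ('A'/0/0 are placeholder label, context and iteration).
    uint8_t subKey[ZT_SYMMETRIC_KEY_SIZE];
    KBKDFHMACSHA384(masterKey, 'A', 0, 0, subKey);

    // Authenticate the message under the derived key (HMAC-SHA-384, 48-byte tag).
    HMACSHA384(subKey, msg, msglen, mac);
}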

View file

@@ -22,8 +22,8 @@
#define ZT_SHA512_DIGEST_SIZE 64
#define ZT_SHA384_DIGEST_SIZE 48
#define ZT_SHA512_BLOCK_SIZE 128
#define ZT_SHA384_BLOCK_SIZE 128
#define ZT_SHA512_BLOCK_SIZE 128
#define ZT_SHA384_BLOCK_SIZE 128
#define ZT_HMACSHA384_LEN 48
@@ -32,34 +32,34 @@ namespace ZeroTier {
// SHA384 and SHA512 are actually in the standard libraries on MacOS and iOS
#ifdef __APPLE__
#define ZT_HAVE_NATIVE_SHA512 1
static ZT_INLINE void SHA512(void *digest,const void *data,unsigned int len)
static ZT_INLINE void SHA512(void* digest, const void* data, unsigned int len)
{
CC_SHA512_CTX ctx;
CC_SHA512_Init(&ctx);
CC_SHA512_Update(&ctx,data,len);
CC_SHA512_Final(reinterpret_cast<unsigned char *>(digest),&ctx);
CC_SHA512_CTX ctx;
CC_SHA512_Init(&ctx);
CC_SHA512_Update(&ctx, data, len);
CC_SHA512_Final(reinterpret_cast<unsigned char*>(digest), &ctx);
}
static ZT_INLINE void SHA384(void *digest,const void *data,unsigned int len)
static ZT_INLINE void SHA384(void* digest, const void* data, unsigned int len)
{
CC_SHA512_CTX ctx;
CC_SHA384_Init(&ctx);
CC_SHA384_Update(&ctx,data,len);
CC_SHA384_Final(reinterpret_cast<unsigned char *>(digest),&ctx);
CC_SHA512_CTX ctx;
CC_SHA384_Init(&ctx);
CC_SHA384_Update(&ctx, data, len);
CC_SHA384_Final(reinterpret_cast<unsigned char*>(digest), &ctx);
}
static ZT_INLINE void SHA384(void *digest,const void *data0,unsigned int len0,const void *data1,unsigned int len1)
static ZT_INLINE void SHA384(void* digest, const void* data0, unsigned int len0, const void* data1, unsigned int len1)
{
CC_SHA512_CTX ctx;
CC_SHA384_Init(&ctx);
CC_SHA384_Update(&ctx,data0,len0);
CC_SHA384_Update(&ctx,data1,len1);
CC_SHA384_Final(reinterpret_cast<unsigned char *>(digest),&ctx);
CC_SHA512_CTX ctx;
CC_SHA384_Init(&ctx);
CC_SHA384_Update(&ctx, data0, len0);
CC_SHA384_Update(&ctx, data1, len1);
CC_SHA384_Final(reinterpret_cast<unsigned char*>(digest), &ctx);
}
#endif
#ifndef ZT_HAVE_NATIVE_SHA512
void SHA512(void *digest,const void *data,unsigned int len);
void SHA384(void *digest,const void *data,unsigned int len);
void SHA384(void *digest,const void *data0,unsigned int len0,const void *data1,unsigned int len1);
void SHA512(void* digest, const void* data, unsigned int len);
void SHA384(void* digest, const void* data, unsigned int len);
void SHA384(void* digest, const void* data0, unsigned int len0, const void* data1, unsigned int len1);
#endif
/**
@@ -70,7 +70,7 @@ void SHA384(void *digest,const void *data0,unsigned int len0,const void *data1,u
* @param msglen Length of message
* @param mac Buffer to fill with result
*/
void HMACSHA384(const uint8_t key[ZT_SYMMETRIC_KEY_SIZE],const void *msg,unsigned int msglen,uint8_t mac[48]);
void HMACSHA384(const uint8_t key[ZT_SYMMETRIC_KEY_SIZE], const void* msg, unsigned int msglen, uint8_t mac[48]);
/**
* Compute KBKDF (key-based key derivation function) using HMAC-SHA-384 as a PRF
@@ -81,8 +81,13 @@ void HMACSHA384(const uint8_t key[ZT_SYMMETRIC_KEY_SIZE],const void *msg,unsigne
* @param iter Key iteration for generation of multiple keys for the same label/context
* @param out Output to receive derived key
*/
void KBKDFHMACSHA384(const uint8_t key[ZT_SYMMETRIC_KEY_SIZE],char label,char context,uint32_t iter,uint8_t out[ZT_SYMMETRIC_KEY_SIZE]);
void KBKDFHMACSHA384(
const uint8_t key[ZT_SYMMETRIC_KEY_SIZE],
char label,
char context,
uint32_t iter,
uint8_t out[ZT_SYMMETRIC_KEY_SIZE]);
} // namespace ZeroTier
} // namespace ZeroTier
#endif

View file

@@ -7,323 +7,353 @@
* Since the original was public domain, this is too.
*/
#include "Constants.hpp"
#include "Salsa20.hpp"
#include "Constants.hpp"
#define ROTATE(v, c) (((v) << (c)) | ((v) >> (32 - (c))))
#define XOR(v, w) ((v) ^ (w))
#define PLUS(v, w) ((uint32_t)((v) + (w)))
#define XOR(v, w) ((v) ^ (w))
#define PLUS(v, w) ((uint32_t)((v) + (w)))
#ifndef ZT_SALSA20_SSE
#if __BYTE_ORDER == __LITTLE_ENDIAN
#ifdef ZT_NO_UNALIGNED_ACCESS
// Slower version that does not use type punning
#define U8TO32_LITTLE(p) ( ((uint32_t)(p)[0]) | ((uint32_t)(p)[1] << 8) | ((uint32_t)(p)[2] << 16) | ((uint32_t)(p)[3] << 24) )
static ZT_INLINE void U32TO8_LITTLE(uint8_t *const c,const uint32_t v) { c[0] = (uint8_t)v; c[1] = (uint8_t)(v >> 8); c[2] = (uint8_t)(v >> 16); c[3] = (uint8_t)(v >> 24); }
#define U8TO32_LITTLE(p) \
(((uint32_t)(p)[0]) | ((uint32_t)(p)[1] << 8) | ((uint32_t)(p)[2] << 16) | ((uint32_t)(p)[3] << 24))
static ZT_INLINE void U32TO8_LITTLE(uint8_t* const c, const uint32_t v)
{
c[0] = (uint8_t)v;
c[1] = (uint8_t)(v >> 8);
c[2] = (uint8_t)(v >> 16);
c[3] = (uint8_t)(v >> 24);
}
#else
// Fast version that just does 32-bit load/store
#define U8TO32_LITTLE(p) (*((const uint32_t *)((const void *)(p))))
#define U32TO8_LITTLE(c,v) *((uint32_t *)((void *)(c))) = (v)
#endif // ZT_NO_UNALIGNED_ACCESS
#else // __BYTE_ORDER == __BIG_ENDIAN (we don't support anything else... does MIDDLE_ENDIAN even still exist?)
#define U8TO32_LITTLE(p) (*((const uint32_t*)((const void*)(p))))
#define U32TO8_LITTLE(c, v) *((uint32_t*)((void*)(c))) = (v)
#endif // ZT_NO_UNALIGNED_ACCESS
#else // __BYTE_ORDER == __BIG_ENDIAN (we don't support anything else... does MIDDLE_ENDIAN even still exist?)
#ifdef __GNUC__
// Use GNUC builtin bswap macros on big-endian machines if available
#define U8TO32_LITTLE(p) __builtin_bswap32(*((const uint32_t *)((const void *)(p))))
#define U32TO8_LITTLE(c,v) *((uint32_t *)((void *)(c))) = __builtin_bswap32((v))
#else // no __GNUC__
#define U8TO32_LITTLE(p) __builtin_bswap32(*((const uint32_t*)((const void*)(p))))
#define U32TO8_LITTLE(c, v) *((uint32_t*)((void*)(c))) = __builtin_bswap32((v))
#else // no __GNUC__
// Otherwise do it the slow, manual way on BE machines
#define U8TO32_LITTLE(p) ( ((uint32_t)(p)[0]) | ((uint32_t)(p)[1] << 8) | ((uint32_t)(p)[2] << 16) | ((uint32_t)(p)[3] << 24) )
static ZT_INLINE void U32TO8_LITTLE(uint8_t *const c,const uint32_t v) { c[0] = (uint8_t)v; c[1] = (uint8_t)(v >> 8); c[2] = (uint8_t)(v >> 16); c[3] = (uint8_t)(v >> 24); }
#endif // __GNUC__ or not
#endif // __BYTE_ORDER little or big?
#endif // !ZT_SALSA20_SSE
#define U8TO32_LITTLE(p) \
(((uint32_t)(p)[0]) | ((uint32_t)(p)[1] << 8) | ((uint32_t)(p)[2] << 16) | ((uint32_t)(p)[3] << 24))
static ZT_INLINE void U32TO8_LITTLE(uint8_t* const c, const uint32_t v)
{
c[0] = (uint8_t)v;
c[1] = (uint8_t)(v >> 8);
c[2] = (uint8_t)(v >> 16);
c[3] = (uint8_t)(v >> 24);
}
#endif // __GNUC__ or not
#endif // __BYTE_ORDER little or big?
#endif // !ZT_SALSA20_SSE
#ifdef ZT_SALSA20_SSE
class _s20sseconsts
{
public:
_s20sseconsts() noexcept
{
maskLo32 = _mm_shuffle_epi32(_mm_cvtsi32_si128(-1), _MM_SHUFFLE(1, 0, 1, 0));
maskHi32 = _mm_slli_epi64(maskLo32, 32);
}
__m128i maskLo32, maskHi32;
class _s20sseconsts {
public:
_s20sseconsts() noexcept
{
maskLo32 = _mm_shuffle_epi32(_mm_cvtsi32_si128(-1), _MM_SHUFFLE(1, 0, 1, 0));
maskHi32 = _mm_slli_epi64(maskLo32, 32);
}
__m128i maskLo32, maskHi32;
};
static const _s20sseconsts s_S20SSECONSTANTS;
#endif
namespace ZeroTier {
void Salsa20::init(const void *key, const void *iv) noexcept
void Salsa20::init(const void* key, const void* iv) noexcept
{
#ifdef ZT_SALSA20_SSE
const uint32_t *const k = (const uint32_t *)key;
_state.i[0] = 0x61707865;
_state.i[1] = 0x3320646e;
_state.i[2] = 0x79622d32;
_state.i[3] = 0x6b206574;
_state.i[4] = k[3];
_state.i[5] = 0;
_state.i[6] = k[7];
_state.i[7] = k[2];
_state.i[8] = 0;
_state.i[9] = k[6];
_state.i[10] = k[1];
_state.i[11] = ((const uint32_t *)iv)[1];
_state.i[12] = k[5];
_state.i[13] = k[0];
_state.i[14] = ((const uint32_t *)iv)[0];
_state.i[15] = k[4];
const uint32_t* const k = (const uint32_t*)key;
_state.i[0] = 0x61707865;
_state.i[1] = 0x3320646e;
_state.i[2] = 0x79622d32;
_state.i[3] = 0x6b206574;
_state.i[4] = k[3];
_state.i[5] = 0;
_state.i[6] = k[7];
_state.i[7] = k[2];
_state.i[8] = 0;
_state.i[9] = k[6];
_state.i[10] = k[1];
_state.i[11] = ((const uint32_t*)iv)[1];
_state.i[12] = k[5];
_state.i[13] = k[0];
_state.i[14] = ((const uint32_t*)iv)[0];
_state.i[15] = k[4];
#else
const char *const constants = "expand 32-byte k";
const uint8_t *const k = (const uint8_t *)key;
_state.i[0] = U8TO32_LITTLE(constants + 0);
_state.i[1] = U8TO32_LITTLE(k + 0);
_state.i[2] = U8TO32_LITTLE(k + 4);
_state.i[3] = U8TO32_LITTLE(k + 8);
_state.i[4] = U8TO32_LITTLE(k + 12);
_state.i[5] = U8TO32_LITTLE(constants + 4);
_state.i[6] = U8TO32_LITTLE(((const uint8_t *)iv) + 0);
_state.i[7] = U8TO32_LITTLE(((const uint8_t *)iv) + 4);
_state.i[8] = 0;
_state.i[9] = 0;
_state.i[10] = U8TO32_LITTLE(constants + 8);
_state.i[11] = U8TO32_LITTLE(k + 16);
_state.i[12] = U8TO32_LITTLE(k + 20);
_state.i[13] = U8TO32_LITTLE(k + 24);
_state.i[14] = U8TO32_LITTLE(k + 28);
_state.i[15] = U8TO32_LITTLE(constants + 12);
const char* const constants = "expand 32-byte k";
const uint8_t* const k = (const uint8_t*)key;
_state.i[0] = U8TO32_LITTLE(constants + 0);
_state.i[1] = U8TO32_LITTLE(k + 0);
_state.i[2] = U8TO32_LITTLE(k + 4);
_state.i[3] = U8TO32_LITTLE(k + 8);
_state.i[4] = U8TO32_LITTLE(k + 12);
_state.i[5] = U8TO32_LITTLE(constants + 4);
_state.i[6] = U8TO32_LITTLE(((const uint8_t*)iv) + 0);
_state.i[7] = U8TO32_LITTLE(((const uint8_t*)iv) + 4);
_state.i[8] = 0;
_state.i[9] = 0;
_state.i[10] = U8TO32_LITTLE(constants + 8);
_state.i[11] = U8TO32_LITTLE(k + 16);
_state.i[12] = U8TO32_LITTLE(k + 20);
_state.i[13] = U8TO32_LITTLE(k + 24);
_state.i[14] = U8TO32_LITTLE(k + 28);
_state.i[15] = U8TO32_LITTLE(constants + 12);
#endif
}
union p_SalsaState
{
union p_SalsaState {
#ifdef ZT_SALSA20_SSE
__m128i v[4];
#endif // ZT_SALSA20_SSE
uint32_t i[16];
__m128i v[4];
#endif // ZT_SALSA20_SSE
uint32_t i[16];
};
template< unsigned int R >
static ZT_INLINE void p_salsaCrypt(p_SalsaState *const state, const uint8_t *m, uint8_t *c, unsigned int bytes) noexcept
template <unsigned int R>
static ZT_INLINE void p_salsaCrypt(p_SalsaState* const state, const uint8_t* m, uint8_t* c, unsigned int bytes) noexcept
{
if (unlikely(bytes == 0))
return;
if (unlikely(bytes == 0))
return;
uint8_t tmp[64];
uint8_t *ctarget = c;
uint8_t tmp[64];
uint8_t* ctarget = c;
#ifdef ZT_SALSA20_SSE
__m128i X0 = state->v[0];
__m128i X1 = state->v[1];
__m128i X2 = state->v[2];
__m128i X3 = state->v[3];
const __m128i maskLo32 = s_S20SSECONSTANTS.maskLo32;
const __m128i maskHi32 = s_S20SSECONSTANTS.maskHi32;
const __m128i add1 = _mm_set_epi32(0, 0, 0, 1);
__m128i X0 = state->v[0];
__m128i X1 = state->v[1];
__m128i X2 = state->v[2];
__m128i X3 = state->v[3];
const __m128i maskLo32 = s_S20SSECONSTANTS.maskLo32;
const __m128i maskHi32 = s_S20SSECONSTANTS.maskHi32;
const __m128i add1 = _mm_set_epi32(0, 0, 0, 1);
#else
uint32_t x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15;
uint32_t j0, j1, j2, j3, j4, j5, j6, j7, j8, j9, j10, j11, j12, j13, j14, j15;
j0 = state->i[0];
j1 = state->i[1];
j2 = state->i[2];
j3 = state->i[3];
j4 = state->i[4];
j5 = state->i[5];
j6 = state->i[6];
j7 = state->i[7];
j8 = state->i[8];
j9 = state->i[9];
j10 = state->i[10];
j11 = state->i[11];
j12 = state->i[12];
j13 = state->i[13];
j14 = state->i[14];
j15 = state->i[15];
uint32_t x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15;
uint32_t j0, j1, j2, j3, j4, j5, j6, j7, j8, j9, j10, j11, j12, j13, j14, j15;
j0 = state->i[0];
j1 = state->i[1];
j2 = state->i[2];
j3 = state->i[3];
j4 = state->i[4];
j5 = state->i[5];
j6 = state->i[6];
j7 = state->i[7];
j8 = state->i[8];
j9 = state->i[9];
j10 = state->i[10];
j11 = state->i[11];
j12 = state->i[12];
j13 = state->i[13];
j14 = state->i[14];
j15 = state->i[15];
#endif
for (;;) {
if (unlikely(bytes < 64)) {
for (unsigned int i = 0; i < bytes; ++i)
tmp[i] = m[i];
m = tmp;
ctarget = c;
c = tmp;
}
for (;;) {
if (unlikely(bytes < 64)) {
for (unsigned int i = 0; i < bytes; ++i)
tmp[i] = m[i];
m = tmp;
ctarget = c;
c = tmp;
}
#ifdef ZT_SALSA20_SSE
__m128i X0s = X0;
__m128i X1s = X1;
__m128i X2s = X2;
__m128i X3s = X3;
__m128i T;
__m128i X0s = X0;
__m128i X1s = X1;
__m128i X2s = X2;
__m128i X3s = X3;
__m128i T;
for (unsigned int rr = 0; rr < (R / 2); ++rr) {
T = _mm_add_epi32(X0, X3);
X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
T = _mm_add_epi32(X1, X0);
X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
T = _mm_add_epi32(X2, X1);
X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
T = _mm_add_epi32(X3, X2);
X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
X1 = _mm_shuffle_epi32(X1, 0x93);
X2 = _mm_shuffle_epi32(X2, 0x4E);
X3 = _mm_shuffle_epi32(X3, 0x39);
T = _mm_add_epi32(X0, X1);
X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
T = _mm_add_epi32(X3, X0);
X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
T = _mm_add_epi32(X2, X3);
X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
T = _mm_add_epi32(X1, X2);
X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
X1 = _mm_shuffle_epi32(X1, 0x39);
X2 = _mm_shuffle_epi32(X2, 0x4E);
X3 = _mm_shuffle_epi32(X3, 0x93);
}
for (unsigned int rr = 0; rr < (R / 2); ++rr) {
T = _mm_add_epi32(X0, X3);
X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
T = _mm_add_epi32(X1, X0);
X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
T = _mm_add_epi32(X2, X1);
X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
T = _mm_add_epi32(X3, X2);
X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
X1 = _mm_shuffle_epi32(X1, 0x93);
X2 = _mm_shuffle_epi32(X2, 0x4E);
X3 = _mm_shuffle_epi32(X3, 0x39);
T = _mm_add_epi32(X0, X1);
X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
T = _mm_add_epi32(X3, X0);
X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
T = _mm_add_epi32(X2, X3);
X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
T = _mm_add_epi32(X1, X2);
X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
X1 = _mm_shuffle_epi32(X1, 0x39);
X2 = _mm_shuffle_epi32(X2, 0x4E);
X3 = _mm_shuffle_epi32(X3, 0x93);
}
X0 = _mm_add_epi32(X0s, X0);
X1 = _mm_add_epi32(X1s, X1);
X2 = _mm_add_epi32(X2s, X2);
X3 = _mm_add_epi32(X3s, X3);
X0 = _mm_add_epi32(X0s, X0);
X1 = _mm_add_epi32(X1s, X1);
X2 = _mm_add_epi32(X2s, X2);
X3 = _mm_add_epi32(X3s, X3);
__m128i k02 = _mm_or_si128(_mm_slli_epi64(X0, 32), _mm_srli_epi64(X3, 32));
__m128i k20 = _mm_or_si128(_mm_and_si128(X2, maskLo32), _mm_and_si128(X1, maskHi32));
__m128i k13 = _mm_or_si128(_mm_slli_epi64(X1, 32), _mm_srli_epi64(X0, 32));
__m128i k31 = _mm_or_si128(_mm_and_si128(X3, maskLo32), _mm_and_si128(X2, maskHi32));
k02 = _mm_shuffle_epi32(k02, _MM_SHUFFLE(0, 1, 2, 3));
k13 = _mm_shuffle_epi32(k13, _MM_SHUFFLE(0, 1, 2, 3));
__m128i k02 = _mm_or_si128(_mm_slli_epi64(X0, 32), _mm_srli_epi64(X3, 32));
__m128i k20 = _mm_or_si128(_mm_and_si128(X2, maskLo32), _mm_and_si128(X1, maskHi32));
__m128i k13 = _mm_or_si128(_mm_slli_epi64(X1, 32), _mm_srli_epi64(X0, 32));
__m128i k31 = _mm_or_si128(_mm_and_si128(X3, maskLo32), _mm_and_si128(X2, maskHi32));
k02 = _mm_shuffle_epi32(k02, _MM_SHUFFLE(0, 1, 2, 3));
k13 = _mm_shuffle_epi32(k13, _MM_SHUFFLE(0, 1, 2, 3));
_mm_storeu_si128(reinterpret_cast<__m128i *>(c), _mm_xor_si128(_mm_unpackhi_epi64(k02, k20), _mm_loadu_si128(reinterpret_cast<const __m128i *>(m))));
_mm_storeu_si128(reinterpret_cast<__m128i *>(c) + 1, _mm_xor_si128(_mm_unpackhi_epi64(k13, k31), _mm_loadu_si128(reinterpret_cast<const __m128i *>(m) + 1)));
_mm_storeu_si128(reinterpret_cast<__m128i *>(c) + 2, _mm_xor_si128(_mm_unpacklo_epi64(k20, k02), _mm_loadu_si128(reinterpret_cast<const __m128i *>(m) + 2)));
_mm_storeu_si128(reinterpret_cast<__m128i *>(c) + 3, _mm_xor_si128(_mm_unpacklo_epi64(k31, k13), _mm_loadu_si128(reinterpret_cast<const __m128i *>(m) + 3)));
_mm_storeu_si128(
reinterpret_cast<__m128i*>(c),
_mm_xor_si128(_mm_unpackhi_epi64(k02, k20), _mm_loadu_si128(reinterpret_cast<const __m128i*>(m))));
_mm_storeu_si128(
reinterpret_cast<__m128i*>(c) + 1,
_mm_xor_si128(_mm_unpackhi_epi64(k13, k31), _mm_loadu_si128(reinterpret_cast<const __m128i*>(m) + 1)));
_mm_storeu_si128(
reinterpret_cast<__m128i*>(c) + 2,
_mm_xor_si128(_mm_unpacklo_epi64(k20, k02), _mm_loadu_si128(reinterpret_cast<const __m128i*>(m) + 2)));
_mm_storeu_si128(
reinterpret_cast<__m128i*>(c) + 3,
_mm_xor_si128(_mm_unpacklo_epi64(k31, k13), _mm_loadu_si128(reinterpret_cast<const __m128i*>(m) + 3)));
X0 = X0s;
X1 = X1s;
X2 = _mm_add_epi32(X2s, add1);
X3 = X3s;
X0 = X0s;
X1 = X1s;
X2 = _mm_add_epi32(X2s, add1);
X3 = X3s;
#else
x0 = j0;
x1 = j1;
x2 = j2;
x3 = j3;
x4 = j4;
x5 = j5;
x6 = j6;
x7 = j7;
x8 = j8;
x9 = j9;
x10 = j10;
x11 = j11;
x12 = j12;
x13 = j13;
x14 = j14;
x15 = j15;
x0 = j0;
x1 = j1;
x2 = j2;
x3 = j3;
x4 = j4;
x5 = j5;
x6 = j6;
x7 = j7;
x8 = j8;
x9 = j9;
x10 = j10;
x11 = j11;
x12 = j12;
x13 = j13;
x14 = j14;
x15 = j15;
for(unsigned int rr=0;rr<(R/2);++rr) {
x4 = XOR( x4,ROTATE(PLUS( x0,x12), 7));
x8 = XOR( x8,ROTATE(PLUS( x4, x0), 9));
x12 = XOR(x12,ROTATE(PLUS( x8, x4),13));
x0 = XOR( x0,ROTATE(PLUS(x12, x8),18));
x9 = XOR( x9,ROTATE(PLUS( x5, x1), 7));
x13 = XOR(x13,ROTATE(PLUS( x9, x5), 9));
x1 = XOR( x1,ROTATE(PLUS(x13, x9),13));
x5 = XOR( x5,ROTATE(PLUS( x1,x13),18));
x14 = XOR(x14,ROTATE(PLUS(x10, x6), 7));
x2 = XOR( x2,ROTATE(PLUS(x14,x10), 9));
x6 = XOR( x6,ROTATE(PLUS( x2,x14),13));
x10 = XOR(x10,ROTATE(PLUS( x6, x2),18));
x3 = XOR( x3,ROTATE(PLUS(x15,x11), 7));
x7 = XOR( x7,ROTATE(PLUS( x3,x15), 9));
x11 = XOR(x11,ROTATE(PLUS( x7, x3),13));
x15 = XOR(x15,ROTATE(PLUS(x11, x7),18));
x1 = XOR( x1,ROTATE(PLUS( x0, x3), 7));
x2 = XOR( x2,ROTATE(PLUS( x1, x0), 9));
x3 = XOR( x3,ROTATE(PLUS( x2, x1),13));
x0 = XOR( x0,ROTATE(PLUS( x3, x2),18));
x6 = XOR( x6,ROTATE(PLUS( x5, x4), 7));
x7 = XOR( x7,ROTATE(PLUS( x6, x5), 9));
x4 = XOR( x4,ROTATE(PLUS( x7, x6),13));
x5 = XOR( x5,ROTATE(PLUS( x4, x7),18));
x11 = XOR(x11,ROTATE(PLUS(x10, x9), 7));
x8 = XOR( x8,ROTATE(PLUS(x11,x10), 9));
x9 = XOR( x9,ROTATE(PLUS( x8,x11),13));
x10 = XOR(x10,ROTATE(PLUS( x9, x8),18));
x12 = XOR(x12,ROTATE(PLUS(x15,x14), 7));
x13 = XOR(x13,ROTATE(PLUS(x12,x15), 9));
x14 = XOR(x14,ROTATE(PLUS(x13,x12),13));
x15 = XOR(x15,ROTATE(PLUS(x14,x13),18));
}
for (unsigned int rr = 0; rr < (R / 2); ++rr) {
x4 = XOR(x4, ROTATE(PLUS(x0, x12), 7));
x8 = XOR(x8, ROTATE(PLUS(x4, x0), 9));
x12 = XOR(x12, ROTATE(PLUS(x8, x4), 13));
x0 = XOR(x0, ROTATE(PLUS(x12, x8), 18));
x9 = XOR(x9, ROTATE(PLUS(x5, x1), 7));
x13 = XOR(x13, ROTATE(PLUS(x9, x5), 9));
x1 = XOR(x1, ROTATE(PLUS(x13, x9), 13));
x5 = XOR(x5, ROTATE(PLUS(x1, x13), 18));
x14 = XOR(x14, ROTATE(PLUS(x10, x6), 7));
x2 = XOR(x2, ROTATE(PLUS(x14, x10), 9));
x6 = XOR(x6, ROTATE(PLUS(x2, x14), 13));
x10 = XOR(x10, ROTATE(PLUS(x6, x2), 18));
x3 = XOR(x3, ROTATE(PLUS(x15, x11), 7));
x7 = XOR(x7, ROTATE(PLUS(x3, x15), 9));
x11 = XOR(x11, ROTATE(PLUS(x7, x3), 13));
x15 = XOR(x15, ROTATE(PLUS(x11, x7), 18));
x1 = XOR(x1, ROTATE(PLUS(x0, x3), 7));
x2 = XOR(x2, ROTATE(PLUS(x1, x0), 9));
x3 = XOR(x3, ROTATE(PLUS(x2, x1), 13));
x0 = XOR(x0, ROTATE(PLUS(x3, x2), 18));
x6 = XOR(x6, ROTATE(PLUS(x5, x4), 7));
x7 = XOR(x7, ROTATE(PLUS(x6, x5), 9));
x4 = XOR(x4, ROTATE(PLUS(x7, x6), 13));
x5 = XOR(x5, ROTATE(PLUS(x4, x7), 18));
x11 = XOR(x11, ROTATE(PLUS(x10, x9), 7));
x8 = XOR(x8, ROTATE(PLUS(x11, x10), 9));
x9 = XOR(x9, ROTATE(PLUS(x8, x11), 13));
x10 = XOR(x10, ROTATE(PLUS(x9, x8), 18));
x12 = XOR(x12, ROTATE(PLUS(x15, x14), 7));
x13 = XOR(x13, ROTATE(PLUS(x12, x15), 9));
x14 = XOR(x14, ROTATE(PLUS(x13, x12), 13));
x15 = XOR(x15, ROTATE(PLUS(x14, x13), 18));
}
x0 = PLUS(x0,j0);
x1 = PLUS(x1,j1);
x2 = PLUS(x2,j2);
x3 = PLUS(x3,j3);
x4 = PLUS(x4,j4);
x5 = PLUS(x5,j5);
x6 = PLUS(x6,j6);
x7 = PLUS(x7,j7);
x8 = PLUS(x8,j8);
x9 = PLUS(x9,j9);
x10 = PLUS(x10,j10);
x11 = PLUS(x11,j11);
x12 = PLUS(x12,j12);
x13 = PLUS(x13,j13);
x14 = PLUS(x14,j14);
x15 = PLUS(x15,j15);
x0 = PLUS(x0, j0);
x1 = PLUS(x1, j1);
x2 = PLUS(x2, j2);
x3 = PLUS(x3, j3);
x4 = PLUS(x4, j4);
x5 = PLUS(x5, j5);
x6 = PLUS(x6, j6);
x7 = PLUS(x7, j7);
x8 = PLUS(x8, j8);
x9 = PLUS(x9, j9);
x10 = PLUS(x10, j10);
x11 = PLUS(x11, j11);
x12 = PLUS(x12, j12);
x13 = PLUS(x13, j13);
x14 = PLUS(x14, j14);
x15 = PLUS(x15, j15);
U32TO8_LITTLE(c + 0,XOR(x0,U8TO32_LITTLE(m + 0)));
U32TO8_LITTLE(c + 4,XOR(x1,U8TO32_LITTLE(m + 4)));
U32TO8_LITTLE(c + 8,XOR(x2,U8TO32_LITTLE(m + 8)));
U32TO8_LITTLE(c + 12,XOR(x3,U8TO32_LITTLE(m + 12)));
U32TO8_LITTLE(c + 16,XOR(x4,U8TO32_LITTLE(m + 16)));
U32TO8_LITTLE(c + 20,XOR(x5,U8TO32_LITTLE(m + 20)));
U32TO8_LITTLE(c + 24,XOR(x6,U8TO32_LITTLE(m + 24)));
U32TO8_LITTLE(c + 28,XOR(x7,U8TO32_LITTLE(m + 28)));
U32TO8_LITTLE(c + 32,XOR(x8,U8TO32_LITTLE(m + 32)));
U32TO8_LITTLE(c + 36,XOR(x9,U8TO32_LITTLE(m + 36)));
U32TO8_LITTLE(c + 40,XOR(x10,U8TO32_LITTLE(m + 40)));
U32TO8_LITTLE(c + 44,XOR(x11,U8TO32_LITTLE(m + 44)));
U32TO8_LITTLE(c + 48,XOR(x12,U8TO32_LITTLE(m + 48)));
U32TO8_LITTLE(c + 52,XOR(x13,U8TO32_LITTLE(m + 52)));
U32TO8_LITTLE(c + 56,XOR(x14,U8TO32_LITTLE(m + 56)));
U32TO8_LITTLE(c + 60,XOR(x15,U8TO32_LITTLE(m + 60)));
U32TO8_LITTLE(c + 0, XOR(x0, U8TO32_LITTLE(m + 0)));
U32TO8_LITTLE(c + 4, XOR(x1, U8TO32_LITTLE(m + 4)));
U32TO8_LITTLE(c + 8, XOR(x2, U8TO32_LITTLE(m + 8)));
U32TO8_LITTLE(c + 12, XOR(x3, U8TO32_LITTLE(m + 12)));
U32TO8_LITTLE(c + 16, XOR(x4, U8TO32_LITTLE(m + 16)));
U32TO8_LITTLE(c + 20, XOR(x5, U8TO32_LITTLE(m + 20)));
U32TO8_LITTLE(c + 24, XOR(x6, U8TO32_LITTLE(m + 24)));
U32TO8_LITTLE(c + 28, XOR(x7, U8TO32_LITTLE(m + 28)));
U32TO8_LITTLE(c + 32, XOR(x8, U8TO32_LITTLE(m + 32)));
U32TO8_LITTLE(c + 36, XOR(x9, U8TO32_LITTLE(m + 36)));
U32TO8_LITTLE(c + 40, XOR(x10, U8TO32_LITTLE(m + 40)));
U32TO8_LITTLE(c + 44, XOR(x11, U8TO32_LITTLE(m + 44)));
U32TO8_LITTLE(c + 48, XOR(x12, U8TO32_LITTLE(m + 48)));
U32TO8_LITTLE(c + 52, XOR(x13, U8TO32_LITTLE(m + 52)));
U32TO8_LITTLE(c + 56, XOR(x14, U8TO32_LITTLE(m + 56)));
U32TO8_LITTLE(c + 60, XOR(x15, U8TO32_LITTLE(m + 60)));
++j8;
++j8;
#endif
if (likely(bytes > 64)) {
bytes -= 64;
c += 64;
m += 64;
} else {
if (bytes < 64) {
for (unsigned int i = 0; i < bytes; ++i)
ctarget[i] = c[i];
}
if (likely(bytes > 64)) {
bytes -= 64;
c += 64;
m += 64;
}
else {
if (bytes < 64) {
for (unsigned int i = 0; i < bytes; ++i)
ctarget[i] = c[i];
}
#ifdef ZT_SALSA20_SSE
state->v[2] = X2;
state->v[2] = X2;
#else
state->i[8] = j8;
state->i[8] = j8;
#endif
return;
}
}
return;
}
}
}
void Salsa20::crypt12(const void *in, void *out, unsigned int bytes) noexcept
void Salsa20::crypt12(const void* in, void* out, unsigned int bytes) noexcept
{
p_salsaCrypt< 12 >(reinterpret_cast<p_SalsaState *>(&_state), reinterpret_cast<const uint8_t *>(in), reinterpret_cast<uint8_t *>(out), bytes);
p_salsaCrypt<12>(
reinterpret_cast<p_SalsaState*>(&_state),
reinterpret_cast<const uint8_t*>(in),
reinterpret_cast<uint8_t*>(out),
bytes);
}
void Salsa20::crypt20(const void *in, void *out, unsigned int bytes) noexcept
void Salsa20::crypt20(const void* in, void* out, unsigned int bytes) noexcept
{
p_salsaCrypt< 20 >(reinterpret_cast<p_SalsaState *>(&_state), reinterpret_cast<const uint8_t *>(in), reinterpret_cast<uint8_t *>(out), bytes);
p_salsaCrypt<20>(
reinterpret_cast<p_SalsaState*>(&_state),
reinterpret_cast<const uint8_t*>(in),
reinterpret_cast<uint8_t*>(out),
bytes);
}
} // namespace ZeroTier
} // namespace ZeroTier

View file

@@ -15,8 +15,8 @@
#define ZT_SALSA20_HPP
#include "Constants.hpp"
#include "Utils.hpp"
#include "TriviallyCopyable.hpp"
#include "Utils.hpp"
#ifdef ZT_ARCH_X64
#define ZT_SALSA20_SSE 1
@@ -35,67 +35,75 @@ namespace ZeroTier {
* a minor optimization done here because ZeroTier messages are
* nowhere near this large.
*/
class Salsa20 : public TriviallyCopyable
{
public:
class Salsa20 : public TriviallyCopyable {
public:
#ifdef ZT_SALSA20_SSE
static constexpr bool accelerated() noexcept
{ return true; }
static constexpr bool accelerated() noexcept
{
return true;
}
#else
static constexpr bool accelerated() noexcept { return false; }
static constexpr bool accelerated() noexcept
{
return false;
}
#endif
ZT_INLINE Salsa20() noexcept
{}
ZT_INLINE Salsa20() noexcept
{
}
ZT_INLINE ~Salsa20() noexcept
{ Utils::burn(&_state, sizeof(_state)); }
ZT_INLINE ~Salsa20() noexcept
{
Utils::burn(&_state, sizeof(_state));
}
/**
* @param key 256-bit (32 byte) key
* @param iv 64-bit initialization vector
*/
ZT_INLINE Salsa20(const void *key, const void *iv) noexcept
{ init(key, iv); }
/**
* @param key 256-bit (32 byte) key
* @param iv 64-bit initialization vector
*/
ZT_INLINE Salsa20(const void* key, const void* iv) noexcept
{
init(key, iv);
}
/**
* Initialize cipher
*
* @param key Key bits
* @param iv 64-bit initialization vector
*/
void init(const void *key, const void *iv) noexcept;
/**
* Initialize cipher
*
* @param key Key bits
* @param iv 64-bit initialization vector
*/
void init(const void* key, const void* iv) noexcept;
/**
* Encrypt/decrypt data using Salsa20/12
*
* @param in Input data
* @param out Output buffer
* @param bytes Length of data
*/
void crypt12(const void *in, void *out, unsigned int bytes) noexcept;
/**
* Encrypt/decrypt data using Salsa20/12
*
* @param in Input data
* @param out Output buffer
* @param bytes Length of data
*/
void crypt12(const void* in, void* out, unsigned int bytes) noexcept;
/**
* Encrypt/decrypt data using Salsa20/20
*
* @param in Input data
* @param out Output buffer
* @param bytes Length of data
*/
void crypt20(const void *in, void *out, unsigned int bytes) noexcept;
/**
* Encrypt/decrypt data using Salsa20/20
*
* @param in Input data
* @param out Output buffer
* @param bytes Length of data
*/
void crypt20(const void* in, void* out, unsigned int bytes) noexcept;
private:
union
{
private:
union {
#ifdef ZT_SALSA20_SSE
__m128i v[4];
#endif // ZT_SALSA20_SSE
uint32_t i[16];
} _state;
__m128i v[4];
#endif // ZT_SALSA20_SSE
uint32_t i[16];
} _state;
};
} // namespace ZeroTier
} // namespace ZeroTier
#endif
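A usage sketch (not part of this commit): the key is 32 bytes, the IV is 8 bytes, and because Salsa20 is a stream cipher the same call both encrypts and decrypts; the 64-byte block counter is carried in _state, so feeding a message in 64-byte-multiple chunks yields the same output as a single call.

#include "Salsa20.hpp"

using namespace ZeroTier;

static void salsaExample(const uint8_t key[32], const uint8_t iv[8], const uint8_t* in, uint8_t* out, unsigned int len)
{
    Salsa20 s20(key, iv);      // 256-bit key, 64-bit IV
    s20.crypt12(in, out, len); // encrypt; a fresh Salsa20 with the same key/IV run over 'out' recovers 'in'
}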

View file

@@ -24,66 +24,88 @@ namespace ZeroTier {
*
* This is used in the core to avoid requiring C++11 and because auto_ptr is weird.
*/
template< typename T >
class ScopedPtr : public TriviallyCopyable
{
public:
explicit ZT_INLINE ScopedPtr(T *const p) noexcept: m_ptr(p)
{}
template <typename T> class ScopedPtr : public TriviallyCopyable {
public:
explicit ZT_INLINE ScopedPtr(T* const p) noexcept : m_ptr(p)
{
}
ZT_INLINE ~ScopedPtr()
{ delete m_ptr; }
ZT_INLINE ~ScopedPtr()
{
delete m_ptr;
}
ZT_INLINE T *operator->() const noexcept
{ return m_ptr; }
ZT_INLINE T* operator->() const noexcept
{
return m_ptr;
}
ZT_INLINE T &operator*() const noexcept
{ return *m_ptr; }
ZT_INLINE T& operator*() const noexcept
{
return *m_ptr;
}
ZT_INLINE T *ptr() const noexcept
{ return m_ptr; }
ZT_INLINE T* ptr() const noexcept
{
return m_ptr;
}
ZT_INLINE void swap(const ScopedPtr &p) noexcept
{
T *const tmp = m_ptr;
m_ptr = p.m_ptr;
p.m_ptr = tmp;
}
ZT_INLINE void swap(const ScopedPtr& p) noexcept
{
T* const tmp = m_ptr;
m_ptr = p.m_ptr;
p.m_ptr = tmp;
}
explicit ZT_INLINE operator bool() const noexcept
{ return (m_ptr != (T *)0); }
explicit ZT_INLINE operator bool() const noexcept
{
return (m_ptr != (T*)0);
}
ZT_INLINE bool operator==(const ScopedPtr &p) const noexcept
{ return (m_ptr == p.m_ptr); }
ZT_INLINE bool operator==(const ScopedPtr& p) const noexcept
{
return (m_ptr == p.m_ptr);
}
ZT_INLINE bool operator!=(const ScopedPtr &p) const noexcept
{ return (m_ptr != p.m_ptr); }
ZT_INLINE bool operator!=(const ScopedPtr& p) const noexcept
{
return (m_ptr != p.m_ptr);
}
ZT_INLINE bool operator==(T *const p) const noexcept
{ return (m_ptr == p); }
ZT_INLINE bool operator==(T* const p) const noexcept
{
return (m_ptr == p);
}
ZT_INLINE bool operator!=(T *const p) const noexcept
{ return (m_ptr != p); }
ZT_INLINE bool operator!=(T* const p) const noexcept
{
return (m_ptr != p);
}
private:
ZT_INLINE ScopedPtr() noexcept
{}
private:
ZT_INLINE ScopedPtr() noexcept
{
}
ZT_INLINE ScopedPtr(const ScopedPtr &p) noexcept: m_ptr(nullptr)
{}
ZT_INLINE ScopedPtr(const ScopedPtr& p) noexcept : m_ptr(nullptr)
{
}
ZT_INLINE ScopedPtr &operator=(const ScopedPtr &p) noexcept
{ return *this; }
ZT_INLINE ScopedPtr& operator=(const ScopedPtr& p) noexcept
{
return *this;
}
T *const m_ptr;
T* const m_ptr;
};
} // namespace ZeroTier
} // namespace ZeroTier
namespace std {
template< typename T >
ZT_INLINE void swap(ZeroTier::ScopedPtr< T > &a, ZeroTier::ScopedPtr< T > &b) noexcept
{ a.swap(b); }
} // namespace std
template <typename T> ZT_INLINE void swap(ZeroTier::ScopedPtr<T>& a, ZeroTier::ScopedPtr<T>& b) noexcept
{
a.swap(b);
}
} // namespace std
#endif
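A minimal sketch of the intended usage, for illustration only:

#include "ScopedPtr.hpp"

using namespace ZeroTier;

static void scopedPtrExample()
{
    ScopedPtr<int> p(new int(42)); // takes ownership immediately
    if (p)
        *p += 1;
} // the held object is deleted here, when p goes out of scope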

View file

@@ -11,92 +11,112 @@
*/
/****/
#include "Constants.hpp"
#include "SelfAwareness.hpp"
#include "Context.hpp"
#include "Topology.hpp"
#include "Peer.hpp"
#include "Trace.hpp"
#include "Constants.hpp"
#include "Containers.hpp"
#include "Context.hpp"
#include "Peer.hpp"
#include "Topology.hpp"
#include "Trace.hpp"
// Entry timeout -- make it fairly long since this is just to prevent stale buildup
#define ZT_SELFAWARENESS_ENTRY_TIMEOUT 300000
namespace ZeroTier {
SelfAwareness::SelfAwareness(const Context &ctx) :
m_ctx(ctx)
{}
void SelfAwareness::iam(const CallContext &cc, const Identity &reporter, const int64_t receivedOnLocalSocket, const InetAddress &reporterPhysicalAddress, const InetAddress &myPhysicalAddress, bool trusted)
SelfAwareness::SelfAwareness(const Context& ctx) : m_ctx(ctx)
{
const InetAddress::IpScope scope = myPhysicalAddress.ipScope();
if ((scope != reporterPhysicalAddress.ipScope()) || (scope == ZT_IP_SCOPE_NONE) || (scope == ZT_IP_SCOPE_LOOPBACK) || (scope == ZT_IP_SCOPE_MULTICAST))
return;
Mutex::Lock l(m_phy_l);
p_PhySurfaceEntry &entry = m_phy[p_PhySurfaceKey(reporter.address(), receivedOnLocalSocket, reporterPhysicalAddress, scope)];
if ((trusted) && ((cc.ticks - entry.timestampTicks) < ZT_SELFAWARENESS_ENTRY_TIMEOUT) && (!entry.mySurface.ipsEqual(myPhysicalAddress))) {
// Changes to external surface reported by trusted peers causes path reset in this scope
entry.mySurface = myPhysicalAddress;
entry.timestampTicks = cc.ticks;
entry.trusted = trusted;
// Erase all entries in this scope that were not reported from this remote address to prevent 'thrashing'
// due to multiple reports of endpoint change.
// Don't use 'entry' after this since hash table gets modified.
for (Map< p_PhySurfaceKey, p_PhySurfaceEntry >::iterator i(m_phy.begin()); i != m_phy.end();) {
if ((i->first.scope == scope) && (i->first.reporterPhysicalAddress != reporterPhysicalAddress))
m_phy.erase(i++);
else ++i;
}
// Reset all paths within this scope and address family
Vector< SharedPtr< Peer > > peers, rootPeers;
m_ctx.topology->allPeers(peers, rootPeers);
for(Vector< SharedPtr< Peer > >::const_iterator p(peers.begin());p!=peers.end();++p)
(*p)->resetWithinScope(m_ctx, cc, (InetAddress::IpScope)scope, myPhysicalAddress.as.sa.sa_family);
m_ctx.t->resettingPathsInScope(cc, 0x9afff100, reporter, reporterPhysicalAddress, entry.mySurface, myPhysicalAddress, scope);
} else {
// Otherwise just update DB to use to determine external surface info
entry.mySurface = myPhysicalAddress;
entry.timestampTicks = cc.ticks;
entry.trusted = trusted;
}
}
void SelfAwareness::clean(const CallContext &cc)
void SelfAwareness::iam(
const CallContext& cc,
const Identity& reporter,
const int64_t receivedOnLocalSocket,
const InetAddress& reporterPhysicalAddress,
const InetAddress& myPhysicalAddress,
bool trusted)
{
Mutex::Lock l(m_phy_l);
for (Map< p_PhySurfaceKey, p_PhySurfaceEntry >::iterator i(m_phy.begin()); i != m_phy.end();) {
if ((cc.ticks - i->second.timestampTicks) >= ZT_SELFAWARENESS_ENTRY_TIMEOUT)
m_phy.erase(i++);
else ++i;
}
const InetAddress::IpScope scope = myPhysicalAddress.ipScope();
if ((scope != reporterPhysicalAddress.ipScope()) || (scope == ZT_IP_SCOPE_NONE) || (scope == ZT_IP_SCOPE_LOOPBACK)
|| (scope == ZT_IP_SCOPE_MULTICAST))
return;
Mutex::Lock l(m_phy_l);
p_PhySurfaceEntry& entry =
m_phy[p_PhySurfaceKey(reporter.address(), receivedOnLocalSocket, reporterPhysicalAddress, scope)];
if ((trusted) && ((cc.ticks - entry.timestampTicks) < ZT_SELFAWARENESS_ENTRY_TIMEOUT)
&& (! entry.mySurface.ipsEqual(myPhysicalAddress))) {
// Changes to external surface reported by trusted peers causes path reset in this scope
entry.mySurface = myPhysicalAddress;
entry.timestampTicks = cc.ticks;
entry.trusted = trusted;
// Erase all entries in this scope that were not reported from this remote address to prevent 'thrashing'
// due to multiple reports of endpoint change.
// Don't use 'entry' after this since hash table gets modified.
for (Map<p_PhySurfaceKey, p_PhySurfaceEntry>::iterator i(m_phy.begin()); i != m_phy.end();) {
if ((i->first.scope == scope) && (i->first.reporterPhysicalAddress != reporterPhysicalAddress))
m_phy.erase(i++);
else
++i;
}
// Reset all paths within this scope and address family
Vector<SharedPtr<Peer> > peers, rootPeers;
m_ctx.topology->allPeers(peers, rootPeers);
for (Vector<SharedPtr<Peer> >::const_iterator p(peers.begin()); p != peers.end(); ++p)
(*p)->resetWithinScope(m_ctx, cc, (InetAddress::IpScope)scope, myPhysicalAddress.as.sa.sa_family);
m_ctx.t->resettingPathsInScope(
cc,
0x9afff100,
reporter,
reporterPhysicalAddress,
entry.mySurface,
myPhysicalAddress,
scope);
}
else {
// Otherwise just update DB to use to determine external surface info
entry.mySurface = myPhysicalAddress;
entry.timestampTicks = cc.ticks;
entry.trusted = trusted;
}
}
MultiMap< unsigned int, InetAddress > SelfAwareness::externalAddresses(CallContext &cc) const
void SelfAwareness::clean(const CallContext& cc)
{
MultiMap< unsigned int, InetAddress > r;
// Count endpoints reporting each IP/port combo
Map< InetAddress, unsigned long > counts;
{
Mutex::Lock l(m_phy_l);
for (Map< p_PhySurfaceKey, p_PhySurfaceEntry >::const_iterator i(m_phy.begin()); i != m_phy.end(); ++i) {
if ((cc.ticks - i->second.timestampTicks) < ZT_SELFAWARENESS_ENTRY_TIMEOUT)
++counts[i->second.mySurface];
}
}
// Invert to create a map from count to address
for (Map< InetAddress, unsigned long >::iterator i(counts.begin()); i != counts.end(); ++i)
r.insert(std::pair< unsigned long, InetAddress >(i->second, i->first));
return r;
Mutex::Lock l(m_phy_l);
for (Map<p_PhySurfaceKey, p_PhySurfaceEntry>::iterator i(m_phy.begin()); i != m_phy.end();) {
if ((cc.ticks - i->second.timestampTicks) >= ZT_SELFAWARENESS_ENTRY_TIMEOUT)
m_phy.erase(i++);
else
++i;
}
}
} // namespace ZeroTier
MultiMap<unsigned int, InetAddress> SelfAwareness::externalAddresses(CallContext& cc) const
{
MultiMap<unsigned int, InetAddress> r;
// Count endpoints reporting each IP/port combo
Map<InetAddress, unsigned long> counts;
{
Mutex::Lock l(m_phy_l);
for (Map<p_PhySurfaceKey, p_PhySurfaceEntry>::const_iterator i(m_phy.begin()); i != m_phy.end(); ++i) {
if ((cc.ticks - i->second.timestampTicks) < ZT_SELFAWARENESS_ENTRY_TIMEOUT)
++counts[i->second.mySurface];
}
}
// Invert to create a map from count to address
for (Map<InetAddress, unsigned long>::iterator i(counts.begin()); i != counts.end(); ++i)
r.insert(std::pair<unsigned long, InetAddress>(i->second, i->first));
return r;
}
} // namespace ZeroTier
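externalAddresses() above counts how many peers reported each surface and then inverts that map so callers can read the consensus ("mode") off the high end. The same pattern with standard containers, purely for illustration (the core uses its own Map/MultiMap aliases):

#include <map>
#include <string>

static std::multimap<unsigned long, std::string> invertCounts(const std::map<std::string, unsigned long>& counts)
{
    std::multimap<unsigned long, std::string> byCount;
    for (std::map<std::string, unsigned long>::const_iterator i(counts.begin()); i != counts.end(); ++i)
        byCount.insert(std::pair<unsigned long, std::string>(i->second, i->first)); // most-reported entries sort last
    return byCount;
}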

View file

@@ -14,12 +14,12 @@
#ifndef ZT_SELFAWARENESS_HPP
#define ZT_SELFAWARENESS_HPP
#include "Constants.hpp"
#include "InetAddress.hpp"
#include "Containers.hpp"
#include "Address.hpp"
#include "Mutex.hpp"
#include "CallContext.hpp"
#include "Constants.hpp"
#include "Containers.hpp"
#include "InetAddress.hpp"
#include "Mutex.hpp"
namespace ZeroTier {
@@ -31,94 +31,123 @@ class Context;
*
* Name aside, it shouldn't be capable of achieving sentience.
*/
class SelfAwareness
{
public:
explicit SelfAwareness(const Context &ctx);
class SelfAwareness {
public:
explicit SelfAwareness(const Context& ctx);
/**
* Called when a remote peer informs us of our external network address
*
* @param reporter Identity of reporting peer
* @param receivedOnLocalAddress Local address on which report was received
* @param reporterPhysicalAddress Physical address that reporting peer seems to have
* @param myPhysicalAddress Physical address that peer says we have
* @param trusted True if this peer is trusted as an authority to inform us of external address changes
*/
void iam(const CallContext &cc, const Identity &reporter, int64_t receivedOnLocalSocket, const InetAddress &reporterPhysicalAddress, const InetAddress &myPhysicalAddress, bool trusted);
/**
* Called when a remote peer informs us of our external network address
*
* @param reporter Identity of reporting peer
* @param receivedOnLocalAddress Local address on which report was received
* @param reporterPhysicalAddress Physical address that reporting peer seems to have
* @param myPhysicalAddress Physical address that peer says we have
* @param trusted True if this peer is trusted as an authority to inform us of external address changes
*/
void
iam(const CallContext& cc,
const Identity& reporter,
int64_t receivedOnLocalSocket,
const InetAddress& reporterPhysicalAddress,
const InetAddress& myPhysicalAddress,
bool trusted);
/**
* Clean up database periodically
*/
void clean(const CallContext &cc);
/**
* Clean up database periodically
*/
void clean(const CallContext& cc);
/**
* Get external address consensus, which is the statistical "mode" of external addresses.
*
* @return Map of count to IP/port representing how many endpoints reported each address
*/
MultiMap< unsigned int, InetAddress > externalAddresses(CallContext &cc) const;
/**
* Get external address consensus, which is the statistical "mode" of external addresses.
*
* @return Map of count to IP/port representing how many endpoints reported each address
*/
MultiMap<unsigned int, InetAddress> externalAddresses(CallContext& cc) const;
private:
struct p_PhySurfaceKey
{
Address reporter;
int64_t receivedOnLocalSocket;
InetAddress reporterPhysicalAddress;
InetAddress::IpScope scope;
private:
struct p_PhySurfaceKey {
Address reporter;
int64_t receivedOnLocalSocket;
InetAddress reporterPhysicalAddress;
InetAddress::IpScope scope;
ZT_INLINE p_PhySurfaceKey() noexcept
{}
ZT_INLINE p_PhySurfaceKey() noexcept
{
}
ZT_INLINE p_PhySurfaceKey(const Address &r, const int64_t rol, const InetAddress &ra, InetAddress::IpScope s) noexcept: reporter(r), receivedOnLocalSocket(rol), reporterPhysicalAddress(ra), scope(s)
{}
ZT_INLINE
p_PhySurfaceKey(const Address& r, const int64_t rol, const InetAddress& ra, InetAddress::IpScope s) noexcept
: reporter(r)
, receivedOnLocalSocket(rol)
, reporterPhysicalAddress(ra)
, scope(s)
{
}
ZT_INLINE unsigned long hashCode() const noexcept
{ return ((unsigned long)reporter.toInt() + (unsigned long)receivedOnLocalSocket + (unsigned long)scope); }
ZT_INLINE unsigned long hashCode() const noexcept
{
return ((unsigned long)reporter.toInt() + (unsigned long)receivedOnLocalSocket + (unsigned long)scope);
}
ZT_INLINE bool operator==(const p_PhySurfaceKey &k) const noexcept
{ return ((reporter == k.reporter) && (receivedOnLocalSocket == k.receivedOnLocalSocket) && (reporterPhysicalAddress == k.reporterPhysicalAddress) && (scope == k.scope)); }
ZT_INLINE bool operator==(const p_PhySurfaceKey& k) const noexcept
{
return (
(reporter == k.reporter) && (receivedOnLocalSocket == k.receivedOnLocalSocket)
&& (reporterPhysicalAddress == k.reporterPhysicalAddress) && (scope == k.scope));
}
ZT_INLINE bool operator!=(const p_PhySurfaceKey &k) const noexcept
{ return (!(*this == k)); }
ZT_INLINE bool operator!=(const p_PhySurfaceKey& k) const noexcept
{
return (! (*this == k));
}
ZT_INLINE bool operator<(const p_PhySurfaceKey &k) const noexcept
{
if (reporter < k.reporter) {
return true;
} else if (reporter == k.reporter) {
if (receivedOnLocalSocket < k.receivedOnLocalSocket) {
return true;
} else if (receivedOnLocalSocket == k.receivedOnLocalSocket) {
if (reporterPhysicalAddress < k.reporterPhysicalAddress) {
return true;
} else if (reporterPhysicalAddress == k.reporterPhysicalAddress) {
return scope < k.scope;
}
}
}
return false;
}
};
ZT_INLINE bool operator<(const p_PhySurfaceKey& k) const noexcept
{
if (reporter < k.reporter) {
return true;
}
else if (reporter == k.reporter) {
if (receivedOnLocalSocket < k.receivedOnLocalSocket) {
return true;
}
else if (receivedOnLocalSocket == k.receivedOnLocalSocket) {
if (reporterPhysicalAddress < k.reporterPhysicalAddress) {
return true;
}
else if (reporterPhysicalAddress == k.reporterPhysicalAddress) {
return scope < k.scope;
}
}
}
return false;
}
};
struct p_PhySurfaceEntry
{
InetAddress mySurface;
int64_t timestampTicks;
bool trusted;
struct p_PhySurfaceEntry {
InetAddress mySurface;
int64_t timestampTicks;
bool trusted;
ZT_INLINE p_PhySurfaceEntry() noexcept: mySurface(), timestampTicks(0), trusted(false)
{}
ZT_INLINE p_PhySurfaceEntry() noexcept
: mySurface()
, timestampTicks(0)
, trusted(false)
{
}
ZT_INLINE p_PhySurfaceEntry(const InetAddress &a, const int64_t t) noexcept: mySurface(a), timestampTicks(t), trusted(false)
{}
};
ZT_INLINE p_PhySurfaceEntry(const InetAddress& a, const int64_t t) noexcept
: mySurface(a)
, timestampTicks(t)
, trusted(false)
{
}
};
const Context &m_ctx;
Map< p_PhySurfaceKey, p_PhySurfaceEntry > m_phy;
Mutex m_phy_l;
const Context& m_ctx;
Map<p_PhySurfaceKey, p_PhySurfaceEntry> m_phy;
Mutex m_phy_l;
};
} // namespace ZeroTier
} // namespace ZeroTier
#endif

View file

@@ -25,173 +25,205 @@ namespace ZeroTier {
* Classes must have an atomic<int> field called __refCount and set this class
* as a friend to be used with this.
*/
template< typename T >
class SharedPtr : public TriviallyCopyable
{
public:
ZT_INLINE SharedPtr() noexcept: m_ptr(nullptr)
{}
template <typename T> class SharedPtr : public TriviallyCopyable {
public:
ZT_INLINE SharedPtr() noexcept : m_ptr(nullptr)
{
}
explicit ZT_INLINE SharedPtr(T *obj) noexcept: m_ptr(obj)
{ if (likely(obj != nullptr)) const_cast<std::atomic< int > *>(&(obj->__refCount))->fetch_add(1, std::memory_order_acquire); }
explicit ZT_INLINE SharedPtr(T* obj) noexcept : m_ptr(obj)
{
if (likely(obj != nullptr))
const_cast<std::atomic<int>*>(&(obj->__refCount))->fetch_add(1, std::memory_order_acquire);
}
ZT_INLINE SharedPtr(const SharedPtr &sp) noexcept: m_ptr(sp.m_acquire())
{}
ZT_INLINE SharedPtr(const SharedPtr& sp) noexcept : m_ptr(sp.m_acquire())
{
}
ZT_INLINE ~SharedPtr()
{ m_release(); }
ZT_INLINE ~SharedPtr()
{
m_release();
}
ZT_INLINE SharedPtr &operator=(const SharedPtr &sp)
{
if (likely(m_ptr != sp.m_ptr)) {
T *const p = sp.m_acquire();
m_release();
m_ptr = p;
}
return *this;
}
ZT_INLINE SharedPtr& operator=(const SharedPtr& sp)
{
if (likely(m_ptr != sp.m_ptr)) {
T* const p = sp.m_acquire();
m_release();
m_ptr = p;
}
return *this;
}
ZT_INLINE void set(T *ptr) noexcept
{
m_release();
m_ptr = ptr;
const_cast<std::atomic< int > *>(&(ptr->__refCount))->fetch_add(1, std::memory_order_acquire);
}
ZT_INLINE void set(T* ptr) noexcept
{
m_release();
m_ptr = ptr;
const_cast<std::atomic<int>*>(&(ptr->__refCount))->fetch_add(1, std::memory_order_acquire);
}
/**
* Swap with another pointer.
*
* This is much faster than using assignment as it requires no atomic
* operations at all.
*
* @param with Pointer to swap with
*/
ZT_INLINE void swap(SharedPtr &with) noexcept
{
T *const tmp = m_ptr;
m_ptr = with.m_ptr;
with.m_ptr = tmp;
}
/**
* Swap with another pointer.
*
* This is much faster than using assignment as it requires no atomic
* operations at all.
*
* @param with Pointer to swap with
*/
ZT_INLINE void swap(SharedPtr& with) noexcept
{
T* const tmp = m_ptr;
m_ptr = with.m_ptr;
with.m_ptr = tmp;
}
/**
* Move pointer from another SharedPtr to this one, zeroing target.
*
* This is faster than assignment as it saves one atomically synchronized
* increment. If this pointer is null there are no atomic operations at
* all.
*
* @param from Source pointer; will be changed to NULL
*/
ZT_INLINE void move(SharedPtr &from)
{
m_release();
m_ptr = from.m_ptr;
from.m_ptr = nullptr;
}
/**
* Move pointer from another SharedPtr to this one, zeroing target.
*
* This is faster than assignment as it saves one atomically synchronized
* increment. If this pointer is null there are no atomic operations at
* all.
*
* @param from Source pointer; will be changed to NULL
*/
ZT_INLINE void move(SharedPtr& from)
{
m_release();
m_ptr = from.m_ptr;
from.m_ptr = nullptr;
}
ZT_INLINE operator bool() const noexcept
{ return (m_ptr != nullptr); }
ZT_INLINE operator bool() const noexcept
{
return (m_ptr != nullptr);
}
ZT_INLINE T &operator*() const noexcept
{ return *m_ptr; }
ZT_INLINE T& operator*() const noexcept
{
return *m_ptr;
}
ZT_INLINE T *operator->() const noexcept
{ return m_ptr; }
ZT_INLINE T* operator->() const noexcept
{
return m_ptr;
}
/**
* @return Raw pointer to held object
*/
ZT_INLINE T *ptr() const noexcept
{ return m_ptr; }
/**
* @return Raw pointer to held object
*/
ZT_INLINE T* ptr() const noexcept
{
return m_ptr;
}
/**
* Set this pointer to NULL
*/
ZT_INLINE void zero()
{
m_release();
m_ptr = nullptr;
}
/**
* Set this pointer to NULL
*/
ZT_INLINE void zero()
{
m_release();
m_ptr = nullptr;
}
/**
* Return held object and null this pointer if reference count is one.
*
* If the reference count is one, the reference count is changed to zero
* and the object is returned. It is not deleted; the caller must do that
* if that is desired. This pointer will be set to NULL. If the reference
* count is not one nothing happens and NULL is returned.
*
* @return Pointer or NULL if more than one reference
*/
ZT_INLINE T *weakGC()
{
if (likely(m_ptr != nullptr)) {
int one = 1;
if (const_cast<std::atomic< int > *>(&(m_ptr->__refCount))->compare_exchange_strong(one, (int)0)) {
T *const ptr = m_ptr;
m_ptr = nullptr;
return ptr;
} else {
return nullptr;
}
} else {
return nullptr;
}
}
/**
* Return held object and null this pointer if reference count is one.
*
* If the reference count is one, the reference count is changed to zero
* and the object is returned. It is not deleted; the caller must do that
* if that is desired. This pointer will be set to NULL. If the reference
* count is not one nothing happens and NULL is returned.
*
* @return Pointer or NULL if more than one reference
*/
ZT_INLINE T* weakGC()
{
if (likely(m_ptr != nullptr)) {
int one = 1;
if (const_cast<std::atomic<int>*>(&(m_ptr->__refCount))->compare_exchange_strong(one, (int)0)) {
T* const ptr = m_ptr;
m_ptr = nullptr;
return ptr;
}
else {
return nullptr;
}
}
else {
return nullptr;
}
}
ZT_INLINE unsigned long hashCode() const noexcept
{ return (unsigned long)((uintptr_t)m_ptr + (uintptr_t)Utils::hash32((uint32_t)m_ptr)); }
ZT_INLINE unsigned long hashCode() const noexcept
{
return (unsigned long)((uintptr_t)m_ptr + (uintptr_t)Utils::hash32((uint32_t)m_ptr));
}
ZT_INLINE bool operator==(const SharedPtr &sp) const noexcept
{ return (m_ptr == sp.m_ptr); }
ZT_INLINE bool operator==(const SharedPtr& sp) const noexcept
{
return (m_ptr == sp.m_ptr);
}
ZT_INLINE bool operator!=(const SharedPtr &sp) const noexcept
{ return (m_ptr != sp.m_ptr); }
ZT_INLINE bool operator!=(const SharedPtr& sp) const noexcept
{
return (m_ptr != sp.m_ptr);
}
ZT_INLINE bool operator>(const SharedPtr &sp) const noexcept
{ return (reinterpret_cast<const uint8_t *>(m_ptr) > reinterpret_cast<const uint8_t *>(sp.m_ptr)); }
ZT_INLINE bool operator>(const SharedPtr& sp) const noexcept
{
return (reinterpret_cast<const uint8_t*>(m_ptr) > reinterpret_cast<const uint8_t*>(sp.m_ptr));
}
ZT_INLINE bool operator<(const SharedPtr &sp) const noexcept
{ return (reinterpret_cast<const uint8_t *>(m_ptr) < reinterpret_cast<const uint8_t *>(sp.m_ptr)); }
ZT_INLINE bool operator<(const SharedPtr& sp) const noexcept
{
return (reinterpret_cast<const uint8_t*>(m_ptr) < reinterpret_cast<const uint8_t*>(sp.m_ptr));
}
ZT_INLINE bool operator>=(const SharedPtr &sp) const noexcept
{ return (reinterpret_cast<const uint8_t *>(m_ptr) >= reinterpret_cast<const uint8_t *>(sp.m_ptr)); }
ZT_INLINE bool operator>=(const SharedPtr& sp) const noexcept
{
return (reinterpret_cast<const uint8_t*>(m_ptr) >= reinterpret_cast<const uint8_t*>(sp.m_ptr));
}
ZT_INLINE bool operator<=(const SharedPtr &sp) const noexcept
{ return (reinterpret_cast<const uint8_t *>(m_ptr) <= reinterpret_cast<const uint8_t *>(sp.m_ptr)); }
ZT_INLINE bool operator<=(const SharedPtr& sp) const noexcept
{
return (reinterpret_cast<const uint8_t*>(m_ptr) <= reinterpret_cast<const uint8_t*>(sp.m_ptr));
}
private:
ZT_INLINE T *m_acquire() const noexcept
{
if (likely(m_ptr != nullptr))
const_cast<std::atomic< int > *>(&(m_ptr->__refCount))->fetch_add(1, std::memory_order_acquire);
return m_ptr;
}
private:
ZT_INLINE T* m_acquire() const noexcept
{
if (likely(m_ptr != nullptr))
const_cast<std::atomic<int>*>(&(m_ptr->__refCount))->fetch_add(1, std::memory_order_acquire);
return m_ptr;
}
ZT_INLINE void m_release() const noexcept
{
if (likely(m_ptr != nullptr)) {
if (unlikely(const_cast<std::atomic< int > *>(&(m_ptr->__refCount))->fetch_sub(1, std::memory_order_release) <= 1))
delete m_ptr;
}
}
ZT_INLINE void m_release() const noexcept
{
if (likely(m_ptr != nullptr)) {
if (unlikely(
const_cast<std::atomic<int>*>(&(m_ptr->__refCount))->fetch_sub(1, std::memory_order_release) <= 1))
delete m_ptr;
}
}
T *m_ptr;
T* m_ptr;
};
} // namespace ZeroTier
} // namespace ZeroTier
// Augment std::swap to speed up some operations with SharedPtr.
namespace std {
template< typename T >
ZT_MAYBE_UNUSED ZT_INLINE void swap(ZeroTier::SharedPtr< T > &a, ZeroTier::SharedPtr< T > &b) noexcept
{ a.swap(b); }
template <typename T> ZT_MAYBE_UNUSED ZT_INLINE void swap(ZeroTier::SharedPtr<T>& a, ZeroTier::SharedPtr<T>& b) noexcept
{
a.swap(b);
}
template< typename T >
ZT_MAYBE_UNUSED ZT_INLINE void move(ZeroTier::SharedPtr< T > &a, ZeroTier::SharedPtr< T > &b) noexcept
{ a.move(b); }
template <typename T> ZT_MAYBE_UNUSED ZT_INLINE void move(ZeroTier::SharedPtr<T>& a, ZeroTier::SharedPtr<T>& b) noexcept
{
a.move(b);
}
} // namespace std
} // namespace std
#endif
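A sketch of the contract described in the class comment: the managed type carries an atomic __refCount starting at zero (shown public here for brevity; the core keeps it private and declares SharedPtr a friend):

#include "SharedPtr.hpp"

#include <atomic>

using namespace ZeroTier;

class Thing
{
public:
    Thing() : value(0), __refCount(0) {}
    int value;
    std::atomic<int> __refCount; // required by SharedPtr; may instead be private with SharedPtr as a friend
};

static void sharedPtrExample()
{
    SharedPtr<Thing> a(new Thing()); // reference count becomes 1
    SharedPtr<Thing> b(a);           // reference count becomes 2
    b->value = 1;
} // count falls back to 0 and the Thing is deleted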

View file

@@ -29,47 +29,54 @@
* This can be used in place of Mutex to lock things that are extremely fast
* to access. It should be used very sparingly.
*/
class Spinlock
{
public:
/**
* Pause current thread using whatever methods might be available
*
* This is broken out since it's used in a few other places where
* spinlock-like constructions are used.
*/
ZT_INLINE static void pause() noexcept
{
class Spinlock {
public:
/**
* Pause current thread using whatever methods might be available
*
* This is broken out since it's used in a few other places where
* spinlock-like constructions are used.
*/
ZT_INLINE static void pause() noexcept
{
#ifdef ZT_ARCH_X64
_mm_pause();
_mm_pause();
#endif
#ifdef __LINUX__
sched_yield();
sched_yield();
#else
std::this_thread::yield();
std::this_thread::yield();
#endif
}
}
ZT_INLINE Spinlock() noexcept: m_locked(false)
{}
ZT_INLINE Spinlock() noexcept : m_locked(false)
{
}
ZT_INLINE void lock() noexcept
{
if (unlikely(m_locked.test_and_set(std::memory_order_acquire))) {
do {
Spinlock::pause();
} while (m_locked.test_and_set(std::memory_order_acquire));
}
}
ZT_INLINE void lock() noexcept
{
if (unlikely(m_locked.test_and_set(std::memory_order_acquire))) {
do {
Spinlock::pause();
} while (m_locked.test_and_set(std::memory_order_acquire));
}
}
ZT_INLINE void unlock() noexcept
{ m_locked.clear(std::memory_order_release); }
ZT_INLINE void unlock() noexcept
{
m_locked.clear(std::memory_order_release);
}
private:
ZT_INLINE Spinlock(const Spinlock &) noexcept {}
ZT_INLINE const Spinlock &operator=(const Spinlock &) noexcept { return *this; }
private:
ZT_INLINE Spinlock(const Spinlock&) noexcept
{
}
ZT_INLINE const Spinlock& operator=(const Spinlock&) noexcept
{
return *this;
}
std::atomic_flag m_locked;
std::atomic_flag m_locked;
};
#endif
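A usage sketch, assuming Spinlock sits in the ZeroTier namespace like the rest of the core; the critical section must stay very short for a spinlock to make sense:

#include "Spinlock.hpp"

using namespace ZeroTier; // assumption: Spinlock is declared in the ZeroTier namespace

static Spinlock s_counterLock;
static unsigned long s_counter = 0;

static void incrementCounter()
{
    s_counterLock.lock(); // spins (via Spinlock::pause()) until the flag is acquired
    ++s_counter;          // keep the work under the lock minimal
    s_counterLock.unlock();
}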

View file

@@ -14,69 +14,90 @@
#ifndef ZT_STORE_HPP
#define ZT_STORE_HPP
#include "CallContext.hpp"
#include "Constants.hpp"
#include "Containers.hpp"
#include "Context.hpp"
#include "CallContext.hpp"
namespace ZeroTier {
/**
* Wrapper around API callbacks for data store
*/
class Store
{
public:
ZT_INLINE Store(const Context &ctx): m_ctx(ctx)
{}
class Store {
public:
ZT_INLINE Store(const Context& ctx) : m_ctx(ctx)
{
}
/**
* Get a state object
*
* @param type Object type
* @param id Object ID
* @param idSize Size of object ID in qwords
* @return Data or empty vector if not found
*/
ZT_INLINE Vector< uint8_t > get(const CallContext &cc, ZT_StateObjectType type, const uint64_t *const id, unsigned int idSize) const
{
Vector< uint8_t > dv;
void *data = nullptr;
void (*freeFunc)(void *) = nullptr;
const int r = m_ctx.cb.stateGetFunction(reinterpret_cast<ZT_Node *>(m_ctx.node), m_ctx.uPtr, cc.tPtr, type, id, idSize, &data, &freeFunc);
if (r > 0)
dv.assign(reinterpret_cast<const uint8_t *>(data), reinterpret_cast<const uint8_t *>(data) + r);
if ((data) && (freeFunc))
freeFunc(data);
return dv;
}
/**
* Get a state object
*
* @param type Object type
* @param id Object ID
* @param idSize Size of object ID in qwords
* @return Data or empty vector if not found
*/
ZT_INLINE Vector<uint8_t>
get(const CallContext& cc, ZT_StateObjectType type, const uint64_t* const id, unsigned int idSize) const
{
Vector<uint8_t> dv;
void* data = nullptr;
void (*freeFunc)(void*) = nullptr;
const int r = m_ctx.cb.stateGetFunction(
reinterpret_cast<ZT_Node*>(m_ctx.node),
m_ctx.uPtr,
cc.tPtr,
type,
id,
idSize,
&data,
&freeFunc);
if (r > 0)
dv.assign(reinterpret_cast<const uint8_t*>(data), reinterpret_cast<const uint8_t*>(data) + r);
if ((data) && (freeFunc))
freeFunc(data);
return dv;
}
/**
* Store a state object
*
* @param type Object type
* @param id Object ID
* @param idSize Size of object ID in qwords
* @param data Data to store
* @param len Length of data
*/
ZT_INLINE void put(const CallContext &cc, ZT_StateObjectType type, const uint64_t *const id, const unsigned int idSize, const void *const data, const unsigned int len) noexcept
{ m_ctx.cb.statePutFunction(reinterpret_cast<ZT_Node *>(this), m_ctx.uPtr, cc.tPtr, type, id, idSize, data, (int)len); }
/**
* Store a state object
*
* @param type Object type
* @param id Object ID
* @param idSize Size of object ID in qwords
* @param data Data to store
* @param len Length of data
*/
ZT_INLINE void
put(const CallContext& cc,
ZT_StateObjectType type,
const uint64_t* const id,
const unsigned int idSize,
const void* const data,
const unsigned int len) noexcept
{
m_ctx.cb
.statePutFunction(reinterpret_cast<ZT_Node*>(this), m_ctx.uPtr, cc.tPtr, type, id, idSize, data, (int)len);
}
/**
* Erase a state object from the object store
*
* @param type Object type
* @param id Object ID
* @param idSize Size of object ID in qwords
*/
ZT_INLINE void erase(const CallContext &cc, ZT_StateObjectType type, const uint64_t *const id, const unsigned int idSize) noexcept
{ m_ctx.cb.statePutFunction(reinterpret_cast<ZT_Node *>(this), m_ctx.uPtr, cc.tPtr, type, id, idSize, nullptr, -1); }
/**
* Erase a state object from the object store
*
* @param type Object type
* @param id Object ID
* @param idSize Size of object ID in qwords
*/
ZT_INLINE void
erase(const CallContext& cc, ZT_StateObjectType type, const uint64_t* const id, const unsigned int idSize) noexcept
{
m_ctx.cb.statePutFunction(reinterpret_cast<ZT_Node*>(this), m_ctx.uPtr, cc.tPtr, type, id, idSize, nullptr, -1);
}
private:
const Context &m_ctx;
private:
const Context& m_ctx;
};
} // namespace ZeroTier
} // namespace ZeroTier
#endif
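The wrapper above reduces the host's state callbacks to three operations. A hypothetical in-memory equivalent (illustrative names and containers, not part of the ZeroTier API) makes the contract easier to see: get() returns an empty vector on a miss, put() overwrites, and erasing is simply a put with a negative length, mirroring the nullptr / -1 call in erase() above.

#include <cstdint>
#include <map>
#include <vector>

// Illustrative stand-in for the callback-backed Store; keyed on the first
// qword of the object ID only, which is all this sketch needs.
class InMemoryStore {
  public:
    std::vector<uint8_t> get(const uint64_t id) const
    {
        const auto it = m_objects.find(id);
        return (it == m_objects.end()) ? std::vector<uint8_t>() : it->second;
    }
    void put(const uint64_t id, const void* const data, const int len)
    {
        if (len < 0) {   // mirrors statePutFunction(..., nullptr, -1) meaning "erase"
            m_objects.erase(id);
            return;
        }
        const uint8_t* const d = reinterpret_cast<const uint8_t*>(data);
        m_objects[id].assign(d, d + len);
    }

  private:
    std::map<uint64_t, std::vector<uint8_t> > m_objects;
};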

View file

@@ -14,130 +14,139 @@
#ifndef ZT_SYMMETRICKEY_HPP
#define ZT_SYMMETRICKEY_HPP
#include "Constants.hpp"
#include "Utils.hpp"
#include "AES.hpp"
#include "Address.hpp"
#include "Constants.hpp"
#include "Utils.hpp"
namespace ZeroTier {
/**
* Container for symmetric keys and ciphers initialized with them.
*/
class SymmetricKey
{
public:
/**
* Construct an uninitialized key (init() must be called)
*/
ZT_INLINE SymmetricKey():
m_secret(),
m_ts(-1),
m_initialNonce(0),
m_cipher(),
m_nonce(0)
{}
class SymmetricKey {
public:
/**
* Construct an uninitialized key (init() must be called)
*/
ZT_INLINE SymmetricKey() : m_secret(), m_ts(-1), m_initialNonce(0), m_cipher(), m_nonce(0)
{
}
/**
* Construct a new symmetric key
*
* SECURITY: the MSB of the stored nonce counter is always 0 because that bit
* is reserved for the direction flag (0 or 1 depending on which "direction"
* data is moving). See nextMessage().
*
* @param ts Key timestamp
* @param key Key (must be 48 bytes / 384 bits)
*/
ZT_INLINE SymmetricKey(const int64_t ts, const void *const key) noexcept:
m_secret(key),
m_ts(ts),
m_initialNonce(Utils::getSecureRandomU64() >> 1U),
m_cipher(key),
m_nonce(m_initialNonce)
{}
/**
* Construct a new symmetric key
*
* SECURITY: the MSB of the stored nonce counter is always 0 because that bit
* is reserved for the direction flag (0 or 1 depending on which "direction"
* data is moving). See nextMessage().
*
* @param ts Key timestamp
* @param key Key (must be 48 bytes / 384 bits)
*/
ZT_INLINE SymmetricKey(const int64_t ts, const void* const key) noexcept
: m_secret(key)
, m_ts(ts)
, m_initialNonce(Utils::getSecureRandomU64() >> 1U)
, m_cipher(key)
, m_nonce(m_initialNonce)
{
}
ZT_INLINE SymmetricKey(const SymmetricKey &k) noexcept:
m_secret(k.m_secret),
m_ts(k.m_ts),
m_initialNonce(k.m_initialNonce),
m_cipher(k.m_secret.data),
m_nonce(k.m_nonce.load(std::memory_order_relaxed))
{}
ZT_INLINE SymmetricKey(const SymmetricKey& k) noexcept
: m_secret(k.m_secret)
, m_ts(k.m_ts)
, m_initialNonce(k.m_initialNonce)
, m_cipher(k.m_secret.data)
, m_nonce(k.m_nonce.load(std::memory_order_relaxed))
{
}
ZT_INLINE ~SymmetricKey() noexcept
{ Utils::burn(m_secret.data, ZT_SYMMETRIC_KEY_SIZE); }
ZT_INLINE ~SymmetricKey() noexcept
{
Utils::burn(m_secret.data, ZT_SYMMETRIC_KEY_SIZE);
}
ZT_INLINE SymmetricKey &operator=(const SymmetricKey &k) noexcept
{
m_secret = k.m_secret;
m_ts = k.m_ts;
m_initialNonce = k.m_initialNonce;
m_cipher.init(k.m_secret.data);
m_nonce.store(k.m_nonce.load(std::memory_order_relaxed), std::memory_order_relaxed);
return *this;
}
ZT_INLINE SymmetricKey& operator=(const SymmetricKey& k) noexcept
{
m_secret = k.m_secret;
m_ts = k.m_ts;
m_initialNonce = k.m_initialNonce;
m_cipher.init(k.m_secret.data);
m_nonce.store(k.m_nonce.load(std::memory_order_relaxed), std::memory_order_relaxed);
return *this;
}
/**
* Initialize or re-initialize a symmetric key
*
* @param ts Key timestamp
* @param key Key (must be 48 bytes / 384 bits)
*/
ZT_INLINE void init(const int64_t ts, const void *const key) noexcept
{
Utils::copy< ZT_SYMMETRIC_KEY_SIZE >(m_secret.data, key);
m_ts = ts;
m_initialNonce = Utils::getSecureRandomU64() >> 1U;
m_cipher.init(key);
m_nonce.store(m_initialNonce, std::memory_order_relaxed);
}
/**
* Initialize or re-initialize a symmetric key
*
* @param ts Key timestamp
* @param key Key (must be 48 bytes / 384 bits)
*/
ZT_INLINE void init(const int64_t ts, const void* const key) noexcept
{
Utils::copy<ZT_SYMMETRIC_KEY_SIZE>(m_secret.data, key);
m_ts = ts;
m_initialNonce = Utils::getSecureRandomU64() >> 1U;
m_cipher.init(key);
m_nonce.store(m_initialNonce, std::memory_order_relaxed);
}
/**
* Advance usage counter by one and return the next IV / packet ID.
*
* @param sender Sending ZeroTier address
* @param receiver Receiving ZeroTier address
* @return Next unique IV for next message
*/
ZT_INLINE uint64_t nextMessage(const Address sender, const Address receiver) noexcept
{ return m_nonce.fetch_add(1, std::memory_order_relaxed) ^ (((uint64_t)(sender > receiver)) << 63U); }
/**
* Advance usage counter by one and return the next IV / packet ID.
*
* @param sender Sending ZeroTier address
* @param receiver Receiving ZeroTier address
* @return Next unique IV for next message
*/
ZT_INLINE uint64_t nextMessage(const Address sender, const Address receiver) noexcept
{
return m_nonce.fetch_add(1, std::memory_order_relaxed) ^ (((uint64_t)(sender > receiver)) << 63U);
}
/**
* Get the number of times this key has been used.
*
* This is used along with the key's initial timestamp to determine key age
* for ephemeral key rotation.
*
* @return Number of times nextMessage() has been called since object creation
*/
ZT_INLINE uint64_t odometer() const noexcept
{ return m_nonce.load(std::memory_order_relaxed) - m_initialNonce; }
/**
* Get the number of times this key has been used.
*
* This is used along with the key's initial timestamp to determine key age
* for ephemeral key rotation.
*
* @return Number of times nextMessage() has been called since object creation
*/
ZT_INLINE uint64_t odometer() const noexcept
{
return m_nonce.load(std::memory_order_relaxed) - m_initialNonce;
}
/**
* @return Key creation timestamp or -1 if this is a long-lived key
*/
ZT_INLINE int64_t timestamp() const noexcept
{ return m_ts; }
/**
* @return Key creation timestamp or -1 if this is a long-lived key
*/
ZT_INLINE int64_t timestamp() const noexcept
{
return m_ts;
}
/**
* @return 48-byte / 384-bit secret key
*/
ZT_INLINE const uint8_t *key() const noexcept
{ return m_secret.data; }
/**
* @return 48-byte / 384-bit secret key
*/
ZT_INLINE const uint8_t* key() const noexcept
{
return m_secret.data;
}
/**
* @return AES cipher (already initialized with secret key)
*/
ZT_INLINE const AES &aes() const noexcept
{ return m_cipher; }
/**
* @return AES cipher (already initialized with secret key)
*/
ZT_INLINE const AES& aes() const noexcept
{
return m_cipher;
}
private:
Blob< ZT_SYMMETRIC_KEY_SIZE > m_secret;
int64_t m_ts;
uint64_t m_initialNonce;
AES m_cipher;
std::atomic< uint64_t > m_nonce;
private:
Blob<ZT_SYMMETRIC_KEY_SIZE> m_secret;
int64_t m_ts;
uint64_t m_initialNonce;
AES m_cipher;
std::atomic<uint64_t> m_nonce;
};
} // namespace ZeroTier
} // namespace ZeroTier
#endif
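nextMessage() above packs a direction flag into the top bit of an otherwise monotonically increasing counter, so the two ends of a link never produce the same IV even though they share a key. A standalone restatement of that arithmetic with plain integers (hypothetical helper, not ZeroTier code):

#include <cstdint>

// counter starts at a random 63-bit value (MSB cleared), exactly like
// m_initialNonce = Utils::getSecureRandomU64() >> 1 above.
uint64_t nextMessageIv(uint64_t& counter, const uint64_t senderAddr, const uint64_t receiverAddr)
{
    const uint64_t direction = (senderAddr > receiverAddr) ? 1ULL : 0ULL;
    return counter++ ^ (direction << 63);
}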

View file

@@ -15,71 +15,71 @@
namespace ZeroTier {
bool TagCredential::sign(const Identity &signer) noexcept
bool TagCredential::sign(const Identity& signer) noexcept
{
uint8_t buf[ZT_TAG_MARSHAL_SIZE_MAX];
if (signer.hasPrivate()) {
m_signedBy = signer.address();
m_signatureLength = signer.sign(buf, (unsigned int)marshal(buf, true), m_signature, sizeof(m_signature));
return true;
}
return false;
uint8_t buf[ZT_TAG_MARSHAL_SIZE_MAX];
if (signer.hasPrivate()) {
m_signedBy = signer.address();
m_signatureLength = signer.sign(buf, (unsigned int)marshal(buf, true), m_signature, sizeof(m_signature));
return true;
}
return false;
}
int TagCredential::marshal(uint8_t data[ZT_TAG_MARSHAL_SIZE_MAX], bool forSign) const noexcept
{
int p = 0;
if (forSign) {
for (int k = 0; k < 8; ++k)
data[p++] = 0x7f;
}
Utils::storeBigEndian< uint64_t >(data + p, m_networkId);
Utils::storeBigEndian< uint64_t >(data + p + 8, (uint64_t)m_ts);
Utils::storeBigEndian< uint32_t >(data + p + 16, m_id);
Utils::storeBigEndian< uint32_t >(data + p + 20, m_value);
p += 24;
m_issuedTo.copyTo(data + p);
p += ZT_ADDRESS_LENGTH;
m_signedBy.copyTo(data + p);
p += ZT_ADDRESS_LENGTH;
if (!forSign) {
data[p++] = 1;
Utils::storeBigEndian< uint16_t >(data + p, (uint16_t)m_signatureLength);
p += 2;
Utils::copy(data + p, m_signature, m_signatureLength);
p += (int)m_signatureLength;
}
data[p++] = 0;
data[p++] = 0;
if (forSign) {
for (int k = 0; k < 8; ++k)
data[p++] = 0x7f;
}
return p;
int p = 0;
if (forSign) {
for (int k = 0; k < 8; ++k)
data[p++] = 0x7f;
}
Utils::storeBigEndian<uint64_t>(data + p, m_networkId);
Utils::storeBigEndian<uint64_t>(data + p + 8, (uint64_t)m_ts);
Utils::storeBigEndian<uint32_t>(data + p + 16, m_id);
Utils::storeBigEndian<uint32_t>(data + p + 20, m_value);
p += 24;
m_issuedTo.copyTo(data + p);
p += ZT_ADDRESS_LENGTH;
m_signedBy.copyTo(data + p);
p += ZT_ADDRESS_LENGTH;
if (! forSign) {
data[p++] = 1;
Utils::storeBigEndian<uint16_t>(data + p, (uint16_t)m_signatureLength);
p += 2;
Utils::copy(data + p, m_signature, m_signatureLength);
p += (int)m_signatureLength;
}
data[p++] = 0;
data[p++] = 0;
if (forSign) {
for (int k = 0; k < 8; ++k)
data[p++] = 0x7f;
}
return p;
}
int TagCredential::unmarshal(const uint8_t *data, int len) noexcept
int TagCredential::unmarshal(const uint8_t* data, int len) noexcept
{
if (len < 37)
return -1;
m_networkId = Utils::loadBigEndian< uint64_t >(data);
m_ts = (int64_t)Utils::loadBigEndian< uint64_t >(data + 8);
m_id = Utils::loadBigEndian< uint32_t >(data + 16);
m_value = Utils::loadBigEndian< uint32_t >(data + 20);
m_issuedTo.setTo(data + 24);
m_signedBy.setTo(data + 29);
// 1 byte reserved
m_signatureLength = Utils::loadBigEndian< uint16_t >(data + 35);
int p = 37 + (int)m_signatureLength;
if ((m_signatureLength > ZT_SIGNATURE_BUFFER_SIZE) || (p > len))
return -1;
Utils::copy(m_signature, data + p, m_signatureLength);
if ((p + 2) > len)
return -1;
p += 2 + Utils::loadBigEndian< uint16_t >(data + p);
if (p > len)
return -1;
return p;
if (len < 37)
return -1;
m_networkId = Utils::loadBigEndian<uint64_t>(data);
m_ts = (int64_t)Utils::loadBigEndian<uint64_t>(data + 8);
m_id = Utils::loadBigEndian<uint32_t>(data + 16);
m_value = Utils::loadBigEndian<uint32_t>(data + 20);
m_issuedTo.setTo(data + 24);
m_signedBy.setTo(data + 29);
// 1 byte reserved
m_signatureLength = Utils::loadBigEndian<uint16_t>(data + 35);
int p = 37 + (int)m_signatureLength;
if ((m_signatureLength > ZT_SIGNATURE_BUFFER_SIZE) || (p > len))
return -1;
Utils::copy(m_signature, data + p, m_signatureLength);
if ((p + 2) > len)
return -1;
p += 2 + Utils::loadBigEndian<uint16_t>(data + p);
if (p > len)
return -1;
return p;
}
} // namespace ZeroTier
} // namespace ZeroTier
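As a reading aid, the fixed byte offsets implied by marshal() and unmarshal() above (with no forSign padding) can be written out explicitly; these constants are illustrative only and are not part of the ZeroTier headers:

// Tag credential wire layout (offsets in bytes), per marshal() above.
enum : int {
    kOffNetworkId = 0,    // uint64, big-endian
    kOffTimestamp = 8,    // uint64, big-endian
    kOffId        = 16,   // uint32, big-endian
    kOffValue     = 20,   // uint32, big-endian
    kOffIssuedTo  = 24,   // 5-byte ZeroTier address
    kOffSignedBy  = 29,   // 5-byte ZeroTier address
    kOffReserved  = 34,   // single byte, written as 1
    kOffSigLength = 35,   // uint16, big-endian
    kOffSignature = 37    // m_signatureLength bytes, followed by a 2-byte zero field
};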

View file

@@ -14,10 +14,10 @@
#ifndef ZT_TAG_HPP
#define ZT_TAG_HPP
#include "Address.hpp"
#include "C25519.hpp"
#include "Constants.hpp"
#include "Credential.hpp"
#include "C25519.hpp"
#include "Address.hpp"
#include "Identity.hpp"
#define ZT_TAG_MARSHAL_SIZE_MAX (8 + 8 + 4 + 4 + 5 + 5 + 1 + 2 + ZT_SIGNATURE_BUFFER_SIZE + 2)
@@ -43,137 +43,191 @@ class Context;
* Unlike capabilities tags are signed only by the issuer and are never
* transferable.
*/
class TagCredential : public Credential
{
friend class Credential;
class TagCredential : public Credential {
friend class Credential;
public:
static constexpr ZT_CredentialType credentialType() noexcept
{ return ZT_CREDENTIAL_TYPE_TAG; }
public:
static constexpr ZT_CredentialType credentialType() noexcept
{
return ZT_CREDENTIAL_TYPE_TAG;
}
ZT_INLINE TagCredential() noexcept
{ memoryZero(this); }
ZT_INLINE TagCredential() noexcept
{
memoryZero(this);
}
/**
* @param nwid Network ID
* @param ts Timestamp
* @param issuedTo Address to which this tag was issued
* @param id Tag ID
* @param value Tag value
*/
ZT_INLINE TagCredential(const uint64_t nwid, const int64_t ts, const Address &issuedTo, const uint32_t id, const uint32_t value) noexcept:
m_id(id),
m_value(value),
m_networkId(nwid),
m_ts(ts),
m_issuedTo(issuedTo),
m_signedBy(),
m_signatureLength(0)
{}
/**
* @param nwid Network ID
* @param ts Timestamp
* @param issuedTo Address to which this tag was issued
* @param id Tag ID
* @param value Tag value
*/
ZT_INLINE TagCredential(
const uint64_t nwid,
const int64_t ts,
const Address& issuedTo,
const uint32_t id,
const uint32_t value) noexcept
: m_id(id)
, m_value(value)
, m_networkId(nwid)
, m_ts(ts)
, m_issuedTo(issuedTo)
, m_signedBy()
, m_signatureLength(0)
{
}
ZT_INLINE uint32_t id() const noexcept
{ return m_id; }
ZT_INLINE uint32_t id() const noexcept
{
return m_id;
}
ZT_INLINE const uint32_t &value() const noexcept
{ return m_value; }
ZT_INLINE const uint32_t& value() const noexcept
{
return m_value;
}
ZT_INLINE uint64_t networkId() const noexcept
{ return m_networkId; }
ZT_INLINE uint64_t networkId() const noexcept
{
return m_networkId;
}
ZT_INLINE int64_t timestamp() const noexcept
{ return m_ts; }
ZT_INLINE int64_t timestamp() const noexcept
{
return m_ts;
}
ZT_INLINE int64_t revision() const noexcept
{ return m_ts; }
ZT_INLINE int64_t revision() const noexcept
{
return m_ts;
}
ZT_INLINE const Address &issuedTo() const noexcept
{ return m_issuedTo; }
ZT_INLINE const Address& issuedTo() const noexcept
{
return m_issuedTo;
}
ZT_INLINE const Address &signer() const noexcept
{ return m_signedBy; }
ZT_INLINE const Address& signer() const noexcept
{
return m_signedBy;
}
ZT_INLINE const uint8_t *signature() const noexcept
{ return m_signature; }
ZT_INLINE const uint8_t* signature() const noexcept
{
return m_signature;
}
ZT_INLINE unsigned int signatureLength() const noexcept
{ return m_signatureLength; }
ZT_INLINE unsigned int signatureLength() const noexcept
{
return m_signatureLength;
}
/**
* Sign this tag
*
* @param signer Signing identity, must have private key
* @return True if signature was successful
*/
bool sign(const Identity &signer) noexcept;
/**
* Sign this tag
*
* @param signer Signing identity, must have private key
* @return True if signature was successful
*/
bool sign(const Identity& signer) noexcept;
/**
* Check this tag's signature
*
* @param ctx Runtime context to allow identity lookup for signedBy
* @param cc Call context handed through to any callbacks invoked as a result of this call
*/
ZT_INLINE Credential::VerifyResult verify(const Context &ctx, const CallContext &cc) const noexcept
{ return s_verify(ctx, cc, *this); }
/**
* Check this tag's signature
*
* @param ctx Runtime context to allow identity lookup for signedBy
* @param cc Call context handed through to any callbacks invoked as a result of this call
*/
ZT_INLINE Credential::VerifyResult verify(const Context& ctx, const CallContext& cc) const noexcept
{
return s_verify(ctx, cc, *this);
}
static constexpr int marshalSizeMax() noexcept
{ return ZT_TAG_MARSHAL_SIZE_MAX; }
static constexpr int marshalSizeMax() noexcept
{
return ZT_TAG_MARSHAL_SIZE_MAX;
}
int marshal(uint8_t data[ZT_TAG_MARSHAL_SIZE_MAX], bool forSign = false) const noexcept;
int marshal(uint8_t data[ZT_TAG_MARSHAL_SIZE_MAX], bool forSign = false) const noexcept;
int unmarshal(const uint8_t *data, int len) noexcept;
int unmarshal(const uint8_t* data, int len) noexcept;
// Provides natural sort order by ID
ZT_INLINE bool operator<(const TagCredential &t) const noexcept
{ return (m_id < t.m_id); }
// Provides natural sort order by ID
ZT_INLINE bool operator<(const TagCredential& t) const noexcept
{
return (m_id < t.m_id);
}
ZT_INLINE bool operator==(const TagCredential &t) const noexcept
{ return (memcmp(this, &t, sizeof(TagCredential)) == 0); }
ZT_INLINE bool operator==(const TagCredential& t) const noexcept
{
return (memcmp(this, &t, sizeof(TagCredential)) == 0);
}
ZT_INLINE bool operator!=(const TagCredential &t) const noexcept
{ return (memcmp(this, &t, sizeof(TagCredential)) != 0); }
ZT_INLINE bool operator!=(const TagCredential& t) const noexcept
{
return (memcmp(this, &t, sizeof(TagCredential)) != 0);
}
// For searching sorted arrays or lists of Tags by ID
struct IdComparePredicate
{
ZT_INLINE bool operator()(const TagCredential &a, const TagCredential &b) const noexcept
{ return (a.id() < b.id()); }
// For searching sorted arrays or lists of Tags by ID
struct IdComparePredicate {
ZT_INLINE bool operator()(const TagCredential& a, const TagCredential& b) const noexcept
{
return (a.id() < b.id());
}
ZT_INLINE bool operator()(const uint32_t a, const TagCredential &b) const noexcept
{ return (a < b.id()); }
ZT_INLINE bool operator()(const uint32_t a, const TagCredential& b) const noexcept
{
return (a < b.id());
}
ZT_INLINE bool operator()(const TagCredential &a, const uint32_t b) const noexcept
{ return (a.id() < b); }
ZT_INLINE bool operator()(const TagCredential& a, const uint32_t b) const noexcept
{
return (a.id() < b);
}
ZT_INLINE bool operator()(const TagCredential *a, const TagCredential *b) const noexcept
{ return (a->id() < b->id()); }
ZT_INLINE bool operator()(const TagCredential* a, const TagCredential* b) const noexcept
{
return (a->id() < b->id());
}
ZT_INLINE bool operator()(const TagCredential *a, const TagCredential &b) const noexcept
{ return (a->id() < b.id()); }
ZT_INLINE bool operator()(const TagCredential* a, const TagCredential& b) const noexcept
{
return (a->id() < b.id());
}
ZT_INLINE bool operator()(const TagCredential &a, const TagCredential *b) const noexcept
{ return (a.id() < b->id()); }
ZT_INLINE bool operator()(const TagCredential& a, const TagCredential* b) const noexcept
{
return (a.id() < b->id());
}
ZT_INLINE bool operator()(const uint32_t a, const TagCredential *b) const noexcept
{ return (a < b->id()); }
ZT_INLINE bool operator()(const uint32_t a, const TagCredential* b) const noexcept
{
return (a < b->id());
}
ZT_INLINE bool operator()(const TagCredential *a, const uint32_t b) const noexcept
{ return (a->id() < b); }
ZT_INLINE bool operator()(const TagCredential* a, const uint32_t b) const noexcept
{
return (a->id() < b);
}
ZT_INLINE bool operator()(const uint32_t a, const uint32_t b) const noexcept
{ return (a < b); }
};
ZT_INLINE bool operator()(const uint32_t a, const uint32_t b) const noexcept
{
return (a < b);
}
};
private:
uint32_t m_id;
uint32_t m_value;
uint64_t m_networkId;
int64_t m_ts;
Address m_issuedTo;
Address m_signedBy;
unsigned int m_signatureLength;
uint8_t m_signature[ZT_SIGNATURE_BUFFER_SIZE];
private:
uint32_t m_id;
uint32_t m_value;
uint64_t m_networkId;
int64_t m_ts;
Address m_issuedTo;
Address m_signedBy;
unsigned int m_signatureLength;
uint8_t m_signature[ZT_SIGNATURE_BUFFER_SIZE];
};
} // namespace ZeroTier
} // namespace ZeroTier
#endif
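IdComparePredicate above exists so containers of tags (or pointers to tags) kept sorted by ID can be searched with the standard algorithms; a hypothetical usage sketch, assuming the vector is already sorted by ID:

#include <algorithm>
#include <vector>
// #include "Tag.hpp" would supply ZeroTier::TagCredential in a real build.

const ZeroTier::TagCredential* findTagById(const std::vector<ZeroTier::TagCredential>& sortedTags, const uint32_t id)
{
    const auto it =
        std::lower_bound(sortedTags.begin(), sortedTags.end(), id, ZeroTier::TagCredential::IdComparePredicate());
    return ((it != sortedTags.end()) && (it->id() == id)) ? &*it : nullptr;
}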

File diff suppressed because it is too large Load diff

View file

@@ -47,7 +47,7 @@
#include <stdio.h>
#ifndef ZT_T_PRINTF
#define ZT_T_PRINTF(fmt,...) printf((fmt),##__VA_ARGS__),fflush(stdout)
#define ZT_T_PRINTF(fmt, ...) printf((fmt), ##__VA_ARGS__), fflush(stdout)
#endif
#ifdef __cplusplus
@@ -57,7 +57,7 @@ extern "C" {
/**
* Test platform, compiler behavior, utility functions, and core classes
*/
const char *ZTT_general();
const char* ZTT_general();
/**
* Test crypto using test vectors and simple scenarios
@@ -65,17 +65,17 @@ const char *ZTT_general();
* This is not an absolutely exhaustive test, just a sanity check to make sure
* crypto routines are basically working.
*/
const char *ZTT_crypto();
const char* ZTT_crypto();
/**
* Run benchmarks of cryptographic routines and common constructions
*/
const char *ZTT_benchmarkCrypto();
const char* ZTT_benchmarkCrypto();
#ifdef __cplusplus
}
#endif
#endif // ZT_ENABLE_TESTS
#endif // ZT_ENABLE_TESTS
#endif
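The three entry points above each return a const char pointer; assuming (the header does not say so) the common convention that a null return means success and a non-null return is a human-readable failure description, a host could drive them like this:

#include <cstdio>
// Tests.h above would supply the ZTT_* declarations in a real build.

// Hypothetical driver; the nullptr-on-success convention is an assumption.
int runSelfTests()
{
    const char* err = ZTT_general();
    if (err == nullptr)
        err = ZTT_crypto();
    if (err != nullptr) {
        std::fprintf(stderr, "self-test failed: %s\n", err);
        return 1;
    }
    return 0;
}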

View file

@@ -16,14 +16,14 @@
#include "Constants.hpp"
#include "Containers.hpp"
#include "SharedPtr.hpp"
#include "Network.hpp"
#include "SharedPtr.hpp"
#include "Spinlock.hpp"
// The number of buckets must be a power of two.
#define ZT_TINYMAP_BUCKETS 1024
#define ZT_TINYMAP_BUCKETS_MASK (ZT_TINYMAP_BUCKETS - 1)
#define ZT_TINYMAP_BUCKETS_MASK (ZT_TINYMAP_BUCKETS - 1)
#define ZT_TINYMAP_LOCKED_POINTER (~((uintptr_t)0))
namespace ZeroTier {
@@ -36,117 +36,129 @@ namespace ZeroTier {
* fast lookup, with lookups sometimes requiring only a few instructions. It
* uses a "lock free" (actually pointer-as-spinlock) design.
*/
template< typename V >
class TinyMap
{
private:
typedef Vector< std::pair< uint64_t, V > > EV;
template <typename V> class TinyMap {
private:
typedef Vector<std::pair<uint64_t, V> > EV;
public:
ZT_INLINE TinyMap()
{}
public:
ZT_INLINE TinyMap()
{
}
ZT_INLINE ~TinyMap()
{ this->clear(); }
ZT_INLINE ~TinyMap()
{
this->clear();
}
ZT_INLINE void clear()
{
for(unsigned int i=0; i < ZT_TINYMAP_BUCKETS; ++i) {
for(;;) {
const uintptr_t vptr = m_buckets[i].exchange(ZT_TINYMAP_LOCKED_POINTER, std::memory_order_acquire);
if (likely(vptr != ZT_TINYMAP_LOCKED_POINTER)) {
if (vptr != 0)
delete reinterpret_cast<EV *>(vptr);
m_buckets[i].store(0, std::memory_order_release);
break;
} else {
Spinlock::pause();
}
}
}
}
ZT_INLINE void clear()
{
for (unsigned int i = 0; i < ZT_TINYMAP_BUCKETS; ++i) {
for (;;) {
const uintptr_t vptr = m_buckets[i].exchange(ZT_TINYMAP_LOCKED_POINTER, std::memory_order_acquire);
if (likely(vptr != ZT_TINYMAP_LOCKED_POINTER)) {
if (vptr != 0)
delete reinterpret_cast<EV*>(vptr);
m_buckets[i].store(0, std::memory_order_release);
break;
}
else {
Spinlock::pause();
}
}
}
}
ZT_INLINE V get(const uint64_t key) noexcept
{
V tmp;
std::atomic<uintptr_t> &bucket = m_buckets[(key ^ (key >> 32)) & ZT_TINYMAP_BUCKETS_MASK];
for(;;) {
const uintptr_t vptr = bucket.exchange(ZT_TINYMAP_LOCKED_POINTER, std::memory_order_acquire);
if (likely(vptr != ZT_TINYMAP_LOCKED_POINTER)) {
if (likely(vptr != 0)) {
for(typename EV::const_iterator n(reinterpret_cast<const EV *>(vptr)->begin()); n != reinterpret_cast<const EV *>(vptr)->end(); ++n) {
if (likely(n->first == key)) {
tmp = n->second;
break;
}
}
}
bucket.store(vptr, std::memory_order_release);
return tmp;
} else {
Spinlock::pause();
}
}
}
ZT_INLINE V get(const uint64_t key) noexcept
{
V tmp;
std::atomic<uintptr_t>& bucket = m_buckets[(key ^ (key >> 32)) & ZT_TINYMAP_BUCKETS_MASK];
for (;;) {
const uintptr_t vptr = bucket.exchange(ZT_TINYMAP_LOCKED_POINTER, std::memory_order_acquire);
if (likely(vptr != ZT_TINYMAP_LOCKED_POINTER)) {
if (likely(vptr != 0)) {
for (typename EV::const_iterator n(reinterpret_cast<const EV*>(vptr)->begin());
n != reinterpret_cast<const EV*>(vptr)->end();
++n) {
if (likely(n->first == key)) {
tmp = n->second;
break;
}
}
}
bucket.store(vptr, std::memory_order_release);
return tmp;
}
else {
Spinlock::pause();
}
}
}
ZT_INLINE void set(const uint64_t key, const V &value)
{
std::atomic<uintptr_t> &bucket = m_buckets[(key ^ (key >> 32)) & ZT_TINYMAP_BUCKETS_MASK];
for(;;) {
uintptr_t vptr = bucket.exchange(ZT_TINYMAP_LOCKED_POINTER, std::memory_order_acquire);
if (likely(vptr != ZT_TINYMAP_LOCKED_POINTER)) {
if (vptr == 0) {
vptr = reinterpret_cast<uintptr_t>(new EV());
} else {
for (typename EV::iterator n(reinterpret_cast<EV *>(vptr)->begin()); n != reinterpret_cast<EV *>(vptr)->end(); ++n) {
if (n->first == key) {
n->second = value;
bucket.store(vptr, std::memory_order_release);
return;
}
}
}
reinterpret_cast<EV *>(vptr)->push_back(std::pair< uint64_t, V >(key, value));
bucket.store(vptr, std::memory_order_release);
return;
} else {
Spinlock::pause();
}
}
}
ZT_INLINE void set(const uint64_t key, const V& value)
{
std::atomic<uintptr_t>& bucket = m_buckets[(key ^ (key >> 32)) & ZT_TINYMAP_BUCKETS_MASK];
for (;;) {
uintptr_t vptr = bucket.exchange(ZT_TINYMAP_LOCKED_POINTER, std::memory_order_acquire);
if (likely(vptr != ZT_TINYMAP_LOCKED_POINTER)) {
if (vptr == 0) {
vptr = reinterpret_cast<uintptr_t>(new EV());
}
else {
for (typename EV::iterator n(reinterpret_cast<EV*>(vptr)->begin());
n != reinterpret_cast<EV*>(vptr)->end();
++n) {
if (n->first == key) {
n->second = value;
bucket.store(vptr, std::memory_order_release);
return;
}
}
}
reinterpret_cast<EV*>(vptr)->push_back(std::pair<uint64_t, V>(key, value));
bucket.store(vptr, std::memory_order_release);
return;
}
else {
Spinlock::pause();
}
}
}
ZT_INLINE void erase(const uint64_t key)
{
std::atomic<uintptr_t> &bucket = m_buckets[(key ^ (key >> 32)) & ZT_TINYMAP_BUCKETS_MASK];
for(;;) {
uintptr_t vptr = bucket.exchange(ZT_TINYMAP_LOCKED_POINTER, std::memory_order_acquire);
if (likely(vptr != ZT_TINYMAP_LOCKED_POINTER)) {
if (likely(vptr != 0)) {
for (typename EV::iterator n(reinterpret_cast<EV *>(vptr)->begin()); n != reinterpret_cast<EV *>(vptr)->end(); ++n) {
if (n->first == key) {
reinterpret_cast<EV *>(vptr)->erase(n);
break;
}
}
if (reinterpret_cast<EV *>(vptr)->empty()) {
delete reinterpret_cast<EV *>(vptr);
vptr = 0;
}
}
bucket.store(vptr, std::memory_order_release);
return;
} else {
Spinlock::pause();
}
}
}
ZT_INLINE void erase(const uint64_t key)
{
std::atomic<uintptr_t>& bucket = m_buckets[(key ^ (key >> 32)) & ZT_TINYMAP_BUCKETS_MASK];
for (;;) {
uintptr_t vptr = bucket.exchange(ZT_TINYMAP_LOCKED_POINTER, std::memory_order_acquire);
if (likely(vptr != ZT_TINYMAP_LOCKED_POINTER)) {
if (likely(vptr != 0)) {
for (typename EV::iterator n(reinterpret_cast<EV*>(vptr)->begin());
n != reinterpret_cast<EV*>(vptr)->end();
++n) {
if (n->first == key) {
reinterpret_cast<EV*>(vptr)->erase(n);
break;
}
}
if (reinterpret_cast<EV*>(vptr)->empty()) {
delete reinterpret_cast<EV*>(vptr);
vptr = 0;
}
}
bucket.store(vptr, std::memory_order_release);
return;
}
else {
Spinlock::pause();
}
}
}
private:
std::atomic<uintptr_t> m_buckets[ZT_TINYMAP_BUCKETS];
private:
std::atomic<uintptr_t> m_buckets[ZT_TINYMAP_BUCKETS];
};
static_assert((ZT_TINYMAP_BUCKETS & (ZT_TINYMAP_BUCKETS - 1)) == 0, "ZT_TINYMAP_BUCKETS is not a power of two");
} // namespace ZeroTier
} // namespace ZeroTier
#endif
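The "pointer as spinlock" trick above means each bucket's atomic value doubles as its own lock: exchanging in the all-ones sentinel acquires the bucket (the previous pointer is handed back to the caller), and storing a real pointer or zero back releases it. A reduced sketch with hypothetical names, including the same high/low fold used to pick a bucket:

#include <atomic>
#include <cstdint>

constexpr uintptr_t kLockedSentinel = ~((uintptr_t)0);   // plays the role of ZT_TINYMAP_LOCKED_POINTER
constexpr unsigned int kBuckets = 1024;                   // must be a power of two

inline unsigned int bucketIndex(const uint64_t key) noexcept
{
    return (unsigned int)((key ^ (key >> 32)) & (kBuckets - 1));
}

// Acquire: spin until we swap in the sentinel and observe a non-sentinel value.
inline uintptr_t bucketAcquire(std::atomic<uintptr_t>& bucket) noexcept
{
    for (;;) {
        const uintptr_t prev = bucket.exchange(kLockedSentinel, std::memory_order_acquire);
        if (prev != kLockedSentinel)
            return prev;   // we now own the bucket; prev is the old pointer (possibly 0)
        // another thread holds the bucket; a real implementation would pause here
    }
}

// Release: publish the (possibly updated) pointer back into the bucket.
inline void bucketRelease(std::atomic<uintptr_t>& bucket, const uintptr_t value) noexcept
{
    bucket.store(value, std::memory_order_release);
}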

View file

@@ -12,244 +12,255 @@
/****/
#include "Topology.hpp"
#include "Defaults.hpp"
#include "TrustStore.hpp"
#include "Locator.hpp"
#include "TrustStore.hpp"
namespace ZeroTier {
Topology::Topology(const Context &ctx, const CallContext &cc) :
m_ctx(ctx)
{}
SharedPtr< Peer > Topology::add(const CallContext &cc, const SharedPtr< Peer > &peer)
Topology::Topology(const Context& ctx, const CallContext& cc) : m_ctx(ctx)
{
RWMutex::Lock _l(m_peers_l);
SharedPtr< Peer > &hp = m_peers[peer->address()];
if (hp)
return hp;
m_loadCached(cc, peer->address(), hp);
if (hp)
return hp;
hp = peer;
return peer;
}
void Topology::allPeers(Vector< SharedPtr< Peer > > &allPeers, Vector< SharedPtr< Peer > > &rootPeers) const
SharedPtr<Peer> Topology::add(const CallContext& cc, const SharedPtr<Peer>& peer)
{
allPeers.clear();
{
RWMutex::RLock l(m_peers_l);
allPeers.reserve(m_peers.size());
for (Map< Address, SharedPtr< Peer > >::const_iterator i(m_peers.begin()); i != m_peers.end(); ++i)
allPeers.push_back(i->second);
}
{
Mutex::Lock l(m_roots_l);
rootPeers = m_roots;
}
RWMutex::Lock _l(m_peers_l);
SharedPtr<Peer>& hp = m_peers[peer->address()];
if (hp)
return hp;
m_loadCached(cc, peer->address(), hp);
if (hp)
return hp;
hp = peer;
return peer;
}
void Topology::doPeriodicTasks(const CallContext &cc)
void Topology::allPeers(Vector<SharedPtr<Peer> >& allPeers, Vector<SharedPtr<Peer> >& rootPeers) const
{
// Get a list of root peer pointer addresses for filtering during peer cleanup.
Vector< uintptr_t > rootLookup;
{
Mutex::Lock l(m_roots_l);
m_rankRoots();
rootLookup.reserve(m_roots.size());
for (Vector< SharedPtr< Peer > >::const_iterator r(m_roots.begin()); r != m_roots.end(); ++r)
rootLookup.push_back((uintptr_t)r->ptr());
}
std::sort(rootLookup.begin(), rootLookup.end());
// Cleaning of peers and paths uses a two pass method to avoid write locking
// m_peers or m_paths for any significant amount of time. This avoids pauses
// on nodes with large numbers of peers or paths.
{
Vector< Address > toDelete;
{
RWMutex::RLock l1(m_peers_l);
for (Map< Address, SharedPtr< Peer > >::iterator i(m_peers.begin()); i != m_peers.end(); ++i) {
// TODO: also delete if the peer has not exchanged meaningful communication in a while, such as a network frame or non-trivial control packet.
if (((cc.ticks - i->second->lastReceive()) > ZT_PEER_ALIVE_TIMEOUT) && (!std::binary_search(rootLookup.begin(), rootLookup.end(), reinterpret_cast<uintptr_t>(i->second.ptr()))))
toDelete.push_back(i->first);
}
}
if (!toDelete.empty()) {
ZT_SPEW("garbage collecting %u offline or stale peer objects", (unsigned int)toDelete.size());
for (Vector< Address >::iterator i(toDelete.begin()); i != toDelete.end(); ++i) {
SharedPtr< Peer > toSave;
{
RWMutex::Lock l1(m_peers_l);
const Map< Address, SharedPtr< Peer > >::iterator p(m_peers.find(*i));
if (p != m_peers.end()) {
p->second.swap(toSave);
m_peers.erase(p);
}
}
if (toSave)
toSave->save(m_ctx, cc);
}
}
}
{
Vector< Path * > toDelete;
{
RWMutex::Lock l1(m_paths_l);
for (Map< Path::Key, SharedPtr< Path > >::iterator i(m_paths.begin()); i != m_paths.end();) {
Path *const d = i->second.weakGC();
if (likely(d == nullptr)) {
++i;
} else {
m_paths.erase(i++);
try {
toDelete.push_back(d);
} catch (...) {
delete d;
}
}
}
}
if (!toDelete.empty()) {
for (Vector< Path * >::iterator i(toDelete.begin()); i != toDelete.end(); ++i)
delete *i;
ZT_SPEW("garbage collected %u orphaned paths", (unsigned int)toDelete.size());
}
}
allPeers.clear();
{
RWMutex::RLock l(m_peers_l);
allPeers.reserve(m_peers.size());
for (Map<Address, SharedPtr<Peer> >::const_iterator i(m_peers.begin()); i != m_peers.end(); ++i)
allPeers.push_back(i->second);
}
{
Mutex::Lock l(m_roots_l);
rootPeers = m_roots;
}
}
void Topology::trustStoreChanged(const CallContext &cc)
void Topology::doPeriodicTasks(const CallContext& cc)
{
Map< Identity, SharedPtr< const Locator > > roots(m_ctx.ts->roots());
// Get a list of root peer pointer addresses for filtering during peer cleanup.
Vector<uintptr_t> rootLookup;
{
Mutex::Lock l(m_roots_l);
m_rankRoots();
rootLookup.reserve(m_roots.size());
for (Vector<SharedPtr<Peer> >::const_iterator r(m_roots.begin()); r != m_roots.end(); ++r)
rootLookup.push_back((uintptr_t)r->ptr());
}
std::sort(rootLookup.begin(), rootLookup.end());
Vector< SharedPtr< Peer > > newRootList;
newRootList.reserve(roots.size());
// Cleaning of peers and paths uses a two pass method to avoid write locking
// m_peers or m_paths for any significant amount of time. This avoids pauses
// on nodes with large numbers of peers or paths.
{
Vector<Address> toDelete;
{
RWMutex::RLock l1(m_peers_l);
for (Map<Address, SharedPtr<Peer> >::iterator i(m_peers.begin()); i != m_peers.end(); ++i) {
// TODO: also delete if the peer has not exchanged meaningful communication in a while, such as a
// network frame or non-trivial control packet.
if (((cc.ticks - i->second->lastReceive()) > ZT_PEER_ALIVE_TIMEOUT)
&& (! std::binary_search(
rootLookup.begin(),
rootLookup.end(),
reinterpret_cast<uintptr_t>(i->second.ptr()))))
toDelete.push_back(i->first);
}
}
if (! toDelete.empty()) {
ZT_SPEW("garbage collecting %u offline or stale peer objects", (unsigned int)toDelete.size());
for (Vector<Address>::iterator i(toDelete.begin()); i != toDelete.end(); ++i) {
SharedPtr<Peer> toSave;
{
RWMutex::Lock l1(m_peers_l);
const Map<Address, SharedPtr<Peer> >::iterator p(m_peers.find(*i));
if (p != m_peers.end()) {
p->second.swap(toSave);
m_peers.erase(p);
}
}
if (toSave)
toSave->save(m_ctx, cc);
}
}
}
for (Map< Identity, SharedPtr< const Locator > >::const_iterator r(roots.begin()); r != roots.end(); ++r) {
SharedPtr< Peer > root(this->peer(cc, r->first.address(), true));
if (!root) {
root.set(new Peer());
if (root->init(m_ctx, cc, r->first)) {
root = this->add(cc, root);
} else {
root.zero();
}
}
if (root) {
newRootList.push_back(root);
if (r->second)
root->setLocator(r->second, true);
}
}
{
Mutex::Lock l(m_roots_l);
m_roots.swap(newRootList);
m_rankRoots();
}
{
Vector<Path*> toDelete;
{
RWMutex::Lock l1(m_paths_l);
for (Map<Path::Key, SharedPtr<Path> >::iterator i(m_paths.begin()); i != m_paths.end();) {
Path* const d = i->second.weakGC();
if (likely(d == nullptr)) {
++i;
}
else {
m_paths.erase(i++);
try {
toDelete.push_back(d);
}
catch (...) {
delete d;
}
}
}
}
if (! toDelete.empty()) {
for (Vector<Path*>::iterator i(toDelete.begin()); i != toDelete.end(); ++i)
delete *i;
ZT_SPEW("garbage collected %u orphaned paths", (unsigned int)toDelete.size());
}
}
}
void Topology::saveAll(const CallContext &cc)
void Topology::trustStoreChanged(const CallContext& cc)
{
RWMutex::RLock l(m_peers_l);
for (Map< Address, SharedPtr< Peer > >::iterator i(m_peers.begin()); i != m_peers.end(); ++i)
i->second->save(m_ctx, cc);
Map<Identity, SharedPtr<const Locator> > roots(m_ctx.ts->roots());
Vector<SharedPtr<Peer> > newRootList;
newRootList.reserve(roots.size());
for (Map<Identity, SharedPtr<const Locator> >::const_iterator r(roots.begin()); r != roots.end(); ++r) {
SharedPtr<Peer> root(this->peer(cc, r->first.address(), true));
if (! root) {
root.set(new Peer());
if (root->init(m_ctx, cc, r->first)) {
root = this->add(cc, root);
}
else {
root.zero();
}
}
if (root) {
newRootList.push_back(root);
if (r->second)
root->setLocator(r->second, true);
}
}
{
Mutex::Lock l(m_roots_l);
m_roots.swap(newRootList);
m_rankRoots();
}
}
struct p_RootRankingComparisonOperator
void Topology::saveAll(const CallContext& cc)
{
ZT_INLINE bool operator()(const SharedPtr< Peer > &a, const SharedPtr< Peer > &b) const noexcept
{
// Sort roots first in order of which root has spoken most recently, but
// only at a resolution of ZT_PATH_KEEPALIVE_PERIOD/2 units of time. This
// means that living roots that seem responsive are ranked the same. Then
// they're sorted in descending order of latency so that the apparently
// fastest root is ranked first.
const int64_t alr = a->lastReceive() / (ZT_PATH_KEEPALIVE_PERIOD / 2);
const int64_t blr = b->lastReceive() / (ZT_PATH_KEEPALIVE_PERIOD / 2);
if (alr < blr) {
return true;
} else if (blr == alr) {
const int bb = b->latency();
if (bb < 0)
return true;
return bb < a->latency();
}
return false;
}
RWMutex::RLock l(m_peers_l);
for (Map<Address, SharedPtr<Peer> >::iterator i(m_peers.begin()); i != m_peers.end(); ++i)
i->second->save(m_ctx, cc);
}
struct p_RootRankingComparisonOperator {
ZT_INLINE bool operator()(const SharedPtr<Peer>& a, const SharedPtr<Peer>& b) const noexcept
{
// Sort roots first in order of which root has spoken most recently, but
// only at a resolution of ZT_PATH_KEEPALIVE_PERIOD/2 units of time. This
// means that living roots that seem responsive are ranked the same. Then
// they're sorted in descending order of latency so that the apparently
// fastest root is ranked first.
const int64_t alr = a->lastReceive() / (ZT_PATH_KEEPALIVE_PERIOD / 2);
const int64_t blr = b->lastReceive() / (ZT_PATH_KEEPALIVE_PERIOD / 2);
if (alr < blr) {
return true;
}
else if (blr == alr) {
const int bb = b->latency();
if (bb < 0)
return true;
return bb < a->latency();
}
return false;
}
};
void Topology::m_rankRoots()
{
// assumes m_roots is locked
if (unlikely(m_roots.empty())) {
l_bestRoot.lock();
m_bestRoot.zero();
l_bestRoot.unlock();
} else {
std::sort(m_roots.begin(), m_roots.end(), p_RootRankingComparisonOperator());
l_bestRoot.lock();
m_bestRoot = m_roots.front();
l_bestRoot.unlock();
}
// assumes m_roots is locked
if (unlikely(m_roots.empty())) {
l_bestRoot.lock();
m_bestRoot.zero();
l_bestRoot.unlock();
}
else {
std::sort(m_roots.begin(), m_roots.end(), p_RootRankingComparisonOperator());
l_bestRoot.lock();
m_bestRoot = m_roots.front();
l_bestRoot.unlock();
}
}
void Topology::m_loadCached(const CallContext &cc, const Address &zta, SharedPtr< Peer > &peer)
void Topology::m_loadCached(const CallContext& cc, const Address& zta, SharedPtr<Peer>& peer)
{
// does not require any locks to be held
// does not require any locks to be held
try {
uint64_t id[2];
id[0] = zta.toInt();
id[1] = 0;
Vector< uint8_t > data(m_ctx.store->get(cc, ZT_STATE_OBJECT_PEER, id, 1));
if (data.size() > 8) {
const uint8_t *d = data.data();
int dl = (int)data.size();
try {
uint64_t id[2];
id[0] = zta.toInt();
id[1] = 0;
Vector<uint8_t> data(m_ctx.store->get(cc, ZT_STATE_OBJECT_PEER, id, 1));
if (data.size() > 8) {
const uint8_t* d = data.data();
int dl = (int)data.size();
const int64_t ts = (int64_t)Utils::loadBigEndian< uint64_t >(d);
Peer *const p = new Peer();
int n = p->unmarshal(m_ctx, cc.ticks, d + 8, dl - 8);
if (n < 0) {
delete p;
return;
}
if ((cc.ticks - ts) < ZT_PEER_GLOBAL_TIMEOUT) {
// TODO: handle many peers, same address (?)
peer.set(p);
return;
}
}
} catch (...) {
peer.zero();
}
const int64_t ts = (int64_t)Utils::loadBigEndian<uint64_t>(d);
Peer* const p = new Peer();
int n = p->unmarshal(m_ctx, cc.ticks, d + 8, dl - 8);
if (n < 0) {
delete p;
return;
}
if ((cc.ticks - ts) < ZT_PEER_GLOBAL_TIMEOUT) {
// TODO: handle many peers, same address (?)
peer.set(p);
return;
}
}
}
catch (...) {
peer.zero();
}
}
SharedPtr< Peer > Topology::m_peerFromCached(const CallContext &cc, const Address &zta)
SharedPtr<Peer> Topology::m_peerFromCached(const CallContext& cc, const Address& zta)
{
SharedPtr< Peer > p;
m_loadCached(cc, zta, p);
if (p) {
RWMutex::Lock l(m_peers_l);
SharedPtr< Peer > &hp = m_peers[zta];
if (hp)
return hp;
hp = p;
}
return p;
SharedPtr<Peer> p;
m_loadCached(cc, zta, p);
if (p) {
RWMutex::Lock l(m_peers_l);
SharedPtr<Peer>& hp = m_peers[zta];
if (hp)
return hp;
hp = p;
}
return p;
}
SharedPtr< Path > Topology::m_newPath(const int64_t l, const InetAddress &r, const Path::Key &k)
SharedPtr<Path> Topology::m_newPath(const int64_t l, const InetAddress& r, const Path::Key& k)
{
SharedPtr< Path > p(new Path(l, r));
RWMutex::Lock lck(m_paths_l);
SharedPtr< Path > &p2 = m_paths[k];
if (p2)
return p2;
p2 = p;
return p;
SharedPtr<Path> p(new Path(l, r));
RWMutex::Lock lck(m_paths_l);
SharedPtr<Path>& p2 = m_paths[k];
if (p2)
return p2;
p2 = p;
return p;
}
} // namespace ZeroTier
} // namespace ZeroTier
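doPeriodicTasks() above cleans peers with a two-pass pattern: candidates are collected under a shared (read) lock so traffic keeps flowing, then each one is erased under a brief exclusive lock. A generic sketch of the same idea (hypothetical types, C++17, not ZeroTier code):

#include <map>
#include <mutex>
#include <shared_mutex>
#include <vector>

template <typename K, typename V, typename Pred>
void twoPassErase(std::map<K, V>& table, std::shared_mutex& lock, Pred shouldErase)
{
    std::vector<K> toDelete;
    {
        std::shared_lock<std::shared_mutex> rl(lock);   // cheap pass: readers are not blocked
        for (const auto& kv : table) {
            if (shouldErase(kv.second))
                toDelete.push_back(kv.first);
        }
    }
    for (const K& k : toDelete) {
        std::unique_lock<std::shared_mutex> wl(lock);   // short exclusive section per key
        table.erase(k);
    }
}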

View file

@@ -14,21 +14,21 @@
#ifndef ZT_TOPOLOGY_HPP
#define ZT_TOPOLOGY_HPP
#include "Constants.hpp"
#include "Address.hpp"
#include "Identity.hpp"
#include "Peer.hpp"
#include "Path.hpp"
#include "Mutex.hpp"
#include "InetAddress.hpp"
#include "SharedPtr.hpp"
#include "ScopedPtr.hpp"
#include "Fingerprint.hpp"
#include "FCV.hpp"
#include "Certificate.hpp"
#include "Containers.hpp"
#include "Spinlock.hpp"
#include "CallContext.hpp"
#include "Certificate.hpp"
#include "Constants.hpp"
#include "Containers.hpp"
#include "FCV.hpp"
#include "Fingerprint.hpp"
#include "Identity.hpp"
#include "InetAddress.hpp"
#include "Mutex.hpp"
#include "Path.hpp"
#include "Peer.hpp"
#include "ScopedPtr.hpp"
#include "SharedPtr.hpp"
#include "Spinlock.hpp"
namespace ZeroTier {
@@ -37,136 +37,135 @@ class Context;
/**
* Database of network topology
*/
class Topology
{
public:
Topology(const Context &ctx, const CallContext &cc);
class Topology {
public:
Topology(const Context& ctx, const CallContext& cc);
/**
* Add peer to database
*
* If there's already a peer with this address, the existing peer is
* returned. Otherwise the new peer is added and returned.
*
* @param peer Peer to add
* @return New or existing peer
*/
SharedPtr< Peer > add(const CallContext &cc, const SharedPtr< Peer > &peer);
/**
* Add peer to database
*
* If there's already a peer with this address, the existing peer is
* returned. Otherwise the new peer is added and returned.
*
* @param peer Peer to add
* @return New or existing peer
*/
SharedPtr<Peer> add(const CallContext& cc, const SharedPtr<Peer>& peer);
/**
* Get a peer from its address
*
* @param zta ZeroTier address of peer
* @param loadFromCached If false do not load from cache if not in memory (default: true)
* @return Peer or NULL if not found
*/
ZT_INLINE SharedPtr< Peer > peer(const CallContext &cc, const Address &zta, const bool loadFromCached = true)
{
{
RWMutex::RLock l(m_peers_l);
Map< Address, SharedPtr< Peer > >::const_iterator ap(m_peers.find(zta));
if (likely(ap != m_peers.end()))
return ap->second;
}
if (loadFromCached)
return m_peerFromCached(cc, zta);
return SharedPtr< Peer >();
}
/**
* Get a peer from its address
*
* @param zta ZeroTier address of peer
* @param loadFromCached If false do not load from cache if not in memory (default: true)
* @return Peer or NULL if not found
*/
ZT_INLINE SharedPtr<Peer> peer(const CallContext& cc, const Address& zta, const bool loadFromCached = true)
{
{
RWMutex::RLock l(m_peers_l);
Map<Address, SharedPtr<Peer> >::const_iterator ap(m_peers.find(zta));
if (likely(ap != m_peers.end()))
return ap->second;
}
if (loadFromCached)
return m_peerFromCached(cc, zta);
return SharedPtr<Peer>();
}
/**
* Get a Path object for a given local and remote physical address, creating if needed
*
* @param l Local socket
* @param r Remote address
* @return Pointer to canonicalized Path object or NULL on error
*/
ZT_INLINE SharedPtr< Path > path(const int64_t l, const InetAddress &r)
{
const Path::Key k(r);
{
RWMutex::RLock lck(m_paths_l);
Map< Path::Key, SharedPtr< Path > >::const_iterator p(m_paths.find(k));
if (likely(p != m_paths.end()))
return p->second;
}
return m_newPath(l, r, k);
}
/**
* Get a Path object for a given local and remote physical address, creating if needed
*
* @param l Local socket
* @param r Remote address
* @return Pointer to canonicalized Path object or NULL on error
*/
ZT_INLINE SharedPtr<Path> path(const int64_t l, const InetAddress& r)
{
const Path::Key k(r);
{
RWMutex::RLock lck(m_paths_l);
Map<Path::Key, SharedPtr<Path> >::const_iterator p(m_paths.find(k));
if (likely(p != m_paths.end()))
return p->second;
}
return m_newPath(l, r, k);
}
/**
* Get current best root
*
* @return Root peer or nullptr if none
*/
ZT_INLINE SharedPtr< Peer > root()
{
l_bestRoot.lock(); // spinlock
SharedPtr< Peer > r(m_bestRoot);
l_bestRoot.unlock();
return r;
}
/**
* Get current best root
*
* @return Root peer or nullptr if none
*/
ZT_INLINE SharedPtr<Peer> root()
{
l_bestRoot.lock(); // spinlock
SharedPtr<Peer> r(m_bestRoot);
l_bestRoot.unlock();
return r;
}
/**
* Get current best root by setting a result parameter
*
* @param root Set to best root or nullptr if none
*/
ZT_INLINE void root(SharedPtr< Peer > &root)
{
l_bestRoot.lock(); // spinlock
root = m_bestRoot;
l_bestRoot.unlock();
}
/**
* Get current best root by setting a result parameter
*
* @param root Set to best root or nullptr if none
*/
ZT_INLINE void root(SharedPtr<Peer>& root)
{
l_bestRoot.lock(); // spinlock
root = m_bestRoot;
l_bestRoot.unlock();
}
/**
* @param allPeers Vector to fill with all current peers
* @param rootPeers Vector to fill with peers that are roots
*/
void allPeers(Vector< SharedPtr< Peer > > &allPeers, Vector< SharedPtr< Peer > > &rootPeers) const;
/**
* @param allPeers Vector to fill with all current peers
* @param rootPeers Vector to fill with peers that are roots
*/
void allPeers(Vector<SharedPtr<Peer> >& allPeers, Vector<SharedPtr<Peer> >& rootPeers) const;
/**
* Do periodic tasks such as database cleanup, cert cleanup, root ranking, etc.
*/
void doPeriodicTasks(const CallContext &cc);
/**
* Do periodic tasks such as database cleanup, cert cleanup, root ranking, etc.
*/
void doPeriodicTasks(const CallContext& cc);
/**
* Rank root servers in descending order of quality
*/
ZT_INLINE void rankRoots(const CallContext &cc)
{
Mutex::Lock l(m_roots_l);
m_rankRoots();
}
/**
* Rank root servers in descending order of quality
*/
ZT_INLINE void rankRoots(const CallContext& cc)
{
Mutex::Lock l(m_roots_l);
m_rankRoots();
}
/**
* Perform internal updates based on changes in the trust store
*/
void trustStoreChanged(const CallContext &cc);
/**
* Perform internal updates based on changes in the trust store
*/
void trustStoreChanged(const CallContext& cc);
/**
* Save all currently known peers to data store
*/
void saveAll(const CallContext &cc);
/**
* Save all currently known peers to data store
*/
void saveAll(const CallContext& cc);
private:
void m_rankRoots();
void m_loadCached(const CallContext &cc, const Address &zta, SharedPtr< Peer > &peer);
SharedPtr< Peer > m_peerFromCached(const CallContext &cc, const Address &zta);
SharedPtr< Path > m_newPath(int64_t l, const InetAddress &r, const Path::Key &k);
private:
void m_rankRoots();
void m_loadCached(const CallContext& cc, const Address& zta, SharedPtr<Peer>& peer);
SharedPtr<Peer> m_peerFromCached(const CallContext& cc, const Address& zta);
SharedPtr<Path> m_newPath(int64_t l, const InetAddress& r, const Path::Key& k);
const Context &m_ctx;
const Context& m_ctx;
Vector< SharedPtr< Peer > > m_roots;
Map< Address, SharedPtr< Peer > > m_peers;
Map< Path::Key, SharedPtr< Path > > m_paths;
Vector<SharedPtr<Peer> > m_roots;
Map<Address, SharedPtr<Peer> > m_peers;
Map<Path::Key, SharedPtr<Path> > m_paths;
RWMutex m_peers_l; // m_peers
RWMutex m_paths_l; // m_paths
Mutex m_roots_l; // m_roots
RWMutex m_peers_l; // m_peers
RWMutex m_paths_l; // m_paths
Mutex m_roots_l; // m_roots
SharedPtr< Peer > m_bestRoot;
Spinlock l_bestRoot;
SharedPtr<Peer> m_bestRoot;
Spinlock l_bestRoot;
};
} // namespace ZeroTier
} // namespace ZeroTier
#endif
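path() and m_newPath() above implement the usual canonicalization idiom: check under the read lock first, and only on a miss take the write lock and re-check before creating. A generic sketch with hypothetical types (C++17, not part of Topology):

#include <map>
#include <memory>
#include <mutex>
#include <shared_mutex>

template <typename K, typename V, typename Make>
std::shared_ptr<V> getOrCreate(std::map<K, std::shared_ptr<V> >& table, std::shared_mutex& lock, const K& key, Make make)
{
    {
        std::shared_lock<std::shared_mutex> rl(lock);
        const auto it = table.find(key);
        if (it != table.end())
            return it->second;
    }
    std::unique_lock<std::shared_mutex> wl(lock);
    std::shared_ptr<V>& slot = table[key];   // re-check under the write lock
    if (! slot)
        slot = make();
    return slot;
}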

View file

@@ -12,280 +12,285 @@
/****/
#include "Trace.hpp"
#include "Context.hpp"
#include "FCV.hpp"
#include "InetAddress.hpp"
#include "Node.hpp"
#include "Peer.hpp"
#include "InetAddress.hpp"
#include "FCV.hpp"
// NOTE: packet IDs are always handled in network byte order, so no need to convert them.
namespace ZeroTier {
Trace::Trace(const Context &ctx) :
m_ctx(ctx),
m_traceFlags(0)
{}
void Trace::unexpectedError(
const CallContext &cc,
uint32_t codeLocation,
const char *message,
...)
Trace::Trace(const Context& ctx) : m_ctx(ctx), m_traceFlags(0)
{
FCV< uint8_t, 4096 > buf;
Dictionary::append(buf, ZT_TRACE_FIELD_TYPE, ZT_TRACE_UNEXPECTED_ERROR);
Dictionary::append(buf, ZT_TRACE_FIELD_CODE_LOCATION, codeLocation);
Dictionary::append(buf, ZT_TRACE_FIELD_MESSAGE, message);
buf.push_back(0);
m_ctx.node->postEvent(cc.tPtr, ZT_EVENT_TRACE, buf.data());
}
void Trace::unexpectedError(const CallContext& cc, uint32_t codeLocation, const char* message, ...)
{
FCV<uint8_t, 4096> buf;
Dictionary::append(buf, ZT_TRACE_FIELD_TYPE, ZT_TRACE_UNEXPECTED_ERROR);
Dictionary::append(buf, ZT_TRACE_FIELD_CODE_LOCATION, codeLocation);
Dictionary::append(buf, ZT_TRACE_FIELD_MESSAGE, message);
buf.push_back(0);
m_ctx.node->postEvent(cc.tPtr, ZT_EVENT_TRACE, buf.data());
}
void Trace::m_resettingPathsInScope(
void *tPtr,
uint32_t codeLocation,
const Identity &reporter,
const InetAddress &from,
const InetAddress &oldExternal,
const InetAddress &newExternal,
ZT_InetAddress_IpScope scope)
void* tPtr,
uint32_t codeLocation,
const Identity& reporter,
const InetAddress& from,
const InetAddress& oldExternal,
const InetAddress& newExternal,
ZT_InetAddress_IpScope scope)
{
FCV< uint8_t, 4096 > buf;
Dictionary::append(buf, ZT_TRACE_FIELD_TYPE, ZT_TRACE_VL1_RESETTING_PATHS_IN_SCOPE);
Dictionary::append(buf, ZT_TRACE_FIELD_CODE_LOCATION, codeLocation);
if (reporter)
Dictionary::appendObject(buf, ZT_TRACE_FIELD_IDENTITY_FINGERPRINT, reporter.fingerprint());
if (from)
Dictionary::appendObject(buf, ZT_TRACE_FIELD_TRIGGER_FROM_ENDPOINT, Endpoint(from));
if (oldExternal)
Dictionary::appendObject(buf, ZT_TRACE_FIELD_OLD_ENDPOINT, Endpoint(oldExternal));
if (newExternal)
Dictionary::appendObject(buf, ZT_TRACE_FIELD_NEW_ENDPOINT, Endpoint(newExternal));
Dictionary::append(buf, ZT_TRACE_FIELD_RESET_ADDRESS_SCOPE, scope);
buf.push_back(0);
m_ctx.node->postEvent(tPtr, ZT_EVENT_TRACE, buf.data());
FCV<uint8_t, 4096> buf;
Dictionary::append(buf, ZT_TRACE_FIELD_TYPE, ZT_TRACE_VL1_RESETTING_PATHS_IN_SCOPE);
Dictionary::append(buf, ZT_TRACE_FIELD_CODE_LOCATION, codeLocation);
if (reporter)
Dictionary::appendObject(buf, ZT_TRACE_FIELD_IDENTITY_FINGERPRINT, reporter.fingerprint());
if (from)
Dictionary::appendObject(buf, ZT_TRACE_FIELD_TRIGGER_FROM_ENDPOINT, Endpoint(from));
if (oldExternal)
Dictionary::appendObject(buf, ZT_TRACE_FIELD_OLD_ENDPOINT, Endpoint(oldExternal));
if (newExternal)
Dictionary::appendObject(buf, ZT_TRACE_FIELD_NEW_ENDPOINT, Endpoint(newExternal));
Dictionary::append(buf, ZT_TRACE_FIELD_RESET_ADDRESS_SCOPE, scope);
buf.push_back(0);
m_ctx.node->postEvent(tPtr, ZT_EVENT_TRACE, buf.data());
}
void Trace::m_tryingNewPath(
void *tPtr,
uint32_t codeLocation,
const Identity &trying,
const InetAddress &physicalAddress,
const InetAddress &triggerAddress,
uint64_t triggeringPacketId,
uint8_t triggeringPacketVerb,
const Identity &triggeringPeer)
void* tPtr,
uint32_t codeLocation,
const Identity& trying,
const InetAddress& physicalAddress,
const InetAddress& triggerAddress,
uint64_t triggeringPacketId,
uint8_t triggeringPacketVerb,
const Identity& triggeringPeer)
{
if ((trying)&&(physicalAddress)) {
FCV< uint8_t, 4096 > buf;
Dictionary::append(buf, ZT_TRACE_FIELD_TYPE, ZT_TRACE_VL1_TRYING_NEW_PATH);
Dictionary::append(buf, ZT_TRACE_FIELD_CODE_LOCATION, codeLocation);
Dictionary::appendObject(buf, ZT_TRACE_FIELD_IDENTITY_FINGERPRINT, trying.fingerprint());
Dictionary::appendObject(buf, ZT_TRACE_FIELD_ENDPOINT, physicalAddress);
if (triggerAddress)
Dictionary::appendObject(buf, ZT_TRACE_FIELD_TRIGGER_FROM_ENDPOINT, Endpoint(triggerAddress));
Dictionary::appendPacketId(buf, ZT_TRACE_FIELD_TRIGGER_FROM_PACKET_ID, triggeringPacketId);
Dictionary::append(buf, ZT_TRACE_FIELD_TRIGGER_FROM_PACKET_VERB, triggeringPacketVerb);
if (triggeringPeer)
Dictionary::appendObject(buf, ZT_TRACE_FIELD_TRIGGER_FROM_PEER_FINGERPRINT, triggeringPeer.fingerprint());
buf.push_back(0);
m_ctx.node->postEvent(tPtr, ZT_EVENT_TRACE, buf.data());
}
if ((trying) && (physicalAddress)) {
FCV<uint8_t, 4096> buf;
Dictionary::append(buf, ZT_TRACE_FIELD_TYPE, ZT_TRACE_VL1_TRYING_NEW_PATH);
Dictionary::append(buf, ZT_TRACE_FIELD_CODE_LOCATION, codeLocation);
Dictionary::appendObject(buf, ZT_TRACE_FIELD_IDENTITY_FINGERPRINT, trying.fingerprint());
Dictionary::appendObject(buf, ZT_TRACE_FIELD_ENDPOINT, physicalAddress);
if (triggerAddress)
Dictionary::appendObject(buf, ZT_TRACE_FIELD_TRIGGER_FROM_ENDPOINT, Endpoint(triggerAddress));
Dictionary::appendPacketId(buf, ZT_TRACE_FIELD_TRIGGER_FROM_PACKET_ID, triggeringPacketId);
Dictionary::append(buf, ZT_TRACE_FIELD_TRIGGER_FROM_PACKET_VERB, triggeringPacketVerb);
if (triggeringPeer)
Dictionary::appendObject(buf, ZT_TRACE_FIELD_TRIGGER_FROM_PEER_FINGERPRINT, triggeringPeer.fingerprint());
buf.push_back(0);
m_ctx.node->postEvent(tPtr, ZT_EVENT_TRACE, buf.data());
}
}
void Trace::m_learnedNewPath(
void *tPtr,
uint32_t codeLocation,
uint64_t packetId,
const Identity &peerIdentity,
const InetAddress &physicalAddress,
const InetAddress &replaced)
void* tPtr,
uint32_t codeLocation,
uint64_t packetId,
const Identity& peerIdentity,
const InetAddress& physicalAddress,
const InetAddress& replaced)
{
if (peerIdentity) {
FCV< uint8_t, 4096 > buf;
Dictionary::append(buf, ZT_TRACE_FIELD_TYPE, ZT_TRACE_VL1_LEARNED_NEW_PATH);
Dictionary::append(buf, ZT_TRACE_FIELD_CODE_LOCATION, codeLocation);
Dictionary::appendPacketId(buf, ZT_TRACE_FIELD_PACKET_ID, packetId);
Dictionary::appendObject(buf, ZT_TRACE_FIELD_IDENTITY_FINGERPRINT, peerIdentity.fingerprint());
if (physicalAddress)
Dictionary::appendObject(buf, ZT_TRACE_FIELD_ENDPOINT, Endpoint(physicalAddress));
if (replaced)
Dictionary::appendObject(buf, ZT_TRACE_FIELD_OLD_ENDPOINT, Endpoint(replaced));
buf.push_back(0);
m_ctx.node->postEvent(tPtr, ZT_EVENT_TRACE, buf.data());
}
if (peerIdentity) {
FCV<uint8_t, 4096> buf;
Dictionary::append(buf, ZT_TRACE_FIELD_TYPE, ZT_TRACE_VL1_LEARNED_NEW_PATH);
Dictionary::append(buf, ZT_TRACE_FIELD_CODE_LOCATION, codeLocation);
Dictionary::appendPacketId(buf, ZT_TRACE_FIELD_PACKET_ID, packetId);
Dictionary::appendObject(buf, ZT_TRACE_FIELD_IDENTITY_FINGERPRINT, peerIdentity.fingerprint());
if (physicalAddress)
Dictionary::appendObject(buf, ZT_TRACE_FIELD_ENDPOINT, Endpoint(physicalAddress));
if (replaced)
Dictionary::appendObject(buf, ZT_TRACE_FIELD_OLD_ENDPOINT, Endpoint(replaced));
buf.push_back(0);
m_ctx.node->postEvent(tPtr, ZT_EVENT_TRACE, buf.data());
}
}
void Trace::m_incomingPacketDropped(
void *tPtr,
uint32_t codeLocation,
uint64_t packetId,
uint64_t networkId,
const Identity &peerIdentity,
const InetAddress &physicalAddress,
uint8_t hops,
uint8_t verb,
ZT_TracePacketDropReason reason)
void* tPtr,
uint32_t codeLocation,
uint64_t packetId,
uint64_t networkId,
const Identity& peerIdentity,
const InetAddress& physicalAddress,
uint8_t hops,
uint8_t verb,
ZT_TracePacketDropReason reason)
{
FCV< uint8_t, 4096 > buf;
Dictionary::append(buf, ZT_TRACE_FIELD_TYPE, ZT_TRACE_VL1_INCOMING_PACKET_DROPPED);
Dictionary::append(buf, ZT_TRACE_FIELD_CODE_LOCATION, codeLocation);
Dictionary::appendPacketId(buf, ZT_TRACE_FIELD_PACKET_ID, packetId);
Dictionary::append(buf, ZT_TRACE_FIELD_NETWORK_ID, networkId);
if (peerIdentity)
Dictionary::appendObject(buf, ZT_TRACE_FIELD_IDENTITY_FINGERPRINT, peerIdentity.fingerprint());
if (physicalAddress)
Dictionary::append(buf, ZT_TRACE_FIELD_ENDPOINT, Endpoint(physicalAddress));
Dictionary::append(buf, ZT_TRACE_FIELD_PACKET_HOPS, hops);
Dictionary::append(buf, ZT_TRACE_FIELD_PACKET_VERB, verb);
Dictionary::append(buf, ZT_TRACE_FIELD_REASON, reason);
buf.push_back(0);
m_ctx.node->postEvent(tPtr, ZT_EVENT_TRACE, buf.data());
FCV<uint8_t, 4096> buf;
Dictionary::append(buf, ZT_TRACE_FIELD_TYPE, ZT_TRACE_VL1_INCOMING_PACKET_DROPPED);
Dictionary::append(buf, ZT_TRACE_FIELD_CODE_LOCATION, codeLocation);
Dictionary::appendPacketId(buf, ZT_TRACE_FIELD_PACKET_ID, packetId);
Dictionary::append(buf, ZT_TRACE_FIELD_NETWORK_ID, networkId);
if (peerIdentity)
Dictionary::appendObject(buf, ZT_TRACE_FIELD_IDENTITY_FINGERPRINT, peerIdentity.fingerprint());
if (physicalAddress)
Dictionary::append(buf, ZT_TRACE_FIELD_ENDPOINT, Endpoint(physicalAddress));
Dictionary::append(buf, ZT_TRACE_FIELD_PACKET_HOPS, hops);
Dictionary::append(buf, ZT_TRACE_FIELD_PACKET_VERB, verb);
Dictionary::append(buf, ZT_TRACE_FIELD_REASON, reason);
buf.push_back(0);
m_ctx.node->postEvent(tPtr, ZT_EVENT_TRACE, buf.data());
}
void Trace::m_outgoingNetworkFrameDropped(
void *tPtr,
uint32_t codeLocation,
uint64_t networkId,
const MAC &sourceMac,
const MAC &destMac,
uint16_t etherType,
uint16_t frameLength,
const uint8_t *frameData,
ZT_TraceFrameDropReason reason)
void* tPtr,
uint32_t codeLocation,
uint64_t networkId,
const MAC& sourceMac,
const MAC& destMac,
uint16_t etherType,
uint16_t frameLength,
const uint8_t* frameData,
ZT_TraceFrameDropReason reason)
{
FCV< uint8_t, 4096 > buf;
Dictionary::append(buf, ZT_TRACE_FIELD_TYPE, ZT_TRACE_VL1_INCOMING_PACKET_DROPPED);
Dictionary::append(buf, ZT_TRACE_FIELD_CODE_LOCATION, codeLocation);
Dictionary::append(buf, ZT_TRACE_FIELD_NETWORK_ID, networkId);
Dictionary::append(buf, ZT_TRACE_FIELD_SOURCE_MAC, sourceMac.toInt());
Dictionary::append(buf, ZT_TRACE_FIELD_DEST_MAC, destMac.toInt());
Dictionary::append(buf, ZT_TRACE_FIELD_ETHERTYPE, etherType);
Dictionary::append(buf, ZT_TRACE_FIELD_FRAME_LENGTH, frameLength);
if (frameData)
Dictionary::append(buf, ZT_TRACE_FIELD_FRAME_DATA, frameData, std::min((unsigned int)64, (unsigned int)frameLength));
Dictionary::append(buf, ZT_TRACE_FIELD_REASON, reason);
buf.push_back(0);
m_ctx.node->postEvent(tPtr, ZT_EVENT_TRACE, buf.data());
FCV<uint8_t, 4096> buf;
Dictionary::append(buf, ZT_TRACE_FIELD_TYPE, ZT_TRACE_VL1_INCOMING_PACKET_DROPPED);
Dictionary::append(buf, ZT_TRACE_FIELD_CODE_LOCATION, codeLocation);
Dictionary::append(buf, ZT_TRACE_FIELD_NETWORK_ID, networkId);
Dictionary::append(buf, ZT_TRACE_FIELD_SOURCE_MAC, sourceMac.toInt());
Dictionary::append(buf, ZT_TRACE_FIELD_DEST_MAC, destMac.toInt());
Dictionary::append(buf, ZT_TRACE_FIELD_ETHERTYPE, etherType);
Dictionary::append(buf, ZT_TRACE_FIELD_FRAME_LENGTH, frameLength);
if (frameData)
Dictionary::append(
buf,
ZT_TRACE_FIELD_FRAME_DATA,
frameData,
std::min((unsigned int)64, (unsigned int)frameLength));
Dictionary::append(buf, ZT_TRACE_FIELD_REASON, reason);
buf.push_back(0);
m_ctx.node->postEvent(tPtr, ZT_EVENT_TRACE, buf.data());
}
void Trace::m_incomingNetworkFrameDropped(
void *tPtr,
uint32_t codeLocation,
uint64_t networkId,
const MAC &sourceMac,
const MAC &destMac,
const uint16_t etherType,
const Identity &peerIdentity,
const InetAddress &physicalAddress,
uint8_t hops,
uint16_t frameLength,
const uint8_t *frameData,
uint8_t verb,
bool credentialRequestSent,
ZT_TraceFrameDropReason reason)
void* tPtr,
uint32_t codeLocation,
uint64_t networkId,
const MAC& sourceMac,
const MAC& destMac,
const uint16_t etherType,
const Identity& peerIdentity,
const InetAddress& physicalAddress,
uint8_t hops,
uint16_t frameLength,
const uint8_t* frameData,
uint8_t verb,
bool credentialRequestSent,
ZT_TraceFrameDropReason reason)
{
FCV< uint8_t, 4096 > buf;
Dictionary::append(buf, ZT_TRACE_FIELD_TYPE, ZT_TRACE_VL2_INCOMING_FRAME_DROPPED);
Dictionary::append(buf, ZT_TRACE_FIELD_CODE_LOCATION, codeLocation);
Dictionary::append(buf, ZT_TRACE_FIELD_NETWORK_ID, networkId);
Dictionary::append(buf, ZT_TRACE_FIELD_SOURCE_MAC, sourceMac.toInt());
Dictionary::append(buf, ZT_TRACE_FIELD_DEST_MAC, destMac.toInt());
Dictionary::append(buf, ZT_TRACE_FIELD_ETHERTYPE, etherType);
Dictionary::appendObject(buf, ZT_TRACE_FIELD_IDENTITY_FINGERPRINT, peerIdentity.fingerprint());
if (physicalAddress)
Dictionary::appendObject(buf, ZT_TRACE_FIELD_ENDPOINT, Endpoint(physicalAddress));
Dictionary::append(buf, ZT_TRACE_FIELD_PACKET_HOPS, hops);
Dictionary::append(buf, ZT_TRACE_FIELD_PACKET_VERB, verb);
Dictionary::append(buf, ZT_TRACE_FIELD_FRAME_LENGTH, frameLength);
if (frameData)
Dictionary::append(buf, ZT_TRACE_FIELD_FRAME_DATA, frameData, std::min((unsigned int)64, (unsigned int)frameLength));
Dictionary::append(buf, ZT_TRACE_FIELD_FLAG_CREDENTIAL_REQUEST_SENT, credentialRequestSent);
Dictionary::append(buf, ZT_TRACE_FIELD_REASON, reason);
buf.push_back(0);
m_ctx.node->postEvent(tPtr, ZT_EVENT_TRACE, buf.data());
FCV<uint8_t, 4096> buf;
Dictionary::append(buf, ZT_TRACE_FIELD_TYPE, ZT_TRACE_VL2_INCOMING_FRAME_DROPPED);
Dictionary::append(buf, ZT_TRACE_FIELD_CODE_LOCATION, codeLocation);
Dictionary::append(buf, ZT_TRACE_FIELD_NETWORK_ID, networkId);
Dictionary::append(buf, ZT_TRACE_FIELD_SOURCE_MAC, sourceMac.toInt());
Dictionary::append(buf, ZT_TRACE_FIELD_DEST_MAC, destMac.toInt());
Dictionary::append(buf, ZT_TRACE_FIELD_ETHERTYPE, etherType);
Dictionary::appendObject(buf, ZT_TRACE_FIELD_IDENTITY_FINGERPRINT, peerIdentity.fingerprint());
if (physicalAddress)
Dictionary::appendObject(buf, ZT_TRACE_FIELD_ENDPOINT, Endpoint(physicalAddress));
Dictionary::append(buf, ZT_TRACE_FIELD_PACKET_HOPS, hops);
Dictionary::append(buf, ZT_TRACE_FIELD_PACKET_VERB, verb);
Dictionary::append(buf, ZT_TRACE_FIELD_FRAME_LENGTH, frameLength);
if (frameData)
Dictionary::append(
buf,
ZT_TRACE_FIELD_FRAME_DATA,
frameData,
std::min((unsigned int)64, (unsigned int)frameLength));
Dictionary::append(buf, ZT_TRACE_FIELD_FLAG_CREDENTIAL_REQUEST_SENT, credentialRequestSent);
Dictionary::append(buf, ZT_TRACE_FIELD_REASON, reason);
buf.push_back(0);
m_ctx.node->postEvent(tPtr, ZT_EVENT_TRACE, buf.data());
}
void Trace::m_networkConfigRequestSent(
void *tPtr,
uint32_t codeLocation,
uint64_t networkId)
void Trace::m_networkConfigRequestSent(void* tPtr, uint32_t codeLocation, uint64_t networkId)
{
FCV< uint8_t, 4096 > buf;
Dictionary::append(buf, ZT_TRACE_FIELD_TYPE, ZT_TRACE_VL2_NETWORK_CONFIG_REQUESTED);
Dictionary::append(buf, ZT_TRACE_FIELD_CODE_LOCATION, codeLocation);
Dictionary::append(buf, ZT_TRACE_FIELD_NETWORK_ID, networkId);
buf.push_back(0);
m_ctx.node->postEvent(tPtr, ZT_EVENT_TRACE, buf.data());
FCV<uint8_t, 4096> buf;
Dictionary::append(buf, ZT_TRACE_FIELD_TYPE, ZT_TRACE_VL2_NETWORK_CONFIG_REQUESTED);
Dictionary::append(buf, ZT_TRACE_FIELD_CODE_LOCATION, codeLocation);
Dictionary::append(buf, ZT_TRACE_FIELD_NETWORK_ID, networkId);
buf.push_back(0);
m_ctx.node->postEvent(tPtr, ZT_EVENT_TRACE, buf.data());
}
void Trace::m_networkFilter(
void *tPtr,
uint32_t codeLocation,
uint64_t networkId,
const uint8_t *primaryRuleSetLog,
const uint8_t *matchingCapabilityRuleSetLog,
uint32_t matchingCapabilityId,
int64_t matchingCapabilityTimestamp,
const Address &source,
const Address &dest,
const MAC &sourceMac,
const MAC &destMac,
uint16_t frameLength,
const uint8_t *frameData,
uint16_t etherType,
uint16_t vlanId,
bool noTee,
bool inbound,
int accept)
void* tPtr,
uint32_t codeLocation,
uint64_t networkId,
const uint8_t* primaryRuleSetLog,
const uint8_t* matchingCapabilityRuleSetLog,
uint32_t matchingCapabilityId,
int64_t matchingCapabilityTimestamp,
const Address& source,
const Address& dest,
const MAC& sourceMac,
const MAC& destMac,
uint16_t frameLength,
const uint8_t* frameData,
uint16_t etherType,
uint16_t vlanId,
bool noTee,
bool inbound,
int accept)
{
FCV< uint8_t, 4096 > buf;
Dictionary::append(buf, ZT_TRACE_FIELD_TYPE, ZT_TRACE_VL2_NETWORK_FILTER);
Dictionary::append(buf, ZT_TRACE_FIELD_CODE_LOCATION, codeLocation);
Dictionary::append(buf, ZT_TRACE_FIELD_NETWORK_ID, networkId);
if ((primaryRuleSetLog) && (!Utils::allZero(primaryRuleSetLog, 512)))
Dictionary::append(buf, ZT_TRACE_FIELD_PRIMARY_RULE_SET_LOG, primaryRuleSetLog, 512);
if ((matchingCapabilityRuleSetLog) && (!Utils::allZero(matchingCapabilityRuleSetLog, 512)))
Dictionary::append(buf, ZT_TRACE_FIELD_MATCHING_CAPABILITY_RULE_SET_LOG, matchingCapabilityRuleSetLog, 512);
Dictionary::append(buf, ZT_TRACE_FIELD_MATCHING_CAPABILITY_ID, matchingCapabilityId);
Dictionary::append(buf, ZT_TRACE_FIELD_MATCHING_CAPABILITY_TIMESTAMP, matchingCapabilityTimestamp);
Dictionary::append(buf, ZT_TRACE_FIELD_SOURCE_ZT_ADDRESS, source);
Dictionary::append(buf, ZT_TRACE_FIELD_DEST_ZT_ADDRESS, dest);
Dictionary::append(buf, ZT_TRACE_FIELD_SOURCE_MAC, sourceMac.toInt());
Dictionary::append(buf, ZT_TRACE_FIELD_DEST_MAC, destMac.toInt());
Dictionary::append(buf, ZT_TRACE_FIELD_FRAME_LENGTH, frameLength);
if (frameData)
Dictionary::append(buf, ZT_TRACE_FIELD_FRAME_DATA, frameData, std::min((unsigned int)64, (unsigned int)frameLength));
Dictionary::append(buf, ZT_TRACE_FIELD_ETHERTYPE, etherType);
Dictionary::append(buf, ZT_TRACE_FIELD_VLAN_ID, vlanId);
Dictionary::append(buf, ZT_TRACE_FIELD_RULE_FLAG_NOTEE, noTee);
Dictionary::append(buf, ZT_TRACE_FIELD_RULE_FLAG_INBOUND, inbound);
Dictionary::append(buf, ZT_TRACE_FIELD_RULE_FLAG_ACCEPT, (int32_t)accept);
buf.push_back(0);
m_ctx.node->postEvent(tPtr, ZT_EVENT_TRACE, buf.data());
FCV<uint8_t, 4096> buf;
Dictionary::append(buf, ZT_TRACE_FIELD_TYPE, ZT_TRACE_VL2_NETWORK_FILTER);
Dictionary::append(buf, ZT_TRACE_FIELD_CODE_LOCATION, codeLocation);
Dictionary::append(buf, ZT_TRACE_FIELD_NETWORK_ID, networkId);
if ((primaryRuleSetLog) && (! Utils::allZero(primaryRuleSetLog, 512)))
Dictionary::append(buf, ZT_TRACE_FIELD_PRIMARY_RULE_SET_LOG, primaryRuleSetLog, 512);
if ((matchingCapabilityRuleSetLog) && (! Utils::allZero(matchingCapabilityRuleSetLog, 512)))
Dictionary::append(buf, ZT_TRACE_FIELD_MATCHING_CAPABILITY_RULE_SET_LOG, matchingCapabilityRuleSetLog, 512);
Dictionary::append(buf, ZT_TRACE_FIELD_MATCHING_CAPABILITY_ID, matchingCapabilityId);
Dictionary::append(buf, ZT_TRACE_FIELD_MATCHING_CAPABILITY_TIMESTAMP, matchingCapabilityTimestamp);
Dictionary::append(buf, ZT_TRACE_FIELD_SOURCE_ZT_ADDRESS, source);
Dictionary::append(buf, ZT_TRACE_FIELD_DEST_ZT_ADDRESS, dest);
Dictionary::append(buf, ZT_TRACE_FIELD_SOURCE_MAC, sourceMac.toInt());
Dictionary::append(buf, ZT_TRACE_FIELD_DEST_MAC, destMac.toInt());
Dictionary::append(buf, ZT_TRACE_FIELD_FRAME_LENGTH, frameLength);
if (frameData)
Dictionary::append(
buf,
ZT_TRACE_FIELD_FRAME_DATA,
frameData,
std::min((unsigned int)64, (unsigned int)frameLength));
Dictionary::append(buf, ZT_TRACE_FIELD_ETHERTYPE, etherType);
Dictionary::append(buf, ZT_TRACE_FIELD_VLAN_ID, vlanId);
Dictionary::append(buf, ZT_TRACE_FIELD_RULE_FLAG_NOTEE, noTee);
Dictionary::append(buf, ZT_TRACE_FIELD_RULE_FLAG_INBOUND, inbound);
Dictionary::append(buf, ZT_TRACE_FIELD_RULE_FLAG_ACCEPT, (int32_t)accept);
buf.push_back(0);
m_ctx.node->postEvent(tPtr, ZT_EVENT_TRACE, buf.data());
}
void Trace::m_credentialRejected(
void *tPtr,
uint32_t codeLocation,
uint64_t networkId,
const Identity &identity,
uint32_t credentialId,
int64_t credentialTimestamp,
uint8_t credentialType,
ZT_TraceCredentialRejectionReason reason)
void* tPtr,
uint32_t codeLocation,
uint64_t networkId,
const Identity& identity,
uint32_t credentialId,
int64_t credentialTimestamp,
uint8_t credentialType,
ZT_TraceCredentialRejectionReason reason)
{
FCV< uint8_t, 4096 > buf;
Dictionary::append(buf, ZT_TRACE_FIELD_TYPE, ZT_TRACE_VL2_NETWORK_CREDENTIAL_REJECTED);
Dictionary::append(buf, ZT_TRACE_FIELD_CODE_LOCATION, codeLocation);
Dictionary::append(buf, ZT_TRACE_FIELD_NETWORK_ID, networkId);
Dictionary::appendObject(buf, ZT_TRACE_FIELD_IDENTITY_FINGERPRINT, identity.fingerprint());
Dictionary::append(buf, ZT_TRACE_FIELD_CREDENTIAL_ID, credentialId);
Dictionary::append(buf, ZT_TRACE_FIELD_CREDENTIAL_TIMESTAMP, credentialTimestamp);
Dictionary::append(buf, ZT_TRACE_FIELD_CREDENTIAL_TYPE, credentialType);
Dictionary::append(buf, ZT_TRACE_FIELD_REASON, reason);
buf.push_back(0);
m_ctx.node->postEvent(tPtr, ZT_EVENT_TRACE, buf.data());
FCV<uint8_t, 4096> buf;
Dictionary::append(buf, ZT_TRACE_FIELD_TYPE, ZT_TRACE_VL2_NETWORK_CREDENTIAL_REJECTED);
Dictionary::append(buf, ZT_TRACE_FIELD_CODE_LOCATION, codeLocation);
Dictionary::append(buf, ZT_TRACE_FIELD_NETWORK_ID, networkId);
Dictionary::appendObject(buf, ZT_TRACE_FIELD_IDENTITY_FINGERPRINT, identity.fingerprint());
Dictionary::append(buf, ZT_TRACE_FIELD_CREDENTIAL_ID, credentialId);
Dictionary::append(buf, ZT_TRACE_FIELD_CREDENTIAL_TIMESTAMP, credentialTimestamp);
Dictionary::append(buf, ZT_TRACE_FIELD_CREDENTIAL_TYPE, credentialType);
Dictionary::append(buf, ZT_TRACE_FIELD_REASON, reason);
buf.push_back(0);
m_ctx.node->postEvent(tPtr, ZT_EVENT_TRACE, buf.data());
}
} // namespace ZeroTier
} // namespace ZeroTier

View file

@@ -14,15 +14,15 @@
#ifndef ZT_TRACE_HPP
#define ZT_TRACE_HPP
#include "Constants.hpp"
#include "SharedPtr.hpp"
#include "Mutex.hpp"
#include "InetAddress.hpp"
#include "Address.hpp"
#include "MAC.hpp"
#include "Containers.hpp"
#include "Utils.hpp"
#include "CallContext.hpp"
#include "Constants.hpp"
#include "Containers.hpp"
#include "InetAddress.hpp"
#include "MAC.hpp"
#include "Mutex.hpp"
#include "SharedPtr.hpp"
#include "Utils.hpp"
#define ZT_TRACE_F_VL1 0x01U
#define ZT_TRACE_F_VL2 0x02U
@@ -56,291 +56,333 @@ struct NetworkConfig;
* turned into constants that are semi-official and stored in a database to
* provide extra debug context.
*/
class Trace
{
public:
struct RuleResultLog : public TriviallyCopyable
{
uint8_t l[ZT_MAX_NETWORK_RULES / 2]; // ZT_MAX_NETWORK_RULES 4-bit fields
class Trace {
public:
struct RuleResultLog : public TriviallyCopyable {
uint8_t l[ZT_MAX_NETWORK_RULES / 2]; // ZT_MAX_NETWORK_RULES 4-bit fields
ZT_INLINE void log(const unsigned int rn, const uint8_t thisRuleMatches, const uint8_t thisSetMatches) noexcept
{ l[rn >> 1U] |= (((thisRuleMatches + 1U) << 2U) | (thisSetMatches + 1U)) << ((rn & 1U) << 2U); }
ZT_INLINE void log(const unsigned int rn, const uint8_t thisRuleMatches, const uint8_t thisSetMatches) noexcept
{
l[rn >> 1U] |= (((thisRuleMatches + 1U) << 2U) | (thisSetMatches + 1U)) << ((rn & 1U) << 2U);
}
ZT_INLINE void logSkipped(const unsigned int rn, const uint8_t thisSetMatches) noexcept
{ l[rn >> 1U] |= (thisSetMatches + 1U) << ((rn & 1U) << 2U); }
ZT_INLINE void logSkipped(const unsigned int rn, const uint8_t thisSetMatches) noexcept
{
l[rn >> 1U] |= (thisSetMatches + 1U) << ((rn & 1U) << 2U);
}
ZT_INLINE void clear() noexcept
{ memoryZero(this); }
};
ZT_INLINE void clear() noexcept
{
memoryZero(this);
}
};
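A brief worked example of the nibble packing above may help when reading rule result logs; it is purely illustrative and not part of this change. Each rule occupies one 4-bit field in l[]: bits 2-3 hold the rule-match result plus one, bits 0-1 hold the set-match result plus one, and a zero pair means the rule was skipped.
// Illustrative sketch only: how three consecutive rule results land in l[].
Trace::RuleResultLog r;
r.clear();
r.log(0, 1, 1);       // rule 0 matched, set matched:   l[0] |= ((1+1)<<2)|(1+1) = 0x0A
r.log(1, 0, 1);       // rule 1 no match, set matched:  l[0] |= (((0+1)<<2)|(1+1))<<4, so l[0] == 0x6A
r.logSkipped(2, 0);   // rule 2 skipped, set no match:  l[1] |= (0+1) = 0x01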
explicit Trace(const Context &ctx);
explicit Trace(const Context& ctx);
void unexpectedError(
const CallContext &cc,
uint32_t codeLocation,
const char *message,
...);
void unexpectedError(const CallContext& cc, uint32_t codeLocation, const char* message, ...);
ZT_INLINE void resettingPathsInScope(
const CallContext &cc,
const uint32_t codeLocation,
const Identity &reporter,
const InetAddress &from,
const InetAddress &oldExternal,
const InetAddress &newExternal,
const InetAddress::IpScope scope)
{
if (unlikely((m_traceFlags & ZT_TRACE_F_VL1) != 0))
m_resettingPathsInScope(cc.tPtr, codeLocation, reporter, from, oldExternal, newExternal, scope);
}
ZT_INLINE void resettingPathsInScope(
const CallContext& cc,
const uint32_t codeLocation,
const Identity& reporter,
const InetAddress& from,
const InetAddress& oldExternal,
const InetAddress& newExternal,
const InetAddress::IpScope scope)
{
if (unlikely((m_traceFlags & ZT_TRACE_F_VL1) != 0))
m_resettingPathsInScope(cc.tPtr, codeLocation, reporter, from, oldExternal, newExternal, scope);
}
ZT_INLINE void tryingNewPath(
const CallContext &cc,
const uint32_t codeLocation,
const Identity &trying,
const InetAddress &physicalAddress,
const InetAddress &triggerAddress,
uint64_t triggeringPacketId,
uint8_t triggeringPacketVerb,
const Identity &triggeringPeer)
{
if (unlikely((m_traceFlags & ZT_TRACE_F_VL1) != 0))
m_tryingNewPath(cc.tPtr, codeLocation, trying, physicalAddress, triggerAddress, triggeringPacketId, triggeringPacketVerb, triggeringPeer);
}
ZT_INLINE void tryingNewPath(
const CallContext& cc,
const uint32_t codeLocation,
const Identity& trying,
const InetAddress& physicalAddress,
const InetAddress& triggerAddress,
uint64_t triggeringPacketId,
uint8_t triggeringPacketVerb,
const Identity& triggeringPeer)
{
if (unlikely((m_traceFlags & ZT_TRACE_F_VL1) != 0))
m_tryingNewPath(
cc.tPtr,
codeLocation,
trying,
physicalAddress,
triggerAddress,
triggeringPacketId,
triggeringPacketVerb,
triggeringPeer);
}
ZT_INLINE void learnedNewPath(
const CallContext &cc,
const uint32_t codeLocation,
uint64_t packetId,
const Identity &peerIdentity,
const InetAddress &physicalAddress,
const InetAddress &replaced)
{
if (unlikely((m_traceFlags & ZT_TRACE_F_VL1) != 0))
m_learnedNewPath(cc.tPtr, codeLocation, packetId, peerIdentity, physicalAddress, replaced);
}
ZT_INLINE void learnedNewPath(
const CallContext& cc,
const uint32_t codeLocation,
uint64_t packetId,
const Identity& peerIdentity,
const InetAddress& physicalAddress,
const InetAddress& replaced)
{
if (unlikely((m_traceFlags & ZT_TRACE_F_VL1) != 0))
m_learnedNewPath(cc.tPtr, codeLocation, packetId, peerIdentity, physicalAddress, replaced);
}
ZT_INLINE void incomingPacketDropped(
const CallContext &cc,
const uint32_t codeLocation,
uint64_t packetId,
uint64_t networkId,
const Identity &peerIdentity,
const InetAddress &physicalAddress,
uint8_t hops,
uint8_t verb,
const ZT_TracePacketDropReason reason)
{
if (unlikely((m_traceFlags & ZT_TRACE_F_VL1) != 0))
m_incomingPacketDropped(cc.tPtr, codeLocation, packetId, networkId, peerIdentity, physicalAddress, hops, verb, reason);
}
ZT_INLINE void incomingPacketDropped(
const CallContext& cc,
const uint32_t codeLocation,
uint64_t packetId,
uint64_t networkId,
const Identity& peerIdentity,
const InetAddress& physicalAddress,
uint8_t hops,
uint8_t verb,
const ZT_TracePacketDropReason reason)
{
if (unlikely((m_traceFlags & ZT_TRACE_F_VL1) != 0))
m_incomingPacketDropped(
cc.tPtr,
codeLocation,
packetId,
networkId,
peerIdentity,
physicalAddress,
hops,
verb,
reason);
}
ZT_INLINE void outgoingNetworkFrameDropped(
const CallContext &cc,
const uint32_t codeLocation,
uint64_t networkId,
const MAC &sourceMac,
const MAC &destMac,
uint16_t etherType,
uint16_t frameLength,
const uint8_t *frameData,
ZT_TraceFrameDropReason reason)
{
if (unlikely((m_traceFlags & ZT_TRACE_F_VL2) != 0))
m_outgoingNetworkFrameDropped(cc.tPtr, codeLocation, networkId, sourceMac, destMac, etherType, frameLength, frameData, reason);
}
ZT_INLINE void outgoingNetworkFrameDropped(
const CallContext& cc,
const uint32_t codeLocation,
uint64_t networkId,
const MAC& sourceMac,
const MAC& destMac,
uint16_t etherType,
uint16_t frameLength,
const uint8_t* frameData,
ZT_TraceFrameDropReason reason)
{
if (unlikely((m_traceFlags & ZT_TRACE_F_VL2) != 0))
m_outgoingNetworkFrameDropped(
cc.tPtr,
codeLocation,
networkId,
sourceMac,
destMac,
etherType,
frameLength,
frameData,
reason);
}
ZT_INLINE void incomingNetworkFrameDropped(
const CallContext &cc,
const uint32_t codeLocation,
uint64_t networkId,
const MAC &sourceMac,
const MAC &destMac,
const uint16_t etherType,
const Identity &peerIdentity,
const InetAddress &physicalAddress,
uint8_t hops,
uint16_t frameLength,
const uint8_t *frameData,
uint8_t verb,
bool credentialRequestSent,
ZT_TraceFrameDropReason reason)
{
if (unlikely((m_traceFlags & ZT_TRACE_F_VL2) != 0))
m_incomingNetworkFrameDropped(cc.tPtr, codeLocation, networkId, sourceMac, destMac, etherType, peerIdentity, physicalAddress, hops, frameLength, frameData, verb, credentialRequestSent, reason);
}
ZT_INLINE void incomingNetworkFrameDropped(
const CallContext& cc,
const uint32_t codeLocation,
uint64_t networkId,
const MAC& sourceMac,
const MAC& destMac,
const uint16_t etherType,
const Identity& peerIdentity,
const InetAddress& physicalAddress,
uint8_t hops,
uint16_t frameLength,
const uint8_t* frameData,
uint8_t verb,
bool credentialRequestSent,
ZT_TraceFrameDropReason reason)
{
if (unlikely((m_traceFlags & ZT_TRACE_F_VL2) != 0))
m_incomingNetworkFrameDropped(
cc.tPtr,
codeLocation,
networkId,
sourceMac,
destMac,
etherType,
peerIdentity,
physicalAddress,
hops,
frameLength,
frameData,
verb,
credentialRequestSent,
reason);
}
ZT_INLINE void networkConfigRequestSent(
const CallContext &cc,
const uint32_t codeLocation,
uint64_t networkId)
{
if (unlikely((m_traceFlags & ZT_TRACE_F_VL2) != 0))
m_networkConfigRequestSent(cc.tPtr, codeLocation, networkId);
}
ZT_INLINE void networkConfigRequestSent(const CallContext& cc, const uint32_t codeLocation, uint64_t networkId)
{
if (unlikely((m_traceFlags & ZT_TRACE_F_VL2) != 0))
m_networkConfigRequestSent(cc.tPtr, codeLocation, networkId);
}
ZT_INLINE void networkFilter(
const CallContext &cc,
const uint32_t codeLocation,
uint64_t networkId,
const uint8_t primaryRuleSetLog[512],
const uint8_t matchingCapabilityRuleSetLog[512],
uint32_t matchingCapabilityId,
int64_t matchingCapabilityTimestamp,
const Address &source,
const Address &dest,
const MAC &sourceMac,
const MAC &destMac,
uint16_t frameLength,
const uint8_t *frameData,
uint16_t etherType,
uint16_t vlanId,
bool noTee,
bool inbound,
int accept)
{
if (unlikely((m_traceFlags & ZT_TRACE_F_VL2_FILTER) != 0)) {
m_networkFilter(
cc.tPtr,
codeLocation,
networkId,
primaryRuleSetLog,
matchingCapabilityRuleSetLog,
matchingCapabilityId,
matchingCapabilityTimestamp,
source,
dest,
sourceMac,
destMac,
frameLength,
frameData,
etherType,
vlanId,
noTee,
inbound,
accept);
}
}
ZT_INLINE void networkFilter(
const CallContext& cc,
const uint32_t codeLocation,
uint64_t networkId,
const uint8_t primaryRuleSetLog[512],
const uint8_t matchingCapabilityRuleSetLog[512],
uint32_t matchingCapabilityId,
int64_t matchingCapabilityTimestamp,
const Address& source,
const Address& dest,
const MAC& sourceMac,
const MAC& destMac,
uint16_t frameLength,
const uint8_t* frameData,
uint16_t etherType,
uint16_t vlanId,
bool noTee,
bool inbound,
int accept)
{
if (unlikely((m_traceFlags & ZT_TRACE_F_VL2_FILTER) != 0)) {
m_networkFilter(
cc.tPtr,
codeLocation,
networkId,
primaryRuleSetLog,
matchingCapabilityRuleSetLog,
matchingCapabilityId,
matchingCapabilityTimestamp,
source,
dest,
sourceMac,
destMac,
frameLength,
frameData,
etherType,
vlanId,
noTee,
inbound,
accept);
}
}
ZT_INLINE void credentialRejected(
const CallContext &cc,
const uint32_t codeLocation,
uint64_t networkId,
const Identity &identity,
uint32_t credentialId,
int64_t credentialTimestamp,
uint8_t credentialType,
ZT_TraceCredentialRejectionReason reason)
{
if (unlikely((m_traceFlags & ZT_TRACE_F_VL2) != 0))
m_credentialRejected(cc.tPtr, codeLocation, networkId, identity, credentialId, credentialTimestamp, credentialType, reason);
}
ZT_INLINE void credentialRejected(
const CallContext& cc,
const uint32_t codeLocation,
uint64_t networkId,
const Identity& identity,
uint32_t credentialId,
int64_t credentialTimestamp,
uint8_t credentialType,
ZT_TraceCredentialRejectionReason reason)
{
if (unlikely((m_traceFlags & ZT_TRACE_F_VL2) != 0))
m_credentialRejected(
cc.tPtr,
codeLocation,
networkId,
identity,
credentialId,
credentialTimestamp,
credentialType,
reason);
}
private:
void m_resettingPathsInScope(
void *tPtr,
uint32_t codeLocation,
const Identity &reporter,
const InetAddress &from,
const InetAddress &oldExternal,
const InetAddress &newExternal,
InetAddress::IpScope scope);
private:
void m_resettingPathsInScope(
void* tPtr,
uint32_t codeLocation,
const Identity& reporter,
const InetAddress& from,
const InetAddress& oldExternal,
const InetAddress& newExternal,
InetAddress::IpScope scope);
void m_tryingNewPath(
void *tPtr,
uint32_t codeLocation,
const Identity &trying,
const InetAddress &physicalAddress,
const InetAddress &triggerAddress,
uint64_t triggeringPacketId,
uint8_t triggeringPacketVerb,
const Identity &triggeringPeer);
void m_tryingNewPath(
void* tPtr,
uint32_t codeLocation,
const Identity& trying,
const InetAddress& physicalAddress,
const InetAddress& triggerAddress,
uint64_t triggeringPacketId,
uint8_t triggeringPacketVerb,
const Identity& triggeringPeer);
void m_learnedNewPath(
void *tPtr,
uint32_t codeLocation,
uint64_t packetId,
const Identity &peerIdentity,
const InetAddress &physicalAddress,
const InetAddress &replaced);
void m_learnedNewPath(
void* tPtr,
uint32_t codeLocation,
uint64_t packetId,
const Identity& peerIdentity,
const InetAddress& physicalAddress,
const InetAddress& replaced);
void m_incomingPacketDropped(
void *tPtr,
uint32_t codeLocation,
uint64_t packetId,
uint64_t networkId,
const Identity &peerIdentity,
const InetAddress &physicalAddress,
uint8_t hops,
uint8_t verb,
ZT_TracePacketDropReason reason);
void m_incomingPacketDropped(
void* tPtr,
uint32_t codeLocation,
uint64_t packetId,
uint64_t networkId,
const Identity& peerIdentity,
const InetAddress& physicalAddress,
uint8_t hops,
uint8_t verb,
ZT_TracePacketDropReason reason);
void m_outgoingNetworkFrameDropped(
void *tPtr,
uint32_t codeLocation,
uint64_t networkId,
const MAC &sourceMac,
const MAC &destMac,
uint16_t etherType,
uint16_t frameLength,
const uint8_t *frameData,
ZT_TraceFrameDropReason reason);
void m_outgoingNetworkFrameDropped(
void* tPtr,
uint32_t codeLocation,
uint64_t networkId,
const MAC& sourceMac,
const MAC& destMac,
uint16_t etherType,
uint16_t frameLength,
const uint8_t* frameData,
ZT_TraceFrameDropReason reason);
void m_incomingNetworkFrameDropped(
void *tPtr,
uint32_t codeLocation,
uint64_t networkId,
const MAC &sourceMac,
const MAC &destMac,
const uint16_t etherType,
const Identity &peerIdentity,
const InetAddress &physicalAddress,
uint8_t hops,
uint16_t frameLength,
const uint8_t *frameData,
uint8_t verb,
bool credentialRequestSent,
ZT_TraceFrameDropReason reason);
void m_incomingNetworkFrameDropped(
void* tPtr,
uint32_t codeLocation,
uint64_t networkId,
const MAC& sourceMac,
const MAC& destMac,
const uint16_t etherType,
const Identity& peerIdentity,
const InetAddress& physicalAddress,
uint8_t hops,
uint16_t frameLength,
const uint8_t* frameData,
uint8_t verb,
bool credentialRequestSent,
ZT_TraceFrameDropReason reason);
void m_networkConfigRequestSent(
void *tPtr,
uint32_t codeLocation,
uint64_t networkId);
void m_networkConfigRequestSent(void* tPtr, uint32_t codeLocation, uint64_t networkId);
void m_networkFilter(
void *tPtr,
uint32_t codeLocation,
uint64_t networkId,
const uint8_t *primaryRuleSetLog,
const uint8_t *matchingCapabilityRuleSetLog,
uint32_t matchingCapabilityId,
int64_t matchingCapabilityTimestamp,
const Address &source,
const Address &dest,
const MAC &sourceMac,
const MAC &destMac,
uint16_t frameLength,
const uint8_t *frameData,
uint16_t etherType,
uint16_t vlanId,
bool noTee,
bool inbound,
int accept);
void m_networkFilter(
void* tPtr,
uint32_t codeLocation,
uint64_t networkId,
const uint8_t* primaryRuleSetLog,
const uint8_t* matchingCapabilityRuleSetLog,
uint32_t matchingCapabilityId,
int64_t matchingCapabilityTimestamp,
const Address& source,
const Address& dest,
const MAC& sourceMac,
const MAC& destMac,
uint16_t frameLength,
const uint8_t* frameData,
uint16_t etherType,
uint16_t vlanId,
bool noTee,
bool inbound,
int accept);
void m_credentialRejected(
void *tPtr,
uint32_t codeLocation,
uint64_t networkId,
const Identity &identity,
uint32_t credentialId,
int64_t credentialTimestamp,
uint8_t credentialType,
ZT_TraceCredentialRejectionReason reason);
void m_credentialRejected(
void* tPtr,
uint32_t codeLocation,
uint64_t networkId,
const Identity& identity,
uint32_t credentialId,
int64_t credentialTimestamp,
uint8_t credentialType,
ZT_TraceCredentialRejectionReason reason);
const Context &m_ctx;
volatile unsigned int m_traceFlags; // faster than atomic, but may not "instantly" change... should be okay
const Context& m_ctx;
volatile unsigned int m_traceFlags; // faster than atomic, but may not "instantly" change... should be okay
};
} // namespace ZeroTier
} // namespace ZeroTier
#endif

View file

@@ -24,40 +24,41 @@ namespace ZeroTier {
*
* It also includes some static methods to do this conveniently.
*/
struct TriviallyCopyable
{
public:
/**
* Zero a TriviallyCopyable object
*
* @tparam T Automatically inferred type of object
* @param obj Any TriviallyCopyable object
*/
template<typename T>
static ZT_INLINE void memoryZero(T *obj) noexcept
{
mustBeTriviallyCopyable(obj);
Utils::zero<sizeof(T)>(obj);
}
struct TriviallyCopyable {
public:
/**
* Zero a TriviallyCopyable object
*
* @tparam T Automatically inferred type of object
* @param obj Any TriviallyCopyable object
*/
template <typename T> static ZT_INLINE void memoryZero(T* obj) noexcept
{
mustBeTriviallyCopyable(obj);
Utils::zero<sizeof(T)>(obj);
}
/**
* Zero a TriviallyCopyable object
*
* @tparam T Automatically inferred type of object
* @param obj Any TriviallyCopyable object
*/
template<typename T>
static ZT_INLINE void memoryZero(T &obj) noexcept
{
mustBeTriviallyCopyable(obj);
Utils::zero<sizeof(T)>(&obj);
}
/**
* Zero a TriviallyCopyable object
*
* @tparam T Automatically inferred type of object
* @param obj Any TriviallyCopyable object
*/
template <typename T> static ZT_INLINE void memoryZero(T& obj) noexcept
{
mustBeTriviallyCopyable(obj);
Utils::zero<sizeof(T)>(&obj);
}
private:
static ZT_INLINE void mustBeTriviallyCopyable(const TriviallyCopyable &) noexcept {}
static ZT_INLINE void mustBeTriviallyCopyable(const TriviallyCopyable *) noexcept {}
private:
static ZT_INLINE void mustBeTriviallyCopyable(const TriviallyCopyable&) noexcept
{
}
static ZT_INLINE void mustBeTriviallyCopyable(const TriviallyCopyable*) noexcept
{
}
};
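A short, hedged usage sketch may make the intent clearer; the Example type and resetExample() below are hypothetical and not part of this diff. Any struct that publicly derives from TriviallyCopyable can be zeroed through memoryZero(), and the private mustBeTriviallyCopyable() overloads reject unrelated types at compile time.
// Hypothetical example, not from the source tree.
struct Example : public TriviallyCopyable {
    uint64_t id;
    uint8_t key[32];
};
ZT_INLINE void resetExample(Example& e) noexcept
{
    TriviallyCopyable::memoryZero(e);   // zeroes all sizeof(Example) bytes via Utils::zero
    // TriviallyCopyable::memoryZero(&e) is the equivalent pointer overload.
}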
} // namespace ZeroTier
} // namespace ZeroTier
#endif

View file

@@ -12,348 +12,376 @@
/****/
#include "TrustStore.hpp"
#include "LZ4.hpp"
namespace ZeroTier {
TrustStore::TrustStore()
{}
{
}
TrustStore::~TrustStore()
{}
SharedPtr< TrustStore::Entry > TrustStore::get(const H384 &serial) const
{
RWMutex::RLock l(m_lock);
Map< H384, SharedPtr< Entry > >::const_iterator c(m_bySerial.find(serial));
return (c != m_bySerial.end()) ? c->second : SharedPtr< TrustStore::Entry >();
}
Map< Identity, SharedPtr< const Locator > > TrustStore::roots()
SharedPtr<TrustStore::Entry> TrustStore::get(const H384& serial) const
{
RWMutex::RLock l(m_lock);
Map< Identity, SharedPtr< const Locator > > r;
// Iterate using m_bySubjectIdentity to only scan certificates with subject identities.
// This map also does not contain error or deprecated certificates.
for (Map< Fingerprint, Vector< SharedPtr< Entry > > >::const_iterator cv(m_bySubjectIdentity.begin()); cv != m_bySubjectIdentity.end(); ++cv) {
for (Vector< SharedPtr< Entry > >::const_iterator c(cv->second.begin()); c != cv->second.end(); ++c) {
// A root set cert must be marked for this use and authorized to influence this node's config.
if ((((*c)->m_certificate.usageFlags & ZT_CERTIFICATE_USAGE_ZEROTIER_ROOT_SET) != 0) && (((*c)->m_localTrust & ZT_CERTIFICATE_LOCAL_TRUST_FLAG_CONFIG) != 0)) {
// Add all identities to the root set, and for each entry in the set make sure we have the latest locator if there's more than one cert with one.
for (unsigned int j = 0; j < (*c)->certificate().subject.identityCount; ++j) {
const Identity *const id = reinterpret_cast<const Identity *>((*c)->certificate().subject.identities[j].identity);
if ((id) && (*id)) { // sanity check
SharedPtr< const Locator > &existingLoc = r[*id];
const Locator *const loc = reinterpret_cast<const Locator *>((*c)->certificate().subject.identities[j].locator);
if (loc) {
if ((!existingLoc) || (existingLoc->revision() < loc->revision()))
existingLoc.set(new Locator(*loc));
}
}
}
}
}
}
return r;
RWMutex::RLock l(m_lock);
Map<H384, SharedPtr<Entry> >::const_iterator c(m_bySerial.find(serial));
return (c != m_bySerial.end()) ? c->second : SharedPtr<TrustStore::Entry>();
}
Vector< SharedPtr< TrustStore::Entry > > TrustStore::all(const bool includeRejectedCertificates) const
Map<Identity, SharedPtr<const Locator> > TrustStore::roots()
{
RWMutex::RLock l(m_lock);
Vector< SharedPtr< Entry > > r;
r.reserve(m_bySerial.size());
for (Map< H384, SharedPtr< Entry > >::const_iterator c(m_bySerial.begin()); c != m_bySerial.end(); ++c) {
if ((includeRejectedCertificates) || (c->second->error() == ZT_CERTIFICATE_ERROR_NONE))
r.push_back(c->second);
}
return r;
RWMutex::RLock l(m_lock);
Map<Identity, SharedPtr<const Locator> > r;
// Iterate using m_bySubjectIdentity to only scan certificates with subject identities.
// This map also does not contain error or deprecated certificates.
for (Map<Fingerprint, Vector<SharedPtr<Entry> > >::const_iterator cv(m_bySubjectIdentity.begin());
cv != m_bySubjectIdentity.end();
++cv) {
for (Vector<SharedPtr<Entry> >::const_iterator c(cv->second.begin()); c != cv->second.end(); ++c) {
// A root set cert must be marked for this use and authorized to influence this node's config.
if ((((*c)->m_certificate.usageFlags & ZT_CERTIFICATE_USAGE_ZEROTIER_ROOT_SET) != 0)
&& (((*c)->m_localTrust & ZT_CERTIFICATE_LOCAL_TRUST_FLAG_CONFIG) != 0)) {
// Add all identities to the root set, and for each entry in the set make sure we have the latest
// locator if there's more than one cert with one.
for (unsigned int j = 0; j < (*c)->certificate().subject.identityCount; ++j) {
const Identity* const id =
reinterpret_cast<const Identity*>((*c)->certificate().subject.identities[j].identity);
if ((id) && (*id)) { // sanity check
SharedPtr<const Locator>& existingLoc = r[*id];
const Locator* const loc =
reinterpret_cast<const Locator*>((*c)->certificate().subject.identities[j].locator);
if (loc) {
if ((! existingLoc) || (existingLoc->revision() < loc->revision()))
existingLoc.set(new Locator(*loc));
}
}
}
}
}
}
return r;
}
void TrustStore::add(const Certificate &cert, const unsigned int localTrust)
Vector<SharedPtr<TrustStore::Entry> > TrustStore::all(const bool includeRejectedCertificates) const
{
RWMutex::Lock l(m_lock);
m_addQueue.push_front(SharedPtr< Entry >(new Entry(this->m_lock, cert, localTrust)));
RWMutex::RLock l(m_lock);
Vector<SharedPtr<Entry> > r;
r.reserve(m_bySerial.size());
for (Map<H384, SharedPtr<Entry> >::const_iterator c(m_bySerial.begin()); c != m_bySerial.end(); ++c) {
if ((includeRejectedCertificates) || (c->second->error() == ZT_CERTIFICATE_ERROR_NONE))
r.push_back(c->second);
}
return r;
}
void TrustStore::erase(const H384 &serial)
void TrustStore::add(const Certificate& cert, const unsigned int localTrust)
{
RWMutex::Lock l(m_lock);
m_deleteQueue.push_front(serial);
RWMutex::Lock l(m_lock);
m_addQueue.push_front(SharedPtr<Entry>(new Entry(this->m_lock, cert, localTrust)));
}
bool TrustStore::update(const int64_t clock, Vector< SharedPtr< Entry > > *const purge)
void TrustStore::erase(const H384& serial)
{
RWMutex::Lock l(m_lock);
// Check for certificate time validity status changes. If any of these occur then
// full re-validation is required.
bool errorStateModified = false;
for (Map< H384, SharedPtr< Entry > >::const_iterator c(m_bySerial.begin()); c != m_bySerial.end(); ++c) {
const bool timeValid = c->second->m_certificate.verifyTimeWindow(clock);
switch (c->second->m_error) {
case ZT_CERTIFICATE_ERROR_NONE:
case ZT_CERTIFICATE_ERROR_INVALID_CHAIN:
if (!timeValid) {
c->second->m_error = ZT_CERTIFICATE_ERROR_OUT_OF_VALID_TIME_WINDOW;
errorStateModified = true;
}
break;
case ZT_CERTIFICATE_ERROR_OUT_OF_VALID_TIME_WINDOW:
if (timeValid) {
c->second->m_error = c->second->m_certificate.verify(-1, false);
errorStateModified = true;
}
break;
default:
break;
}
}
// If there were not any such changes and if the add and delete queues are empty,
// there is nothing more to be done.
if ((!errorStateModified) && (m_addQueue.empty()) && (m_deleteQueue.empty()))
return false;
// Add new certificates to m_bySerial, which is the master certificate set. They still
// have yet to have their full certificate chains validated. Full signature checking is
// performed here.
while (!m_addQueue.empty()) {
SharedPtr< Entry > &qi = m_addQueue.front();
qi->m_error = qi->m_certificate.verify(clock, true);
m_bySerial[H384(qi->m_certificate.serialNo)].move(qi);
m_addQueue.pop_front();
}
// Delete any certificates enqueued to be deleted.
while (!m_deleteQueue.empty()) {
m_bySerial.erase(m_deleteQueue.front());
m_deleteQueue.pop_front();
}
// Reset flags for deprecation and a cert being on a trust path, which are
// recomputed when chain and subjects are checked below.
for (Map< H384, SharedPtr< Entry > >::const_iterator c(m_bySerial.begin()); c != m_bySerial.end(); ++c) {
if (c->second->m_error == ZT_CERTIFICATE_ERROR_NONE) {
c->second->m_subjectDeprecated = false;
c->second->m_onTrustPath = false;
}
}
// Validate certificate trust paths.
{
Vector< Entry * > visited;
visited.reserve(8);
for (Map< H384, SharedPtr< Entry > >::iterator c(m_bySerial.begin()); c != m_bySerial.end(); ++c) {
if ((c->second->m_error == ZT_CERTIFICATE_ERROR_NONE) && (!c->second->m_onTrustPath) && ((c->second->m_localTrust & ZT_CERTIFICATE_LOCAL_TRUST_FLAG_ROOT_CA) == 0)) {
// Trace the path of each certificate all the way back to a trusted CA.
unsigned int pathLength = 0;
Map< H384, SharedPtr< Entry > >::const_iterator current(c);
visited.clear();
for (;;) {
if (pathLength <= current->second->m_certificate.maxPathLength) {
// Check if this cert isn't a CA or already part of a valid trust path. If so then step upward toward CA.
if (((current->second->m_localTrust & ZT_CERTIFICATE_LOCAL_TRUST_FLAG_ROOT_CA) == 0) && (!current->second->m_onTrustPath)) {
// If the issuer (parent) certificate is (1) valid, (2) not already visited (to prevent loops),
// and (3) has a public key that matches this cert's issuer public key (sanity check), proceed
// up the certificate graph toward a potential CA.
visited.push_back(current->second.ptr());
const Map< H384, SharedPtr< Entry > >::const_iterator prevChild(current);
current = m_bySerial.find(H384(current->second->m_certificate.issuer));
if ((current != m_bySerial.end()) &&
(std::find(visited.begin(), visited.end(), current->second.ptr()) == visited.end()) &&
(current->second->m_error == ZT_CERTIFICATE_ERROR_NONE) &&
(current->second->m_certificate.publicKeySize == prevChild->second->m_certificate.issuerPublicKeySize) &&
(memcmp(current->second->m_certificate.publicKey, prevChild->second->m_certificate.issuerPublicKey, current->second->m_certificate.publicKeySize) == 0)) {
++pathLength;
continue;
}
} else {
// If we've traced this to a root CA, flag its parents as also being on a trust path. Then
// break the loop without setting an error. We don't flag the current cert as being on a
// trust path since no other certificates depend on it.
for (Vector< Entry * >::const_iterator v(visited.begin()); v != visited.end(); ++v) {
if (*v != c->second.ptr())
(*v)->m_onTrustPath = true;
}
break;
}
}
// If we made it here without breaking or continuing, no path to a
// CA was found and the certificate's chain is invalid.
c->second->m_error = ZT_CERTIFICATE_ERROR_INVALID_CHAIN;
break;
}
}
}
}
// Repopulate mapping of subject unique IDs to their certificates, marking older
// certificates for the same subject as deprecated. A deprecated certificate is not invalid
// but will be purged if it is also not part of a trust path. Error certificates are ignored.
m_bySubjectUniqueId.clear();
for (Map< H384, SharedPtr< Entry > >::const_iterator c(m_bySerial.begin()); c != m_bySerial.end(); ++c) {
if (c->second->m_error == ZT_CERTIFICATE_ERROR_NONE) {
const unsigned int uniqueIdSize = c->second->m_certificate.subject.uniqueIdSize;
if ((uniqueIdSize > 0) && (uniqueIdSize <= ZT_CERTIFICATE_MAX_PUBLIC_KEY_SIZE)) {
SharedPtr< Entry > &entry = m_bySubjectUniqueId[Blob< ZT_CERTIFICATE_MAX_PUBLIC_KEY_SIZE >(c->second->m_certificate.subject.uniqueId, uniqueIdSize)];
if (entry) {
// If there's already an entry, see if there's a newer certificate for this subject.
if (c->second->m_certificate.subject.timestamp > entry->m_certificate.subject.timestamp) {
entry->m_subjectDeprecated = true;
entry = c->second;
} else if (c->second->m_certificate.subject.timestamp < entry->m_certificate.subject.timestamp) {
c->second->m_subjectDeprecated = true;
} else {
// Equal timestamps should never happen, but handle it anyway by comparing serials.
if (memcmp(c->second->m_certificate.serialNo, entry->m_certificate.serialNo, ZT_CERTIFICATE_HASH_SIZE) > 0) {
entry->m_subjectDeprecated = true;
entry = c->second;
} else {
c->second->m_subjectDeprecated = true;
}
}
} else {
entry = c->second;
}
}
}
}
// Populate mapping of identities to certificates whose subjects reference them, ignoring
// error or deprecated certificates.
m_bySubjectIdentity.clear();
for (Map< H384, SharedPtr< Entry > >::const_iterator c(m_bySerial.begin()); c != m_bySerial.end(); ++c) {
if ((c->second->m_error == ZT_CERTIFICATE_ERROR_NONE) && (!c->second->m_subjectDeprecated)) {
for (unsigned int i = 0; i < c->second->m_certificate.subject.identityCount; ++i) {
const Identity *const id = reinterpret_cast<const Identity *>(c->second->m_certificate.subject.identities[i].identity);
if ((id) && (*id)) // sanity check
m_bySubjectIdentity[id->fingerprint()].push_back(c->second);
}
}
}
// If purge is set, erase and return error and deprecated certs (that are not on a trust path).
if (purge) {
for (Map< H384, SharedPtr< Entry > >::const_iterator c(m_bySerial.begin()); c != m_bySerial.end();) {
if ( (c->second->error() != ZT_CERTIFICATE_ERROR_NONE) || ((c->second->m_subjectDeprecated) && (!c->second->m_onTrustPath)) ) {
purge->push_back(c->second);
m_bySerial.erase(c++);
} else {
++c;
}
}
}
return true;
RWMutex::Lock l(m_lock);
m_deleteQueue.push_front(serial);
}
Vector< uint8_t > TrustStore::save() const
bool TrustStore::update(const int64_t clock, Vector<SharedPtr<Entry> >* const purge)
{
Vector< uint8_t > comp;
RWMutex::Lock l(m_lock);
int compSize;
{
RWMutex::RLock l(m_lock);
// Check for certificate time validity status changes. If any of these occur then
// full re-validation is required.
bool errorStateModified = false;
for (Map<H384, SharedPtr<Entry> >::const_iterator c(m_bySerial.begin()); c != m_bySerial.end(); ++c) {
const bool timeValid = c->second->m_certificate.verifyTimeWindow(clock);
switch (c->second->m_error) {
case ZT_CERTIFICATE_ERROR_NONE:
case ZT_CERTIFICATE_ERROR_INVALID_CHAIN:
if (! timeValid) {
c->second->m_error = ZT_CERTIFICATE_ERROR_OUT_OF_VALID_TIME_WINDOW;
errorStateModified = true;
}
break;
case ZT_CERTIFICATE_ERROR_OUT_OF_VALID_TIME_WINDOW:
if (timeValid) {
c->second->m_error = c->second->m_certificate.verify(-1, false);
errorStateModified = true;
}
break;
default:
break;
}
}
Vector< uint8_t > b;
b.reserve(4096);
// If there were not any such changes and if the add and delete queues are empty,
// there is nothing more to be done.
if ((! errorStateModified) && (m_addQueue.empty()) && (m_deleteQueue.empty()))
return false;
// A version byte.
b.push_back(0);
// Add new certificates to m_bySerial, which is the master certificate set. They still
// have yet to have their full certificate chains validated. Full signature checking is
// performed here.
while (! m_addQueue.empty()) {
SharedPtr<Entry>& qi = m_addQueue.front();
qi->m_error = qi->m_certificate.verify(clock, true);
m_bySerial[H384(qi->m_certificate.serialNo)].move(qi);
m_addQueue.pop_front();
}
// <size[2]> <certificate[...]> <trust[2]> tuples terminated by a 0 size.
for (Map< H384, SharedPtr< Entry > >::const_iterator c(m_bySerial.begin()); c != m_bySerial.end(); ++c) {
const Vector< uint8_t > cdata(c->second->certificate().encode());
const unsigned long size = (uint32_t)cdata.size();
if ((size > 0) && (size <= 0xffff)) {
b.push_back((uint8_t)(size >> 8U));
b.push_back((uint8_t)size);
b.insert(b.end(), cdata.begin(), cdata.end());
const uint32_t localTrust = (uint32_t)c->second->localTrust();
b.push_back((uint8_t)(localTrust >> 8U));
b.push_back((uint8_t)localTrust);
}
}
b.push_back(0);
b.push_back(0);
// Delete any certificates enqueued to be deleted.
while (! m_deleteQueue.empty()) {
m_bySerial.erase(m_deleteQueue.front());
m_deleteQueue.pop_front();
}
comp.resize((unsigned long)LZ4_COMPRESSBOUND(b.size()) + 8);
compSize = LZ4_compress_fast(reinterpret_cast<const char *>(b.data()), reinterpret_cast<char *>(comp.data() + 8), (int)b.size(), (int)(comp.size() - 8));
if (unlikely(compSize <= 0)) // shouldn't be possible
return Vector< uint8_t >();
// Reset flags for deprecation and a cert being on a trust path, which are
// recomputed when chain and subjects are checked below.
for (Map<H384, SharedPtr<Entry> >::const_iterator c(m_bySerial.begin()); c != m_bySerial.end(); ++c) {
if (c->second->m_error == ZT_CERTIFICATE_ERROR_NONE) {
c->second->m_subjectDeprecated = false;
c->second->m_onTrustPath = false;
}
}
const uint32_t uncompSize = (uint32_t)b.size();
Utils::storeBigEndian(comp.data(), uncompSize);
Utils::storeBigEndian(comp.data() + 4, Utils::fnv1a32(b.data(), (unsigned int)uncompSize));
compSize += 8;
}
// Validate certificate trust paths.
{
Vector<Entry*> visited;
visited.reserve(8);
for (Map<H384, SharedPtr<Entry> >::iterator c(m_bySerial.begin()); c != m_bySerial.end(); ++c) {
if ((c->second->m_error == ZT_CERTIFICATE_ERROR_NONE) && (! c->second->m_onTrustPath)
&& ((c->second->m_localTrust & ZT_CERTIFICATE_LOCAL_TRUST_FLAG_ROOT_CA) == 0)) {
// Trace the path of each certificate all the way back to a trusted CA.
unsigned int pathLength = 0;
Map<H384, SharedPtr<Entry> >::const_iterator current(c);
visited.clear();
for (;;) {
if (pathLength <= current->second->m_certificate.maxPathLength) {
// Check if this cert isn't a CA or already part of a valid trust path. If so then step upward
// toward CA.
if (((current->second->m_localTrust & ZT_CERTIFICATE_LOCAL_TRUST_FLAG_ROOT_CA) == 0)
&& (! current->second->m_onTrustPath)) {
// If the issuer (parent) certificate is (1) valid, (2) not already visited (to prevent
// loops), and (3) has a public key that matches this cert's issuer public key (sanity
// check), proceed up the certificate graph toward a potential CA.
visited.push_back(current->second.ptr());
const Map<H384, SharedPtr<Entry> >::const_iterator prevChild(current);
current = m_bySerial.find(H384(current->second->m_certificate.issuer));
if ((current != m_bySerial.end())
&& (std::find(visited.begin(), visited.end(), current->second.ptr()) == visited.end())
&& (current->second->m_error == ZT_CERTIFICATE_ERROR_NONE)
&& (current->second->m_certificate.publicKeySize
== prevChild->second->m_certificate.issuerPublicKeySize)
&& (memcmp(
current->second->m_certificate.publicKey,
prevChild->second->m_certificate.issuerPublicKey,
current->second->m_certificate.publicKeySize)
== 0)) {
++pathLength;
continue;
}
}
else {
// If we've traced this to a root CA, flag its parents as also being on a trust path. Then
// break the loop without setting an error. We don't flag the current cert as being on a
// trust path since no other certificates depend on it.
for (Vector<Entry*>::const_iterator v(visited.begin()); v != visited.end(); ++v) {
if (*v != c->second.ptr())
(*v)->m_onTrustPath = true;
}
break;
}
}
comp.resize((unsigned long)compSize);
comp.shrink_to_fit();
// If we made it here without breaking or continuing, no path to a
// CA was found and the certificate's chain is invalid.
c->second->m_error = ZT_CERTIFICATE_ERROR_INVALID_CHAIN;
break;
}
}
}
}
return comp;
// Repopulate mapping of subject unique IDs to their certificates, marking older
// certificates for the same subject as deprecated. A deprecated certificate is not invalid
// but will be purged if it is also not part of a trust path. Error certificates are ignored.
m_bySubjectUniqueId.clear();
for (Map<H384, SharedPtr<Entry> >::const_iterator c(m_bySerial.begin()); c != m_bySerial.end(); ++c) {
if (c->second->m_error == ZT_CERTIFICATE_ERROR_NONE) {
const unsigned int uniqueIdSize = c->second->m_certificate.subject.uniqueIdSize;
if ((uniqueIdSize > 0) && (uniqueIdSize <= ZT_CERTIFICATE_MAX_PUBLIC_KEY_SIZE)) {
SharedPtr<Entry>& entry = m_bySubjectUniqueId[Blob<ZT_CERTIFICATE_MAX_PUBLIC_KEY_SIZE>(
c->second->m_certificate.subject.uniqueId,
uniqueIdSize)];
if (entry) {
// If there's already an entry, see if there's a newer certificate for this subject.
if (c->second->m_certificate.subject.timestamp > entry->m_certificate.subject.timestamp) {
entry->m_subjectDeprecated = true;
entry = c->second;
}
else if (c->second->m_certificate.subject.timestamp < entry->m_certificate.subject.timestamp) {
c->second->m_subjectDeprecated = true;
}
else {
// Equal timestamps should never happen, but handle it anyway by comparing serials.
if (memcmp(
c->second->m_certificate.serialNo,
entry->m_certificate.serialNo,
ZT_CERTIFICATE_HASH_SIZE)
> 0) {
entry->m_subjectDeprecated = true;
entry = c->second;
}
else {
c->second->m_subjectDeprecated = true;
}
}
}
else {
entry = c->second;
}
}
}
}
// Populate mapping of identities to certificates whose subjects reference them, ignoring
// error or deprecated certificates.
m_bySubjectIdentity.clear();
for (Map<H384, SharedPtr<Entry> >::const_iterator c(m_bySerial.begin()); c != m_bySerial.end(); ++c) {
if ((c->second->m_error == ZT_CERTIFICATE_ERROR_NONE) && (! c->second->m_subjectDeprecated)) {
for (unsigned int i = 0; i < c->second->m_certificate.subject.identityCount; ++i) {
const Identity* const id =
reinterpret_cast<const Identity*>(c->second->m_certificate.subject.identities[i].identity);
if ((id) && (*id)) // sanity check
m_bySubjectIdentity[id->fingerprint()].push_back(c->second);
}
}
}
// If purge is set, erase and return error and deprecated certs (that are not on a trust path).
if (purge) {
for (Map<H384, SharedPtr<Entry> >::const_iterator c(m_bySerial.begin()); c != m_bySerial.end();) {
if ((c->second->error() != ZT_CERTIFICATE_ERROR_NONE)
|| ((c->second->m_subjectDeprecated) && (! c->second->m_onTrustPath))) {
purge->push_back(c->second);
m_bySerial.erase(c++);
}
else {
++c;
}
}
}
return true;
}
int TrustStore::load(const Vector< uint8_t > &data)
Vector<uint8_t> TrustStore::save() const
{
if (data.size() < 8)
return -1;
Vector<uint8_t> comp;
const unsigned int uncompSize = Utils::loadBigEndian< uint32_t >(data.data());
if ((uncompSize == 0) || (uncompSize > (unsigned int)(data.size() * 128)))
return -1;
int compSize;
{
RWMutex::RLock l(m_lock);
Vector< uint8_t > uncomp;
uncomp.resize(uncompSize);
Vector<uint8_t> b;
b.reserve(4096);
if (LZ4_decompress_safe(reinterpret_cast<const char *>(data.data() + 8), reinterpret_cast<char *>(uncomp.data()), (int)(data.size() - 8), (int)uncompSize) != (int)uncompSize)
return -1;
const uint8_t *b = uncomp.data();
if (Utils::fnv1a32(b, (unsigned int)uncompSize) != Utils::loadBigEndian< uint32_t >(data.data() + 4))
return -1;
const uint8_t *const eof = b + uncompSize;
// A version byte.
b.push_back(0);
if (*(b++) != 0) // unrecognized version
return -1;
// <size[2]> <certificate[...]> <trust[2]> tuples terminated by a 0 size.
for (Map<H384, SharedPtr<Entry> >::const_iterator c(m_bySerial.begin()); c != m_bySerial.end(); ++c) {
const Vector<uint8_t> cdata(c->second->certificate().encode());
const unsigned long size = (uint32_t)cdata.size();
if ((size > 0) && (size <= 0xffff)) {
b.push_back((uint8_t)(size >> 8U));
b.push_back((uint8_t)size);
b.insert(b.end(), cdata.begin(), cdata.end());
const uint32_t localTrust = (uint32_t)c->second->localTrust();
b.push_back((uint8_t)(localTrust >> 8U));
b.push_back((uint8_t)localTrust);
}
}
b.push_back(0);
b.push_back(0);
int readCount = 0;
comp.resize((unsigned long)LZ4_COMPRESSBOUND(b.size()) + 8);
compSize = LZ4_compress_fast(
reinterpret_cast<const char*>(b.data()),
reinterpret_cast<char*>(comp.data() + 8),
(int)b.size(),
(int)(comp.size() - 8));
if (unlikely(compSize <= 0)) // shouldn't be possible
return Vector<uint8_t>();
for (;;) {
if ((b + 2) > eof)
break;
const uint32_t certDataSize = Utils::loadBigEndian< uint16_t >(b);
b += 2;
const uint32_t uncompSize = (uint32_t)b.size();
Utils::storeBigEndian(comp.data(), uncompSize);
Utils::storeBigEndian(comp.data() + 4, Utils::fnv1a32(b.data(), (unsigned int)uncompSize));
compSize += 8;
}
if (certDataSize == 0)
break;
comp.resize((unsigned long)compSize);
comp.shrink_to_fit();
if ((b + certDataSize + 2) > eof) // certificate length + 2 bytes for trust flags
break;
Certificate c;
if (c.decode(b, (unsigned int)certDataSize)) {
b += certDataSize;
this->add(c, Utils::loadBigEndian< uint16_t >(b));
b += 2;
++readCount;
}
}
return readCount;
return comp;
}
} // namespace ZeroTier
int TrustStore::load(const Vector<uint8_t>& data)
{
if (data.size() < 8)
return -1;
const unsigned int uncompSize = Utils::loadBigEndian<uint32_t>(data.data());
if ((uncompSize == 0) || (uncompSize > (unsigned int)(data.size() * 128)))
return -1;
Vector<uint8_t> uncomp;
uncomp.resize(uncompSize);
if (LZ4_decompress_safe(
reinterpret_cast<const char*>(data.data() + 8),
reinterpret_cast<char*>(uncomp.data()),
(int)(data.size() - 8),
(int)uncompSize)
!= (int)uncompSize)
return -1;
const uint8_t* b = uncomp.data();
if (Utils::fnv1a32(b, (unsigned int)uncompSize) != Utils::loadBigEndian<uint32_t>(data.data() + 4))
return -1;
const uint8_t* const eof = b + uncompSize;
if (*(b++) != 0) // unrecognized version
return -1;
int readCount = 0;
for (;;) {
if ((b + 2) > eof)
break;
const uint32_t certDataSize = Utils::loadBigEndian<uint16_t>(b);
b += 2;
if (certDataSize == 0)
break;
if ((b + certDataSize + 2) > eof) // certificate length + 2 bytes for trust flags
break;
Certificate c;
if (c.decode(b, (unsigned int)certDataSize)) {
b += certDataSize;
this->add(c, Utils::loadBigEndian<uint16_t>(b));
b += 2;
++readCount;
}
}
return readCount;
}
} // namespace ZeroTier
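For orientation, the byte layout that save() writes and load() parses, plus the required call order, can be sketched as follows; the variable names are illustrative and not taken from this commit.
// Layout produced by save():
//   bytes 0..3   uncompressed size (big-endian uint32)
//   bytes 4..7   FNV-1a 32-bit checksum of the uncompressed data (big-endian uint32)
//   bytes 8..    LZ4-compressed payload
// Uncompressed payload: one version byte (0), then repeated
//   <size uint16 BE> <certificate bytes> <localTrust uint16 BE>
// tuples, terminated by a zero size.
// Minimal round-trip sketch (illustrative):
TrustStore ts;
const Vector<uint8_t> blob = ts.save();   // snapshot of all certificates and their local trust
// ... persist blob, later read it back ...
if (ts.load(blob) >= 0)
    ts.update(-1, nullptr);               // load() only queues adds; update() applies and revalidates them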

View file

@@ -14,16 +14,16 @@
#ifndef ZT_TRUSTSTORE_HPP
#define ZT_TRUSTSTORE_HPP
#include "Constants.hpp"
#include "Context.hpp"
#include "Containers.hpp"
#include "Certificate.hpp"
#include "SHA512.hpp"
#include "SharedPtr.hpp"
#include "Identity.hpp"
#include "Constants.hpp"
#include "Containers.hpp"
#include "Context.hpp"
#include "Fingerprint.hpp"
#include "Identity.hpp"
#include "Mutex.hpp"
#include "Peer.hpp"
#include "SHA512.hpp"
#include "SharedPtr.hpp"
namespace ZeroTier {
@@ -43,176 +43,181 @@ namespace ZeroTier {
* hence there are no methods for doing that. There's only one instance in a
* node anyway.
*/
class TrustStore
{
public:
/**
* An entry in the node certificate trust store
*/
class Entry
{
friend class SharedPtr< TrustStore::Entry >;
friend class SharedPtr< const TrustStore::Entry >;
friend class TrustStore;
class TrustStore {
public:
/**
* An entry in the node certificate trust store
*/
class Entry {
friend class SharedPtr<TrustStore::Entry>;
friend class SharedPtr<const TrustStore::Entry>;
friend class TrustStore;
public:
/**
* @return Reference to held certificate
*/
ZT_INLINE const Certificate &certificate() const noexcept
{ return m_certificate; }
public:
/**
* @return Reference to held certificate
*/
ZT_INLINE const Certificate& certificate() const noexcept
{
return m_certificate;
}
/**
* Get the local trust for this certificate
*
* This value may be changed dynamically by calls to update().
*
* @return Local trust bit mask
*/
ZT_INLINE unsigned int localTrust() const noexcept
{
RWMutex::RLock l(m_lock);
return m_localTrust;
}
/**
* Get the local trust for this certificate
*
* This value may be changed dynamically by calls to update().
*
* @return Local trust bit mask
*/
ZT_INLINE unsigned int localTrust() const noexcept
{
RWMutex::RLock l(m_lock);
return m_localTrust;
}
/**
* Change the local trust of this entry
*
* @param lt New local trust bit mask
*/
ZT_INLINE void setLocalTrust(const unsigned int lt) noexcept
{
RWMutex::Lock l(m_lock);
m_localTrust = lt;
}
/**
* Change the local trust of this entry
*
* @param lt New local trust bit mask
*/
ZT_INLINE void setLocalTrust(const unsigned int lt) noexcept
{
RWMutex::Lock l(m_lock);
m_localTrust = lt;
}
/**
* Get the error code for this certificate
*
* @return Error or ZT_CERTIFICATE_ERROR_NONE if none
*/
ZT_INLINE ZT_CertificateError error() const noexcept
{
RWMutex::RLock l(m_lock);
return m_error;
}
/**
* Get the error code for this certificate
*
* @return Error or ZT_CERTIFICATE_ERROR_NONE if none
*/
ZT_INLINE ZT_CertificateError error() const noexcept
{
RWMutex::RLock l(m_lock);
return m_error;
}
private:
Entry &operator=(const Entry &) { return *this; }
private:
Entry& operator=(const Entry&)
{
return *this;
}
ZT_INLINE Entry(RWMutex &l, const Certificate &cert, const unsigned int lt) noexcept:
__refCount(0),
m_lock(l),
m_certificate(cert),
m_localTrust(lt),
m_error(ZT_CERTIFICATE_ERROR_NONE),
m_subjectDeprecated(false),
m_onTrustPath(false)
{}
ZT_INLINE Entry(RWMutex& l, const Certificate& cert, const unsigned int lt) noexcept
: __refCount(0)
, m_lock(l)
, m_certificate(cert)
, m_localTrust(lt)
, m_error(ZT_CERTIFICATE_ERROR_NONE)
, m_subjectDeprecated(false)
, m_onTrustPath(false)
{
}
std::atomic< int > __refCount;
std::atomic<int> __refCount;
RWMutex &m_lock;
const Certificate m_certificate;
unsigned int m_localTrust;
ZT_CertificateError m_error;
bool m_subjectDeprecated;
bool m_onTrustPath;
};
RWMutex& m_lock;
const Certificate m_certificate;
unsigned int m_localTrust;
ZT_CertificateError m_error;
bool m_subjectDeprecated;
bool m_onTrustPath;
};
TrustStore();
~TrustStore();
TrustStore();
~TrustStore();
/**
* Get certificate by certificate serial number
*
* Note that the error code should be checked. The certificate may be
* rejected and may still be in the store unless the store has been
* purged.
*
* @param serial SHA384 hash of certificate
* @return Entry or empty/nil if not found
*/
SharedPtr< Entry > get(const H384 &serial) const;
/**
* Get certificate by certificate serial number
*
* Note that the error code should be checked. The certificate may be
* rejected and may still be in the store unless the store has been
* purged.
*
* @param serial SHA384 hash of certificate
* @return Entry or empty/nil if not found
*/
SharedPtr<Entry> get(const H384& serial) const;
/**
* Get roots specified by root set certificates in the local store.
*
* If more than one certificate locally trusted as a root set specifies
* the root, it will be returned once (as per Map behavior) but the latest
* locator will be returned from among those available.
*
* @return Roots and the latest locator specified for each (if any)
*/
Map< Identity, SharedPtr< const Locator > > roots();
/**
* Get roots specified by root set certificates in the local store.
*
* If more than one certificate locally trusted as a root set specifies
* the root, it will be returned once (as per Map behavior) but the latest
* locator will be returned from among those available.
*
* @return Roots and the latest locator specified for each (if any)
*/
Map<Identity, SharedPtr<const Locator> > roots();
/**
* @param includeRejectedCertificates If true, also include certificates with error codes
* @return All certificates in ascending sort order by serial
*/
Vector< SharedPtr< Entry > > all(bool includeRejectedCertificates) const;
/**
* @param includeRejectedCertificates If true, also include certificates with error codes
* @return All certificates in ascending sort order by serial
*/
Vector<SharedPtr<Entry> > all(bool includeRejectedCertificates) const;
/**
* Add a certificate
*
* A copy is made so it's fine if the original is freed after this call. If
* the certificate already exists its local trust flags are updated.
*
* IMPORTANT: The caller MUST also call update() after calling add() one or
* more times to actually add and revalidate certificates and their signature
* chains.
*
* @param cert Certificate to add
*/
void add(const Certificate &cert, unsigned int localTrust);
/**
* Add a certificate
*
* A copy is made so it's fine if the original is freed after this call. If
* the certificate already exists its local trust flags are updated.
*
* IMPORTANT: The caller MUST also call update() after calling add() one or
* more times to actually add and revalidate certificates and their signature
* chains.
*
* @param cert Certificate to add
*/
void add(const Certificate& cert, unsigned int localTrust);
/**
* Queue a certificate to be deleted
*
* Actual delete does not happen until the next update().
*
* @param serial Serial of certificate to delete
*/
void erase(const H384 &serial);
/**
* Queue a certificate to be deleted
*
* Actual delete does not happen until the next update().
*
* @param serial Serial of certificate to delete
*/
void erase(const H384& serial);
/**
* Validate all certificates and their certificate chains
*
* This also processes any certificates added with add() since the last call to update().
*
* @param clock Current time in milliseconds since epoch, or -1 to not check times on this pass
* @param purge If non-NULL, purge rejected certificates and return them in this vector (vector should be empty)
* @return True if there were changes
*/
bool update(int64_t clock, Vector< SharedPtr< Entry > > *purge);
/**
* Validate all certificates and their certificate chains
*
* This also processes any certificates added with add() since the last call to update().
*
* @param clock Current time in milliseconds since epoch, or -1 to not check times on this pass
* @param purge If non-NULL, purge rejected certificates and return them in this vector (vector should be empty)
* @return True if there were changes
*/
bool update(int64_t clock, Vector<SharedPtr<Entry> >* purge);
/**
* Create a compressed binary version of certificates and their local trust
*
* @return Binary compressed certificates and local trust info
*/
Vector< uint8_t > save() const;
/**
* Create a compressed binary version of certificates and their local trust
*
* @return Binary compressed certificates and local trust info
*/
Vector<uint8_t> save() const;
/**
* Decode a saved trust store
*
* Decoded certificates are added to the add queue, so update() must be
* called after this to actually apply them.
*
* @param data Data to decode
* @return Number of certificates or -1 if input is invalid
*/
int load(const Vector< uint8_t > &data);
/**
* Decode a saved trust store
*
* Decoded certificates are added to the add queue, so update() must be
* called after this to actually apply them.
*
* @param data Data to decode
* @return Number of certificates or -1 if input is invalid
*/
int load(const Vector<uint8_t>& data);
private:
Map< H384, SharedPtr< Entry > > m_bySerial; // all certificates
Map< Blob< ZT_CERTIFICATE_MAX_PUBLIC_KEY_SIZE >, SharedPtr< Entry > > m_bySubjectUniqueId; // non-rejected certificates only
Map< Fingerprint, Vector< SharedPtr< Entry > > > m_bySubjectIdentity; // non-rejected certificates only
ForwardList< SharedPtr< Entry > > m_addQueue;
ForwardList< H384 > m_deleteQueue;
RWMutex m_lock;
private:
Map<H384, SharedPtr<Entry> > m_bySerial; // all certificates
Map<Blob<ZT_CERTIFICATE_MAX_PUBLIC_KEY_SIZE>, SharedPtr<Entry> >
m_bySubjectUniqueId; // non-rejected certificates only
Map<Fingerprint, Vector<SharedPtr<Entry> > > m_bySubjectIdentity; // non-rejected certificates only
ForwardList<SharedPtr<Entry> > m_addQueue;
ForwardList<H384> m_deleteQueue;
RWMutex m_lock;
};
} // namespace ZeroTier
} // namespace ZeroTier
#endif
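Note: the trust store API above is add-then-commit. add() and erase() only queue work; nothing is added, removed, or revalidated until update() runs. A minimal usage sketch, assuming the surrounding ZeroTier headers and that the class above is the TrustStore with its nested Entry type; the flag value and helper name are placeholders:

// Hedged sketch: queue a certificate, then commit and collect anything rejected.
static void addTrusted(ZeroTier::TrustStore& ts, const ZeroTier::Certificate& cert, const int64_t now)
{
    const unsigned int localTrust = 1;   // placeholder; the real local-trust flag constants live in the public API header
    ts.add(cert, localTrust);            // queued only, not yet validated
    ZeroTier::Vector<ZeroTier::SharedPtr<ZeroTier::TrustStore::Entry> > rejected;
    if (ts.update(now, &rejected)) {
        // changes took effect; 'rejected' holds any certificates that failed validation
    }
}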
@ -12,16 +12,17 @@
/****/
#include "Utils.hpp"
#include "Mutex.hpp"
#include "AES.hpp"
#include "Mutex.hpp"
#include "SHA512.hpp"
#include <time.h>
#ifdef __UNIX_LIKE__
#include <unistd.h>
#include <fcntl.h>
#include <sys/uio.h>
#include <unistd.h>
#endif
#ifdef __WINDOWS__
@ -32,8 +33,8 @@
#ifdef ZT_ARCH_ARM_HAS_NEON
#ifdef __LINUX__
#include <sys/auxv.h>
#include <asm/hwcap.h>
#include <sys/auxv.h>
#endif
#if defined(__FreeBSD__)
@ -41,9 +42,9 @@
#include <sys/auxv.h>
static inline long getauxval(int caps)
{
long hwcaps = 0;
elf_aux_info(caps, &hwcaps, sizeof(hwcaps));
return hwcaps;
long hwcaps = 0;
elf_aux_info(caps, &hwcaps, sizeof(hwcaps));
return hwcaps;
}
#endif
@ -73,41 +74,40 @@ namespace Utils {
#ifdef ZT_ARCH_ARM_HAS_NEON
ARMCapabilities::ARMCapabilities() noexcept
{
#ifdef __APPLE__
this->aes = true;
this->crc32 = true;
this->pmull = true;
this->sha1 = true;
this->sha2 = true;
this->aes = true;
this->crc32 = true;
this->pmull = true;
this->sha1 = true;
this->sha2 = true;
#else
#ifdef __LINUX__
#ifdef HWCAP2_AES
if (sizeof(void *) == 4) {
const long hwcaps2 = getauxval(AT_HWCAP2);
this->aes = (hwcaps2 & HWCAP2_AES) != 0;
this->crc32 = (hwcaps2 & HWCAP2_CRC32) != 0;
this->pmull = (hwcaps2 & HWCAP2_PMULL) != 0;
this->sha1 = (hwcaps2 & HWCAP2_SHA1) != 0;
this->sha2 = (hwcaps2 & HWCAP2_SHA2) != 0;
} else {
if (sizeof(void*) == 4) {
const long hwcaps2 = getauxval(AT_HWCAP2);
this->aes = (hwcaps2 & HWCAP2_AES) != 0;
this->crc32 = (hwcaps2 & HWCAP2_CRC32) != 0;
this->pmull = (hwcaps2 & HWCAP2_PMULL) != 0;
this->sha1 = (hwcaps2 & HWCAP2_SHA1) != 0;
this->sha2 = (hwcaps2 & HWCAP2_SHA2) != 0;
}
else {
#endif
const long hwcaps = getauxval(AT_HWCAP);
this->aes = (hwcaps & HWCAP_AES) != 0;
this->crc32 = (hwcaps & HWCAP_CRC32) != 0;
this->pmull = (hwcaps & HWCAP_PMULL) != 0;
this->sha1 = (hwcaps & HWCAP_SHA1) != 0;
this->sha2 = (hwcaps & HWCAP_SHA2) != 0;
const long hwcaps = getauxval(AT_HWCAP);
this->aes = (hwcaps & HWCAP_AES) != 0;
this->crc32 = (hwcaps & HWCAP_CRC32) != 0;
this->pmull = (hwcaps & HWCAP_PMULL) != 0;
this->sha1 = (hwcaps & HWCAP_SHA1) != 0;
this->sha2 = (hwcaps & HWCAP_SHA2) != 0;
#ifdef HWCAP2_AES
}
}
#endif
#endif
#endif
}
const ARMCapabilities ARMCAP;
@ -116,47 +116,39 @@ const ARMCapabilities ARMCAP;
#ifdef ZT_ARCH_X64
CPUIDRegisters::CPUIDRegisters() noexcept
{
uint32_t eax, ebx, ecx, edx;
uint32_t eax, ebx, ecx, edx;
#ifdef __WINDOWS__
int regs[4];
__cpuid(regs,1);
eax = (uint32_t)regs[0];
ebx = (uint32_t)regs[1];
ecx = (uint32_t)regs[2];
edx = (uint32_t)regs[3];
int regs[4];
__cpuid(regs, 1);
eax = (uint32_t)regs[0];
ebx = (uint32_t)regs[1];
ecx = (uint32_t)regs[2];
edx = (uint32_t)regs[3];
#else
__asm__ __volatile__ (
"cpuid"
: "=a"(eax), "=b"(ebx), "=c"(ecx), "=d"(edx)
: "a"(1), "c"(0)
);
__asm__ __volatile__("cpuid" : "=a"(eax), "=b"(ebx), "=c"(ecx), "=d"(edx) : "a"(1), "c"(0));
#endif
rdrand = ((ecx & (1U << 30U)) != 0);
aes = (((ecx & (1U << 25U)) != 0) && ((ecx & (1U << 19U)) != 0) && ((ecx & (1U << 1U)) != 0));
avx = ((ecx & (1U << 25U)) != 0);
rdrand = ((ecx & (1U << 30U)) != 0);
aes = (((ecx & (1U << 25U)) != 0) && ((ecx & (1U << 19U)) != 0) && ((ecx & (1U << 1U)) != 0));
avx = ((ecx & (1U << 25U)) != 0);
#ifdef __WINDOWS__
__cpuid(regs,7);
eax = (uint32_t)regs[0];
ebx = (uint32_t)regs[1];
ecx = (uint32_t)regs[2];
edx = (uint32_t)regs[3];
__cpuid(regs, 7);
eax = (uint32_t)regs[0];
ebx = (uint32_t)regs[1];
ecx = (uint32_t)regs[2];
edx = (uint32_t)regs[3];
#else
__asm__ __volatile__ (
"cpuid"
: "=a"(eax), "=b"(ebx), "=c"(ecx), "=d"(edx)
: "a"(7), "c"(0)
);
__asm__ __volatile__("cpuid" : "=a"(eax), "=b"(ebx), "=c"(ecx), "=d"(edx) : "a"(7), "c"(0));
#endif
vaes = aes && avx && ((ecx & (1U << 9U)) != 0);
vpclmulqdq = aes && avx && ((ecx & (1U << 10U)) != 0);
avx2 = avx && ((ebx & (1U << 5U)) != 0);
avx512f = avx && ((ebx & (1U << 16U)) != 0);
sha = ((ebx & (1U << 29U)) != 0);
fsrm = ((edx & (1U << 4U)) != 0);
vaes = aes && avx && ((ecx & (1U << 9U)) != 0);
vpclmulqdq = aes && avx && ((ecx & (1U << 10U)) != 0);
avx2 = avx && ((ebx & (1U << 5U)) != 0);
avx512f = avx && ((ebx & (1U << 16U)) != 0);
sha = ((ebx & (1U << 29U)) != 0);
fsrm = ((edx & (1U << 4U)) != 0);
}
const CPUIDRegisters CPUID;
@ -164,379 +156,391 @@ const CPUIDRegisters CPUID;
const std::bad_alloc BadAllocException;
const std::out_of_range OutOfRangeException("access out of range");
const uint64_t ZERO256[4] = {0, 0, 0, 0};
const char HEXCHARS[16] = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'};
const uint64_t ZERO256[4] = { 0, 0, 0, 0 };
const char HEXCHARS[16] = { '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f' };
const uint64_t s_mapNonce = getSecureRandomU64();
bool secureEq(const void *const a, const void *const b, const unsigned int len) noexcept
bool secureEq(const void* const a, const void* const b, const unsigned int len) noexcept
{
uint8_t diff = 0;
for (unsigned int i = 0; i < len; ++i)
diff |= ((reinterpret_cast<const uint8_t *>(a))[i] ^ (reinterpret_cast<const uint8_t *>(b))[i]);
return (diff == 0);
uint8_t diff = 0;
for (unsigned int i = 0; i < len; ++i)
diff |= ((reinterpret_cast<const uint8_t*>(a))[i] ^ (reinterpret_cast<const uint8_t*>(b))[i]);
return (diff == 0);
}
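Note: secureEq() above is a constant-time comparison. It ORs together the XOR of every byte pair and only checks the accumulated difference at the end, so its running time does not depend on where the first mismatch occurs (unlike an early-exit memcmp). A small usage sketch, assuming the Utils header above; the buffers are placeholders:

uint8_t a[32], b[32];
// ... fill a and b with secret key material ...
if (ZeroTier::Utils::secureEq(a, b, sizeof(a))) {
    // equal, and the comparison leaked no timing information about mismatch position
}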
void burn(volatile void *const ptr, const unsigned int len)
void burn(volatile void* const ptr, const unsigned int len)
{
static volatile uintptr_t foo = 0;
Utils::zero((void *)ptr, len);
// Force compiler not to optimize this function out by taking a volatile
// parameter and also updating a volatile variable.
foo += (uintptr_t)len ^ (uintptr_t)reinterpret_cast<volatile uint8_t *>(ptr)[0];
static volatile uintptr_t foo = 0;
Utils::zero((void*)ptr, len);
// Force compiler not to optimize this function out by taking a volatile
// parameter and also updating a volatile variable.
foo += (uintptr_t)len ^ (uintptr_t) reinterpret_cast<volatile uint8_t*>(ptr)[0];
}
static unsigned long s_decimalRecursive(unsigned long n, char *s)
static unsigned long s_decimalRecursive(unsigned long n, char* s)
{
if (n == 0)
return 0;
unsigned long pos = s_decimalRecursive(n / 10, s);
if (pos >= 22) // sanity check, should be impossible
pos = 22;
s[pos] = (char)('0' + (n % 10));
return pos + 1;
if (n == 0)
return 0;
unsigned long pos = s_decimalRecursive(n / 10, s);
if (pos >= 22) // sanity check, should be impossible
pos = 22;
s[pos] = (char)('0' + (n % 10));
return pos + 1;
}
char *decimal(unsigned long n, char s[24]) noexcept
char* decimal(unsigned long n, char s[24]) noexcept
{
if (n == 0) {
s[0] = '0';
s[1] = (char)0;
return s;
}
s[s_decimalRecursive(n, s)] = (char)0;
return s;
if (n == 0) {
s[0] = '0';
s[1] = (char)0;
return s;
}
s[s_decimalRecursive(n, s)] = (char)0;
return s;
}
char *hex(uint64_t i, char buf[17]) noexcept
char* hex(uint64_t i, char buf[17]) noexcept
{
if (i != 0) {
char *p = nullptr;
for (int b = 60; b >= 0; b -= 4) {
const unsigned int nyb = (unsigned int)(i >> (unsigned int)b) & 0xfU;
if (p) {
*(p++) = HEXCHARS[nyb];
} else if (nyb != 0) {
p = buf;
*(p++) = HEXCHARS[nyb];
}
}
*p = 0;
return buf;
} else {
buf[0] = '0';
buf[1] = 0;
return buf;
}
if (i != 0) {
char* p = nullptr;
for (int b = 60; b >= 0; b -= 4) {
const unsigned int nyb = (unsigned int)(i >> (unsigned int)b) & 0xfU;
if (p) {
*(p++) = HEXCHARS[nyb];
}
else if (nyb != 0) {
p = buf;
*(p++) = HEXCHARS[nyb];
}
}
*p = 0;
return buf;
}
else {
buf[0] = '0';
buf[1] = 0;
return buf;
}
}
uint64_t unhex(const char *s) noexcept
uint64_t unhex(const char* s) noexcept
{
uint64_t n = 0;
if (s) {
int k = 0;
while (k < 16) {
char hc = *(s++);
if (!hc) break;
uint64_t n = 0;
if (s) {
int k = 0;
while (k < 16) {
char hc = *(s++);
if (! hc)
break;
uint8_t c = 0;
if ((hc >= 48) && (hc <= 57))
c = (uint8_t)hc - 48;
else if ((hc >= 97) && (hc <= 102))
c = (uint8_t)hc - 87;
else if ((hc >= 65) && (hc <= 70))
c = (uint8_t)hc - 55;
uint8_t c = 0;
if ((hc >= 48) && (hc <= 57))
c = (uint8_t)hc - 48;
else if ((hc >= 97) && (hc <= 102))
c = (uint8_t)hc - 87;
else if ((hc >= 65) && (hc <= 70))
c = (uint8_t)hc - 55;
n <<= 4U;
n |= (uint64_t)c;
++k;
}
}
return n;
n <<= 4U;
n |= (uint64_t)c;
++k;
}
}
return n;
}
char *hex(const void *d, unsigned int l, char *s) noexcept
char* hex(const void* d, unsigned int l, char* s) noexcept
{
char *const save = s;
for (unsigned int i = 0; i < l; ++i) {
const unsigned int b = reinterpret_cast<const uint8_t *>(d)[i];
*(s++) = HEXCHARS[b >> 4U];
*(s++) = HEXCHARS[b & 0xfU];
}
*s = (char)0;
return save;
char* const save = s;
for (unsigned int i = 0; i < l; ++i) {
const unsigned int b = reinterpret_cast<const uint8_t*>(d)[i];
*(s++) = HEXCHARS[b >> 4U];
*(s++) = HEXCHARS[b & 0xfU];
}
*s = (char)0;
return save;
}
unsigned int unhex(const char *h, unsigned int hlen, void *buf, unsigned int buflen) noexcept
unsigned int unhex(const char* h, unsigned int hlen, void* buf, unsigned int buflen) noexcept
{
unsigned int l = 0;
const char *hend = h + hlen;
while (l < buflen) {
if (h == hend) break;
uint8_t hc = *(reinterpret_cast<const uint8_t *>(h++));
if (!hc) break;
unsigned int l = 0;
const char* hend = h + hlen;
while (l < buflen) {
if (h == hend)
break;
uint8_t hc = *(reinterpret_cast<const uint8_t*>(h++));
if (! hc)
break;
uint8_t c = 0;
if ((hc >= 48) && (hc <= 57))
c = hc - 48;
else if ((hc >= 97) && (hc <= 102))
c = hc - 87;
else if ((hc >= 65) && (hc <= 70))
c = hc - 55;
uint8_t c = 0;
if ((hc >= 48) && (hc <= 57))
c = hc - 48;
else if ((hc >= 97) && (hc <= 102))
c = hc - 87;
else if ((hc >= 65) && (hc <= 70))
c = hc - 55;
if (h == hend) break;
hc = *(reinterpret_cast<const uint8_t *>(h++));
if (!hc) break;
if (h == hend)
break;
hc = *(reinterpret_cast<const uint8_t*>(h++));
if (! hc)
break;
c <<= 4U;
if ((hc >= 48) && (hc <= 57))
c |= hc - 48;
else if ((hc >= 97) && (hc <= 102))
c |= hc - 87;
else if ((hc >= 65) && (hc <= 70))
c |= hc - 55;
c <<= 4U;
if ((hc >= 48) && (hc <= 57))
c |= hc - 48;
else if ((hc >= 97) && (hc <= 102))
c |= hc - 87;
else if ((hc >= 65) && (hc <= 70))
c |= hc - 55;
reinterpret_cast<uint8_t *>(buf)[l++] = c;
}
return l;
reinterpret_cast<uint8_t*>(buf)[l++] = c;
}
return l;
}
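Note: the hex()/unhex() pair above round-trips raw bytes through lowercase hex. The output buffer for hex() must be at least (2 * length) + 1 bytes to hold the trailing NUL. A short sketch, assuming the Utils declarations above:

uint8_t key[16] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff };
char str[(sizeof(key) * 2) + 1];
ZeroTier::Utils::hex(key, (unsigned int)sizeof(key), str);   // "00112233445566778899aabbccddeeff"
uint8_t back[16];
const unsigned int n = ZeroTier::Utils::unhex(str, (unsigned int)(sizeof(str) - 1), back, (unsigned int)sizeof(back));
// n == 16 and back now equals key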
#define ZT_GETSECURERANDOM_STATE_SIZE 64
#define ZT_GETSECURERANDOM_STATE_SIZE 64
#define ZT_GETSECURERANDOM_ITERATIONS_PER_GENERATOR 1048576
void getSecureRandom(void *const buf, unsigned int bytes) noexcept
void getSecureRandom(void* const buf, unsigned int bytes) noexcept
{
static Mutex globalLock;
static bool initialized = false;
static uint64_t randomState[ZT_GETSECURERANDOM_STATE_SIZE];
static unsigned int randomByteCounter = ZT_GETSECURERANDOM_ITERATIONS_PER_GENERATOR; // init on first run
static AES randomGen;
static Mutex globalLock;
static bool initialized = false;
static uint64_t randomState[ZT_GETSECURERANDOM_STATE_SIZE];
static unsigned int randomByteCounter = ZT_GETSECURERANDOM_ITERATIONS_PER_GENERATOR; // init on first run
static AES randomGen;
Mutex::Lock gl(globalLock);
Mutex::Lock gl(globalLock);
// Re-initialize the PRNG every ZT_GETSECURERANDOM_ITERATIONS_PER_GENERATOR bytes. Note that
// if 'bytes' is larger than ZT_GETSECURERANDOM_ITERATIONS_PER_GENERATOR we can generate more
// than this, but this isn't an issue. ZT_GETSECURERANDOM_ITERATIONS_PER_GENERATOR could be
// much larger if we wanted and this would still be safe.
randomByteCounter += bytes;
if (unlikely(randomByteCounter >= ZT_GETSECURERANDOM_ITERATIONS_PER_GENERATOR)) {
randomByteCounter = 0;
// Re-initialize the PRNG every ZT_GETSECURERANDOM_ITERATIONS_PER_GENERATOR bytes. Note that
// if 'bytes' is larger than ZT_GETSECURERANDOM_ITERATIONS_PER_GENERATOR we can generate more
// than this, but this isn't an issue. ZT_GETSECURERANDOM_ITERATIONS_PER_GENERATOR could be
// much larger if we wanted and this would still be safe.
randomByteCounter += bytes;
if (unlikely(randomByteCounter >= ZT_GETSECURERANDOM_ITERATIONS_PER_GENERATOR)) {
randomByteCounter = 0;
if (unlikely(!initialized)) {
initialized = true;
Utils::zero< sizeof(randomState) >(randomState);
if (unlikely(! initialized)) {
initialized = true;
Utils::zero<sizeof(randomState)>(randomState);
#ifdef __WINDOWS__
HCRYPTPROV cryptProvider = NULL;
if (!CryptAcquireContextA(&cryptProvider,NULL,NULL,PROV_RSA_FULL,CRYPT_VERIFYCONTEXT|CRYPT_SILENT)) {
fprintf(stderr,"FATAL: Utils::getSecureRandom() unable to obtain WinCrypt context!\r\n");
exit(1);
}
if (!CryptGenRandom(cryptProvider,(DWORD)sizeof(randomState),(BYTE *)randomState)) {
fprintf(stderr,"FATAL: Utils::getSecureRandom() CryptGenRandom failed!\r\n");
exit(1);
}
CryptReleaseContext(cryptProvider,0);
HCRYPTPROV cryptProvider = NULL;
if (! CryptAcquireContextA(&cryptProvider, NULL, NULL, PROV_RSA_FULL, CRYPT_VERIFYCONTEXT | CRYPT_SILENT)) {
fprintf(stderr, "FATAL: Utils::getSecureRandom() unable to obtain WinCrypt context!\r\n");
exit(1);
}
if (! CryptGenRandom(cryptProvider, (DWORD)sizeof(randomState), (BYTE*)randomState)) {
fprintf(stderr, "FATAL: Utils::getSecureRandom() CryptGenRandom failed!\r\n");
exit(1);
}
CryptReleaseContext(cryptProvider, 0);
#else
int devURandomFd = ::open("/dev/urandom", O_RDONLY);
if (devURandomFd < 0) {
fprintf(stderr, "FATAL: Utils::getSecureRandom() unable to open /dev/urandom\n");
exit(1);
}
if ((long)::read(devURandomFd, randomState, sizeof(randomState)) != (long)sizeof(randomState)) {
::close(devURandomFd);
fprintf(stderr, "FATAL: Utils::getSecureRandom() unable to read from /dev/urandom\n");
exit(1);
}
close(devURandomFd);
int devURandomFd = ::open("/dev/urandom", O_RDONLY);
if (devURandomFd < 0) {
fprintf(stderr, "FATAL: Utils::getSecureRandom() unable to open /dev/urandom\n");
exit(1);
}
if ((long)::read(devURandomFd, randomState, sizeof(randomState)) != (long)sizeof(randomState)) {
::close(devURandomFd);
fprintf(stderr, "FATAL: Utils::getSecureRandom() unable to read from /dev/urandom\n");
exit(1);
}
close(devURandomFd);
#endif
#ifdef __UNIX_LIKE__
randomState[0] += (uint64_t)getpid();
randomState[1] += (uint64_t)getppid();
randomState[0] += (uint64_t)getpid();
randomState[1] += (uint64_t)getppid();
#endif
#ifdef ZT_ARCH_X64
if (CPUID.rdrand) {
uint64_t tmp = 0;
for (unsigned long i = 0; i < ZT_GETSECURERANDOM_STATE_SIZE; ++i) {
_rdrand64_step((unsigned long long *)&tmp);
randomState[i] ^= tmp;
}
}
if (CPUID.rdrand) {
uint64_t tmp = 0;
for (unsigned long i = 0; i < ZT_GETSECURERANDOM_STATE_SIZE; ++i) {
_rdrand64_step((unsigned long long*)&tmp);
randomState[i] ^= tmp;
}
}
#endif
}
}
// Initialize or re-initialize generator by hashing the full state,
// replacing the first 64 bytes with this hash, and then re-initializing
// AES with the first 32 bytes.
randomState[0] += (uint64_t)time(nullptr);
SHA512(randomState, randomState, sizeof(randomState));
randomGen.init(randomState);
}
// Initialize or re-initialize generator by hashing the full state,
// replacing the first 64 bytes with this hash, and then re-initializing
// AES with the first 32 bytes.
randomState[0] += (uint64_t)time(nullptr);
SHA512(randomState, randomState, sizeof(randomState));
randomGen.init(randomState);
}
// Generate random bytes using AES and bytes 32-48 of randomState as an in-place
// AES-CTR counter. Counter can be machine endian; we don't care about portability
// for a random generator.
uint64_t *const ctr = randomState + 4;
uint8_t *out = reinterpret_cast<uint8_t *>(buf);
// Generate random bytes using AES and bytes 32-48 of randomState as an in-place
// AES-CTR counter. Counter can be machine endian; we don't care about portability
// for a random generator.
uint64_t* const ctr = randomState + 4;
uint8_t* out = reinterpret_cast<uint8_t*>(buf);
while (bytes >= 16) {
++*ctr;
randomGen.encrypt(ctr, out);
out += 16;
bytes -= 16;
}
while (bytes >= 16) {
++*ctr;
randomGen.encrypt(ctr, out);
out += 16;
bytes -= 16;
}
if (bytes > 0) {
uint8_t tmp[16];
++*ctr;
randomGen.encrypt(ctr, tmp);
for (unsigned int i = 0; i < bytes; ++i)
out[i] = tmp[i];
Utils::burn(tmp, sizeof(tmp)); // don't leave used cryptographic randomness lying around!
}
if (bytes > 0) {
uint8_t tmp[16];
++*ctr;
randomGen.encrypt(ctr, tmp);
for (unsigned int i = 0; i < bytes; ++i)
out[i] = tmp[i];
Utils::burn(tmp, sizeof(tmp)); // don't leave used cryptographic randomness lying around!
}
}
uint64_t getSecureRandomU64() noexcept
{
uint64_t tmp;
getSecureRandom(&tmp, sizeof(tmp));
return tmp;
uint64_t tmp;
getSecureRandom(&tmp, sizeof(tmp));
return tmp;
}
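Note: getSecureRandom() above is the cryptographic generator (seeded from the OS, mixed with RDRAND where available, expanded with AES-CTR, and periodically re-keyed); getSecureRandomU64() is just a convenience wrapper around it. Usage sketch, assuming the Utils header:

uint8_t key[32];
ZeroTier::Utils::getSecureRandom(key, (unsigned int)sizeof(key));   // e.g. fill a symmetric key
const uint64_t nonce = ZeroTier::Utils::getSecureRandomU64();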
int b32e(const uint8_t *data, int length, char *result, int bufSize) noexcept
int b32e(const uint8_t* data, int length, char* result, int bufSize) noexcept
{
if (length < 0 || length > (1 << 28U)) {
result[0] = (char)0;
return -1;
}
int count = 0;
if (length > 0) {
int buffer = data[0];
int next = 1;
int bitsLeft = 8;
while (count < bufSize && (bitsLeft > 0 || next < length)) {
if (bitsLeft < 5) {
if (next < length) {
buffer <<= 8U;
buffer |= data[next++] & 0xffU;
bitsLeft += 8;
} else {
int pad = 5 - bitsLeft;
buffer <<= pad;
bitsLeft += pad;
}
}
int index = 0x1f & (buffer >> (unsigned int)(bitsLeft - 5));
bitsLeft -= 5;
result[count++] = "abcdefghijklmnopqrstuvwxyz234567"[index];
}
}
if (count < bufSize) {
result[count] = (char)0;
return count;
}
result[0] = (char)0;
return -1;
if (length < 0 || length > (1 << 28U)) {
result[0] = (char)0;
return -1;
}
int count = 0;
if (length > 0) {
int buffer = data[0];
int next = 1;
int bitsLeft = 8;
while (count < bufSize && (bitsLeft > 0 || next < length)) {
if (bitsLeft < 5) {
if (next < length) {
buffer <<= 8U;
buffer |= data[next++] & 0xffU;
bitsLeft += 8;
}
else {
int pad = 5 - bitsLeft;
buffer <<= pad;
bitsLeft += pad;
}
}
int index = 0x1f & (buffer >> (unsigned int)(bitsLeft - 5));
bitsLeft -= 5;
result[count++] = "abcdefghijklmnopqrstuvwxyz234567"[index];
}
}
if (count < bufSize) {
result[count] = (char)0;
return count;
}
result[0] = (char)0;
return -1;
}
int b32d(const char *encoded, uint8_t *result, int bufSize) noexcept
int b32d(const char* encoded, uint8_t* result, int bufSize) noexcept
{
int buffer = 0;
int bitsLeft = 0;
int count = 0;
for (const uint8_t *ptr = (const uint8_t *)encoded; count < bufSize && *ptr; ++ptr) {
uint8_t ch = *ptr;
if (ch == ' ' || ch == '\t' || ch == '\r' || ch == '\n' || ch == '-' || ch == '.') {
continue;
}
buffer <<= 5;
int buffer = 0;
int bitsLeft = 0;
int count = 0;
for (const uint8_t* ptr = (const uint8_t*)encoded; count < bufSize && *ptr; ++ptr) {
uint8_t ch = *ptr;
if (ch == ' ' || ch == '\t' || ch == '\r' || ch == '\n' || ch == '-' || ch == '.') {
continue;
}
buffer <<= 5;
if (ch == '0') {
ch = 'O';
} else if (ch == '1') {
ch = 'L';
} else if (ch == '8') {
ch = 'B';
}
if (ch == '0') {
ch = 'O';
}
else if (ch == '1') {
ch = 'L';
}
else if (ch == '8') {
ch = 'B';
}
if ((ch >= 'A' && ch <= 'Z') || (ch >= 'a' && ch <= 'z')) {
ch = (ch & 0x1f) - 1;
} else if (ch >= '2' && ch <= '7') {
ch -= '2' - 26;
} else {
return -1;
}
if ((ch >= 'A' && ch <= 'Z') || (ch >= 'a' && ch <= 'z')) {
ch = (ch & 0x1f) - 1;
}
else if (ch >= '2' && ch <= '7') {
ch -= '2' - 26;
}
else {
return -1;
}
buffer |= ch;
bitsLeft += 5;
if (bitsLeft >= 8) {
result[count++] = buffer >> (bitsLeft - 8);
bitsLeft -= 8;
}
}
if (count < bufSize)
result[count] = (uint8_t)0;
return count;
buffer |= ch;
bitsLeft += 5;
if (bitsLeft >= 8) {
result[count++] = buffer >> (bitsLeft - 8);
bitsLeft -= 8;
}
}
if (count < bufSize)
result[count] = (uint8_t)0;
return count;
}
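Note: b32e()/b32d() above use the lowercase variant of the standard base32 alphabet with no padding, and the decoder tolerates separators plus the common 0/O, 1/L and 8/B confusions. A round-trip sketch, assuming the Utils declarations:

uint8_t raw[5] = { 0xde, 0xad, 0xbe, 0xef, 0x01 };
char enc[16];
const int elen = ZeroTier::Utils::b32e(raw, (int)sizeof(raw), enc, (int)sizeof(enc));   // 5 bytes -> 8 characters
uint8_t dec[5];
const int dlen = ZeroTier::Utils::b32d(enc, dec, (int)sizeof(dec));                     // dlen == 5, dec equals raw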
uint64_t random() noexcept
{
static volatile uint64_t s_s0 = getSecureRandomU64();
static volatile uint64_t s_s1 = getSecureRandomU64();
static volatile uint64_t s_s2 = getSecureRandomU64();
static volatile uint64_t s_s3 = getSecureRandomU64();
static volatile uint64_t s_s0 = getSecureRandomU64();
static volatile uint64_t s_s1 = getSecureRandomU64();
static volatile uint64_t s_s2 = getSecureRandomU64();
static volatile uint64_t s_s3 = getSecureRandomU64();
// https://en.wikipedia.org/wiki/Xorshift#xoshiro256**
uint64_t s0 = s_s0;
uint64_t s1 = s_s1;
uint64_t s2 = s_s2;
uint64_t s3 = s_s3;
const uint64_t s1x5 = s1 * 5ULL;
const uint64_t result = ((s1x5 << 7U) | (s1x5 >> 57U)) * 9ULL;
const uint64_t t = s1 << 17U;
s2 ^= s0;
s3 ^= s1;
s1 ^= s2;
s0 ^= s3;
s2 ^= t;
s3 = ((s3 << 45U) | (s3 >> 19U));
s_s0 = s0;
s_s1 = s1;
s_s2 = s2;
s_s3 = s3;
// https://en.wikipedia.org/wiki/Xorshift#xoshiro256**
uint64_t s0 = s_s0;
uint64_t s1 = s_s1;
uint64_t s2 = s_s2;
uint64_t s3 = s_s3;
const uint64_t s1x5 = s1 * 5ULL;
const uint64_t result = ((s1x5 << 7U) | (s1x5 >> 57U)) * 9ULL;
const uint64_t t = s1 << 17U;
s2 ^= s0;
s3 ^= s1;
s1 ^= s2;
s0 ^= s3;
s2 ^= t;
s3 = ((s3 << 45U) | (s3 >> 19U));
s_s0 = s0;
s_s1 = s1;
s_s2 = s2;
s_s3 = s3;
return result;
return result;
}
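Note: random() above is the non-cryptographic fast path, a xoshiro256** generator seeded once from getSecureRandomU64(). It is intended for jitter, load spreading and similar uses, never for key material. For example (hedged; the delay range is arbitrary):

// Pick a retry jitter between 0 and 499 ms; modulo bias is irrelevant at this scale.
const int64_t jitterMs = (int64_t)(ZeroTier::Utils::random() % 500ULL);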
bool scopy(char *const dest, const unsigned int len, const char *const src) noexcept
bool scopy(char* const dest, const unsigned int len, const char* const src) noexcept
{
if (unlikely((len == 0)||(dest == nullptr))) {
return false;
}
if (unlikely(src == nullptr)) {
*dest = (char)0;
return true;
}
unsigned int i = 0;
for (;;) {
if (i >= len) {
dest[len - 1] = 0;
return false;
}
if ((dest[i] = src[i]) == 0) {
return true;
}
++i;
}
if (unlikely((len == 0) || (dest == nullptr))) {
return false;
}
if (unlikely(src == nullptr)) {
*dest = (char)0;
return true;
}
unsigned int i = 0;
for (;;) {
if (i >= len) {
dest[len - 1] = 0;
return false;
}
if ((dest[i] = src[i]) == 0) {
return true;
}
++i;
}
}
uint32_t fnv1a32(const void *const restrict data, const unsigned int len) noexcept
uint32_t fnv1a32(const void* const restrict data, const unsigned int len) noexcept
{
uint32_t h = 0x811c9dc5;
const uint32_t p = 0x01000193;
for (unsigned int i = 0; i < len; ++i)
h = (h ^ (uint32_t)reinterpret_cast<const uint8_t *>(data)[i]) * p;
return h;
uint32_t h = 0x811c9dc5;
const uint32_t p = 0x01000193;
for (unsigned int i = 0; i < len; ++i)
h = (h ^ (uint32_t) reinterpret_cast<const uint8_t*>(data)[i]) * p;
return h;
}
} // namespace Utils
} // namespace Utils
} // namespace ZeroTier
} // namespace ZeroTier
@ -16,13 +16,12 @@
#include "Constants.hpp"
#include <stddef.h>
#include <stdarg.h>
#include <utility>
#include <algorithm>
#include <memory>
#include <stdarg.h>
#include <stddef.h>
#include <stdexcept>
#include <utility>
namespace ZeroTier {
@ -37,15 +36,11 @@ namespace Utils {
// Macros to convert endian-ness at compile time for constants.
#if __BYTE_ORDER == __LITTLE_ENDIAN
#define ZT_CONST_TO_BE_UINT16(x) ((uint16_t)((uint16_t)((uint16_t)(x) << 8U) | (uint16_t)((uint16_t)(x) >> 8U)))
#define ZT_CONST_TO_BE_UINT64(x) ( \
(((uint64_t)(x) & 0x00000000000000ffULL) << 56U) | \
(((uint64_t)(x) & 0x000000000000ff00ULL) << 40U) | \
(((uint64_t)(x) & 0x0000000000ff0000ULL) << 24U) | \
(((uint64_t)(x) & 0x00000000ff000000ULL) << 8U) | \
(((uint64_t)(x) & 0x000000ff00000000ULL) >> 8U) | \
(((uint64_t)(x) & 0x0000ff0000000000ULL) >> 24U) | \
(((uint64_t)(x) & 0x00ff000000000000ULL) >> 40U) | \
(((uint64_t)(x) & 0xff00000000000000ULL) >> 56U))
#define ZT_CONST_TO_BE_UINT64(x) \
((((uint64_t)(x)&0x00000000000000ffULL) << 56U) | (((uint64_t)(x)&0x000000000000ff00ULL) << 40U) \
| (((uint64_t)(x)&0x0000000000ff0000ULL) << 24U) | (((uint64_t)(x)&0x00000000ff000000ULL) << 8U) \
| (((uint64_t)(x)&0x000000ff00000000ULL) >> 8U) | (((uint64_t)(x)&0x0000ff0000000000ULL) >> 24U) \
| (((uint64_t)(x)&0x00ff000000000000ULL) >> 40U) | (((uint64_t)(x)&0xff00000000000000ULL) >> 56U))
#else
#define ZT_CONST_TO_BE_UINT16(x) ((uint16_t)(x))
#define ZT_CONST_TO_BE_UINT64(x) ((uint64_t)(x))
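Note: the ZT_CONST_TO_BE_* macros above byte-swap compile-time constants only when the host is little-endian, so protocol constants can be written in network (big-endian) order and stored directly. A worked example for a little-endian build (on a big-endian build the macros are the identity, so these asserts would not hold there):

static_assert(ZT_CONST_TO_BE_UINT16(0x1234) == (uint16_t)0x3412, "constant byte-swapped at compile time");
static_assert(ZT_CONST_TO_BE_UINT64(0x0102030405060708ULL) == 0x0807060504030201ULL, "constant byte-swapped at compile time");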
@ -57,19 +52,17 @@ namespace Utils {
#define ZT_ROL32(x, r) (((x) << (r)) | ((x) >> (32 - (r))))
#ifdef ZT_ARCH_ARM_HAS_NEON
struct ARMCapabilities
{
ARMCapabilities() noexcept;
bool aes, crc32, pmull, sha1, sha2;
struct ARMCapabilities {
ARMCapabilities() noexcept;
bool aes, crc32, pmull, sha1, sha2;
};
extern const ARMCapabilities ARMCAP;
#endif
#ifdef ZT_ARCH_X64
struct CPUIDRegisters
{
CPUIDRegisters() noexcept;
bool rdrand, aes, avx, vaes, vpclmulqdq, avx2, avx512f, sha, fsrm;
struct CPUIDRegisters {
CPUIDRegisters() noexcept;
bool rdrand, aes, avx, vaes, vpclmulqdq, avx2, avx512f, sha, fsrm;
};
extern const CPUIDRegisters CPUID;
#endif
@ -100,7 +93,7 @@ extern const uint64_t s_mapNonce;
* @param len Length of strings
* @return True if strings are equal
*/
bool secureEq(const void *a, const void *b, unsigned int len) noexcept;
bool secureEq(const void* a, const void* b, unsigned int len) noexcept;
/**
* Be absolutely sure to zero memory
@ -111,14 +104,14 @@ bool secureEq(const void *a, const void *b, unsigned int len) noexcept;
* @param ptr Memory to zero
* @param len Length of memory in bytes
*/
void burn(volatile void *ptr, unsigned int len);
void burn(volatile void* ptr, unsigned int len);
/**
* @param n Number to convert
* @param s Buffer, at least 24 bytes in size
* @return String containing 'n' in base 10 form
*/
char *decimal(unsigned long n, char s[24]) noexcept;
char* decimal(unsigned long n, char s[24]) noexcept;
/**
* Convert an unsigned integer into hex
@ -127,7 +120,7 @@ char *decimal(unsigned long n, char s[24]) noexcept;
* @param s Buffer to receive hex, must be at least (2*sizeof(i))+1 in size or overflow will occur.
* @return Pointer to s containing hex string with trailing zero byte
*/
char *hex(uint64_t i, char buf[17]) noexcept;
char* hex(uint64_t i, char buf[17]) noexcept;
/**
* Decode an unsigned integer in hex format
@ -135,7 +128,7 @@ char *hex(uint64_t i, char buf[17]) noexcept;
* @param s String to decode, non-hex chars are ignored
* @return Unsigned integer
*/
uint64_t unhex(const char *s) noexcept;
uint64_t unhex(const char* s) noexcept;
/**
* Convert a byte array into hex
@ -145,7 +138,7 @@ uint64_t unhex(const char *s) noexcept;
* @param s String buffer, must be at least (l*2)+1 in size or overflow will occur
* @return Pointer to filled string buffer
*/
char *hex(const void *d, unsigned int l, char *s) noexcept;
char* hex(const void* d, unsigned int l, char* s) noexcept;
/**
* Decode a hex string
@ -156,7 +149,7 @@ char *hex(const void *d, unsigned int l, char *s) noexcept;
* @param buflen Length of output buffer
* @return Number of written bytes
*/
unsigned int unhex(const char *h, unsigned int hlen, void *buf, unsigned int buflen) noexcept;
unsigned int unhex(const char* h, unsigned int hlen, void* buf, unsigned int buflen) noexcept;
/**
* Generate secure random bytes
@ -167,7 +160,7 @@ unsigned int unhex(const char *h, unsigned int hlen, void *buf, unsigned int buf
* @param buf Buffer to fill
* @param bytes Number of random bytes to generate
*/
void getSecureRandom(void *buf, unsigned int bytes) noexcept;
void getSecureRandom(void* buf, unsigned int bytes) noexcept;
/**
* @return Secure random 64-bit integer
@ -183,7 +176,7 @@ uint64_t getSecureRandomU64() noexcept;
* @param bufSize Size of result buffer
* @return Number of bytes written
*/
int b32e(const uint8_t *data, int length, char *result, int bufSize) noexcept;
int b32e(const uint8_t* data, int length, char* result, int bufSize) noexcept;
/**
* Decode base32 string
@ -193,7 +186,7 @@ int b32e(const uint8_t *data, int length, char *result, int bufSize) noexcept;
* @param bufSize Size of result buffer
* @return Number of bytes written or -1 on error
*/
int b32d(const char *encoded, uint8_t *result, int bufSize) noexcept;
int b32d(const char* encoded, uint8_t* result, int bufSize) noexcept;
/**
* Get a non-cryptographic random integer.
@ -215,18 +208,18 @@ uint64_t random() noexcept;
* @param src Source string (if NULL, dest will receive a zero-length string and true is returned)
* @return True on success, false on overflow (buffer will still be 0-terminated)
*/
bool scopy(char *dest, unsigned int len, const char *src) noexcept;
bool scopy(char* dest, unsigned int len, const char* src) noexcept;
/**
* Check if a buffer's contents are all zero
*/
static ZT_INLINE bool allZero(const void *const b, const unsigned int l) noexcept
static ZT_INLINE bool allZero(const void* const b, const unsigned int l) noexcept
{
for (unsigned int i=0;i<l;++i) {
if (reinterpret_cast<const uint8_t *>(b)[i] != 0)
return false;
}
return true;
for (unsigned int i = 0; i < l; ++i) {
if (reinterpret_cast<const uint8_t*>(b)[i] != 0)
return false;
}
return true;
}
/**
@ -237,50 +230,59 @@ static ZT_INLINE bool allZero(const void *const b, const unsigned int l) noexcep
* @param saveptr Pointer to pointer where function can save state
* @return Next token or NULL if none
*/
static ZT_INLINE char *stok(char *str, const char *delim, char **saveptr) noexcept
static ZT_INLINE char* stok(char* str, const char* delim, char** saveptr) noexcept
{
#ifdef __WINDOWS__
return strtok_s(str, delim, saveptr);
return strtok_s(str, delim, saveptr);
#else
return strtok_r(str, delim, saveptr);
return strtok_r(str, delim, saveptr);
#endif
}
static ZT_INLINE unsigned int strToUInt(const char *s) noexcept
{ return (unsigned int)strtoul(s, nullptr, 10); }
static ZT_INLINE unsigned int strToUInt(const char* s) noexcept
{
return (unsigned int)strtoul(s, nullptr, 10);
}
static ZT_INLINE unsigned long long hexStrToU64(const char *s) noexcept
static ZT_INLINE unsigned long long hexStrToU64(const char* s) noexcept
{
#ifdef __WINDOWS__
return (unsigned long long)_strtoui64(s,nullptr,16);
return (unsigned long long)_strtoui64(s, nullptr, 16);
#else
return strtoull(s, nullptr, 16);
return strtoull(s, nullptr, 16);
#endif
}
#ifdef __GNUC__
static ZT_INLINE unsigned int countBits(const uint8_t v) noexcept
{ return (unsigned int)__builtin_popcount((unsigned int)v); }
{
return (unsigned int)__builtin_popcount((unsigned int)v);
}
static ZT_INLINE unsigned int countBits(const uint16_t v) noexcept
{ return (unsigned int)__builtin_popcount((unsigned int)v); }
{
return (unsigned int)__builtin_popcount((unsigned int)v);
}
static ZT_INLINE unsigned int countBits(const uint32_t v) noexcept
{ return (unsigned int)__builtin_popcountl((unsigned long)v); }
{
return (unsigned int)__builtin_popcountl((unsigned long)v);
}
static ZT_INLINE unsigned int countBits(const uint64_t v) noexcept
{ return (unsigned int)__builtin_popcountll((unsigned long long)v); }
{
return (unsigned int)__builtin_popcountll((unsigned long long)v);
}
#else
template<typename T>
static ZT_INLINE unsigned int countBits(T v) noexcept
template <typename T> static ZT_INLINE unsigned int countBits(T v) noexcept
{
v = v - ((v >> 1) & (T)~(T)0/3);
v = (v & (T)~(T)0/15*3) + ((v >> 2) & (T)~(T)0/15*3);
v = (v + (v >> 4)) & (T)~(T)0/255*15;
return (unsigned int)((v * ((~((T)0))/((T)255))) >> ((sizeof(T) - 1) * 8));
v = v - ((v >> 1) & (T) ~(T)0 / 3);
v = (v & (T) ~(T)0 / 15 * 3) + ((v >> 2) & (T) ~(T)0 / 15 * 3);
v = (v + (v >> 4)) & (T) ~(T)0 / 255 * 15;
return (unsigned int)((v * ((~((T)0)) / ((T)255))) >> ((sizeof(T) - 1) * 8));
}
#endif
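Note: the fallback countBits() above is the classic SWAR popcount: the first line produces 2-bit pair counts, the second folds them into 4-bit nibble counts, the third into byte counts, and the final multiply by ~0/255 sums every byte into the top byte. A quick illustrative check (the naive loop exists only to show equivalence):

static unsigned int naiveBits(uint64_t v) noexcept
{
    unsigned int n = 0;
    while (v) {
        n += (unsigned int)(v & 1U);
        v >>= 1U;
    }
    return n;
}
// countBits((uint64_t)0xf0f0f0f0f0f0f0f0ULL) == 32 == naiveBits(0xf0f0f0f0f0f0f0f0ULL)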
@ -294,21 +296,15 @@ static ZT_INLINE unsigned int countBits(T v) noexcept
static ZT_INLINE uint64_t swapBytes(const uint64_t n) noexcept
{
#ifdef __GNUC__
return __builtin_bswap64(n);
return __builtin_bswap64(n);
#else
#ifdef _MSC_VER
return (uint64_t)_byteswap_uint64((unsigned __int64)n);
return (uint64_t)_byteswap_uint64((unsigned __int64)n);
#else
return (
((n & 0x00000000000000ffULL) << 56) |
((n & 0x000000000000ff00ULL) << 40) |
((n & 0x0000000000ff0000ULL) << 24) |
((n & 0x00000000ff000000ULL) << 8) |
((n & 0x000000ff00000000ULL) >> 8) |
((n & 0x0000ff0000000000ULL) >> 24) |
((n & 0x00ff000000000000ULL) >> 40) |
((n & 0xff00000000000000ULL) >> 56)
);
return (
((n & 0x00000000000000ffULL) << 56) | ((n & 0x000000000000ff00ULL) << 40) | ((n & 0x0000000000ff0000ULL) << 24)
| ((n & 0x00000000ff000000ULL) << 8) | ((n & 0x000000ff00000000ULL) >> 8) | ((n & 0x0000ff0000000000ULL) >> 24)
| ((n & 0x00ff000000000000ULL) >> 40) | ((n & 0xff00000000000000ULL) >> 56));
#endif
#endif
}
@ -322,12 +318,12 @@ static ZT_INLINE uint64_t swapBytes(const uint64_t n) noexcept
static ZT_INLINE uint32_t swapBytes(const uint32_t n) noexcept
{
#if defined(__GNUC__)
return __builtin_bswap32(n);
return __builtin_bswap32(n);
#else
#ifdef _MSC_VER
return (uint32_t)_byteswap_ulong((unsigned long)n);
return (uint32_t)_byteswap_ulong((unsigned long)n);
#else
return htonl(n);
return htonl(n);
#endif
#endif
}
@ -341,121 +337,120 @@ static ZT_INLINE uint32_t swapBytes(const uint32_t n) noexcept
static ZT_INLINE uint16_t swapBytes(const uint16_t n) noexcept
{
#if defined(__GNUC__)
return __builtin_bswap16(n);
return __builtin_bswap16(n);
#else
#ifdef _MSC_VER
return (uint16_t)_byteswap_ushort((unsigned short)n);
return (uint16_t)_byteswap_ushort((unsigned short)n);
#else
return htons(n);
return htons(n);
#endif
#endif
}
// These are helper adapters to load and swap integer types special cased by size
// to work with all typedef'd variants, signed/unsigned, etc.
template< typename I, unsigned int S >
class _swap_bytes_bysize;
template <typename I, unsigned int S> class _swap_bytes_bysize;
template< typename I >
class _swap_bytes_bysize< I, 1 >
{
public:
static ZT_INLINE I s(const I n) noexcept
{ return n; }
template <typename I> class _swap_bytes_bysize<I, 1> {
public:
static ZT_INLINE I s(const I n) noexcept
{
return n;
}
};
template< typename I >
class _swap_bytes_bysize< I, 2 >
{
public:
static ZT_INLINE I s(const I n) noexcept
{ return (I)swapBytes((uint16_t)n); }
template <typename I> class _swap_bytes_bysize<I, 2> {
public:
static ZT_INLINE I s(const I n) noexcept
{
return (I)swapBytes((uint16_t)n);
}
};
template< typename I >
class _swap_bytes_bysize< I, 4 >
{
public:
static ZT_INLINE I s(const I n) noexcept
{ return (I)swapBytes((uint32_t)n); }
template <typename I> class _swap_bytes_bysize<I, 4> {
public:
static ZT_INLINE I s(const I n) noexcept
{
return (I)swapBytes((uint32_t)n);
}
};
template< typename I >
class _swap_bytes_bysize< I, 8 >
{
public:
static ZT_INLINE I s(const I n) noexcept
{ return (I)swapBytes((uint64_t)n); }
template <typename I> class _swap_bytes_bysize<I, 8> {
public:
static ZT_INLINE I s(const I n) noexcept
{
return (I)swapBytes((uint64_t)n);
}
};
template< typename I, unsigned int S >
class _load_be_bysize;
template <typename I, unsigned int S> class _load_be_bysize;
template< typename I >
class _load_be_bysize< I, 1 >
{
public:
static ZT_INLINE I l(const uint8_t *const p) noexcept
{ return p[0]; }
template <typename I> class _load_be_bysize<I, 1> {
public:
static ZT_INLINE I l(const uint8_t* const p) noexcept
{
return p[0];
}
};
template< typename I >
class _load_be_bysize< I, 2 >
{
public:
static ZT_INLINE I l(const uint8_t *const p) noexcept
{ return (I)(((unsigned int)p[0] << 8U) | (unsigned int)p[1]); }
template <typename I> class _load_be_bysize<I, 2> {
public:
static ZT_INLINE I l(const uint8_t* const p) noexcept
{
return (I)(((unsigned int)p[0] << 8U) | (unsigned int)p[1]);
}
};
template< typename I >
class _load_be_bysize< I, 4 >
{
public:
static ZT_INLINE I l(const uint8_t *const p) noexcept
{ return (I)(((uint32_t)p[0] << 24U) | ((uint32_t)p[1] << 16U) | ((uint32_t)p[2] << 8U) | (uint32_t)p[3]); }
template <typename I> class _load_be_bysize<I, 4> {
public:
static ZT_INLINE I l(const uint8_t* const p) noexcept
{
return (I)(((uint32_t)p[0] << 24U) | ((uint32_t)p[1] << 16U) | ((uint32_t)p[2] << 8U) | (uint32_t)p[3]);
}
};
template< typename I >
class _load_be_bysize< I, 8 >
{
public:
static ZT_INLINE I l(const uint8_t *const p) noexcept
{ return (I)(((uint64_t)p[0] << 56U) | ((uint64_t)p[1] << 48U) | ((uint64_t)p[2] << 40U) | ((uint64_t)p[3] << 32U) | ((uint64_t)p[4] << 24U) | ((uint64_t)p[5] << 16U) | ((uint64_t)p[6] << 8U) | (uint64_t)p[7]); }
template <typename I> class _load_be_bysize<I, 8> {
public:
static ZT_INLINE I l(const uint8_t* const p) noexcept
{
return (
I)(((uint64_t)p[0] << 56U) | ((uint64_t)p[1] << 48U) | ((uint64_t)p[2] << 40U) | ((uint64_t)p[3] << 32U) | ((uint64_t)p[4] << 24U) | ((uint64_t)p[5] << 16U) | ((uint64_t)p[6] << 8U) | (uint64_t)p[7]);
}
};
template< typename I, unsigned int S >
class _load_le_bysize;
template <typename I, unsigned int S> class _load_le_bysize;
template< typename I >
class _load_le_bysize< I, 1 >
{
public:
static ZT_INLINE I l(const uint8_t *const p) noexcept
{ return p[0]; }
template <typename I> class _load_le_bysize<I, 1> {
public:
static ZT_INLINE I l(const uint8_t* const p) noexcept
{
return p[0];
}
};
template< typename I >
class _load_le_bysize< I, 2 >
{
public:
static ZT_INLINE I l(const uint8_t *const p) noexcept
{ return (I)((unsigned int)p[0] | ((unsigned int)p[1] << 8U)); }
template <typename I> class _load_le_bysize<I, 2> {
public:
static ZT_INLINE I l(const uint8_t* const p) noexcept
{
return (I)((unsigned int)p[0] | ((unsigned int)p[1] << 8U));
}
};
template< typename I >
class _load_le_bysize< I, 4 >
{
public:
static ZT_INLINE I l(const uint8_t *const p) noexcept
{ return (I)((uint32_t)p[0] | ((uint32_t)p[1] << 8U) | ((uint32_t)p[2] << 16U) | ((uint32_t)p[3] << 24U)); }
template <typename I> class _load_le_bysize<I, 4> {
public:
static ZT_INLINE I l(const uint8_t* const p) noexcept
{
return (I)((uint32_t)p[0] | ((uint32_t)p[1] << 8U) | ((uint32_t)p[2] << 16U) | ((uint32_t)p[3] << 24U));
}
};
template< typename I >
class _load_le_bysize< I, 8 >
{
public:
static ZT_INLINE I l(const uint8_t *const p) noexcept
{ return (I)((uint64_t)p[0] | ((uint64_t)p[1] << 8U) | ((uint64_t)p[2] << 16U) | ((uint64_t)p[3] << 24U) | ((uint64_t)p[4] << 32U) | ((uint64_t)p[5] << 40U) | ((uint64_t)p[6] << 48U) | ((uint64_t)p[7]) << 56U); }
template <typename I> class _load_le_bysize<I, 8> {
public:
static ZT_INLINE I l(const uint8_t* const p) noexcept
{
return (
I)((uint64_t)p[0] | ((uint64_t)p[1] << 8U) | ((uint64_t)p[2] << 16U) | ((uint64_t)p[3] << 24U) | ((uint64_t)p[4] << 32U) | ((uint64_t)p[5] << 40U) | ((uint64_t)p[6] << 48U) | ((uint64_t)p[7]) << 56U);
}
};
/**
@ -465,13 +460,12 @@ public:
* @param n Value to convert
* @return Value in big-endian order
*/
template< typename I >
static ZT_INLINE I hton(const I n) noexcept
template <typename I> static ZT_INLINE I hton(const I n) noexcept
{
#if __BYTE_ORDER == __LITTLE_ENDIAN
return _swap_bytes_bysize< I, sizeof(I) >::s(n);
return _swap_bytes_bysize<I, sizeof(I)>::s(n);
#else
return n;
return n;
#endif
}
@ -482,13 +476,12 @@ static ZT_INLINE I hton(const I n) noexcept
* @param n Value to convert
* @return Value in host byte order
*/
template< typename I >
static ZT_INLINE I ntoh(const I n) noexcept
template <typename I> static ZT_INLINE I ntoh(const I n) noexcept
{
#if __BYTE_ORDER == __LITTLE_ENDIAN
return _swap_bytes_bysize< I, sizeof(I) >::s(n);
return _swap_bytes_bysize<I, sizeof(I)>::s(n);
#else
return n;
return n;
#endif
}
@ -499,16 +492,15 @@ static ZT_INLINE I ntoh(const I n) noexcept
* @param p Byte stream, must be at least sizeof(I) in size
* @return Loaded raw integer
*/
template< typename I >
static ZT_INLINE I loadMachineEndian(const void *const restrict p) noexcept
template <typename I> static ZT_INLINE I loadMachineEndian(const void* const restrict p) noexcept
{
#ifdef ZT_NO_UNALIGNED_ACCESS
I tmp;
for(int i=0;i<(int)sizeof(I);++i)
reinterpret_cast<uint8_t *>(&tmp)[i] = reinterpret_cast<const uint8_t *>(p)[i];
return tmp;
I tmp;
for (int i = 0; i < (int)sizeof(I); ++i)
reinterpret_cast<uint8_t*>(&tmp)[i] = reinterpret_cast<const uint8_t*>(p)[i];
return tmp;
#else
return *reinterpret_cast<const I *>(p);
return *reinterpret_cast<const I*>(p);
#endif
}
@ -519,14 +511,13 @@ static ZT_INLINE I loadMachineEndian(const void *const restrict p) noexcept
* @param p Byte array (must be at least sizeof(I))
* @param i Integer to store
*/
template< typename I >
static ZT_INLINE void storeMachineEndian(void *const restrict p, const I i) noexcept
template <typename I> static ZT_INLINE void storeMachineEndian(void* const restrict p, const I i) noexcept
{
#ifdef ZT_NO_UNALIGNED_ACCESS
for(unsigned int k=0;k<sizeof(I);++k)
reinterpret_cast<uint8_t *>(p)[k] = reinterpret_cast<const uint8_t *>(&i)[k];
for (unsigned int k = 0; k < sizeof(I); ++k)
reinterpret_cast<uint8_t*>(p)[k] = reinterpret_cast<const uint8_t*>(&i)[k];
#else
*reinterpret_cast<I *>(p) = i;
*reinterpret_cast<I*>(p) = i;
#endif
}
@ -537,13 +528,12 @@ static ZT_INLINE void storeMachineEndian(void *const restrict p, const I i) noex
* @param p Byte stream, must be at least sizeof(I) in size
* @return Decoded integer
*/
template< typename I >
static ZT_INLINE I loadBigEndian(const void *const restrict p) noexcept
template <typename I> static ZT_INLINE I loadBigEndian(const void* const restrict p) noexcept
{
#ifdef ZT_NO_UNALIGNED_ACCESS
return _load_be_bysize<I,sizeof(I)>::l(reinterpret_cast<const uint8_t *>(p));
return _load_be_bysize<I, sizeof(I)>::l(reinterpret_cast<const uint8_t*>(p));
#else
return ntoh(*reinterpret_cast<const I *>(p));
return ntoh(*reinterpret_cast<const I*>(p));
#endif
}
@ -554,13 +544,12 @@ static ZT_INLINE I loadBigEndian(const void *const restrict p) noexcept
* @param p Byte stream to write (must be at least sizeof(I))
* @param i Integer to write
*/
template< typename I >
static ZT_INLINE void storeBigEndian(void *const restrict p, I i) noexcept
template <typename I> static ZT_INLINE void storeBigEndian(void* const restrict p, I i) noexcept
{
#ifdef ZT_NO_UNALIGNED_ACCESS
storeMachineEndian(p,hton(i));
storeMachineEndian(p, hton(i));
#else
*reinterpret_cast<I *>(p) = hton(i);
*reinterpret_cast<I*>(p) = hton(i);
#endif
}
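Note: storeBigEndian()/loadBigEndian() above are the wire-format helpers; values are written most-significant byte first regardless of host order, falling back to byte-by-byte access when unaligned loads are unsafe. A small sketch:

uint8_t wire[4];
ZeroTier::Utils::storeBigEndian<uint32_t>(wire, 0x11223344U);
// wire now holds { 0x11, 0x22, 0x33, 0x44 } on any host
const uint32_t v = ZeroTier::Utils::loadBigEndian<uint32_t>(wire);   // 0x11223344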
@ -571,13 +560,12 @@ static ZT_INLINE void storeBigEndian(void *const restrict p, I i) noexcept
* @param p Byte stream, must be at least sizeof(I) in size
* @return Decoded integer
*/
template< typename I >
static ZT_INLINE I loadLittleEndian(const void *const restrict p) noexcept
template <typename I> static ZT_INLINE I loadLittleEndian(const void* const restrict p) noexcept
{
#if __BYTE_ORDER == __BIG_ENDIAN || defined(ZT_NO_UNALIGNED_ACCESS)
return _load_le_bysize<I,sizeof(I)>::l(reinterpret_cast<const uint8_t *>(p));
return _load_le_bysize<I, sizeof(I)>::l(reinterpret_cast<const uint8_t*>(p));
#else
return *reinterpret_cast<const I *>(p);
return *reinterpret_cast<const I*>(p);
#endif
}
@ -588,16 +576,15 @@ static ZT_INLINE I loadLittleEndian(const void *const restrict p) noexcept
* @param p Byte stream to write (must be at least sizeof(I))
* @param i Integer to write
*/
template< typename I >
static ZT_INLINE void storeLittleEndian(void *const restrict p, const I i) noexcept
template <typename I> static ZT_INLINE void storeLittleEndian(void* const restrict p, const I i) noexcept
{
#if __BYTE_ORDER == __BIG_ENDIAN
storeMachineEndian(p,_swap_bytes_bysize<I,sizeof(I)>::s(i));
storeMachineEndian(p, _swap_bytes_bysize<I, sizeof(I)>::s(i));
#else
#ifdef ZT_NO_UNALIGNED_ACCESS
storeMachineEndian(p,i);
storeMachineEndian(p, i);
#else
*reinterpret_cast<I *>(p) = i;
*reinterpret_cast<I*>(p) = i;
#endif
#endif
}
@ -609,14 +596,13 @@ static ZT_INLINE void storeLittleEndian(void *const restrict p, const I i) noexc
* @param dest Destination memory
* @param src Source memory
*/
template< unsigned long L >
static ZT_INLINE void copy(void *dest, const void *src) noexcept
template <unsigned long L> static ZT_INLINE void copy(void* dest, const void* src) noexcept
{
#if defined(ZT_ARCH_X64) && defined(__GNUC__)
uintptr_t l = L;
__asm__ __volatile__ ("cld ; rep movsb" : "+c"(l), "+S"(src), "+D"(dest) :: "memory");
uintptr_t l = L;
__asm__ __volatile__("cld ; rep movsb" : "+c"(l), "+S"(src), "+D"(dest)::"memory");
#else
memcpy(dest, src, L);
memcpy(dest, src, L);
#endif
}
@ -627,12 +613,12 @@ static ZT_INLINE void copy(void *dest, const void *src) noexcept
* @param src Source memory
* @param len Bytes to copy
*/
static ZT_INLINE void copy(void *dest, const void *src, unsigned long len) noexcept
static ZT_INLINE void copy(void* dest, const void* src, unsigned long len) noexcept
{
#if defined(ZT_ARCH_X64) && defined(__GNUC__)
__asm__ __volatile__ ("cld ; rep movsb" : "+c"(len), "+S"(src), "+D"(dest) :: "memory");
__asm__ __volatile__("cld ; rep movsb" : "+c"(len), "+S"(src), "+D"(dest)::"memory");
#else
memcpy(dest, src, len);
memcpy(dest, src, len);
#endif
}
@ -642,14 +628,13 @@ static ZT_INLINE void copy(void *dest, const void *src, unsigned long len) noexc
* @tparam L Size in bytes
* @param dest Memory to zero
*/
template< unsigned long L >
static ZT_INLINE void zero(void *dest) noexcept
template <unsigned long L> static ZT_INLINE void zero(void* dest) noexcept
{
#if defined(ZT_ARCH_X64) && defined(__GNUC__)
uintptr_t l = L;
__asm__ __volatile__ ("cld ; rep stosb" :"+c" (l), "+D" (dest) : "a" (0) : "memory");
uintptr_t l = L;
__asm__ __volatile__("cld ; rep stosb" : "+c"(l), "+D"(dest) : "a"(0) : "memory");
#else
memset(dest, 0, L);
memset(dest, 0, L);
#endif
}
@ -659,12 +644,12 @@ static ZT_INLINE void zero(void *dest) noexcept
* @param dest Memory to zero
* @param len Size in bytes
*/
static ZT_INLINE void zero(void *dest, unsigned long len) noexcept
static ZT_INLINE void zero(void* dest, unsigned long len) noexcept
{
#if defined(ZT_ARCH_X64) && defined(__GNUC__)
__asm__ __volatile__ ("cld ; rep stosb" :"+c" (len), "+D" (dest) : "a" (0) : "memory");
__asm__ __volatile__("cld ; rep stosb" : "+c"(len), "+D"(dest) : "a"(0) : "memory");
#else
memset(dest, 0, len);
memset(dest, 0, len);
#endif
}
@ -677,7 +662,7 @@ static ZT_INLINE void zero(void *dest, unsigned long len) noexcept
* @param len Length of data
* @return FNV1a checksum
*/
uint32_t fnv1a32(const void *restrict data, unsigned int len) noexcept;
uint32_t fnv1a32(const void* restrict data, unsigned int len) noexcept;
/**
* Mix bits in a 64-bit integer (non-cryptographic, for hash tables)
@ -689,12 +674,12 @@ uint32_t fnv1a32(const void *restrict data, unsigned int len) noexcept;
*/
static ZT_INLINE uint64_t hash64(uint64_t x) noexcept
{
x ^= x >> 30U;
x *= 0xbf58476d1ce4e5b9ULL;
x ^= x >> 27U;
x *= 0x94d049bb133111ebULL;
x ^= x >> 31U;
return x;
x ^= x >> 30U;
x *= 0xbf58476d1ce4e5b9ULL;
x ^= x >> 27U;
x *= 0x94d049bb133111ebULL;
x ^= x >> 31U;
return x;
}
/**
@ -707,16 +692,16 @@ static ZT_INLINE uint64_t hash64(uint64_t x) noexcept
*/
static ZT_INLINE uint32_t hash32(uint32_t x) noexcept
{
x ^= x >> 16U;
x *= 0x7feb352dU;
x ^= x >> 15U;
x *= 0x846ca68bU;
x ^= x >> 16U;
return x;
x ^= x >> 16U;
x *= 0x7feb352dU;
x ^= x >> 15U;
x *= 0x846ca68bU;
x ^= x >> 16U;
return x;
}
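Note: hash64() above uses SplitMix64-style finalizer constants and hash32() a low-bias 32-bit mixer; both exist to spread keys across hash table buckets and carry no security guarantees. Typical use, assuming a power-of-two table size:

const unsigned long bucketCount = 1024;       // must be a power of two for the mask trick
const uint64_t key = 0x1122334455667788ULL;   // e.g. a packed address or map-nonce mix
const unsigned long bucket = (unsigned long)(ZeroTier::Utils::hash64(key) & (uint64_t)(bucketCount - 1));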
} // namespace Utils
} // namespace Utils
} // namespace ZeroTier
} // namespace ZeroTier
#endif
@ -14,15 +14,15 @@
#ifndef ZT_VL1_HPP
#define ZT_VL1_HPP
#include "Constants.hpp"
#include "Defragmenter.hpp"
#include "Buf.hpp"
#include "Address.hpp"
#include "Protocol.hpp"
#include "Mutex.hpp"
#include "FCV.hpp"
#include "Containers.hpp"
#include "Buf.hpp"
#include "CallContext.hpp"
#include "Constants.hpp"
#include "Containers.hpp"
#include "Defragmenter.hpp"
#include "FCV.hpp"
#include "Mutex.hpp"
#include "Protocol.hpp"
#define ZT_VL1_MAX_WHOIS_WAITING_PACKETS 32
@ -43,62 +43,124 @@ class VL2;
*
* This class is thread safe.
*/
class VL1
{
public:
explicit VL1(const Context &ctx);
class VL1 {
public:
explicit VL1(const Context& ctx);
/**
* Called when a packet is received from the real network
*
* The packet data supplied to this method may be modified. Internal
* packet handler code may also take possession of it via atomic swap
* and leave the 'data' pointer NULL. The 'data' pointer and its
* contents should not be used after this call. Make a copy if the
* data might still be needed.
*
* @param localSocket Local I/O socket as supplied by external code
* @param fromAddr Internet IP address of origin
* @param data Packet data
* @param len Packet length
*/
void onRemotePacket(CallContext &cc, int64_t localSocket, const InetAddress &fromAddr, SharedPtr< Buf > &data, unsigned int len) noexcept;
/**
* Called when a packet is received from the real network
*
* The packet data supplied to this method may be modified. Internal
* packet handler code may also take possession of it via atomic swap
* and leave the 'data' pointer NULL. The 'data' pointer and its
* contents should not be used after this call. Make a copy if the
* data might still be needed.
*
* @param localSocket Local I/O socket as supplied by external code
* @param fromAddr Internet IP address of origin
* @param data Packet data
* @param len Packet length
*/
void onRemotePacket(
CallContext& cc,
int64_t localSocket,
const InetAddress& fromAddr,
SharedPtr<Buf>& data,
unsigned int len) noexcept;
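Note: the ownership contract described above matters for callers: onRemotePacket() may swap the buffer out of the SharedPtr and leave it NULL, so anything still needed must be copied before the call. The pattern, illustrated here with std::shared_ptr rather than ZeroTier's own types (a sketch of the rule, not of the actual implementation):

#include <memory>
#include <vector>

// Illustration only: a handler that may take possession of the caller's buffer.
static void handler(std::shared_ptr<std::vector<uint8_t> >& data)
{
    std::shared_ptr<std::vector<uint8_t> > taken;
    taken.swap(data);   // the caller's pointer is now empty
    // ... process 'taken' asynchronously ...
}

static void caller(std::shared_ptr<std::vector<uint8_t> > pkt)
{
    const auto savedLen = pkt->size();   // copy anything needed *before* the call
    handler(pkt);
    // 'pkt' may now be NULL; use savedLen, never pkt->size()
}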
private:
void m_relay(CallContext &cc, const SharedPtr< Path > &path, Address destination, SharedPtr< Buf > &pkt, int pktSize);
void m_sendPendingWhois(CallContext &cc);
SharedPtr< Peer > m_HELLO(CallContext &cc, const SharedPtr< Path > &path, Buf &pkt, int packetSize);
bool m_ERROR(CallContext &cc, uint64_t packetId, unsigned int auth, const SharedPtr< Path > &path, const SharedPtr< Peer > &peer, Buf &pkt, int packetSize, Protocol::Verb &inReVerb);
bool m_OK(CallContext &cc, uint64_t packetId, unsigned int auth, const SharedPtr< Path > &path, const SharedPtr< Peer > &peer, Buf &pkt, int packetSize, Protocol::Verb &inReVerb);
bool m_WHOIS(CallContext &cc, uint64_t packetId, unsigned int auth, const SharedPtr< Path > &path, const SharedPtr< Peer > &peer, Buf &pkt, int packetSize);
bool m_RENDEZVOUS(CallContext &cc, uint64_t packetId, unsigned int auth, const SharedPtr< Path > &path, const SharedPtr< Peer > &peer, Buf &pkt, int packetSize);
bool m_ECHO(CallContext &cc, uint64_t packetId, unsigned int auth, const SharedPtr< Path > &path, const SharedPtr< Peer > &peer, Buf &pkt, int packetSize);
bool m_PUSH_DIRECT_PATHS(CallContext &cc, uint64_t packetId, unsigned int auth, const SharedPtr< Path > &path, const SharedPtr< Peer > &peer, Buf &pkt, int packetSize);
bool m_USER_MESSAGE(CallContext &cc, uint64_t packetId, unsigned int auth, const SharedPtr< Path > &path, const SharedPtr< Peer > &peer, Buf &pkt, int packetSize);
bool m_ENCAP(CallContext &cc, uint64_t packetId, unsigned int auth, const SharedPtr< Path > &path, const SharedPtr< Peer > &peer, Buf &pkt, int packetSize);
private:
void m_relay(CallContext& cc, const SharedPtr<Path>& path, Address destination, SharedPtr<Buf>& pkt, int pktSize);
void m_sendPendingWhois(CallContext& cc);
SharedPtr<Peer> m_HELLO(CallContext& cc, const SharedPtr<Path>& path, Buf& pkt, int packetSize);
bool m_ERROR(
CallContext& cc,
uint64_t packetId,
unsigned int auth,
const SharedPtr<Path>& path,
const SharedPtr<Peer>& peer,
Buf& pkt,
int packetSize,
Protocol::Verb& inReVerb);
bool m_OK(
CallContext& cc,
uint64_t packetId,
unsigned int auth,
const SharedPtr<Path>& path,
const SharedPtr<Peer>& peer,
Buf& pkt,
int packetSize,
Protocol::Verb& inReVerb);
bool m_WHOIS(
CallContext& cc,
uint64_t packetId,
unsigned int auth,
const SharedPtr<Path>& path,
const SharedPtr<Peer>& peer,
Buf& pkt,
int packetSize);
bool m_RENDEZVOUS(
CallContext& cc,
uint64_t packetId,
unsigned int auth,
const SharedPtr<Path>& path,
const SharedPtr<Peer>& peer,
Buf& pkt,
int packetSize);
bool m_ECHO(
CallContext& cc,
uint64_t packetId,
unsigned int auth,
const SharedPtr<Path>& path,
const SharedPtr<Peer>& peer,
Buf& pkt,
int packetSize);
bool m_PUSH_DIRECT_PATHS(
CallContext& cc,
uint64_t packetId,
unsigned int auth,
const SharedPtr<Path>& path,
const SharedPtr<Peer>& peer,
Buf& pkt,
int packetSize);
bool m_USER_MESSAGE(
CallContext& cc,
uint64_t packetId,
unsigned int auth,
const SharedPtr<Path>& path,
const SharedPtr<Peer>& peer,
Buf& pkt,
int packetSize);
bool m_ENCAP(
CallContext& cc,
uint64_t packetId,
unsigned int auth,
const SharedPtr<Path>& path,
const SharedPtr<Peer>& peer,
Buf& pkt,
int packetSize);
const Context &m_ctx;
const Context& m_ctx;
// Defragmentation engine for handling inbound packets with more than one fragment.
Defragmenter< ZT_MAX_PACKET_FRAGMENTS > m_inputPacketAssembler;
// Defragmentation engine for handling inbound packets with more than one fragment.
Defragmenter<ZT_MAX_PACKET_FRAGMENTS> m_inputPacketAssembler;
// Queue of outbound WHOIS requests and packets waiting on them.
struct p_WhoisQueueItem
{
ZT_INLINE p_WhoisQueueItem() : lastRetry(0), retries(0), waitingPacketCount(0)
{}
// Queue of outbound WHOIS requests and packets waiting on them.
struct p_WhoisQueueItem {
ZT_INLINE p_WhoisQueueItem() : lastRetry(0), retries(0), waitingPacketCount(0)
{
}
int64_t lastRetry;
unsigned int retries;
unsigned int waitingPacketCount;
unsigned int waitingPacketSize[ZT_VL1_MAX_WHOIS_WAITING_PACKETS];
SharedPtr< Buf > waitingPacket[ZT_VL1_MAX_WHOIS_WAITING_PACKETS];
};
int64_t lastRetry;
unsigned int retries;
unsigned int waitingPacketCount;
unsigned int waitingPacketSize[ZT_VL1_MAX_WHOIS_WAITING_PACKETS];
SharedPtr<Buf> waitingPacket[ZT_VL1_MAX_WHOIS_WAITING_PACKETS];
};
Map< Address, p_WhoisQueueItem > m_whoisQueue;
Mutex m_whoisQueue_l;
Map<Address, p_WhoisQueueItem> m_whoisQueue;
Mutex m_whoisQueue_l;
};
} // namespace ZeroTier
} // namespace ZeroTier
#endif
@ -12,59 +12,130 @@
/****/
#include "VL2.hpp"
#include "Context.hpp"
#include "VL1.hpp"
#include "Topology.hpp"
#include "Peer.hpp"
#include "Path.hpp"
#include "Network.hpp"
#include "MAC.hpp"
#include "Network.hpp"
#include "Path.hpp"
#include "Peer.hpp"
#include "Topology.hpp"
#include "VL1.hpp"
namespace ZeroTier {
VL2::VL2(const Context &ctx):
m_ctx(ctx)
VL2::VL2(const Context& ctx) : m_ctx(ctx)
{
}
void VL2::onLocalEthernet(CallContext &cc, const SharedPtr< Network > &network, const MAC &from, const MAC &to, const unsigned int etherType, unsigned int vlanId, SharedPtr< Buf > &data, unsigned int len)
void VL2::onLocalEthernet(
CallContext& cc,
const SharedPtr<Network>& network,
const MAC& from,
const MAC& to,
const unsigned int etherType,
unsigned int vlanId,
SharedPtr<Buf>& data,
unsigned int len)
{
}
bool VL2::m_FRAME(CallContext &cc, const uint64_t packetId, const unsigned int auth, const SharedPtr< Path > &path, SharedPtr< Peer > &peer, Buf &pkt, int packetSize)
bool VL2::m_FRAME(
CallContext& cc,
const uint64_t packetId,
const unsigned int auth,
const SharedPtr<Path>& path,
SharedPtr<Peer>& peer,
Buf& pkt,
int packetSize)
{
}
bool VL2::m_EXT_FRAME(CallContext &cc, const uint64_t packetId, const unsigned int auth, const SharedPtr< Path > &path, SharedPtr< Peer > &peer, Buf &pkt, int packetSize)
bool VL2::m_EXT_FRAME(
CallContext& cc,
const uint64_t packetId,
const unsigned int auth,
const SharedPtr<Path>& path,
SharedPtr<Peer>& peer,
Buf& pkt,
int packetSize)
{
}
bool VL2::m_MULTICAST_LIKE(CallContext &cc, const uint64_t packetId, const unsigned int auth, const SharedPtr< Path > &path, SharedPtr< Peer > &peer, Buf &pkt, int packetSize)
bool VL2::m_MULTICAST_LIKE(
CallContext& cc,
const uint64_t packetId,
const unsigned int auth,
const SharedPtr<Path>& path,
SharedPtr<Peer>& peer,
Buf& pkt,
int packetSize)
{
}
bool VL2::m_NETWORK_CREDENTIALS(CallContext &cc, const uint64_t packetId, const unsigned int auth, const SharedPtr< Path > &path, SharedPtr< Peer > &peer, Buf &pkt, int packetSize)
bool VL2::m_NETWORK_CREDENTIALS(
CallContext& cc,
const uint64_t packetId,
const unsigned int auth,
const SharedPtr<Path>& path,
SharedPtr<Peer>& peer,
Buf& pkt,
int packetSize)
{
}
bool VL2::m_NETWORK_CONFIG_REQUEST(CallContext &cc, const uint64_t packetId, const unsigned int auth, const SharedPtr< Path > &path, SharedPtr< Peer > &peer, Buf &pkt, int packetSize)
bool VL2::m_NETWORK_CONFIG_REQUEST(
CallContext& cc,
const uint64_t packetId,
const unsigned int auth,
const SharedPtr<Path>& path,
SharedPtr<Peer>& peer,
Buf& pkt,
int packetSize)
{
}
bool VL2::m_NETWORK_CONFIG(CallContext &cc, const uint64_t packetId, const unsigned int auth, const SharedPtr< Path > &path, SharedPtr< Peer > &peer, Buf &pkt, int packetSize)
bool VL2::m_NETWORK_CONFIG(
CallContext& cc,
const uint64_t packetId,
const unsigned int auth,
const SharedPtr<Path>& path,
SharedPtr<Peer>& peer,
Buf& pkt,
int packetSize)
{
}
bool VL2::m_MULTICAST_GATHER(CallContext &cc, const uint64_t packetId, const unsigned int auth, const SharedPtr< Path > &path, SharedPtr< Peer > &peer, Buf &pkt, int packetSize)
bool VL2::m_MULTICAST_GATHER(
CallContext& cc,
const uint64_t packetId,
const unsigned int auth,
const SharedPtr<Path>& path,
SharedPtr<Peer>& peer,
Buf& pkt,
int packetSize)
{
}
bool VL2::m_MULTICAST_FRAME_deprecated(CallContext &cc, const uint64_t packetId, const unsigned int auth, const SharedPtr< Path > &path, SharedPtr< Peer > &peer, Buf &pkt, int packetSize)
bool VL2::m_MULTICAST_FRAME_deprecated(
CallContext& cc,
const uint64_t packetId,
const unsigned int auth,
const SharedPtr<Path>& path,
SharedPtr<Peer>& peer,
Buf& pkt,
int packetSize)
{
}
bool VL2::m_MULTICAST(CallContext &cc, const uint64_t packetId, const unsigned int auth, const SharedPtr< Path > &path, SharedPtr< Peer > &peer, Buf &pkt, int packetSize)
bool VL2::m_MULTICAST(
CallContext& cc,
const uint64_t packetId,
const unsigned int auth,
const SharedPtr<Path>& path,
SharedPtr<Peer>& peer,
Buf& pkt,
int packetSize)
{
}
} // namespace ZeroTier
} // namespace ZeroTier