Remove this stuff from attic in V2 branch.

This commit is contained in:
Adam Ierymenko 2021-10-14 18:59:11 -04:00
parent d805af7de2
commit 5c19377997
No known key found for this signature in database
GPG key ID: C8877CF2D7A5D7F3
105 changed files with 0 additions and 35145 deletions

View file

@ -1,699 +0,0 @@
/*
* Copyright (c)2013-2021 ZeroTier, Inc.
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
*/
/****/
#include "AES.hpp"
#include "Constants.hpp"
#ifdef __GNUC__
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#endif
// The software AES below stores only one 256-entry T-table per direction
// (Te0 for encryption, Td0 for decryption). The other three tables of the
// classic four-table AES implementation are byte rotations of the first, so
// they are derived on the fly with a rotate instead of being stored,
// trading one rotate per lookup for 6 KB less table data (and cache).
#define Te1_r(x) ZT_ROR32(Te0[x], 8U)
#define Te2_r(x) ZT_ROR32(Te0[x], 16U)
#define Te3_r(x) ZT_ROR32(Te0[x], 24U)
#define Td1_r(x) ZT_ROR32(Td0[x], 8U)
#define Td2_r(x) ZT_ROR32(Td0[x], 16U)
#define Td3_r(x) ZT_ROR32(Td0[x], 24U)
namespace ZeroTier {
// GMAC ---------------------------------------------------------------------------------------------------------------
namespace {
// Constant-time 32x32 -> 64 bit carry-less ("binary polynomial") multiply.
// Used to build the software GF(2^128) GHASH multiply in s_gfmul() when no
// hardware carry-less multiply (x86 PCLMUL / ARM PMULL) is available.
//
// Each operand is split into four interleaved groups containing every 4th
// bit. Multiplying these sparse values with ordinary integer multiplication
// cannot propagate a carry into another bit position of interest, so masking
// each partial product back to its group and XOR-ing the groups together
// yields the carry-less product without any data-dependent branches or
// table lookups (timing-attack resistant). N is a unique suffix for the
// temporaries so the macro can be expanded more than once per scope.
#define s_bmul32(N, x, y, rh, rl) \
    uint32_t x0t_##N = (x)&0x11111111U; \
    uint32_t x1t_##N = (x)&0x22222222U; \
    uint32_t x2t_##N = (x)&0x44444444U; \
    uint32_t x3t_##N = (x)&0x88888888U; \
    uint32_t y0t_##N = (y)&0x11111111U; \
    uint32_t y1t_##N = (y)&0x22222222U; \
    uint32_t y2t_##N = (y)&0x44444444U; \
    uint32_t y3t_##N = (y)&0x88888888U; \
    uint64_t z0t_##N = (((uint64_t)x0t_##N * y0t_##N) ^ ((uint64_t)x1t_##N * y3t_##N) ^ ((uint64_t)x2t_##N * y2t_##N) ^ ((uint64_t)x3t_##N * y1t_##N)) & 0x1111111111111111ULL; \
    uint64_t z1t_##N = (((uint64_t)x0t_##N * y1t_##N) ^ ((uint64_t)x1t_##N * y0t_##N) ^ ((uint64_t)x2t_##N * y3t_##N) ^ ((uint64_t)x3t_##N * y2t_##N)) & 0x2222222222222222ULL; \
    uint64_t z2t_##N = (((uint64_t)x0t_##N * y2t_##N) ^ ((uint64_t)x1t_##N * y1t_##N) ^ ((uint64_t)x2t_##N * y0t_##N) ^ ((uint64_t)x3t_##N * y3t_##N)) & 0x4444444444444444ULL; \
    z0t_##N |= z1t_##N; \
    z2t_##N |= z0t_##N; \
    uint64_t zt_##N = z2t_##N | ((((uint64_t)x0t_##N * y3t_##N) ^ ((uint64_t)x1t_##N * y2t_##N) ^ ((uint64_t)x2t_##N * y1t_##N) ^ ((uint64_t)x3t_##N * y0t_##N)) & 0x8888888888888888ULL); \
    (rh) = (uint32_t)(zt_##N >> 32U); \
    (rl) = (uint32_t)zt_##N;
// Software GHASH step: multiply the running hash {y0,y1} (stored in network
// byte order) by the hash key {hh,hl} in GF(2^128) and write the product
// back into y0/y1. Only used when neither PCLMUL nor PMULL is available.
//
// Structure: one 128x128 carry-less multiply built from three 64x64
// multiplies (Karatsuba), each of which is in turn built from three 32x32
// s_bmul32 multiplies (Karatsuba again), followed by reduction modulo the
// GHASH polynomial x^128 + x^7 + x^2 + x + 1.
void s_gfmul(const uint64_t hh, const uint64_t hl, uint64_t &y0, uint64_t &y1) noexcept
{
    // Split the key into 32-bit halves; hhXlh/hhXll are the Karatsuba
    // cross terms (high half XOR low half).
    uint32_t hhh = (uint32_t)(hh >> 32U);
    uint32_t hhl = (uint32_t)hh;
    uint32_t hlh = (uint32_t)(hl >> 32U);
    uint32_t hll = (uint32_t)hl;
    uint32_t hhXlh = hhh ^ hlh;
    uint32_t hhXll = hhl ^ hll;
    // Bring the accumulator into host byte order and split it the same way.
    uint64_t yl = Utils::ntoh(y0);
    uint64_t yh = Utils::ntoh(y1);
    uint32_t cilh = (uint32_t)(yh >> 32U);
    uint32_t cill = (uint32_t)yh;
    uint32_t cihh = (uint32_t)(yl >> 32U);
    uint32_t cihl = (uint32_t)yl;
    uint32_t cihXlh = cihh ^ cilh;
    uint32_t cihXll = cihl ^ cill;
    // 64x64 carry-less multiply #1: high halves (aa = cih * hh).
    uint32_t aah, aal, abh, abl, ach, acl;
    s_bmul32(M0, cihh, hhh, aah, aal);
    s_bmul32(M1, cihl, hhl, abh, abl);
    s_bmul32(M2, cihh ^ cihl, hhh ^ hhl, ach, acl);
    ach ^= aah ^ abh;
    acl ^= aal ^ abl;
    aal ^= ach;
    abh ^= acl;
    // 64x64 carry-less multiply #2: low halves (bb = cil * hl).
    uint32_t bah, bal, bbh, bbl, bch, bcl;
    s_bmul32(M3, cilh, hlh, bah, bal);
    s_bmul32(M4, cill, hll, bbh, bbl);
    s_bmul32(M5, cilh ^ cill, hlh ^ hll, bch, bcl);
    bch ^= bah ^ bbh;
    bcl ^= bal ^ bbl;
    bal ^= bch;
    bbh ^= bcl;
    // 64x64 carry-less multiply #3: Karatsuba cross term.
    uint32_t cah, cal, cbh, cbl, cch, ccl;
    s_bmul32(M6, cihXlh, hhXlh, cah, cal);
    s_bmul32(M7, cihXll, hhXll, cbh, cbl);
    s_bmul32(M8, cihXlh ^ cihXll, hhXlh ^ hhXll, cch, ccl);
    cch ^= cah ^ cbh;
    ccl ^= cal ^ cbl;
    cal ^= cch;
    cbh ^= ccl;
    cah ^= bah ^ aah;
    cal ^= bal ^ aal;
    cbh ^= bbh ^ abh;
    cbl ^= bbl ^ abl;
    // Assemble the 256-bit product zhh:zhl:zlh:zll.
    uint64_t zhh = ((uint64_t)aah << 32U) | aal;
    uint64_t zhl = (((uint64_t)abh << 32U) | abl) ^ (((uint64_t)cah << 32U) | cal);
    uint64_t zlh = (((uint64_t)bah << 32U) | bal) ^ (((uint64_t)cbh << 32U) | cbl);
    uint64_t zll = ((uint64_t)bbh << 32U) | bbl;
    // Shift the whole product left one bit (compensates for GHASH's
    // bit-reflected element representation).
    zhh = zhh << 1U | zhl >> 63U;
    zhl = zhl << 1U | zlh >> 63U;
    zlh = zlh << 1U | zll >> 63U;
    zll <<= 1U;
    // Reduce the 256-bit product modulo x^128 + x^7 + x^2 + x + 1; the
    // shift counts (1, 2, 7 and their 64-bit complements) encode the
    // polynomial's low terms.
    zlh ^= (zll << 63U) ^ (zll << 62U) ^ (zll << 57U);
    zhh ^= zlh ^ (zlh >> 1U) ^ (zlh >> 2U) ^ (zlh >> 7U);
    zhl ^= zll ^ (zll >> 1U) ^ (zll >> 2U) ^ (zll >> 7U) ^ (zlh << 63U) ^ (zlh << 62U) ^ (zlh << 57U);
    // Store back in network byte order.
    y0 = Utils::hton(zhh);
    y1 = Utils::hton(zhl);
}
} // anonymous namespace
// Incorporate len bytes of message data into the GHASH accumulator.
// May be called repeatedly; partial trailing blocks are buffered in _r and
// completed on the next call (or zero-padded by finish()). Dispatches to
// the hardware implementation (x86 AES-NI/PCLMUL or ARM NEON/PMULL) when
// the CPU supports it, otherwise runs the portable software path below.
void AES::GMAC::update(const void *const data, unsigned int len) noexcept
{
    const uint8_t *in = reinterpret_cast<const uint8_t *>(data);
    _len += len; // total byte count; folded into the hash by finish()
#ifdef ZT_AES_AESNI
    if (likely(Utils::CPUID.aes)) {
        p_aesNIUpdate(in, len);
        return;
    }
#endif // ZT_AES_AESNI
#ifdef ZT_AES_NEON
    if (Utils::ARMCAP.pmull) {
        p_armUpdate(in, len);
        return;
    }
#endif // ZT_AES_NEON
    // Software path: hash key and accumulator are kept in locals across the
    // main loop so the compiler can hold them in registers.
    const uint64_t h0 = _aes.p_k.sw.h[0];
    const uint64_t h1 = _aes.p_k.sw.h[1];
    uint64_t y0 = _y[0];
    uint64_t y1 = _y[1];
    // If a previous call left a partial block in _r, fill it first; absorb
    // it into the hash only if it reaches a full 16 bytes.
    if (_rp) {
        for (;;) {
            if (!len)
                return;
            --len;
            _r[_rp++] = *(in++);
            if (_rp == 16) {
                y0 ^= Utils::loadMachineEndian<uint64_t>(_r);
                y1 ^= Utils::loadMachineEndian<uint64_t>(_r + 8);
                s_gfmul(h0, h1, y0, y1);
                break;
            }
        }
    }
    // Main loop: absorb whole 16-byte blocks directly from the input.
    while (len >= 16) {
        y0 ^= Utils::loadMachineEndian<uint64_t>(in);
        y1 ^= Utils::loadMachineEndian<uint64_t>(in + 8);
        in += 16;
        s_gfmul(h0, h1, y0, y1);
        len -= 16;
    }
    _y[0] = y0;
    _y[1] = y1;
    // Stash any trailing partial block for the next update()/finish().
    for (unsigned int i = 0; i < len; ++i)
        _r[i] = in[i];
    _rp = len; // len is always less than 16 here
}
// Complete the GMAC computation and write the 16-byte authentication tag.
// Hardware paths are tried first, mirroring update().
void AES::GMAC::finish(uint8_t tag[16]) noexcept
{
#ifdef ZT_AES_AESNI
    if (likely(Utils::CPUID.aes)) {
        p_aesNIFinish(tag);
        return;
    }
#endif // ZT_AES_AESNI
#ifdef ZT_AES_NEON
    if (Utils::ARMCAP.pmull) {
        p_armFinish(tag);
        return;
    }
#endif // ZT_AES_NEON
    const uint64_t h0 = _aes.p_k.sw.h[0];
    const uint64_t h1 = _aes.p_k.sw.h[1];
    uint64_t y0 = _y[0];
    uint64_t y1 = _y[1];
    // Zero-pad and absorb the final partial block, if any.
    if (_rp) {
        while (_rp < 16)
            _r[_rp++] = 0;
        y0 ^= Utils::loadMachineEndian<uint64_t>(_r);
        y1 ^= Utils::loadMachineEndian<uint64_t>(_r + 8);
        s_gfmul(h0, h1, y0, y1);
    }
    // Absorb the GHASH length block: total message length in *bits*.
    y0 ^= Utils::hton((uint64_t)_len << 3U);
    s_gfmul(h0, h1, y0, y1);
    // Build J0 = 96-bit IV || 32-bit big-endian counter value 1, encrypt it
    // with AES, and XOR with the hash to produce the tag. Only 96-bit IVs
    // are supported (the copy is exactly 12 bytes).
    uint64_t iv2[2];
    Utils::copy<12>(iv2, _iv);
#if __BYTE_ORDER == __BIG_ENDIAN
    reinterpret_cast<uint32_t *>(iv2)[3] = 0x00000001;
#else
    reinterpret_cast<uint32_t *>(iv2)[3] = 0x01000000;
#endif
    _aes.encrypt(iv2, iv2);
    Utils::storeMachineEndian<uint64_t>(tag, iv2[0] ^ y0);
    Utils::storeMachineEndian<uint64_t>(tag + 8, iv2[1] ^ y1);
}
// AES-CTR ------------------------------------------------------------------------------------------------------------
// Encrypt (or decrypt -- CTR mode is symmetric) len bytes from input,
// appending the result to the internal output buffer at offset _len.
// May be called multiple times per message; a trailing partial block is
// copied through un-encrypted and XOR-ed with keystream later, either when
// the block is completed by a subsequent call or by finish(). Dispatches
// to AES-NI or NEON implementations when available; the portable software
// path below is a T-table AES-256 (rk[] holds 60 round-key words = 14
// rounds) with the first 13 rounds fully unrolled.
void AES::CTR::crypt(const void *const input, unsigned int len) noexcept
{
    const uint8_t *in = reinterpret_cast<const uint8_t *>(input);
    uint8_t *out = _out;
#ifdef ZT_AES_AESNI
    if (likely(Utils::CPUID.aes)) {
        p_aesNICrypt(in, out, len);
        return;
    }
#endif // ZT_AES_AESNI
#ifdef ZT_AES_NEON
    if (Utils::ARMCAP.aes) {
        p_armCrypt(in, out, len);
        return;
    }
#endif // ZT_AES_NEON
    uint64_t keyStream[2];
    // ctr is the low (4th) 32-bit word of the counter block, kept in host
    // byte order here; only this word is incremented (standard GCM-style
    // 32-bit counter -- NOTE(review): wrap past 2^32 blocks is not handled).
    uint32_t ctr = Utils::ntoh(reinterpret_cast<uint32_t *>(_ctr)[3]);
    unsigned int totalLen = _len;
    // If a previous call left a partial block, fill it byte by byte; when
    // it completes, generate one keystream block and XOR it in place.
    if ((totalLen & 15U)) {
        for (;;) {
            if (!len) {
                _len = (totalLen + len);
                return;
            }
            --len;
            out[totalLen++] = *(in++);
            if (!(totalLen & 15U)) {
                _aes.p_encryptSW(reinterpret_cast<const uint8_t *>(_ctr), reinterpret_cast<uint8_t *>(keyStream));
                reinterpret_cast<uint32_t *>(_ctr)[3] = Utils::hton(++ctr);
                uint8_t *outblk = out + (totalLen - 16);
                for (int i = 0; i < 16; ++i)
                    outblk[i] ^= reinterpret_cast<uint8_t *>(keyStream)[i];
                break;
            }
        }
    }
    out += totalLen;
    _len = (totalLen + len);
    if (likely(len >= 16)) {
        const uint32_t *const restrict rk = _aes.p_k.sw.ek;
        // The upper three counter words never change within this call, so
        // they are pre-XORed with round-key words 0-2 outside the loop.
        const uint32_t ctr0rk0 = Utils::ntoh(reinterpret_cast<const uint32_t *>(_ctr)[0]) ^ rk[0];
        const uint32_t ctr1rk1 = Utils::ntoh(reinterpret_cast<const uint32_t *>(_ctr)[1]) ^ rk[1];
        const uint32_t ctr2rk2 = Utils::ntoh(reinterpret_cast<const uint32_t *>(_ctr)[2]) ^ rk[2];
        // Byte-extraction masks; m8_8/m8_16/m8_24 are used by the final
        // round to reassemble S-box bytes from the T-table lookups.
        const uint32_t m8 = 0x000000ff;
        const uint32_t m8_8 = 0x0000ff00;
        const uint32_t m8_16 = 0x00ff0000;
        const uint32_t m8_24 = 0xff000000;
        // Fast path: when both pointers are 8-byte aligned, input/output is
        // moved with 64-bit loads and stores.
        if (likely((((uintptr_t)out & 7U) == 0U) && (((uintptr_t)in & 7U) == 0U))) {
            do {
                uint32_t s0, s1, s2, s3, t0, t1, t2, t3;
                s0 = ctr0rk0;
                s1 = ctr1rk1;
                s2 = ctr2rk2;
                s3 = ctr++ ^ rk[3];
                const uint64_t in0 = *reinterpret_cast<const uint64_t *>(in);
                const uint64_t in1 = *reinterpret_cast<const uint64_t *>(in + 8);
                in += 16;
                // Rounds 1-13 unrolled (rk[4]..rk[55]), ping-ponging
                // between the s and t state words.
                t0 = Te0[s0 >> 24U] ^ Te1_r((s1 >> 16U) & m8) ^ Te2_r((s2 >> 8U) & m8) ^ Te3_r(s3 & m8) ^ rk[4];
                t1 = Te0[s1 >> 24U] ^ Te1_r((s2 >> 16U) & m8) ^ Te2_r((s3 >> 8U) & m8) ^ Te3_r(s0 & m8) ^ rk[5];
                t2 = Te0[s2 >> 24U] ^ Te1_r((s3 >> 16U) & m8) ^ Te2_r((s0 >> 8U) & m8) ^ Te3_r(s1 & m8) ^ rk[6];
                t3 = Te0[s3 >> 24U] ^ Te1_r((s0 >> 16U) & m8) ^ Te2_r((s1 >> 8U) & m8) ^ Te3_r(s2 & m8) ^ rk[7];
                s0 = Te0[t0 >> 24U] ^ Te1_r((t1 >> 16U) & m8) ^ Te2_r((t2 >> 8U) & m8) ^ Te3_r(t3 & m8) ^ rk[8];
                s1 = Te0[t1 >> 24U] ^ Te1_r((t2 >> 16U) & m8) ^ Te2_r((t3 >> 8U) & m8) ^ Te3_r(t0 & m8) ^ rk[9];
                s2 = Te0[t2 >> 24U] ^ Te1_r((t3 >> 16U) & m8) ^ Te2_r((t0 >> 8U) & m8) ^ Te3_r(t1 & m8) ^ rk[10];
                s3 = Te0[t3 >> 24U] ^ Te1_r((t0 >> 16U) & m8) ^ Te2_r((t1 >> 8U) & m8) ^ Te3_r(t2 & m8) ^ rk[11];
                t0 = Te0[s0 >> 24U] ^ Te1_r((s1 >> 16U) & m8) ^ Te2_r((s2 >> 8U) & m8) ^ Te3_r(s3 & m8) ^ rk[12];
                t1 = Te0[s1 >> 24U] ^ Te1_r((s2 >> 16U) & m8) ^ Te2_r((s3 >> 8U) & m8) ^ Te3_r(s0 & m8) ^ rk[13];
                t2 = Te0[s2 >> 24U] ^ Te1_r((s3 >> 16U) & m8) ^ Te2_r((s0 >> 8U) & m8) ^ Te3_r(s1 & m8) ^ rk[14];
                t3 = Te0[s3 >> 24U] ^ Te1_r((s0 >> 16U) & m8) ^ Te2_r((s1 >> 8U) & m8) ^ Te3_r(s2 & m8) ^ rk[15];
                s0 = Te0[t0 >> 24U] ^ Te1_r((t1 >> 16U) & m8) ^ Te2_r((t2 >> 8U) & m8) ^ Te3_r(t3 & m8) ^ rk[16];
                s1 = Te0[t1 >> 24U] ^ Te1_r((t2 >> 16U) & m8) ^ Te2_r((t3 >> 8U) & m8) ^ Te3_r(t0 & m8) ^ rk[17];
                s2 = Te0[t2 >> 24U] ^ Te1_r((t3 >> 16U) & m8) ^ Te2_r((t0 >> 8U) & m8) ^ Te3_r(t1 & m8) ^ rk[18];
                s3 = Te0[t3 >> 24U] ^ Te1_r((t0 >> 16U) & m8) ^ Te2_r((t1 >> 8U) & m8) ^ Te3_r(t2 & m8) ^ rk[19];
                t0 = Te0[s0 >> 24U] ^ Te1_r((s1 >> 16U) & m8) ^ Te2_r((s2 >> 8U) & m8) ^ Te3_r(s3 & m8) ^ rk[20];
                t1 = Te0[s1 >> 24U] ^ Te1_r((s2 >> 16U) & m8) ^ Te2_r((s3 >> 8U) & m8) ^ Te3_r(s0 & m8) ^ rk[21];
                t2 = Te0[s2 >> 24U] ^ Te1_r((s3 >> 16U) & m8) ^ Te2_r((s0 >> 8U) & m8) ^ Te3_r(s1 & m8) ^ rk[22];
                t3 = Te0[s3 >> 24U] ^ Te1_r((s0 >> 16U) & m8) ^ Te2_r((s1 >> 8U) & m8) ^ Te3_r(s2 & m8) ^ rk[23];
                s0 = Te0[t0 >> 24U] ^ Te1_r((t1 >> 16U) & m8) ^ Te2_r((t2 >> 8U) & m8) ^ Te3_r(t3 & m8) ^ rk[24];
                s1 = Te0[t1 >> 24U] ^ Te1_r((t2 >> 16U) & m8) ^ Te2_r((t3 >> 8U) & m8) ^ Te3_r(t0 & m8) ^ rk[25];
                s2 = Te0[t2 >> 24U] ^ Te1_r((t3 >> 16U) & m8) ^ Te2_r((t0 >> 8U) & m8) ^ Te3_r(t1 & m8) ^ rk[26];
                s3 = Te0[t3 >> 24U] ^ Te1_r((t0 >> 16U) & m8) ^ Te2_r((t1 >> 8U) & m8) ^ Te3_r(t2 & m8) ^ rk[27];
                t0 = Te0[s0 >> 24U] ^ Te1_r((s1 >> 16U) & m8) ^ Te2_r((s2 >> 8U) & m8) ^ Te3_r(s3 & m8) ^ rk[28];
                t1 = Te0[s1 >> 24U] ^ Te1_r((s2 >> 16U) & m8) ^ Te2_r((s3 >> 8U) & m8) ^ Te3_r(s0 & m8) ^ rk[29];
                t2 = Te0[s2 >> 24U] ^ Te1_r((s3 >> 16U) & m8) ^ Te2_r((s0 >> 8U) & m8) ^ Te3_r(s1 & m8) ^ rk[30];
                t3 = Te0[s3 >> 24U] ^ Te1_r((s0 >> 16U) & m8) ^ Te2_r((s1 >> 8U) & m8) ^ Te3_r(s2 & m8) ^ rk[31];
                s0 = Te0[t0 >> 24U] ^ Te1_r((t1 >> 16U) & m8) ^ Te2_r((t2 >> 8U) & m8) ^ Te3_r(t3 & m8) ^ rk[32];
                s1 = Te0[t1 >> 24U] ^ Te1_r((t2 >> 16U) & m8) ^ Te2_r((t3 >> 8U) & m8) ^ Te3_r(t0 & m8) ^ rk[33];
                s2 = Te0[t2 >> 24U] ^ Te1_r((t3 >> 16U) & m8) ^ Te2_r((t0 >> 8U) & m8) ^ Te3_r(t1 & m8) ^ rk[34];
                s3 = Te0[t3 >> 24U] ^ Te1_r((t0 >> 16U) & m8) ^ Te2_r((t1 >> 8U) & m8) ^ Te3_r(t2 & m8) ^ rk[35];
                t0 = Te0[s0 >> 24U] ^ Te1_r((s1 >> 16U) & m8) ^ Te2_r((s2 >> 8U) & m8) ^ Te3_r(s3 & m8) ^ rk[36];
                t1 = Te0[s1 >> 24U] ^ Te1_r((s2 >> 16U) & m8) ^ Te2_r((s3 >> 8U) & m8) ^ Te3_r(s0 & m8) ^ rk[37];
                t2 = Te0[s2 >> 24U] ^ Te1_r((s3 >> 16U) & m8) ^ Te2_r((s0 >> 8U) & m8) ^ Te3_r(s1 & m8) ^ rk[38];
                t3 = Te0[s3 >> 24U] ^ Te1_r((s0 >> 16U) & m8) ^ Te2_r((s1 >> 8U) & m8) ^ Te3_r(s2 & m8) ^ rk[39];
                s0 = Te0[t0 >> 24U] ^ Te1_r((t1 >> 16U) & m8) ^ Te2_r((t2 >> 8U) & m8) ^ Te3_r(t3 & m8) ^ rk[40];
                s1 = Te0[t1 >> 24U] ^ Te1_r((t2 >> 16U) & m8) ^ Te2_r((t3 >> 8U) & m8) ^ Te3_r(t0 & m8) ^ rk[41];
                s2 = Te0[t2 >> 24U] ^ Te1_r((t3 >> 16U) & m8) ^ Te2_r((t0 >> 8U) & m8) ^ Te3_r(t1 & m8) ^ rk[42];
                s3 = Te0[t3 >> 24U] ^ Te1_r((t0 >> 16U) & m8) ^ Te2_r((t1 >> 8U) & m8) ^ Te3_r(t2 & m8) ^ rk[43];
                t0 = Te0[s0 >> 24U] ^ Te1_r((s1 >> 16U) & m8) ^ Te2_r((s2 >> 8U) & m8) ^ Te3_r(s3 & m8) ^ rk[44];
                t1 = Te0[s1 >> 24U] ^ Te1_r((s2 >> 16U) & m8) ^ Te2_r((s3 >> 8U) & m8) ^ Te3_r(s0 & m8) ^ rk[45];
                t2 = Te0[s2 >> 24U] ^ Te1_r((s3 >> 16U) & m8) ^ Te2_r((s0 >> 8U) & m8) ^ Te3_r(s1 & m8) ^ rk[46];
                t3 = Te0[s3 >> 24U] ^ Te1_r((s0 >> 16U) & m8) ^ Te2_r((s1 >> 8U) & m8) ^ Te3_r(s2 & m8) ^ rk[47];
                s0 = Te0[t0 >> 24U] ^ Te1_r((t1 >> 16U) & m8) ^ Te2_r((t2 >> 8U) & m8) ^ Te3_r(t3 & m8) ^ rk[48];
                s1 = Te0[t1 >> 24U] ^ Te1_r((t2 >> 16U) & m8) ^ Te2_r((t3 >> 8U) & m8) ^ Te3_r(t0 & m8) ^ rk[49];
                s2 = Te0[t2 >> 24U] ^ Te1_r((t3 >> 16U) & m8) ^ Te2_r((t0 >> 8U) & m8) ^ Te3_r(t1 & m8) ^ rk[50];
                s3 = Te0[t3 >> 24U] ^ Te1_r((t0 >> 16U) & m8) ^ Te2_r((t1 >> 8U) & m8) ^ Te3_r(t2 & m8) ^ rk[51];
                t0 = Te0[s0 >> 24U] ^ Te1_r((s1 >> 16U) & m8) ^ Te2_r((s2 >> 8U) & m8) ^ Te3_r(s3 & m8) ^ rk[52];
                t1 = Te0[s1 >> 24U] ^ Te1_r((s2 >> 16U) & m8) ^ Te2_r((s3 >> 8U) & m8) ^ Te3_r(s0 & m8) ^ rk[53];
                t2 = Te0[s2 >> 24U] ^ Te1_r((s3 >> 16U) & m8) ^ Te2_r((s0 >> 8U) & m8) ^ Te3_r(s1 & m8) ^ rk[54];
                t3 = Te0[s3 >> 24U] ^ Te1_r((s0 >> 16U) & m8) ^ Te2_r((s1 >> 8U) & m8) ^ Te3_r(s2 & m8) ^ rk[55];
                // Final round (rk[56]..rk[59]): no MixColumns, so S-box
                // bytes are reassembled from the T-tables via the masks.
                s0 = (Te2_r(t0 >> 24U) & m8_24) ^ (Te3_r((t1 >> 16U) & m8) & m8_16) ^ (Te0[(t2 >> 8U) & m8] & m8_8) ^ (Te1_r(t3 & m8) & m8) ^ rk[56];
                s1 = (Te2_r(t1 >> 24U) & m8_24) ^ (Te3_r((t2 >> 16U) & m8) & m8_16) ^ (Te0[(t3 >> 8U) & m8] & m8_8) ^ (Te1_r(t0 & m8) & m8) ^ rk[57];
                s2 = (Te2_r(t2 >> 24U) & m8_24) ^ (Te3_r((t3 >> 16U) & m8) & m8_16) ^ (Te0[(t0 >> 8U) & m8] & m8_8) ^ (Te1_r(t1 & m8) & m8) ^ rk[58];
                s3 = (Te2_r(t3 >> 24U) & m8_24) ^ (Te3_r((t0 >> 16U) & m8) & m8_16) ^ (Te0[(t1 >> 8U) & m8] & m8_8) ^ (Te1_r(t2 & m8) & m8) ^ rk[59];
                // XOR keystream with input, 64 bits at a time.
                *reinterpret_cast<uint64_t *>(out) = in0 ^ Utils::hton(((uint64_t)s0 << 32U) | (uint64_t)s1);
                *reinterpret_cast<uint64_t *>(out + 8) = in1 ^ Utils::hton(((uint64_t)s2 << 32U) | (uint64_t)s3);
                out += 16;
            } while ((len -= 16) >= 16);
        }
        else {
            // Unaligned path: identical cipher, byte-wise input/output.
            do {
                uint32_t s0, s1, s2, s3, t0, t1, t2, t3;
                s0 = ctr0rk0;
                s1 = ctr1rk1;
                s2 = ctr2rk2;
                s3 = ctr++ ^ rk[3];
                t0 = Te0[s0 >> 24U] ^ Te1_r((s1 >> 16U) & m8) ^ Te2_r((s2 >> 8U) & m8) ^ Te3_r(s3 & m8) ^ rk[4];
                t1 = Te0[s1 >> 24U] ^ Te1_r((s2 >> 16U) & m8) ^ Te2_r((s3 >> 8U) & m8) ^ Te3_r(s0 & m8) ^ rk[5];
                t2 = Te0[s2 >> 24U] ^ Te1_r((s3 >> 16U) & m8) ^ Te2_r((s0 >> 8U) & m8) ^ Te3_r(s1 & m8) ^ rk[6];
                t3 = Te0[s3 >> 24U] ^ Te1_r((s0 >> 16U) & m8) ^ Te2_r((s1 >> 8U) & m8) ^ Te3_r(s2 & m8) ^ rk[7];
                s0 = Te0[t0 >> 24U] ^ Te1_r((t1 >> 16U) & m8) ^ Te2_r((t2 >> 8U) & m8) ^ Te3_r(t3 & m8) ^ rk[8];
                s1 = Te0[t1 >> 24U] ^ Te1_r((t2 >> 16U) & m8) ^ Te2_r((t3 >> 8U) & m8) ^ Te3_r(t0 & m8) ^ rk[9];
                s2 = Te0[t2 >> 24U] ^ Te1_r((t3 >> 16U) & m8) ^ Te2_r((t0 >> 8U) & m8) ^ Te3_r(t1 & m8) ^ rk[10];
                s3 = Te0[t3 >> 24U] ^ Te1_r((t0 >> 16U) & m8) ^ Te2_r((t1 >> 8U) & m8) ^ Te3_r(t2 & m8) ^ rk[11];
                t0 = Te0[s0 >> 24U] ^ Te1_r((s1 >> 16U) & m8) ^ Te2_r((s2 >> 8U) & m8) ^ Te3_r(s3 & m8) ^ rk[12];
                t1 = Te0[s1 >> 24U] ^ Te1_r((s2 >> 16U) & m8) ^ Te2_r((s3 >> 8U) & m8) ^ Te3_r(s0 & m8) ^ rk[13];
                t2 = Te0[s2 >> 24U] ^ Te1_r((s3 >> 16U) & m8) ^ Te2_r((s0 >> 8U) & m8) ^ Te3_r(s1 & m8) ^ rk[14];
                t3 = Te0[s3 >> 24U] ^ Te1_r((s0 >> 16U) & m8) ^ Te2_r((s1 >> 8U) & m8) ^ Te3_r(s2 & m8) ^ rk[15];
                s0 = Te0[t0 >> 24U] ^ Te1_r((t1 >> 16U) & m8) ^ Te2_r((t2 >> 8U) & m8) ^ Te3_r(t3 & m8) ^ rk[16];
                s1 = Te0[t1 >> 24U] ^ Te1_r((t2 >> 16U) & m8) ^ Te2_r((t3 >> 8U) & m8) ^ Te3_r(t0 & m8) ^ rk[17];
                s2 = Te0[t2 >> 24U] ^ Te1_r((t3 >> 16U) & m8) ^ Te2_r((t0 >> 8U) & m8) ^ Te3_r(t1 & m8) ^ rk[18];
                s3 = Te0[t3 >> 24U] ^ Te1_r((t0 >> 16U) & m8) ^ Te2_r((t1 >> 8U) & m8) ^ Te3_r(t2 & m8) ^ rk[19];
                t0 = Te0[s0 >> 24U] ^ Te1_r((s1 >> 16U) & m8) ^ Te2_r((s2 >> 8U) & m8) ^ Te3_r(s3 & m8) ^ rk[20];
                t1 = Te0[s1 >> 24U] ^ Te1_r((s2 >> 16U) & m8) ^ Te2_r((s3 >> 8U) & m8) ^ Te3_r(s0 & m8) ^ rk[21];
                t2 = Te0[s2 >> 24U] ^ Te1_r((s3 >> 16U) & m8) ^ Te2_r((s0 >> 8U) & m8) ^ Te3_r(s1 & m8) ^ rk[22];
                t3 = Te0[s3 >> 24U] ^ Te1_r((s0 >> 16U) & m8) ^ Te2_r((s1 >> 8U) & m8) ^ Te3_r(s2 & m8) ^ rk[23];
                s0 = Te0[t0 >> 24U] ^ Te1_r((t1 >> 16U) & m8) ^ Te2_r((t2 >> 8U) & m8) ^ Te3_r(t3 & m8) ^ rk[24];
                s1 = Te0[t1 >> 24U] ^ Te1_r((t2 >> 16U) & m8) ^ Te2_r((t3 >> 8U) & m8) ^ Te3_r(t0 & m8) ^ rk[25];
                s2 = Te0[t2 >> 24U] ^ Te1_r((t3 >> 16U) & m8) ^ Te2_r((t0 >> 8U) & m8) ^ Te3_r(t1 & m8) ^ rk[26];
                s3 = Te0[t3 >> 24U] ^ Te1_r((t0 >> 16U) & m8) ^ Te2_r((t1 >> 8U) & m8) ^ Te3_r(t2 & m8) ^ rk[27];
                t0 = Te0[s0 >> 24U] ^ Te1_r((s1 >> 16U) & m8) ^ Te2_r((s2 >> 8U) & m8) ^ Te3_r(s3 & m8) ^ rk[28];
                t1 = Te0[s1 >> 24U] ^ Te1_r((s2 >> 16U) & m8) ^ Te2_r((s3 >> 8U) & m8) ^ Te3_r(s0 & m8) ^ rk[29];
                t2 = Te0[s2 >> 24U] ^ Te1_r((s3 >> 16U) & m8) ^ Te2_r((s0 >> 8U) & m8) ^ Te3_r(s1 & m8) ^ rk[30];
                t3 = Te0[s3 >> 24U] ^ Te1_r((s0 >> 16U) & m8) ^ Te2_r((s1 >> 8U) & m8) ^ Te3_r(s2 & m8) ^ rk[31];
                s0 = Te0[t0 >> 24U] ^ Te1_r((t1 >> 16U) & m8) ^ Te2_r((t2 >> 8U) & m8) ^ Te3_r(t3 & m8) ^ rk[32];
                s1 = Te0[t1 >> 24U] ^ Te1_r((t2 >> 16U) & m8) ^ Te2_r((t3 >> 8U) & m8) ^ Te3_r(t0 & m8) ^ rk[33];
                s2 = Te0[t2 >> 24U] ^ Te1_r((t3 >> 16U) & m8) ^ Te2_r((t0 >> 8U) & m8) ^ Te3_r(t1 & m8) ^ rk[34];
                s3 = Te0[t3 >> 24U] ^ Te1_r((t0 >> 16U) & m8) ^ Te2_r((t1 >> 8U) & m8) ^ Te3_r(t2 & m8) ^ rk[35];
                t0 = Te0[s0 >> 24U] ^ Te1_r((s1 >> 16U) & m8) ^ Te2_r((s2 >> 8U) & m8) ^ Te3_r(s3 & m8) ^ rk[36];
                t1 = Te0[s1 >> 24U] ^ Te1_r((s2 >> 16U) & m8) ^ Te2_r((s3 >> 8U) & m8) ^ Te3_r(s0 & m8) ^ rk[37];
                t2 = Te0[s2 >> 24U] ^ Te1_r((s3 >> 16U) & m8) ^ Te2_r((s0 >> 8U) & m8) ^ Te3_r(s1 & m8) ^ rk[38];
                t3 = Te0[s3 >> 24U] ^ Te1_r((s0 >> 16U) & m8) ^ Te2_r((s1 >> 8U) & m8) ^ Te3_r(s2 & m8) ^ rk[39];
                s0 = Te0[t0 >> 24U] ^ Te1_r((t1 >> 16U) & m8) ^ Te2_r((t2 >> 8U) & m8) ^ Te3_r(t3 & m8) ^ rk[40];
                s1 = Te0[t1 >> 24U] ^ Te1_r((t2 >> 16U) & m8) ^ Te2_r((t3 >> 8U) & m8) ^ Te3_r(t0 & m8) ^ rk[41];
                s2 = Te0[t2 >> 24U] ^ Te1_r((t3 >> 16U) & m8) ^ Te2_r((t0 >> 8U) & m8) ^ Te3_r(t1 & m8) ^ rk[42];
                s3 = Te0[t3 >> 24U] ^ Te1_r((t0 >> 16U) & m8) ^ Te2_r((t1 >> 8U) & m8) ^ Te3_r(t2 & m8) ^ rk[43];
                t0 = Te0[s0 >> 24U] ^ Te1_r((s1 >> 16U) & m8) ^ Te2_r((s2 >> 8U) & m8) ^ Te3_r(s3 & m8) ^ rk[44];
                t1 = Te0[s1 >> 24U] ^ Te1_r((s2 >> 16U) & m8) ^ Te2_r((s3 >> 8U) & m8) ^ Te3_r(s0 & m8) ^ rk[45];
                t2 = Te0[s2 >> 24U] ^ Te1_r((s3 >> 16U) & m8) ^ Te2_r((s0 >> 8U) & m8) ^ Te3_r(s1 & m8) ^ rk[46];
                t3 = Te0[s3 >> 24U] ^ Te1_r((s0 >> 16U) & m8) ^ Te2_r((s1 >> 8U) & m8) ^ Te3_r(s2 & m8) ^ rk[47];
                s0 = Te0[t0 >> 24U] ^ Te1_r((t1 >> 16U) & m8) ^ Te2_r((t2 >> 8U) & m8) ^ Te3_r(t3 & m8) ^ rk[48];
                s1 = Te0[t1 >> 24U] ^ Te1_r((t2 >> 16U) & m8) ^ Te2_r((t3 >> 8U) & m8) ^ Te3_r(t0 & m8) ^ rk[49];
                s2 = Te0[t2 >> 24U] ^ Te1_r((t3 >> 16U) & m8) ^ Te2_r((t0 >> 8U) & m8) ^ Te3_r(t1 & m8) ^ rk[50];
                s3 = Te0[t3 >> 24U] ^ Te1_r((t0 >> 16U) & m8) ^ Te2_r((t1 >> 8U) & m8) ^ Te3_r(t2 & m8) ^ rk[51];
                t0 = Te0[s0 >> 24U] ^ Te1_r((s1 >> 16U) & m8) ^ Te2_r((s2 >> 8U) & m8) ^ Te3_r(s3 & m8) ^ rk[52];
                t1 = Te0[s1 >> 24U] ^ Te1_r((s2 >> 16U) & m8) ^ Te2_r((s3 >> 8U) & m8) ^ Te3_r(s0 & m8) ^ rk[53];
                t2 = Te0[s2 >> 24U] ^ Te1_r((s3 >> 16U) & m8) ^ Te2_r((s0 >> 8U) & m8) ^ Te3_r(s1 & m8) ^ rk[54];
                t3 = Te0[s3 >> 24U] ^ Te1_r((s0 >> 16U) & m8) ^ Te2_r((s1 >> 8U) & m8) ^ Te3_r(s2 & m8) ^ rk[55];
                s0 = (Te2_r(t0 >> 24U) & m8_24) ^ (Te3_r((t1 >> 16U) & m8) & m8_16) ^ (Te0[(t2 >> 8U) & m8] & m8_8) ^ (Te1_r(t3 & m8) & m8) ^ rk[56];
                s1 = (Te2_r(t1 >> 24U) & m8_24) ^ (Te3_r((t2 >> 16U) & m8) & m8_16) ^ (Te0[(t3 >> 8U) & m8] & m8_8) ^ (Te1_r(t0 & m8) & m8) ^ rk[57];
                s2 = (Te2_r(t2 >> 24U) & m8_24) ^ (Te3_r((t3 >> 16U) & m8) & m8_16) ^ (Te0[(t0 >> 8U) & m8] & m8_8) ^ (Te1_r(t1 & m8) & m8) ^ rk[58];
                s3 = (Te2_r(t3 >> 24U) & m8_24) ^ (Te3_r((t0 >> 16U) & m8) & m8_16) ^ (Te0[(t1 >> 8U) & m8] & m8_8) ^ (Te1_r(t2 & m8) & m8) ^ rk[59];
                out[0] = in[0] ^ (uint8_t)(s0 >> 24U);
                out[1] = in[1] ^ (uint8_t)(s0 >> 16U);
                out[2] = in[2] ^ (uint8_t)(s0 >> 8U);
                out[3] = in[3] ^ (uint8_t)s0;
                out[4] = in[4] ^ (uint8_t)(s1 >> 24U);
                out[5] = in[5] ^ (uint8_t)(s1 >> 16U);
                out[6] = in[6] ^ (uint8_t)(s1 >> 8U);
                out[7] = in[7] ^ (uint8_t)s1;
                out[8] = in[8] ^ (uint8_t)(s2 >> 24U);
                out[9] = in[9] ^ (uint8_t)(s2 >> 16U);
                out[10] = in[10] ^ (uint8_t)(s2 >> 8U);
                out[11] = in[11] ^ (uint8_t)s2;
                out[12] = in[12] ^ (uint8_t)(s3 >> 24U);
                out[13] = in[13] ^ (uint8_t)(s3 >> 16U);
                out[14] = in[14] ^ (uint8_t)(s3 >> 8U);
                out[15] = in[15] ^ (uint8_t)s3;
                out += 16;
                in += 16;
            } while ((len -= 16) >= 16);
        }
        // Persist the advanced counter for the next call / finish().
        reinterpret_cast<uint32_t *>(_ctr)[3] = Utils::hton(ctr);
    }
    // Any remaining input is placed in _out. This will be picked up and crypted
    // on subsequent calls to crypt() or finish() as it'll mean _len will not be
    // an even multiple of 16.
    while (len) {
        --len;
        *(out++) = *(in++);
    }
}
// Finalize a CTR operation: any bytes past the last full block were copied
// into _out un-encrypted by crypt(), so XOR them with keystream generated
// from the current counter block now.
void AES::CTR::finish() noexcept
{
    const unsigned int remainder = _len & 15U;
    if (!remainder)
        return; // message ended on a block boundary; nothing buffered
    uint8_t keyStream[16];
    _aes.encrypt(_ctr, keyStream);
    uint8_t *const tail = _out + (_len - remainder);
    for (unsigned int i = 0; i < remainder; ++i)
        tail[i] ^= keyStream[i];
}
// Software AES and AES key expansion ---------------------------------------------------------------------------------
// Encryption T-table 0: Te0[x] = MixColumns applied to SubBytes(x) in one
// column word. Te1..Te3 are byte rotations of this table (see Te1_r etc.).
const uint32_t AES::Te0[256] = { 0xc66363a5, 0xf87c7c84, 0xee777799, 0xf67b7b8d, 0xfff2f20d, 0xd66b6bbd, 0xde6f6fb1, 0x91c5c554, 0x60303050, 0x02010103, 0xce6767a9, 0x562b2b7d, 0xe7fefe19, 0xb5d7d762, 0x4dababe6, 0xec76769a, 0x8fcaca45, 0x1f82829d, 0x89c9c940, 0xfa7d7d87, 0xeffafa15, 0xb25959eb, 0x8e4747c9, 0xfbf0f00b, 0x41adadec, 0xb3d4d467,
0x5fa2a2fd, 0x45afafea, 0x239c9cbf, 0x53a4a4f7, 0xe4727296, 0x9bc0c05b, 0x75b7b7c2, 0xe1fdfd1c, 0x3d9393ae, 0x4c26266a, 0x6c36365a, 0x7e3f3f41, 0xf5f7f702, 0x83cccc4f, 0x6834345c, 0x51a5a5f4, 0xd1e5e534, 0xf9f1f108, 0xe2717193, 0xabd8d873, 0x62313153, 0x2a15153f, 0x0804040c, 0x95c7c752, 0x46232365, 0x9dc3c35e,
0x30181828, 0x379696a1, 0x0a05050f, 0x2f9a9ab5, 0x0e070709, 0x24121236, 0x1b80809b, 0xdfe2e23d, 0xcdebeb26, 0x4e272769, 0x7fb2b2cd, 0xea75759f, 0x1209091b, 0x1d83839e, 0x582c2c74, 0x341a1a2e, 0x361b1b2d, 0xdc6e6eb2, 0xb45a5aee, 0x5ba0a0fb, 0xa45252f6, 0x763b3b4d, 0xb7d6d661, 0x7db3b3ce, 0x5229297b, 0xdde3e33e,
0x5e2f2f71, 0x13848497, 0xa65353f5, 0xb9d1d168, 0x00000000, 0xc1eded2c, 0x40202060, 0xe3fcfc1f, 0x79b1b1c8, 0xb65b5bed, 0xd46a6abe, 0x8dcbcb46, 0x67bebed9, 0x7239394b, 0x944a4ade, 0x984c4cd4, 0xb05858e8, 0x85cfcf4a, 0xbbd0d06b, 0xc5efef2a, 0x4faaaae5, 0xedfbfb16, 0x864343c5, 0x9a4d4dd7, 0x66333355, 0x11858594,
0x8a4545cf, 0xe9f9f910, 0x04020206, 0xfe7f7f81, 0xa05050f0, 0x783c3c44, 0x259f9fba, 0x4ba8a8e3, 0xa25151f3, 0x5da3a3fe, 0x804040c0, 0x058f8f8a, 0x3f9292ad, 0x219d9dbc, 0x70383848, 0xf1f5f504, 0x63bcbcdf, 0x77b6b6c1, 0xafdada75, 0x42212163, 0x20101030, 0xe5ffff1a, 0xfdf3f30e, 0xbfd2d26d, 0x81cdcd4c, 0x180c0c14,
0x26131335, 0xc3ecec2f, 0xbe5f5fe1, 0x359797a2, 0x884444cc, 0x2e171739, 0x93c4c457, 0x55a7a7f2, 0xfc7e7e82, 0x7a3d3d47, 0xc86464ac, 0xba5d5de7, 0x3219192b, 0xe6737395, 0xc06060a0, 0x19818198, 0x9e4f4fd1, 0xa3dcdc7f, 0x44222266, 0x542a2a7e, 0x3b9090ab, 0x0b888883, 0x8c4646ca, 0xc7eeee29, 0x6bb8b8d3, 0x2814143c,
0xa7dede79, 0xbc5e5ee2, 0x160b0b1d, 0xaddbdb76, 0xdbe0e03b, 0x64323256, 0x743a3a4e, 0x140a0a1e, 0x924949db, 0x0c06060a, 0x4824246c, 0xb85c5ce4, 0x9fc2c25d, 0xbdd3d36e, 0x43acacef, 0xc46262a6, 0x399191a8, 0x319595a4, 0xd3e4e437, 0xf279798b, 0xd5e7e732, 0x8bc8c843, 0x6e373759, 0xda6d6db7, 0x018d8d8c, 0xb1d5d564,
0x9c4e4ed2, 0x49a9a9e0, 0xd86c6cb4, 0xac5656fa, 0xf3f4f407, 0xcfeaea25, 0xca6565af, 0xf47a7a8e, 0x47aeaee9, 0x10080818, 0x6fbabad5, 0xf0787888, 0x4a25256f, 0x5c2e2e72, 0x381c1c24, 0x57a6a6f1, 0x73b4b4c7, 0x97c6c651, 0xcbe8e823, 0xa1dddd7c, 0xe874749c, 0x3e1f1f21, 0x964b4bdd, 0x61bdbddc, 0x0d8b8b86, 0x0f8a8a85,
0xe0707090, 0x7c3e3e42, 0x71b5b5c4, 0xcc6666aa, 0x904848d8, 0x06030305, 0xf7f6f601, 0x1c0e0e12, 0xc26161a3, 0x6a35355f, 0xae5757f9, 0x69b9b9d0, 0x17868691, 0x99c1c158, 0x3a1d1d27, 0x279e9eb9, 0xd9e1e138, 0xebf8f813, 0x2b9898b3, 0x22111133, 0xd26969bb, 0xa9d9d970, 0x078e8e89, 0x339494a7, 0x2d9b9bb6, 0x3c1e1e22,
0x15878792, 0xc9e9e920, 0x87cece49, 0xaa5555ff, 0x50282878, 0xa5dfdf7a, 0x038c8c8f, 0x59a1a1f8, 0x09898980, 0x1a0d0d17, 0x65bfbfda, 0xd7e6e631, 0x844242c6, 0xd06868b8, 0x824141c3, 0x299999b0, 0x5a2d2d77, 0x1e0f0f11, 0x7bb0b0cb, 0xa85454fc, 0x6dbbbbd6, 0x2c16163a };
// S-box with the output byte replicated into all four byte positions;
// used by the key expansion in p_initSW().
const uint32_t AES::Te4[256] = { 0x63636363, 0x7c7c7c7c, 0x77777777, 0x7b7b7b7b, 0xf2f2f2f2, 0x6b6b6b6b, 0x6f6f6f6f, 0xc5c5c5c5, 0x30303030, 0x01010101, 0x67676767, 0x2b2b2b2b, 0xfefefefe, 0xd7d7d7d7, 0xabababab, 0x76767676, 0xcacacaca, 0x82828282, 0xc9c9c9c9, 0x7d7d7d7d, 0xfafafafa, 0x59595959, 0x47474747, 0xf0f0f0f0, 0xadadadad, 0xd4d4d4d4,
0xa2a2a2a2, 0xafafafaf, 0x9c9c9c9c, 0xa4a4a4a4, 0x72727272, 0xc0c0c0c0, 0xb7b7b7b7, 0xfdfdfdfd, 0x93939393, 0x26262626, 0x36363636, 0x3f3f3f3f, 0xf7f7f7f7, 0xcccccccc, 0x34343434, 0xa5a5a5a5, 0xe5e5e5e5, 0xf1f1f1f1, 0x71717171, 0xd8d8d8d8, 0x31313131, 0x15151515, 0x04040404, 0xc7c7c7c7, 0x23232323, 0xc3c3c3c3,
0x18181818, 0x96969696, 0x05050505, 0x9a9a9a9a, 0x07070707, 0x12121212, 0x80808080, 0xe2e2e2e2, 0xebebebeb, 0x27272727, 0xb2b2b2b2, 0x75757575, 0x09090909, 0x83838383, 0x2c2c2c2c, 0x1a1a1a1a, 0x1b1b1b1b, 0x6e6e6e6e, 0x5a5a5a5a, 0xa0a0a0a0, 0x52525252, 0x3b3b3b3b, 0xd6d6d6d6, 0xb3b3b3b3, 0x29292929, 0xe3e3e3e3,
0x2f2f2f2f, 0x84848484, 0x53535353, 0xd1d1d1d1, 0x00000000, 0xedededed, 0x20202020, 0xfcfcfcfc, 0xb1b1b1b1, 0x5b5b5b5b, 0x6a6a6a6a, 0xcbcbcbcb, 0xbebebebe, 0x39393939, 0x4a4a4a4a, 0x4c4c4c4c, 0x58585858, 0xcfcfcfcf, 0xd0d0d0d0, 0xefefefef, 0xaaaaaaaa, 0xfbfbfbfb, 0x43434343, 0x4d4d4d4d, 0x33333333, 0x85858585,
0x45454545, 0xf9f9f9f9, 0x02020202, 0x7f7f7f7f, 0x50505050, 0x3c3c3c3c, 0x9f9f9f9f, 0xa8a8a8a8, 0x51515151, 0xa3a3a3a3, 0x40404040, 0x8f8f8f8f, 0x92929292, 0x9d9d9d9d, 0x38383838, 0xf5f5f5f5, 0xbcbcbcbc, 0xb6b6b6b6, 0xdadadada, 0x21212121, 0x10101010, 0xffffffff, 0xf3f3f3f3, 0xd2d2d2d2, 0xcdcdcdcd, 0x0c0c0c0c,
0x13131313, 0xecececec, 0x5f5f5f5f, 0x97979797, 0x44444444, 0x17171717, 0xc4c4c4c4, 0xa7a7a7a7, 0x7e7e7e7e, 0x3d3d3d3d, 0x64646464, 0x5d5d5d5d, 0x19191919, 0x73737373, 0x60606060, 0x81818181, 0x4f4f4f4f, 0xdcdcdcdc, 0x22222222, 0x2a2a2a2a, 0x90909090, 0x88888888, 0x46464646, 0xeeeeeeee, 0xb8b8b8b8, 0x14141414,
0xdededede, 0x5e5e5e5e, 0x0b0b0b0b, 0xdbdbdbdb, 0xe0e0e0e0, 0x32323232, 0x3a3a3a3a, 0x0a0a0a0a, 0x49494949, 0x06060606, 0x24242424, 0x5c5c5c5c, 0xc2c2c2c2, 0xd3d3d3d3, 0xacacacac, 0x62626262, 0x91919191, 0x95959595, 0xe4e4e4e4, 0x79797979, 0xe7e7e7e7, 0xc8c8c8c8, 0x37373737, 0x6d6d6d6d, 0x8d8d8d8d, 0xd5d5d5d5,
0x4e4e4e4e, 0xa9a9a9a9, 0x6c6c6c6c, 0x56565656, 0xf4f4f4f4, 0xeaeaeaea, 0x65656565, 0x7a7a7a7a, 0xaeaeaeae, 0x08080808, 0xbabababa, 0x78787878, 0x25252525, 0x2e2e2e2e, 0x1c1c1c1c, 0xa6a6a6a6, 0xb4b4b4b4, 0xc6c6c6c6, 0xe8e8e8e8, 0xdddddddd, 0x74747474, 0x1f1f1f1f, 0x4b4b4b4b, 0xbdbdbdbd, 0x8b8b8b8b, 0x8a8a8a8a,
0x70707070, 0x3e3e3e3e, 0xb5b5b5b5, 0x66666666, 0x48484848, 0x03030303, 0xf6f6f6f6, 0x0e0e0e0e, 0x61616161, 0x35353535, 0x57575757, 0xb9b9b9b9, 0x86868686, 0xc1c1c1c1, 0x1d1d1d1d, 0x9e9e9e9e, 0xe1e1e1e1, 0xf8f8f8f8, 0x98989898, 0x11111111, 0x69696969, 0xd9d9d9d9, 0x8e8e8e8e, 0x94949494, 0x9b9b9b9b, 0x1e1e1e1e,
0x87878787, 0xe9e9e9e9, 0xcececece, 0x55555555, 0x28282828, 0xdfdfdfdf, 0x8c8c8c8c, 0xa1a1a1a1, 0x89898989, 0x0d0d0d0d, 0xbfbfbfbf, 0xe6e6e6e6, 0x42424242, 0x68686868, 0x41414141, 0x99999999, 0x2d2d2d2d, 0x0f0f0f0f, 0xb0b0b0b0, 0x54545454, 0xbbbbbbbb, 0x16161616 };
// Decryption T-table 0 (inverse MixColumns composed with the inverse
// S-box); Td1..Td3 are byte rotations of this (see Td1_r etc.).
const uint32_t AES::Td0[256] = { 0x51f4a750, 0x7e416553, 0x1a17a4c3, 0x3a275e96, 0x3bab6bcb, 0x1f9d45f1, 0xacfa58ab, 0x4be30393, 0x2030fa55, 0xad766df6, 0x88cc7691, 0xf5024c25, 0x4fe5d7fc, 0xc52acbd7, 0x26354480, 0xb562a38f, 0xdeb15a49, 0x25ba1b67, 0x45ea0e98, 0x5dfec0e1, 0xc32f7502, 0x814cf012, 0x8d4697a3, 0x6bd3f9c6, 0x038f5fe7, 0x15929c95,
0xbf6d7aeb, 0x955259da, 0xd4be832d, 0x587421d3, 0x49e06929, 0x8ec9c844, 0x75c2896a, 0xf48e7978, 0x99583e6b, 0x27b971dd, 0xbee14fb6, 0xf088ad17, 0xc920ac66, 0x7dce3ab4, 0x63df4a18, 0xe51a3182, 0x97513360, 0x62537f45, 0xb16477e0, 0xbb6bae84, 0xfe81a01c, 0xf9082b94, 0x70486858, 0x8f45fd19, 0x94de6c87, 0x527bf8b7,
0xab73d323, 0x724b02e2, 0xe31f8f57, 0x6655ab2a, 0xb2eb2807, 0x2fb5c203, 0x86c57b9a, 0xd33708a5, 0x302887f2, 0x23bfa5b2, 0x02036aba, 0xed16825c, 0x8acf1c2b, 0xa779b492, 0xf307f2f0, 0x4e69e2a1, 0x65daf4cd, 0x0605bed5, 0xd134621f, 0xc4a6fe8a, 0x342e539d, 0xa2f355a0, 0x058ae132, 0xa4f6eb75, 0x0b83ec39, 0x4060efaa,
0x5e719f06, 0xbd6e1051, 0x3e218af9, 0x96dd063d, 0xdd3e05ae, 0x4de6bd46, 0x91548db5, 0x71c45d05, 0x0406d46f, 0x605015ff, 0x1998fb24, 0xd6bde997, 0x894043cc, 0x67d99e77, 0xb0e842bd, 0x07898b88, 0xe7195b38, 0x79c8eedb, 0xa17c0a47, 0x7c420fe9, 0xf8841ec9, 0x00000000, 0x09808683, 0x322bed48, 0x1e1170ac, 0x6c5a724e,
0xfd0efffb, 0x0f853856, 0x3daed51e, 0x362d3927, 0x0a0fd964, 0x685ca621, 0x9b5b54d1, 0x24362e3a, 0x0c0a67b1, 0x9357e70f, 0xb4ee96d2, 0x1b9b919e, 0x80c0c54f, 0x61dc20a2, 0x5a774b69, 0x1c121a16, 0xe293ba0a, 0xc0a02ae5, 0x3c22e043, 0x121b171d, 0x0e090d0b, 0xf28bc7ad, 0x2db6a8b9, 0x141ea9c8, 0x57f11985, 0xaf75074c,
0xee99ddbb, 0xa37f60fd, 0xf701269f, 0x5c72f5bc, 0x44663bc5, 0x5bfb7e34, 0x8b432976, 0xcb23c6dc, 0xb6edfc68, 0xb8e4f163, 0xd731dcca, 0x42638510, 0x13972240, 0x84c61120, 0x854a247d, 0xd2bb3df8, 0xaef93211, 0xc729a16d, 0x1d9e2f4b, 0xdcb230f3, 0x0d8652ec, 0x77c1e3d0, 0x2bb3166c, 0xa970b999, 0x119448fa, 0x47e96422,
0xa8fc8cc4, 0xa0f03f1a, 0x567d2cd8, 0x223390ef, 0x87494ec7, 0xd938d1c1, 0x8ccaa2fe, 0x98d40b36, 0xa6f581cf, 0xa57ade28, 0xdab78e26, 0x3fadbfa4, 0x2c3a9de4, 0x5078920d, 0x6a5fcc9b, 0x547e4662, 0xf68d13c2, 0x90d8b8e8, 0x2e39f75e, 0x82c3aff5, 0x9f5d80be, 0x69d0937c, 0x6fd52da9, 0xcf2512b3, 0xc8ac993b, 0x10187da7,
0xe89c636e, 0xdb3bbb7b, 0xcd267809, 0x6e5918f4, 0xec9ab701, 0x834f9aa8, 0xe6956e65, 0xaaffe67e, 0x21bccf08, 0xef15e8e6, 0xbae79bd9, 0x4a6f36ce, 0xea9f09d4, 0x29b07cd6, 0x31a4b2af, 0x2a3f2331, 0xc6a59430, 0x35a266c0, 0x744ebc37, 0xfc82caa6, 0xe090d0b0, 0x33a7d815, 0xf104984a, 0x41ecdaf7, 0x7fcd500e, 0x1791f62f,
0x764dd68d, 0x43efb04d, 0xccaa4d54, 0xe49604df, 0x9ed1b5e3, 0x4c6a881b, 0xc12c1fb8, 0x4665517f, 0x9d5eea04, 0x018c355d, 0xfa877473, 0xfb0b412e, 0xb3671d5a, 0x92dbd252, 0xe9105633, 0x6dd64713, 0x9ad7618c, 0x37a10c7a, 0x59f8148e, 0xeb133c89, 0xcea927ee, 0xb761c935, 0xe11ce5ed, 0x7a47b13c, 0x9cd2df59, 0x55f2733f,
0x1814ce79, 0x73c737bf, 0x53f7cdea, 0x5ffdaa5b, 0xdf3d6f14, 0x7844db86, 0xcaaff381, 0xb968c43e, 0x3824342c, 0xc2a3405f, 0x161dc372, 0xbce2250c, 0x283c498b, 0xff0d9541, 0x39a80171, 0x080cb3de, 0xd8b4e49c, 0x6456c190, 0x7bcb8461, 0xd532b670, 0x486c5c74, 0xd0b85742 };
// Inverse S-box (plain bytes), used by the decryption key schedule /
// final decryption round in the software implementation.
const uint8_t AES::Td4[256] = { 0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38, 0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb, 0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87, 0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb, 0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d, 0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e, 0x08, 0x2e, 0xa1, 0x66,
0x28, 0xd9, 0x24, 0xb2, 0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25, 0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16, 0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92, 0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda, 0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84, 0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a,
0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06, 0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02, 0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b, 0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea, 0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73, 0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85, 0xe2, 0xf9, 0x37, 0xe8,
0x1c, 0x75, 0xdf, 0x6e, 0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89, 0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b, 0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20, 0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4, 0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31, 0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f,
0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d, 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef, 0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0, 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61, 0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26, 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d };
// Key-schedule round constants: successive doublings of 1 in GF(2^8),
// placed in the high byte. p_initSW() uses only the first 7 for AES-256
// (its expansion loop breaks after i == 7); the rest are unused spares.
const uint32_t AES::rcon[15] = { 0x01000000, 0x02000000, 0x04000000, 0x08000000, 0x10000000, 0x20000000, 0x40000000, 0x80000000, 0x1B000000, 0x36000000, 0x6c000000, 0xd8000000, 0xab000000, 0x4d000000, 0x9a000000 };
// Software AES-256 key setup. Expands the 256-bit key into the 60-word
// encryption schedule (ek), derives the GMAC hash subkey H = E_k(0^128),
// and builds the equivalent-inverse-cipher decryption schedule (dk).
void AES::p_initSW(const uint8_t *key) noexcept
{
	uint32_t *rk = p_k.sw.ek;
	// The first eight round-key words are the raw key itself, as big-endian words.
	rk[0] = Utils::loadBigEndian<uint32_t>(key);
	rk[1] = Utils::loadBigEndian<uint32_t>(key + 4);
	rk[2] = Utils::loadBigEndian<uint32_t>(key + 8);
	rk[3] = Utils::loadBigEndian<uint32_t>(key + 12);
	rk[4] = Utils::loadBigEndian<uint32_t>(key + 16);
	rk[5] = Utils::loadBigEndian<uint32_t>(key + 20);
	rk[6] = Utils::loadBigEndian<uint32_t>(key + 24);
	rk[7] = Utils::loadBigEndian<uint32_t>(key + 28);
	// Expand up to 8 words per iteration; 7 iterations fill the 60-word schedule
	// (the last iteration stops after its first 4 words).
	for (int i = 0;;) {
		uint32_t temp = rk[7];
		// RotWord + SubWord + Rcon, assembled from the byte planes of the Te tables.
		rk[8] = rk[0] ^ (Te2_r((temp >> 16U) & 0xffU) & 0xff000000U) ^ (Te3_r((temp >> 8U) & 0xffU) & 0x00ff0000U) ^ (Te0[(temp)&0xffU] & 0x0000ff00U) ^ (Te1_r(temp >> 24U) & 0x000000ffU) ^ rcon[i];
		rk[9] = rk[1] ^ rk[8];
		rk[10] = rk[2] ^ rk[9];
		rk[11] = rk[3] ^ rk[10];
		if (++i == 7)
			break;
		temp = rk[11];
		// AES-256 additionally applies SubWord (no rotation, no Rcon) at the
		// half-key boundary every 8 words.
		rk[12] = rk[4] ^ (Te2_r(temp >> 24U) & 0xff000000U) ^ (Te3_r((temp >> 16U) & 0xffU) & 0x00ff0000U) ^ (Te0[(temp >> 8U) & 0xffU] & 0x0000ff00U) ^ (Te1_r((temp)&0xffU) & 0x000000ffU);
		rk[13] = rk[5] ^ rk[12];
		rk[14] = rk[6] ^ rk[13];
		rk[15] = rk[7] ^ rk[14];
		rk += 8;
	}
	// GMAC subkey H is the encryption of the all-zero block, converted from the
	// block's big-endian bytes to host byte order for the software GHASH path.
	p_encryptSW((const uint8_t *)Utils::ZERO256, (uint8_t *)p_k.sw.h);
	p_k.sw.h[0] = Utils::ntoh(p_k.sw.h[0]);
	p_k.sw.h[1] = Utils::ntoh(p_k.sw.h[1]);
	// Decryption schedule: start from a copy of the encryption schedule...
	for (int i = 0; i < 60; ++i)
		p_k.sw.dk[i] = p_k.sw.ek[i];
	rk = p_k.sw.dk;
	// ...reverse the order of the round keys, four words (one round) at a time...
	for (int i = 0, j = 56; i < j; i += 4, j -= 4) {
		uint32_t temp = rk[i];
		rk[i] = rk[j];
		rk[j] = temp;
		temp = rk[i + 1];
		rk[i + 1] = rk[j + 1];
		rk[j + 1] = temp;
		temp = rk[i + 2];
		rk[i + 2] = rk[j + 2];
		rk[j + 2] = temp;
		temp = rk[i + 3];
		rk[i + 3] = rk[j + 3];
		rk[j + 3] = temp;
	}
	// ...then apply InvMixColumns to the 13 middle round keys (via Te4 to take
	// the S-box forward, then the Td tables) so decryption can reuse the same
	// table-driven round structure as encryption (equivalent inverse cipher).
	for (int i = 1; i < 14; ++i) {
		rk += 4;
		rk[0] = Td0[Te4[(rk[0] >> 24U)] & 0xffU] ^ Td1_r(Te4[(rk[0] >> 16U) & 0xffU] & 0xffU) ^ Td2_r(Te4[(rk[0] >> 8U) & 0xffU] & 0xffU) ^ Td3_r(Te4[(rk[0]) & 0xffU] & 0xffU);
		rk[1] = Td0[Te4[(rk[1] >> 24U)] & 0xffU] ^ Td1_r(Te4[(rk[1] >> 16U) & 0xffU] & 0xffU) ^ Td2_r(Te4[(rk[1] >> 8U) & 0xffU] & 0xffU) ^ Td3_r(Te4[(rk[1]) & 0xffU] & 0xffU);
		rk[2] = Td0[Te4[(rk[2] >> 24U)] & 0xffU] ^ Td1_r(Te4[(rk[2] >> 16U) & 0xffU] & 0xffU) ^ Td2_r(Te4[(rk[2] >> 8U) & 0xffU] & 0xffU) ^ Td3_r(Te4[(rk[2]) & 0xffU] & 0xffU);
		rk[3] = Td0[Te4[(rk[3] >> 24U)] & 0xffU] ^ Td1_r(Te4[(rk[3] >> 16U) & 0xffU] & 0xffU) ^ Td2_r(Te4[(rk[3] >> 8U) & 0xffU] & 0xffU) ^ Td3_r(Te4[(rk[3]) & 0xffU] & 0xffU);
	}
}
void AES::p_encryptSW(const uint8_t *in, uint8_t *out) const noexcept
{
const uint32_t *const restrict rk = p_k.sw.ek;
const uint32_t m8 = 0x000000ff;
const uint32_t m8_8 = 0x0000ff00;
const uint32_t m8_16 = 0x00ff0000;
const uint32_t m8_24 = 0xff000000;
uint32_t s0 = Utils::loadBigEndian<uint32_t>(in) ^ rk[0];
uint32_t s1 = Utils::loadBigEndian<uint32_t>(in + 4) ^ rk[1];
uint32_t s2 = Utils::loadBigEndian<uint32_t>(in + 8) ^ rk[2];
uint32_t s3 = Utils::loadBigEndian<uint32_t>(in + 12) ^ rk[3];
uint32_t t0, t1, t2, t3;
t0 = Te0[s0 >> 24U] ^ Te1_r((s1 >> 16U) & m8) ^ Te2_r((s2 >> 8U) & m8) ^ Te3_r(s3 & m8) ^ rk[4];
t1 = Te0[s1 >> 24U] ^ Te1_r((s2 >> 16U) & m8) ^ Te2_r((s3 >> 8U) & m8) ^ Te3_r(s0 & m8) ^ rk[5];
t2 = Te0[s2 >> 24U] ^ Te1_r((s3 >> 16U) & m8) ^ Te2_r((s0 >> 8U) & m8) ^ Te3_r(s1 & m8) ^ rk[6];
t3 = Te0[s3 >> 24U] ^ Te1_r((s0 >> 16U) & m8) ^ Te2_r((s1 >> 8U) & m8) ^ Te3_r(s2 & m8) ^ rk[7];
s0 = Te0[t0 >> 24U] ^ Te1_r((t1 >> 16U) & m8) ^ Te2_r((t2 >> 8U) & m8) ^ Te3_r(t3 & m8) ^ rk[8];
s1 = Te0[t1 >> 24U] ^ Te1_r((t2 >> 16U) & m8) ^ Te2_r((t3 >> 8U) & m8) ^ Te3_r(t0 & m8) ^ rk[9];
s2 = Te0[t2 >> 24U] ^ Te1_r((t3 >> 16U) & m8) ^ Te2_r((t0 >> 8U) & m8) ^ Te3_r(t1 & m8) ^ rk[10];
s3 = Te0[t3 >> 24U] ^ Te1_r((t0 >> 16U) & m8) ^ Te2_r((t1 >> 8U) & m8) ^ Te3_r(t2 & m8) ^ rk[11];
t0 = Te0[s0 >> 24U] ^ Te1_r((s1 >> 16U) & m8) ^ Te2_r((s2 >> 8U) & m8) ^ Te3_r(s3 & m8) ^ rk[12];
t1 = Te0[s1 >> 24U] ^ Te1_r((s2 >> 16U) & m8) ^ Te2_r((s3 >> 8U) & m8) ^ Te3_r(s0 & m8) ^ rk[13];
t2 = Te0[s2 >> 24U] ^ Te1_r((s3 >> 16U) & m8) ^ Te2_r((s0 >> 8U) & m8) ^ Te3_r(s1 & m8) ^ rk[14];
t3 = Te0[s3 >> 24U] ^ Te1_r((s0 >> 16U) & m8) ^ Te2_r((s1 >> 8U) & m8) ^ Te3_r(s2 & m8) ^ rk[15];
s0 = Te0[t0 >> 24U] ^ Te1_r((t1 >> 16U) & m8) ^ Te2_r((t2 >> 8U) & m8) ^ Te3_r(t3 & m8) ^ rk[16];
s1 = Te0[t1 >> 24U] ^ Te1_r((t2 >> 16U) & m8) ^ Te2_r((t3 >> 8U) & m8) ^ Te3_r(t0 & m8) ^ rk[17];
s2 = Te0[t2 >> 24U] ^ Te1_r((t3 >> 16U) & m8) ^ Te2_r((t0 >> 8U) & m8) ^ Te3_r(t1 & m8) ^ rk[18];
s3 = Te0[t3 >> 24U] ^ Te1_r((t0 >> 16U) & m8) ^ Te2_r((t1 >> 8U) & m8) ^ Te3_r(t2 & m8) ^ rk[19];
t0 = Te0[s0 >> 24U] ^ Te1_r((s1 >> 16U) & m8) ^ Te2_r((s2 >> 8U) & m8) ^ Te3_r(s3 & m8) ^ rk[20];
t1 = Te0[s1 >> 24U] ^ Te1_r((s2 >> 16U) & m8) ^ Te2_r((s3 >> 8U) & m8) ^ Te3_r(s0 & m8) ^ rk[21];
t2 = Te0[s2 >> 24U] ^ Te1_r((s3 >> 16U) & m8) ^ Te2_r((s0 >> 8U) & m8) ^ Te3_r(s1 & m8) ^ rk[22];
t3 = Te0[s3 >> 24U] ^ Te1_r((s0 >> 16U) & m8) ^ Te2_r((s1 >> 8U) & m8) ^ Te3_r(s2 & m8) ^ rk[23];
s0 = Te0[t0 >> 24U] ^ Te1_r((t1 >> 16U) & m8) ^ Te2_r((t2 >> 8U) & m8) ^ Te3_r(t3 & m8) ^ rk[24];
s1 = Te0[t1 >> 24U] ^ Te1_r((t2 >> 16U) & m8) ^ Te2_r((t3 >> 8U) & m8) ^ Te3_r(t0 & m8) ^ rk[25];
s2 = Te0[t2 >> 24U] ^ Te1_r((t3 >> 16U) & m8) ^ Te2_r((t0 >> 8U) & m8) ^ Te3_r(t1 & m8) ^ rk[26];
s3 = Te0[t3 >> 24U] ^ Te1_r((t0 >> 16U) & m8) ^ Te2_r((t1 >> 8U) & m8) ^ Te3_r(t2 & m8) ^ rk[27];
t0 = Te0[s0 >> 24U] ^ Te1_r((s1 >> 16U) & m8) ^ Te2_r((s2 >> 8U) & m8) ^ Te3_r(s3 & m8) ^ rk[28];
t1 = Te0[s1 >> 24U] ^ Te1_r((s2 >> 16U) & m8) ^ Te2_r((s3 >> 8U) & m8) ^ Te3_r(s0 & m8) ^ rk[29];
t2 = Te0[s2 >> 24U] ^ Te1_r((s3 >> 16U) & m8) ^ Te2_r((s0 >> 8U) & m8) ^ Te3_r(s1 & m8) ^ rk[30];
t3 = Te0[s3 >> 24U] ^ Te1_r((s0 >> 16U) & m8) ^ Te2_r((s1 >> 8U) & m8) ^ Te3_r(s2 & m8) ^ rk[31];
s0 = Te0[t0 >> 24U] ^ Te1_r((t1 >> 16U) & m8) ^ Te2_r((t2 >> 8U) & m8) ^ Te3_r(t3 & m8) ^ rk[32];
s1 = Te0[t1 >> 24U] ^ Te1_r((t2 >> 16U) & m8) ^ Te2_r((t3 >> 8U) & m8) ^ Te3_r(t0 & m8) ^ rk[33];
s2 = Te0[t2 >> 24U] ^ Te1_r((t3 >> 16U) & m8) ^ Te2_r((t0 >> 8U) & m8) ^ Te3_r(t1 & m8) ^ rk[34];
s3 = Te0[t3 >> 24U] ^ Te1_r((t0 >> 16U) & m8) ^ Te2_r((t1 >> 8U) & m8) ^ Te3_r(t2 & m8) ^ rk[35];
t0 = Te0[s0 >> 24U] ^ Te1_r((s1 >> 16U) & m8) ^ Te2_r((s2 >> 8U) & m8) ^ Te3_r(s3 & m8) ^ rk[36];
t1 = Te0[s1 >> 24U] ^ Te1_r((s2 >> 16U) & m8) ^ Te2_r((s3 >> 8U) & m8) ^ Te3_r(s0 & m8) ^ rk[37];
t2 = Te0[s2 >> 24U] ^ Te1_r((s3 >> 16U) & m8) ^ Te2_r((s0 >> 8U) & m8) ^ Te3_r(s1 & m8) ^ rk[38];
t3 = Te0[s3 >> 24U] ^ Te1_r((s0 >> 16U) & m8) ^ Te2_r((s1 >> 8U) & m8) ^ Te3_r(s2 & m8) ^ rk[39];
s0 = Te0[t0 >> 24U] ^ Te1_r((t1 >> 16U) & m8) ^ Te2_r((t2 >> 8U) & m8) ^ Te3_r(t3 & m8) ^ rk[40];
s1 = Te0[t1 >> 24U] ^ Te1_r((t2 >> 16U) & m8) ^ Te2_r((t3 >> 8U) & m8) ^ Te3_r(t0 & m8) ^ rk[41];
s2 = Te0[t2 >> 24U] ^ Te1_r((t3 >> 16U) & m8) ^ Te2_r((t0 >> 8U) & m8) ^ Te3_r(t1 & m8) ^ rk[42];
s3 = Te0[t3 >> 24U] ^ Te1_r((t0 >> 16U) & m8) ^ Te2_r((t1 >> 8U) & m8) ^ Te3_r(t2 & m8) ^ rk[43];
t0 = Te0[s0 >> 24U] ^ Te1_r((s1 >> 16U) & m8) ^ Te2_r((s2 >> 8U) & m8) ^ Te3_r(s3 & m8) ^ rk[44];
t1 = Te0[s1 >> 24U] ^ Te1_r((s2 >> 16U) & m8) ^ Te2_r((s3 >> 8U) & m8) ^ Te3_r(s0 & m8) ^ rk[45];
t2 = Te0[s2 >> 24U] ^ Te1_r((s3 >> 16U) & m8) ^ Te2_r((s0 >> 8U) & m8) ^ Te3_r(s1 & m8) ^ rk[46];
t3 = Te0[s3 >> 24U] ^ Te1_r((s0 >> 16U) & m8) ^ Te2_r((s1 >> 8U) & m8) ^ Te3_r(s2 & m8) ^ rk[47];
s0 = Te0[t0 >> 24U] ^ Te1_r((t1 >> 16U) & m8) ^ Te2_r((t2 >> 8U) & m8) ^ Te3_r(t3 & m8) ^ rk[48];
s1 = Te0[t1 >> 24U] ^ Te1_r((t2 >> 16U) & m8) ^ Te2_r((t3 >> 8U) & m8) ^ Te3_r(t0 & m8) ^ rk[49];
s2 = Te0[t2 >> 24U] ^ Te1_r((t3 >> 16U) & m8) ^ Te2_r((t0 >> 8U) & m8) ^ Te3_r(t1 & m8) ^ rk[50];
s3 = Te0[t3 >> 24U] ^ Te1_r((t0 >> 16U) & m8) ^ Te2_r((t1 >> 8U) & m8) ^ Te3_r(t2 & m8) ^ rk[51];
t0 = Te0[s0 >> 24U] ^ Te1_r((s1 >> 16U) & m8) ^ Te2_r((s2 >> 8U) & m8) ^ Te3_r(s3 & m8) ^ rk[52];
t1 = Te0[s1 >> 24U] ^ Te1_r((s2 >> 16U) & m8) ^ Te2_r((s3 >> 8U) & m8) ^ Te3_r(s0 & m8) ^ rk[53];
t2 = Te0[s2 >> 24U] ^ Te1_r((s3 >> 16U) & m8) ^ Te2_r((s0 >> 8U) & m8) ^ Te3_r(s1 & m8) ^ rk[54];
t3 = Te0[s3 >> 24U] ^ Te1_r((s0 >> 16U) & m8) ^ Te2_r((s1 >> 8U) & m8) ^ Te3_r(s2 & m8) ^ rk[55];
s0 = (Te2_r(t0 >> 24U) & m8_24) ^ (Te3_r((t1 >> 16U) & m8) & m8_16) ^ (Te0[(t2 >> 8U) & m8] & m8_8) ^ (Te1_r(t3 & m8) & m8) ^ rk[56];
s1 = (Te2_r(t1 >> 24U) & m8_24) ^ (Te3_r((t2 >> 16U) & m8) & m8_16) ^ (Te0[(t3 >> 8U) & m8] & m8_8) ^ (Te1_r(t0 & m8) & m8) ^ rk[57];
s2 = (Te2_r(t2 >> 24U) & m8_24) ^ (Te3_r((t3 >> 16U) & m8) & m8_16) ^ (Te0[(t0 >> 8U) & m8] & m8_8) ^ (Te1_r(t1 & m8) & m8) ^ rk[58];
s3 = (Te2_r(t3 >> 24U) & m8_24) ^ (Te3_r((t0 >> 16U) & m8) & m8_16) ^ (Te0[(t1 >> 8U) & m8] & m8_8) ^ (Te1_r(t2 & m8) & m8) ^ rk[59];
Utils::storeBigEndian<uint32_t>(out, s0);
Utils::storeBigEndian<uint32_t>(out + 4, s1);
Utils::storeBigEndian<uint32_t>(out + 8, s2);
Utils::storeBigEndian<uint32_t>(out + 12, s3);
}
void AES::p_decryptSW(const uint8_t *in, uint8_t *out) const noexcept
{
const uint32_t *restrict rk = p_k.sw.dk;
const uint32_t m8 = 0x000000ff;
uint32_t s0 = Utils::loadBigEndian<uint32_t>(in) ^ rk[0];
uint32_t s1 = Utils::loadBigEndian<uint32_t>(in + 4) ^ rk[1];
uint32_t s2 = Utils::loadBigEndian<uint32_t>(in + 8) ^ rk[2];
uint32_t s3 = Utils::loadBigEndian<uint32_t>(in + 12) ^ rk[3];
uint32_t t0, t1, t2, t3;
t0 = Td0[s0 >> 24U] ^ Td1_r((s3 >> 16U) & m8) ^ Td2_r((s2 >> 8U) & m8) ^ Td3_r(s1 & m8) ^ rk[4];
t1 = Td0[s1 >> 24U] ^ Td1_r((s0 >> 16U) & m8) ^ Td2_r((s3 >> 8U) & m8) ^ Td3_r(s2 & m8) ^ rk[5];
t2 = Td0[s2 >> 24U] ^ Td1_r((s1 >> 16U) & m8) ^ Td2_r((s0 >> 8U) & m8) ^ Td3_r(s3 & m8) ^ rk[6];
t3 = Td0[s3 >> 24U] ^ Td1_r((s2 >> 16U) & m8) ^ Td2_r((s1 >> 8U) & m8) ^ Td3_r(s0 & m8) ^ rk[7];
s0 = Td0[t0 >> 24U] ^ Td1_r((t3 >> 16U) & m8) ^ Td2_r((t2 >> 8U) & m8) ^ Td3_r(t1 & m8) ^ rk[8];
s1 = Td0[t1 >> 24U] ^ Td1_r((t0 >> 16U) & m8) ^ Td2_r((t3 >> 8U) & m8) ^ Td3_r(t2 & m8) ^ rk[9];
s2 = Td0[t2 >> 24U] ^ Td1_r((t1 >> 16U) & m8) ^ Td2_r((t0 >> 8U) & m8) ^ Td3_r(t3 & m8) ^ rk[10];
s3 = Td0[t3 >> 24U] ^ Td1_r((t2 >> 16U) & m8) ^ Td2_r((t1 >> 8U) & m8) ^ Td3_r(t0 & m8) ^ rk[11];
t0 = Td0[s0 >> 24U] ^ Td1_r((s3 >> 16U) & m8) ^ Td2_r((s2 >> 8U) & m8) ^ Td3_r(s1 & m8) ^ rk[12];
t1 = Td0[s1 >> 24U] ^ Td1_r((s0 >> 16U) & m8) ^ Td2_r((s3 >> 8U) & m8) ^ Td3_r(s2 & m8) ^ rk[13];
t2 = Td0[s2 >> 24U] ^ Td1_r((s1 >> 16U) & m8) ^ Td2_r((s0 >> 8U) & m8) ^ Td3_r(s3 & m8) ^ rk[14];
t3 = Td0[s3 >> 24U] ^ Td1_r((s2 >> 16U) & m8) ^ Td2_r((s1 >> 8U) & m8) ^ Td3_r(s0 & m8) ^ rk[15];
s0 = Td0[t0 >> 24U] ^ Td1_r((t3 >> 16U) & m8) ^ Td2_r((t2 >> 8U) & m8) ^ Td3_r(t1 & m8) ^ rk[16];
s1 = Td0[t1 >> 24U] ^ Td1_r((t0 >> 16U) & m8) ^ Td2_r((t3 >> 8U) & m8) ^ Td3_r(t2 & m8) ^ rk[17];
s2 = Td0[t2 >> 24U] ^ Td1_r((t1 >> 16U) & m8) ^ Td2_r((t0 >> 8U) & m8) ^ Td3_r(t3 & m8) ^ rk[18];
s3 = Td0[t3 >> 24U] ^ Td1_r((t2 >> 16U) & m8) ^ Td2_r((t1 >> 8U) & m8) ^ Td3_r(t0 & m8) ^ rk[19];
t0 = Td0[s0 >> 24U] ^ Td1_r((s3 >> 16U) & m8) ^ Td2_r((s2 >> 8U) & m8) ^ Td3_r(s1 & m8) ^ rk[20];
t1 = Td0[s1 >> 24U] ^ Td1_r((s0 >> 16U) & m8) ^ Td2_r((s3 >> 8U) & m8) ^ Td3_r(s2 & m8) ^ rk[21];
t2 = Td0[s2 >> 24U] ^ Td1_r((s1 >> 16U) & m8) ^ Td2_r((s0 >> 8U) & m8) ^ Td3_r(s3 & m8) ^ rk[22];
t3 = Td0[s3 >> 24U] ^ Td1_r((s2 >> 16U) & m8) ^ Td2_r((s1 >> 8U) & m8) ^ Td3_r(s0 & m8) ^ rk[23];
s0 = Td0[t0 >> 24U] ^ Td1_r((t3 >> 16U) & m8) ^ Td2_r((t2 >> 8U) & m8) ^ Td3_r(t1 & m8) ^ rk[24];
s1 = Td0[t1 >> 24U] ^ Td1_r((t0 >> 16U) & m8) ^ Td2_r((t3 >> 8U) & m8) ^ Td3_r(t2 & m8) ^ rk[25];
s2 = Td0[t2 >> 24U] ^ Td1_r((t1 >> 16U) & m8) ^ Td2_r((t0 >> 8U) & m8) ^ Td3_r(t3 & m8) ^ rk[26];
s3 = Td0[t3 >> 24U] ^ Td1_r((t2 >> 16U) & m8) ^ Td2_r((t1 >> 8U) & m8) ^ Td3_r(t0 & m8) ^ rk[27];
t0 = Td0[s0 >> 24U] ^ Td1_r((s3 >> 16U) & m8) ^ Td2_r((s2 >> 8U) & m8) ^ Td3_r(s1 & m8) ^ rk[28];
t1 = Td0[s1 >> 24U] ^ Td1_r((s0 >> 16U) & m8) ^ Td2_r((s3 >> 8U) & m8) ^ Td3_r(s2 & m8) ^ rk[29];
t2 = Td0[s2 >> 24U] ^ Td1_r((s1 >> 16U) & m8) ^ Td2_r((s0 >> 8U) & m8) ^ Td3_r(s3 & m8) ^ rk[30];
t3 = Td0[s3 >> 24U] ^ Td1_r((s2 >> 16U) & m8) ^ Td2_r((s1 >> 8U) & m8) ^ Td3_r(s0 & m8) ^ rk[31];
s0 = Td0[t0 >> 24U] ^ Td1_r((t3 >> 16U) & m8) ^ Td2_r((t2 >> 8U) & m8) ^ Td3_r(t1 & m8) ^ rk[32];
s1 = Td0[t1 >> 24U] ^ Td1_r((t0 >> 16U) & m8) ^ Td2_r((t3 >> 8U) & m8) ^ Td3_r(t2 & m8) ^ rk[33];
s2 = Td0[t2 >> 24U] ^ Td1_r((t1 >> 16U) & m8) ^ Td2_r((t0 >> 8U) & m8) ^ Td3_r(t3 & m8) ^ rk[34];
s3 = Td0[t3 >> 24U] ^ Td1_r((t2 >> 16U) & m8) ^ Td2_r((t1 >> 8U) & m8) ^ Td3_r(t0 & m8) ^ rk[35];
t0 = Td0[s0 >> 24U] ^ Td1_r((s3 >> 16U) & m8) ^ Td2_r((s2 >> 8U) & m8) ^ Td3_r(s1 & m8) ^ rk[36];
t1 = Td0[s1 >> 24U] ^ Td1_r((s0 >> 16U) & m8) ^ Td2_r((s3 >> 8U) & m8) ^ Td3_r(s2 & m8) ^ rk[37];
t2 = Td0[s2 >> 24U] ^ Td1_r((s1 >> 16U) & m8) ^ Td2_r((s0 >> 8U) & m8) ^ Td3_r(s3 & m8) ^ rk[38];
t3 = Td0[s3 >> 24U] ^ Td1_r((s2 >> 16U) & m8) ^ Td2_r((s1 >> 8U) & m8) ^ Td3_r(s0 & m8) ^ rk[39];
s0 = Td0[t0 >> 24U] ^ Td1_r((t3 >> 16U) & m8) ^ Td2_r((t2 >> 8U) & m8) ^ Td3_r(t1 & m8) ^ rk[40];
s1 = Td0[t1 >> 24U] ^ Td1_r((t0 >> 16U) & m8) ^ Td2_r((t3 >> 8U) & m8) ^ Td3_r(t2 & m8) ^ rk[41];
s2 = Td0[t2 >> 24U] ^ Td1_r((t1 >> 16U) & m8) ^ Td2_r((t0 >> 8U) & m8) ^ Td3_r(t3 & m8) ^ rk[42];
s3 = Td0[t3 >> 24U] ^ Td1_r((t2 >> 16U) & m8) ^ Td2_r((t1 >> 8U) & m8) ^ Td3_r(t0 & m8) ^ rk[43];
t0 = Td0[s0 >> 24U] ^ Td1_r((s3 >> 16U) & m8) ^ Td2_r((s2 >> 8U) & m8) ^ Td3_r(s1 & m8) ^ rk[44];
t1 = Td0[s1 >> 24U] ^ Td1_r((s0 >> 16U) & m8) ^ Td2_r((s3 >> 8U) & m8) ^ Td3_r(s2 & m8) ^ rk[45];
t2 = Td0[s2 >> 24U] ^ Td1_r((s1 >> 16U) & m8) ^ Td2_r((s0 >> 8U) & m8) ^ Td3_r(s3 & m8) ^ rk[46];
t3 = Td0[s3 >> 24U] ^ Td1_r((s2 >> 16U) & m8) ^ Td2_r((s1 >> 8U) & m8) ^ Td3_r(s0 & m8) ^ rk[47];
s0 = Td0[t0 >> 24U] ^ Td1_r((t3 >> 16U) & m8) ^ Td2_r((t2 >> 8U) & m8) ^ Td3_r(t1 & m8) ^ rk[48];
s1 = Td0[t1 >> 24U] ^ Td1_r((t0 >> 16U) & m8) ^ Td2_r((t3 >> 8U) & m8) ^ Td3_r(t2 & m8) ^ rk[49];
s2 = Td0[t2 >> 24U] ^ Td1_r((t1 >> 16U) & m8) ^ Td2_r((t0 >> 8U) & m8) ^ Td3_r(t3 & m8) ^ rk[50];
s3 = Td0[t3 >> 24U] ^ Td1_r((t2 >> 16U) & m8) ^ Td2_r((t1 >> 8U) & m8) ^ Td3_r(t0 & m8) ^ rk[51];
t0 = Td0[s0 >> 24U] ^ Td1_r((s3 >> 16U) & m8) ^ Td2_r((s2 >> 8U) & m8) ^ Td3_r(s1 & m8) ^ rk[52];
t1 = Td0[s1 >> 24U] ^ Td1_r((s0 >> 16U) & m8) ^ Td2_r((s3 >> 8U) & m8) ^ Td3_r(s2 & m8) ^ rk[53];
t2 = Td0[s2 >> 24U] ^ Td1_r((s1 >> 16U) & m8) ^ Td2_r((s0 >> 8U) & m8) ^ Td3_r(s3 & m8) ^ rk[54];
t3 = Td0[s3 >> 24U] ^ Td1_r((s2 >> 16U) & m8) ^ Td2_r((s1 >> 8U) & m8) ^ Td3_r(s0 & m8) ^ rk[55];
s0 = (Td4[t0 >> 24U] << 24U) ^ (Td4[(t3 >> 16U) & m8] << 16U) ^ (Td4[(t2 >> 8U) & m8] << 8U) ^ (Td4[(t1)&m8]) ^ rk[56];
s1 = (Td4[t1 >> 24U] << 24U) ^ (Td4[(t0 >> 16U) & m8] << 16U) ^ (Td4[(t3 >> 8U) & m8] << 8U) ^ (Td4[(t2)&m8]) ^ rk[57];
s2 = (Td4[t2 >> 24U] << 24U) ^ (Td4[(t1 >> 16U) & m8] << 16U) ^ (Td4[(t0 >> 8U) & m8] << 8U) ^ (Td4[(t3)&m8]) ^ rk[58];
s3 = (Td4[t3 >> 24U] << 24U) ^ (Td4[(t2 >> 16U) & m8] << 16U) ^ (Td4[(t1 >> 8U) & m8] << 8U) ^ (Td4[(t0)&m8]) ^ rk[59];
Utils::storeBigEndian<uint32_t>(out, s0);
Utils::storeBigEndian<uint32_t>(out + 4, s1);
Utils::storeBigEndian<uint32_t>(out + 8, s2);
Utils::storeBigEndian<uint32_t>(out + 12, s3);
}
} // namespace ZeroTier

View file

@ -1,580 +0,0 @@
/*
* Copyright (c)2013-2021 ZeroTier, Inc.
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
*/
/****/
#ifndef ZT_AES_HPP
#define ZT_AES_HPP
#include "Constants.hpp"
#include "SHA512.hpp"
#include "Utils.hpp"
// Uncomment to disable all hardware acceleration (usually for testing)
//#define ZT_AES_NO_ACCEL
#if !defined(ZT_AES_NO_ACCEL) && defined(ZT_ARCH_X64)
#define ZT_AES_AESNI 1
#endif
#if !defined(ZT_AES_NO_ACCEL) && defined(ZT_ARCH_ARM_HAS_NEON)
#define ZT_AES_NEON 1
#endif
#ifndef ZT_INLINE
#define ZT_INLINE inline
#endif
namespace ZeroTier {
/**
* AES-256 and pals including GMAC, CTR, etc.
*
* This includes hardware acceleration for certain processors. The software
* mode is fallback and is significantly slower.
*/
class AES {
public:
/**
 * @return True if this system has hardware AES acceleration
 */
static ZT_INLINE bool accelerated()
{
#ifndef ZT_AES_NO_ACCEL
#ifdef ZT_AES_AESNI
#define ZT_HAVE_HW_AES_IMPL 1
	// Compiled for x64 with AES-NI support: check the CPU at runtime.
	return Utils::CPUID.aes;
#endif
#ifdef ZT_AES_NEON
#define ZT_HAVE_HW_AES_IMPL 1
	// Compiled with ARM NEON crypto support: check the CPU at runtime.
	return Utils::ARMCAP.aes;
#endif
#ifndef ZT_HAVE_HW_AES_IMPL
	// No hardware implementation was compiled in for this architecture.
	return false;
#endif
#else
	return false;
#endif
}
/**
 * Create an un-initialized AES instance (must call init() before use)
 */
ZT_INLINE AES() noexcept {}

/**
 * Create an AES instance with the given key
 *
 * @param key 256-bit key
 */
explicit ZT_INLINE AES(const void *const key) noexcept { this->init(key); }

// Zero key material on destruction so expanded round keys do not linger in memory.
ZT_INLINE ~AES() { Utils::burn(&p_k, sizeof(p_k)); }

/**
 * Set (or re-set) this AES256 cipher's key
 *
 * @param key 256-bit / 32-byte key
 */
ZT_INLINE void init(const void *const key) noexcept
{
	// Key with the best implementation available at runtime: AES-NI, then the
	// ARM crypto extensions, then the portable software fallback.
#ifdef ZT_AES_AESNI
	if (likely(Utils::CPUID.aes)) {
		p_init_aesni(reinterpret_cast<const uint8_t *>(key));
		return;
	}
#endif
#ifdef ZT_AES_NEON
	if (Utils::ARMCAP.aes) {
		p_init_armneon_crypto(reinterpret_cast<const uint8_t *>(key));
		return;
	}
#endif
	p_initSW(reinterpret_cast<const uint8_t *>(key));
}
/**
 * Encrypt a single AES block
 *
 * @param in Input block (16 bytes)
 * @param out Output block (can be same as input)
 */
ZT_INLINE void encrypt(const void *const in, void *const out) const noexcept
{
	// Dispatch to the same implementation family that keyed this instance
	// (the runtime checks mirror those in init()).
#ifdef ZT_AES_AESNI
	if (likely(Utils::CPUID.aes)) {
		p_encrypt_aesni(in, out);
		return;
	}
#endif
#ifdef ZT_AES_NEON
	if (Utils::ARMCAP.aes) {
		p_encrypt_armneon_crypto(in, out);
		return;
	}
#endif
	p_encryptSW(reinterpret_cast<const uint8_t *>(in), reinterpret_cast<uint8_t *>(out));
}

/**
 * Decrypt a single AES block
 *
 * @param in Input block (16 bytes)
 * @param out Output block (can be same as input)
 */
ZT_INLINE void decrypt(const void *const in, void *const out) const noexcept
{
#ifdef ZT_AES_AESNI
	if (likely(Utils::CPUID.aes)) {
		p_decrypt_aesni(in, out);
		return;
	}
#endif
#ifdef ZT_AES_NEON
	if (Utils::ARMCAP.aes) {
		p_decrypt_armneon_crypto(in, out);
		return;
	}
#endif
	p_decryptSW(reinterpret_cast<const uint8_t *>(in), reinterpret_cast<uint8_t *>(out));
}

// Nested AES-GMAC-SIV helper classes, defined below.
class GMACSIVEncryptor;
class GMACSIVDecryptor;
/**
 * Streaming GMAC calculator
 */
class GMAC {
	friend class GMACSIVEncryptor;
	friend class GMACSIVDecryptor;

  public:
	/**
	 * @return True if this system has hardware GMAC acceleration
	 */
	static ZT_INLINE bool accelerated()
	{
#ifdef ZT_AES_AESNI
		return Utils::CPUID.aes;
#else
#ifdef ZT_AES_NEON
		return Utils::ARMCAP.pmull;
#else
		return false;
#endif
#endif
	}

	/**
	 * Create a new instance of GMAC (must be initialized with init() before use)
	 *
	 * @param aes Keyed AES instance to use
	 */
	ZT_INLINE GMAC(const AES &aes) : _aes(aes) {}

	/**
	 * Reset and initialize for a new GMAC calculation
	 *
	 * @param iv 96-bit initialization vector (pad with zeroes if actual IV is shorter)
	 */
	ZT_INLINE void init(const uint8_t iv[12]) noexcept
	{
		_rp = 0;
		_len = 0;
		// We fill the least significant 32 bits in the _iv field with 1 since in GCM mode
		// this would hold the counter, but we're not doing GCM just GMAC. That means the
		// counter always stays just 1.
#ifdef ZT_AES_AESNI // also implies an x64 processor
		*reinterpret_cast<uint64_t *>(_iv) = *reinterpret_cast<const uint64_t *>(iv);
		// BUGFIX: copy the last 4 IV bytes with a 32-bit load. The IV is only 12
		// bytes, so the previous 64-bit load at iv + 8 read 4 bytes past the end
		// of the caller's buffer (undefined behavior; the truncating store only
		// happened to yield the right value on little-endian x64).
		*reinterpret_cast<uint32_t *>(_iv + 8) = *reinterpret_cast<const uint32_t *>(iv + 8);
		*reinterpret_cast<uint32_t *>(_iv + 12) = 0x01000000; // 0x00000001 in big-endian byte order
#else
		Utils::copy<12>(_iv, iv);
		_iv[12] = 0;
		_iv[13] = 0;
		_iv[14] = 0;
		_iv[15] = 1;
#endif
		_y[0] = 0;
		_y[1] = 0;
	}

	/**
	 * Process data through GMAC
	 *
	 * @param data Bytes to process
	 * @param len Length of input
	 */
	void update(const void *data, unsigned int len) noexcept;

	/**
	 * Process any remaining cached bytes and generate tag
	 *
	 * Don't call finish() more than once or you'll get an invalid result.
	 *
	 * @param tag 128-bit GMAC tag (can be truncated)
	 */
	void finish(uint8_t tag[16]) noexcept;

  private:
#ifdef ZT_AES_AESNI
	void p_aesNIUpdate(const uint8_t *in, unsigned int len) noexcept;
	void p_aesNIFinish(uint8_t tag[16]) noexcept;
#endif
#ifdef ZT_AES_NEON
	void p_armUpdate(const uint8_t *in, unsigned int len) noexcept;
	void p_armFinish(uint8_t tag[16]) noexcept;
#endif
	const AES &_aes;   // keyed cipher this GMAC is bound to
	unsigned int _rp;  // number of bytes currently buffered in _r
	unsigned int _len; // total bytes processed so far
	uint8_t _r[16];    // remainder buffer for partial blocks
	uint8_t _iv[16];   // 96-bit IV + 32-bit pseudo-counter (fixed at 1, see init())
	uint64_t _y[2];    // running 128-bit GHASH accumulator
};
/**
 * Streaming AES-CTR encrypt/decrypt
 *
 * NOTE: this doesn't support overflow of the counter in the least significant 32 bits.
 * We will never encrypt more than a tiny fraction of 2^32 blocks, so this is left out as
 * an optimization.
 */
class CTR {
	friend class GMACSIVEncryptor;
	friend class GMACSIVDecryptor;

  public:
	// Wraps a keyed AES instance, which must outlive this CTR object.
	ZT_INLINE CTR(const AES &aes) noexcept : _aes(aes) {}

	/**
	 * Initialize this CTR instance to encrypt a new stream
	 *
	 * @param iv Unique initialization vector and initial 32-bit counter (least significant 32 bits, big-endian)
	 * @param output Buffer to which to store output (MUST be large enough for total bytes processed!)
	 */
	ZT_INLINE void init(const uint8_t iv[16], void *const output) noexcept
	{
		Utils::copy<16>(_ctr, iv);
		_out = reinterpret_cast<uint8_t *>(output);
		_len = 0;
	}

	/**
	 * Initialize this CTR instance to encrypt a new stream
	 *
	 * @param iv Unique 96-bit initialization vector
	 * @param ic Initial counter (must be in big-endian byte order!)
	 * @param output Buffer to which to store output (MUST be large enough for total bytes processed!)
	 */
	ZT_INLINE void init(const uint8_t iv[12], const uint32_t ic, void *const output) noexcept
	{
		Utils::copy<12>(_ctr, iv);
		// The counter occupies the last 32 bits of the 128-bit counter block.
		reinterpret_cast<uint32_t *>(_ctr)[3] = ic;
		_out = reinterpret_cast<uint8_t *>(output);
		_len = 0;
	}

	/**
	 * Encrypt or decrypt data, writing result to the output provided to init()
	 *
	 * @param input Input data
	 * @param len Length of input
	 */
	void crypt(const void *input, unsigned int len) noexcept;

	/**
	 * Finish any remaining bytes if total bytes processed wasn't a multiple of 16
	 *
	 * Don't call more than once for a given stream or data may be corrupted.
	 */
	void finish() noexcept;

  private:
#ifdef ZT_AES_AESNI
	void p_aesNICrypt(const uint8_t *in, uint8_t *out, unsigned int len) noexcept;
#endif
#ifdef ZT_AES_NEON
	void p_armCrypt(const uint8_t *in, uint8_t *out, unsigned int len) noexcept;
#endif
	const AES &_aes;
	uint64_t _ctr[2];  // current 128-bit counter block
	uint8_t *_out;     // output write cursor (set by init())
	unsigned int _len; // total bytes processed so far
};
/**
 * Encryptor for AES-GMAC-SIV.
 *
 * Encryption requires two passes. The first pass starts after init
 * with aad (if any) followed by update1() and finish1(). Then the
 * update2() and finish2() methods must be used over the same data
 * (but NOT AAD) again.
 *
 * This supports encryption of a maximum of 2^31 bytes of data per
 * call to init().
 */
class GMACSIVEncryptor {
  public:
	/**
	 * Create a new AES-GMAC-SIV encryptor keyed with the provided AES instances
	 *
	 * @param k0 First of two AES instances keyed with K0 (used for GMAC)
	 * @param k1 Second of two AES instances keyed with K1 (used for CTR)
	 */
	ZT_INLINE GMACSIVEncryptor(const AES &k0, const AES &k1) noexcept : _gmac(k0), _ctr(k1) {}

	/**
	 * Initialize AES-GMAC-SIV
	 *
	 * @param iv IV in network byte order (byte order in which it will appear on the wire)
	 * @param output Pointer to buffer to receive ciphertext, must be large enough for all to-be-processed data!
	 */
	ZT_INLINE void init(const uint64_t iv, void *const output) noexcept
	{
		// Output buffer to receive the result of AES-CTR encryption.
		_output = output;
		// Initialize GMAC with 64-bit IV (and remaining 32 bits padded to zero).
		_tag[0] = iv;
		_tag[1] = 0;
		_gmac.init(reinterpret_cast<const uint8_t *>(_tag));
	}

	/**
	 * Process AAD (additional authenticated data) that is not being encrypted.
	 *
	 * This MUST be called before update1() and finish1() if there is AAD to
	 * be included. This also MUST NOT be called more than once as the current
	 * code only supports one chunk of AAD.
	 *
	 * @param aad Additional authenticated data
	 * @param len Length of AAD in bytes
	 */
	ZT_INLINE void aad(const void *const aad, unsigned int len) noexcept
	{
		// Feed AAD into GMAC first
		_gmac.update(aad, len);
		// End of AAD is padded to a multiple of 16 bytes to ensure unique encoding.
		len &= 0xfU;
		if (len != 0)
			_gmac.update(Utils::ZERO256, 16U - len);
	}

	/**
	 * First pass plaintext input function
	 *
	 * @param input Plaintext chunk
	 * @param len Length of plaintext chunk
	 */
	ZT_INLINE void update1(const void *const input, const unsigned int len) noexcept { _gmac.update(input, len); }

	/**
	 * Finish first pass, compute CTR IV, initialize second pass.
	 */
	ZT_INLINE void finish1() noexcept
	{
		// Compute 128-bit GMAC tag.
		uint64_t tmp[2];
		_gmac.finish(reinterpret_cast<uint8_t *>(tmp));

		// Shorten to 64 bits, concatenate with message IV, and encrypt with AES to
		// yield the CTR IV and opaque IV/MAC blob. In ZeroTier's use of GMAC-SIV
		// this gets split into the packet ID (64 bits) and the MAC (64 bits) in each
		// packet and then recombined on receipt for legacy reasons (but with no
		// cryptographic or performance impact).
		_tag[1] = tmp[0] ^ tmp[1]; // NOTE: _tag[0] already contains message IV, see init()
		_ctr._aes.encrypt(_tag, _tag);

		// Initialize CTR with 96-bit CTR nonce and 32-bit counter. The counter
		// incorporates 31 more bits of entropy which should raise our security margin
		// a bit, but this is not included in the worst case analysis of GMAC-SIV.
		// The most significant bit of the counter is masked to zero to allow up to
		// 2^31 bytes to be encrypted before the counter loops. Some CTR implementations
		// increment the whole big-endian 128-bit integer in which case this could be
		// used for more than 2^31 bytes, but ours does not for performance reasons
		// and so 2^31 should be considered the input limit.
		tmp[0] = _tag[0];
		tmp[1] = _tag[1] & ZT_CONST_TO_BE_UINT64(0xffffffff7fffffffULL);
		_ctr.init(reinterpret_cast<const uint8_t *>(tmp), _output);
	}

	/**
	 * Second pass plaintext input function
	 *
	 * The same plaintext must be fed in the second time. Chunk boundaries
	 * (between calls to update2()) do not have to be the same, just the order
	 * of the bytes.
	 *
	 * @param input Plaintext chunk
	 * @param len Length of plaintext chunk
	 */
	ZT_INLINE void update2(const void *const input, const unsigned int len) noexcept { _ctr.crypt(input, len); }

	/**
	 * Finish second pass and return a pointer to the opaque 128-bit IV+MAC block
	 *
	 * The returned pointer remains valid as long as this object exists and init()
	 * is not called again.
	 *
	 * @return Pointer to 128-bit opaque IV+MAC (packed into two 64-bit integers)
	 */
	ZT_INLINE const uint64_t *finish2()
	{
		_ctr.finish();
		return _tag;
	}

  private:
	void *_output;    // ciphertext destination provided to init()
	uint64_t _tag[2]; // message IV; becomes the opaque encrypted IV+MAC in finish1()
	AES::GMAC _gmac;  // keyed with K0
	AES::CTR _ctr;    // keyed with K1
};
/**
 * Decryptor for AES-GMAC-SIV.
 *
 * GMAC-SIV decryption is single-pass. AAD (if any) must be processed first.
 */
class GMACSIVDecryptor {
  public:
	/**
	 * Create a new AES-GMAC-SIV decryptor keyed with the provided AES instances
	 *
	 * @param k0 First of two AES instances keyed with K0 (used for GMAC)
	 * @param k1 Second of two AES instances keyed with K1 (used for CTR)
	 */
	ZT_INLINE GMACSIVDecryptor(const AES &k0, const AES &k1) noexcept : _ctr(k1), _gmac(k0) {}

	/**
	 * Initialize decryptor for a new message
	 *
	 * @param tag 128-bit combined IV/MAC originally created by GMAC-SIV encryption
	 * @param output Buffer in which to write output plaintext (must be large enough!)
	 */
	ZT_INLINE void init(const uint64_t tag[2], void *const output) noexcept
	{
		uint64_t tmp[2];
		// The CTR IV is the opaque tag itself with the counter MSB masked to
		// zero, mirroring what the encryptor's finish1() did.
		tmp[0] = tag[0];
		tmp[1] = tag[1] & ZT_CONST_TO_BE_UINT64(0xffffffff7fffffffULL);
		_ctr.init(reinterpret_cast<const uint8_t *>(tmp), output);
		// Decrypting the opaque tag recovers the original message IV and the
		// folded 64-bit MAC computed at encryption time.
		_ctr._aes.decrypt(tag, _ivMac);
		// Re-initialize GMAC exactly as the encryptor did: 64-bit IV, zero padded.
		tmp[0] = _ivMac[0];
		tmp[1] = 0;
		_gmac.init(reinterpret_cast<const uint8_t *>(tmp));
		_output = output;
		_decryptedLen = 0;
	}

	/**
	 * Process AAD (additional authenticated data) that wasn't encrypted
	 *
	 * Must match the AAD fed to the encryptor, including its zero padding
	 * to a multiple of 16 bytes.
	 *
	 * @param aad Additional authenticated data
	 * @param len Length of AAD in bytes
	 */
	ZT_INLINE void aad(const void *const aad, unsigned int len) noexcept
	{
		_gmac.update(aad, len);
		len &= 0xfU;
		if (len != 0)
			_gmac.update(Utils::ZERO256, 16 - len);
	}

	/**
	 * Feed ciphertext into the decryptor
	 *
	 * Unlike encryption, GMAC-SIV decryption requires only one pass.
	 *
	 * @param input Input ciphertext
	 * @param len Length of ciphertext
	 */
	ZT_INLINE void update(const void *const input, const unsigned int len) noexcept
	{
		_ctr.crypt(input, len);
		_decryptedLen += len;
	}

	/**
	 * Flush decryption, compute MAC, and verify
	 *
	 * @return True if resulting plaintext (and AAD) pass message authentication check
	 */
	ZT_INLINE bool finish() noexcept
	{
		_ctr.finish();
		// GMAC the recovered plaintext and compare the folded 64-bit tag against
		// the MAC recovered from the IV/MAC blob in init().
		uint64_t gmacTag[2];
		_gmac.update(_output, _decryptedLen);
		_gmac.finish(reinterpret_cast<uint8_t *>(gmacTag));
		return (gmacTag[0] ^ gmacTag[1]) == _ivMac[1];
	}

  private:
	uint64_t _ivMac[2];         // [0] = recovered message IV, [1] = recovered 64-bit MAC
	AES::CTR _ctr;              // keyed with K1
	AES::GMAC _gmac;            // keyed with K0
	void *_output;              // plaintext destination provided to init()
	unsigned int _decryptedLen; // total plaintext bytes written so far
};
  private:
	// Lookup tables for the software fallback (classic table-driven AES):
	// Te0 (and its rotations via the Te*_r macros) combines SubBytes, ShiftRows
	// and MixColumns for encryption; Te4 supplies plain S-box output bytes; Td0
	// (and rotations) is the inverse-cipher equivalent; Td4 is the plain inverse
	// S-box; rcon holds the key schedule round constants.
	static const uint32_t Te0[256];
	static const uint32_t Te4[256];
	static const uint32_t Td0[256];
	static const uint8_t Td4[256];
	static const uint32_t rcon[15];

	// Portable software implementations (used when no hardware support exists).
	void p_initSW(const uint8_t *key) noexcept;
	void p_encryptSW(const uint8_t *in, uint8_t *out) const noexcept;
	void p_decryptSW(const uint8_t *in, uint8_t *out) const noexcept;

	// Expanded key state; which union member is valid depends on which
	// implementation keyed this instance in init().
	union {
#ifdef ZT_AES_AESNI
		struct {
			__m128i k[28];
			__m128i h[4];  // h, hh, hhh, hhhh
			__m128i h2[4]; // _mm_xor_si128(_mm_shuffle_epi32(h, 78), h), etc.
		} ni;
#endif
#ifdef ZT_AES_NEON
		struct {
			uint64_t hsw[2]; // in case it has AES but not PMULL, not sure if that ever happens
			uint8x16_t ek[15];
			uint8x16_t dk[15];
			uint8x16_t h;
		} neon;
#endif
		struct {
			uint64_t h[2];   // GMAC hash subkey H (host byte order, see p_initSW())
			uint32_t ek[60]; // encryption round key schedule
			uint32_t dk[60]; // equivalent-inverse-cipher decryption schedule
		} sw;
	} p_k;

#ifdef ZT_AES_AESNI
	void p_init_aesni(const uint8_t *key) noexcept;
	void p_encrypt_aesni(const void *in, void *out) const noexcept;
	void p_decrypt_aesni(const void *in, void *out) const noexcept;
#endif
#ifdef ZT_AES_NEON
	void p_init_armneon_crypto(const uint8_t *key) noexcept;
	void p_encrypt_armneon_crypto(const void *in, void *out) const noexcept;
	void p_decrypt_armneon_crypto(const void *in, void *out) const noexcept;
#endif
};
} // namespace ZeroTier
#endif

View file

@ -1,689 +0,0 @@
/*
* Copyright (c)2013-2021 ZeroTier, Inc.
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
*/
/****/
// AES for X64 AES-NI extensions (no 32-bit X86 support). Supports AVX2 and
// AVX512 VAES for performance in excess of 10GiB/sec/core on newer chips.
#include "AES.hpp"
#include "Constants.hpp"
#ifdef ZT_AES_AESNI
#ifdef __GNUC__
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#endif
namespace ZeroTier {
namespace {
// PSHUFB mask that reverses the byte order of a 128-bit vector (used to move
// between wire byte order and the bit/byte order GHASH computes in).
const __m128i s_sseSwapBytes = _mm_set_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
#ifdef __GNUC__
__attribute__((__target__("sse,sse2,ssse3,aes,pclmul")))
#endif
// One GHASH step using PCLMULQDQ: multiplies y by the hash key h in GF(2^128)
// and reduces the 256-bit product modulo the GHASH polynomial, returning the
// result re-swapped into the caller's byte order.
__m128i
p_gmacPCLMUL128(const __m128i h, __m128i y) noexcept
{
	// Swap into computation byte order (GHASH is defined on reflected order).
	y = _mm_shuffle_epi8(y, s_sseSwapBytes);
	// Schoolbook 64x64 carry-less multiplies: four partial products of h * y.
	__m128i t1 = _mm_clmulepi64_si128(h, y, 0x00);
	__m128i t2 = _mm_clmulepi64_si128(h, y, 0x01);
	__m128i t3 = _mm_clmulepi64_si128(h, y, 0x10);
	__m128i t4 = _mm_clmulepi64_si128(h, y, 0x11);
	// Fold the two middle partial products into the 256-bit result (t4:t1).
	t2 = _mm_xor_si128(t2, t3);
	t3 = _mm_slli_si128(t2, 8);
	t2 = _mm_srli_si128(t2, 8);
	t1 = _mm_xor_si128(t1, t3);
	t4 = _mm_xor_si128(t4, t2);
	// Shift the whole 256-bit value left by one bit (bit-reflection adjustment),
	// carrying across the 32-bit lanes...
	__m128i t5 = _mm_srli_epi32(t1, 31);
	t1 = _mm_or_si128(_mm_slli_epi32(t1, 1), _mm_slli_si128(t5, 4));
	t4 = _mm_or_si128(_mm_or_si128(_mm_slli_epi32(t4, 1), _mm_slli_si128(_mm_srli_epi32(t4, 31), 4)), _mm_srli_si128(t5, 12));
	// ...then reduce modulo the GHASH polynomial x^128 + x^7 + x^2 + x + 1.
	t5 = _mm_xor_si128(_mm_xor_si128(_mm_slli_epi32(t1, 31), _mm_slli_epi32(t1, 30)), _mm_slli_epi32(t1, 25));
	t1 = _mm_xor_si128(t1, _mm_slli_si128(t5, 12));
	t4 = _mm_xor_si128(_mm_xor_si128(_mm_xor_si128(_mm_xor_si128(_mm_xor_si128(t4, _mm_srli_si128(t5, 4)), t1), _mm_srli_epi32(t1, 2)), _mm_srli_epi32(t1, 7)), _mm_srli_epi32(t1, 1));
	// Swap back to the caller's byte order.
	return _mm_shuffle_epi8(t4, s_sseSwapBytes);
}
/* Disable VAES stuff on compilers too old to compile these intrinsics,
 * and MinGW64 also seems not to support them so disable on Windows.
 * The performance gain can be significant but regular SSE is already so
 * fast it's highly unlikely to be a rate limiting factor except on massive
 * servers and network infrastructure stuff. */
#if !defined(__WINDOWS__) && ((__GNUC__ >= 8) || (__clang_major__ >= 7))

#define ZT_AES_VAES512 1

#ifdef __GNUC__
__attribute__((__target__("sse,sse2,sse3,ssse3,sse4,sse4.1,sse4.2,aes,avx,avx2,vaes,avx512f,avx512bw")))
#endif
// Bulk AES-256 CTR keystream generation with 512-bit VAES: processes four
// 16-byte blocks per iteration. Consumes whole 64-byte chunks from _in into
// _out (advancing both references and the low counter word _c1) and leaves
// len equal to the remaining (< 64) byte count for the caller to finish.
// c0 is the static high half of the big-endian counter; k points at the
// 15 expanded round keys.
void p_aesCtrInnerVAES512(unsigned int &len, const uint64_t c0, uint64_t &_c1, const uint8_t *&_in, uint8_t *&_out, const __m128i *const k) noexcept
{
    uint64_t c1 = _c1;
    const uint8_t *in = _in;
    uint8_t *out = _out;
    // Broadcast each 128-bit round key into all four 128-bit lanes.
    const __m512i kk0 = _mm512_broadcast_i32x4(k[0]);
    const __m512i kk1 = _mm512_broadcast_i32x4(k[1]);
    const __m512i kk2 = _mm512_broadcast_i32x4(k[2]);
    const __m512i kk3 = _mm512_broadcast_i32x4(k[3]);
    const __m512i kk4 = _mm512_broadcast_i32x4(k[4]);
    const __m512i kk5 = _mm512_broadcast_i32x4(k[5]);
    const __m512i kk6 = _mm512_broadcast_i32x4(k[6]);
    const __m512i kk7 = _mm512_broadcast_i32x4(k[7]);
    const __m512i kk8 = _mm512_broadcast_i32x4(k[8]);
    const __m512i kk9 = _mm512_broadcast_i32x4(k[9]);
    const __m512i kk10 = _mm512_broadcast_i32x4(k[10]);
    const __m512i kk11 = _mm512_broadcast_i32x4(k[11]);
    const __m512i kk12 = _mm512_broadcast_i32x4(k[12]);
    const __m512i kk13 = _mm512_broadcast_i32x4(k[13]);
    const __m512i kk14 = _mm512_broadcast_i32x4(k[14]);
    _mm_prefetch(in, _MM_HINT_T0);
    for (unsigned int i = 0, c = (len >> 6U); i < c; ++i) {
        // Four consecutive counter blocks packed into one 512-bit register.
        __m512i d0 = _mm512_set_epi64((long long)Utils::hton(c1 + 3ULL), (long long)c0, (long long)Utils::hton(c1 + 2ULL), (long long)c0, (long long)Utils::hton(c1 + 1ULL), (long long)c0, (long long)Utils::hton(c1), (long long)c0);
        c1 += 4;
        d0 = _mm512_xor_si512(d0, kk0);
        d0 = _mm512_aesenc_epi128(d0, kk1);
        d0 = _mm512_aesenc_epi128(d0, kk2);
        d0 = _mm512_aesenc_epi128(d0, kk3);
        d0 = _mm512_aesenc_epi128(d0, kk4);
        d0 = _mm512_aesenc_epi128(d0, kk5);
        d0 = _mm512_aesenc_epi128(d0, kk6);
        d0 = _mm512_aesenc_epi128(d0, kk7);
        d0 = _mm512_aesenc_epi128(d0, kk8);
        d0 = _mm512_aesenc_epi128(d0, kk9);
        d0 = _mm512_aesenc_epi128(d0, kk10);
        d0 = _mm512_aesenc_epi128(d0, kk11);
        d0 = _mm512_aesenc_epi128(d0, kk12);
        d0 = _mm512_aesenc_epi128(d0, kk13);
        d0 = _mm512_aesenclast_epi128(d0, kk14);
        // XOR keystream with input to produce output (CTR mode).
        _mm512_storeu_si512(reinterpret_cast<__m512i *>(out), _mm512_xor_si512(_mm512_loadu_si512(reinterpret_cast<const __m512i *>(in)), d0));
        in += 64;
        out += 64;
        _mm_prefetch(in, _MM_HINT_T0);
    }
    _c1 = c1;
    _in = in;
    _out = out;
    len &= 63U; // leftover bytes are the caller's problem
}
#define ZT_AES_VAES256 1

#ifdef __GNUC__
__attribute__((__target__("sse,sse2,sse3,ssse3,sse4,sse4.1,sse4.2,aes,avx,avx2,vaes")))
#endif
// Bulk AES-256 CTR keystream generation with 256-bit VAES: two 2-block
// registers per iteration, i.e. still 64 bytes per loop. Same contract as
// p_aesCtrInnerVAES512: consumes whole 64-byte chunks, advances _in/_out/_c1,
// and leaves len < 64 for the caller.
void p_aesCtrInnerVAES256(unsigned int &len, const uint64_t c0, uint64_t &_c1, const uint8_t *&_in, uint8_t *&_out, const __m128i *const k) noexcept
{
    uint64_t c1 = _c1;
    const uint8_t *in = _in;
    uint8_t *out = _out;
    // Broadcast each 128-bit round key into both 128-bit lanes.
    const __m256i kk0 = _mm256_broadcastsi128_si256(k[0]);
    const __m256i kk1 = _mm256_broadcastsi128_si256(k[1]);
    const __m256i kk2 = _mm256_broadcastsi128_si256(k[2]);
    const __m256i kk3 = _mm256_broadcastsi128_si256(k[3]);
    const __m256i kk4 = _mm256_broadcastsi128_si256(k[4]);
    const __m256i kk5 = _mm256_broadcastsi128_si256(k[5]);
    const __m256i kk6 = _mm256_broadcastsi128_si256(k[6]);
    const __m256i kk7 = _mm256_broadcastsi128_si256(k[7]);
    const __m256i kk8 = _mm256_broadcastsi128_si256(k[8]);
    const __m256i kk9 = _mm256_broadcastsi128_si256(k[9]);
    const __m256i kk10 = _mm256_broadcastsi128_si256(k[10]);
    const __m256i kk11 = _mm256_broadcastsi128_si256(k[11]);
    const __m256i kk12 = _mm256_broadcastsi128_si256(k[12]);
    const __m256i kk13 = _mm256_broadcastsi128_si256(k[13]);
    const __m256i kk14 = _mm256_broadcastsi128_si256(k[14]);
    _mm_prefetch(in, _MM_HINT_T0);
    for (unsigned int i = 0, c = (len >> 6U); i < c; ++i) {
        // Counter blocks n..n+1 in d0, n+2..n+3 in d1; rounds are interleaved
        // across the two registers to hide AES instruction latency.
        __m256i d0 = _mm256_set_epi64x((long long)Utils::hton(c1 + 1ULL), (long long)c0, (long long)Utils::hton(c1), (long long)c0);
        __m256i d1 = _mm256_set_epi64x((long long)Utils::hton(c1 + 3ULL), (long long)c0, (long long)Utils::hton(c1 + 2ULL), (long long)c0);
        c1 += 4;
        d0 = _mm256_xor_si256(d0, kk0);
        d1 = _mm256_xor_si256(d1, kk0);
        d0 = _mm256_aesenc_epi128(d0, kk1);
        d1 = _mm256_aesenc_epi128(d1, kk1);
        d0 = _mm256_aesenc_epi128(d0, kk2);
        d1 = _mm256_aesenc_epi128(d1, kk2);
        d0 = _mm256_aesenc_epi128(d0, kk3);
        d1 = _mm256_aesenc_epi128(d1, kk3);
        d0 = _mm256_aesenc_epi128(d0, kk4);
        d1 = _mm256_aesenc_epi128(d1, kk4);
        d0 = _mm256_aesenc_epi128(d0, kk5);
        d1 = _mm256_aesenc_epi128(d1, kk5);
        d0 = _mm256_aesenc_epi128(d0, kk6);
        d1 = _mm256_aesenc_epi128(d1, kk6);
        d0 = _mm256_aesenc_epi128(d0, kk7);
        d1 = _mm256_aesenc_epi128(d1, kk7);
        d0 = _mm256_aesenc_epi128(d0, kk8);
        d1 = _mm256_aesenc_epi128(d1, kk8);
        d0 = _mm256_aesenc_epi128(d0, kk9);
        d1 = _mm256_aesenc_epi128(d1, kk9);
        d0 = _mm256_aesenc_epi128(d0, kk10);
        d1 = _mm256_aesenc_epi128(d1, kk10);
        d0 = _mm256_aesenc_epi128(d0, kk11);
        d1 = _mm256_aesenc_epi128(d1, kk11);
        d0 = _mm256_aesenc_epi128(d0, kk12);
        d1 = _mm256_aesenc_epi128(d1, kk12);
        d0 = _mm256_aesenc_epi128(d0, kk13);
        d1 = _mm256_aesenc_epi128(d1, kk13);
        d0 = _mm256_aesenclast_epi128(d0, kk14);
        d1 = _mm256_aesenclast_epi128(d1, kk14);
        // XOR keystream with input to produce output (CTR mode).
        _mm256_storeu_si256(reinterpret_cast<__m256i *>(out), _mm256_xor_si256(d0, _mm256_loadu_si256(reinterpret_cast<const __m256i *>(in))));
        _mm256_storeu_si256(reinterpret_cast<__m256i *>(out + 32), _mm256_xor_si256(d1, _mm256_loadu_si256(reinterpret_cast<const __m256i *>(in + 32))));
        in += 64;
        out += 64;
        _mm_prefetch(in, _MM_HINT_T0);
    }
    _c1 = c1;
    _in = in;
    _out = out;
    len &= 63U; // leftover bytes are the caller's problem
}

#endif // does compiler support AVX2 and AVX512 AES intrinsics?
#ifdef __GNUC__
__attribute__((__target__("sse,sse2")))
#endif
// First half of one AES-256 key expansion round: XORs the running 32-bit-word
// prefix sums of a with the broadcast high word of the key-gen-assist result b.
__m128i
p_init256_1_aesni(__m128i a, __m128i b) noexcept
{
    const __m128i hi = _mm_shuffle_epi32(b, 0xff);  // broadcast b's top 32-bit word
    __m128i shifted = _mm_slli_si128(a, 0x04);
    __m128i acc = _mm_xor_si128(a, shifted);
    shifted = _mm_slli_si128(shifted, 0x04);
    acc = _mm_xor_si128(acc, shifted);
    shifted = _mm_slli_si128(shifted, 0x04);
    acc = _mm_xor_si128(acc, shifted);
    return _mm_xor_si128(acc, hi);
}
#ifdef __GNUC__
__attribute__((__target__("sse,sse2,aes")))
#endif
__m128i
p_init256_2_aesni(__m128i a, __m128i b) noexcept
{
__m128i x, y, z;
y = _mm_aeskeygenassist_si128(a, 0x00);
z = _mm_shuffle_epi32(y, 0xaa);
y = _mm_slli_si128(b, 0x04);
x = _mm_xor_si128(b, y);
y = _mm_slli_si128(y, 0x04);
x = _mm_xor_si128(x, y);
y = _mm_slli_si128(y, 0x04);
x = _mm_xor_si128(x, y);
x = _mm_xor_si128(x, z);
return x;
}
} // anonymous namespace
#ifdef __GNUC__
__attribute__((__target__("sse,sse2,ssse3,pclmul,aes")))
#endif
// GMAC incremental update (AES-NI/PCLMUL path): folds len bytes of input into
// the running GHASH accumulator _y. Input need not be block-aligned; a partial
// trailing block is buffered in _r/_rp and completed on the next update() or
// by finish().
void AES::GMAC::p_aesNIUpdate(const uint8_t *in, unsigned int len) noexcept
{
    __m128i y = _mm_loadu_si128(reinterpret_cast<const __m128i *>(_y));

    // Handle anything left over from a previous run that wasn't a multiple of 16 bytes.
    if (_rp) {
        for (;;) {
            if (!len)
                return; // still not a full block; _y unchanged, _rp keeps its count
            --len;
            _r[_rp++] = *(in++);
            if (_rp == 16) {
                y = p_gmacPCLMUL128(_aes.p_k.ni.h[0], _mm_xor_si128(y, _mm_loadu_si128(reinterpret_cast<__m128i *>(_r))));
                break;
            }
        }
    }

    // Fast path: fold four blocks per iteration using the precomputed powers
    // h..h^4 (p_k.ni.h[0..3]) and their XORed-halves companions (p_k.ni.h2[0..3])
    // for the middle partial products, sharing one reduction across all four.
    if (likely(len >= 64)) {
        const __m128i sb = s_sseSwapBytes;
        const __m128i h = _aes.p_k.ni.h[0];
        const __m128i hh = _aes.p_k.ni.h[1];
        const __m128i hhh = _aes.p_k.ni.h[2];
        const __m128i hhhh = _aes.p_k.ni.h[3];
        const __m128i h2 = _aes.p_k.ni.h2[0];
        const __m128i hh2 = _aes.p_k.ni.h2[1];
        const __m128i hhh2 = _aes.p_k.ni.h2[2];
        const __m128i hhhh2 = _aes.p_k.ni.h2[3];
        const uint8_t *const end64 = in + (len & ~((unsigned int)63));
        len &= 63U;
        _mm_prefetch(in, _MM_HINT_T0);
        do {
            // Load four blocks in GHASH bit order; the accumulator is folded into the first.
            __m128i d1 = _mm_shuffle_epi8(_mm_xor_si128(y, _mm_loadu_si128(reinterpret_cast<const __m128i *>(in))), sb);
            __m128i d2 = _mm_shuffle_epi8(_mm_loadu_si128(reinterpret_cast<const __m128i *>(in + 16)), sb);
            __m128i d3 = _mm_shuffle_epi8(_mm_loadu_si128(reinterpret_cast<const __m128i *>(in + 32)), sb);
            __m128i d4 = _mm_shuffle_epi8(_mm_loadu_si128(reinterpret_cast<const __m128i *>(in + 48)), sb);
            in += 64;
            // a = sum of low products, b = sum of high products, c = sum of middle products.
            __m128i a = _mm_xor_si128(_mm_xor_si128(_mm_clmulepi64_si128(hhhh, d1, 0x00), _mm_clmulepi64_si128(hhh, d2, 0x00)), _mm_xor_si128(_mm_clmulepi64_si128(hh, d3, 0x00), _mm_clmulepi64_si128(h, d4, 0x00)));
            __m128i b = _mm_xor_si128(_mm_xor_si128(_mm_clmulepi64_si128(hhhh, d1, 0x11), _mm_clmulepi64_si128(hhh, d2, 0x11)), _mm_xor_si128(_mm_clmulepi64_si128(hh, d3, 0x11), _mm_clmulepi64_si128(h, d4, 0x11)));
            __m128i c = _mm_xor_si128(
                _mm_xor_si128(
                    _mm_xor_si128(_mm_clmulepi64_si128(hhhh2, _mm_xor_si128(_mm_shuffle_epi32(d1, 78), d1), 0x00), _mm_clmulepi64_si128(hhh2, _mm_xor_si128(_mm_shuffle_epi32(d2, 78), d2), 0x00)),
                    _mm_xor_si128(_mm_clmulepi64_si128(hh2, _mm_xor_si128(_mm_shuffle_epi32(d3, 78), d3), 0x00), _mm_clmulepi64_si128(h2, _mm_xor_si128(_mm_shuffle_epi32(d4, 78), d4), 0x00))),
                _mm_xor_si128(a, b));
            // Fold the middle term, shift left one bit, then reduce (same scheme as p_gmacPCLMUL128).
            a = _mm_xor_si128(_mm_slli_si128(c, 8), a);
            b = _mm_xor_si128(_mm_srli_si128(c, 8), b);
            c = _mm_srli_epi32(a, 31);
            a = _mm_or_si128(_mm_slli_epi32(a, 1), _mm_slli_si128(c, 4));
            b = _mm_or_si128(_mm_or_si128(_mm_slli_epi32(b, 1), _mm_slli_si128(_mm_srli_epi32(b, 31), 4)), _mm_srli_si128(c, 12));
            c = _mm_xor_si128(_mm_slli_epi32(a, 31), _mm_xor_si128(_mm_slli_epi32(a, 30), _mm_slli_epi32(a, 25)));
            a = _mm_xor_si128(a, _mm_slli_si128(c, 12));
            b = _mm_xor_si128(b, _mm_xor_si128(a, _mm_xor_si128(_mm_xor_si128(_mm_srli_epi32(a, 1), _mm_srli_si128(c, 4)), _mm_xor_si128(_mm_srli_epi32(a, 2), _mm_srli_epi32(a, 7)))));
            y = _mm_shuffle_epi8(b, sb);
            _mm_prefetch(in, _MM_HINT_T0);
        } while (likely(in != end64));
    }

    // Remaining whole blocks, one at a time.
    while (len >= 16) {
        y = p_gmacPCLMUL128(_aes.p_k.ni.h[0], _mm_xor_si128(y, _mm_loadu_si128(reinterpret_cast<const __m128i *>(in))));
        in += 16;
        len -= 16;
    }

    _mm_storeu_si128(reinterpret_cast<__m128i *>(_y), y);

    // Any overflow is cached for a later run or finish().
    for (unsigned int i = 0; i < len; ++i)
        _r[i] = in[i];
    _rp = len; // len is always less than 16 here
}
#ifdef __GNUC__
__attribute__((__target__("sse,sse2,ssse3,pclmul,aes")))
#endif
// GMAC finalization (AES-NI/PCLMUL path): pads and absorbs any buffered
// partial block, folds in the bit length, and writes tag = GHASH ^ E_k(IV).
// The AES rounds of the IV encryption are deliberately interleaved with the
// final GHASH multiply/reduction to overlap instruction latencies — do not
// reorder these statements.
void AES::GMAC::p_aesNIFinish(uint8_t tag[16]) noexcept
{
    __m128i y = _mm_loadu_si128(reinterpret_cast<const __m128i *>(_y));

    // Handle any remaining bytes, padding the last block with zeroes.
    if (_rp) {
        while (_rp < 16)
            _r[_rp++] = 0;
        y = p_gmacPCLMUL128(_aes.p_k.ni.h[0], _mm_xor_si128(y, _mm_loadu_si128(reinterpret_cast<__m128i *>(_r))));
    }

    // Interleave encryption of IV with the final GHASH of y XOR (length * 8).
    // Then XOR these together to get the final tag.
    const __m128i *const k = _aes.p_k.ni.k;
    const __m128i h = _aes.p_k.ni.h[0];
    y = _mm_xor_si128(y, _mm_set_epi64x(0LL, (long long)Utils::hton((uint64_t)_len << 3U)));
    y = _mm_shuffle_epi8(y, s_sseSwapBytes);
    __m128i encIV = _mm_xor_si128(_mm_loadu_si128(reinterpret_cast<const __m128i *>(_iv)), k[0]);
    // Carry-less multiply partial products (as in p_gmacPCLMUL128) ...
    __m128i t1 = _mm_clmulepi64_si128(h, y, 0x00);
    __m128i t2 = _mm_clmulepi64_si128(h, y, 0x01);
    __m128i t3 = _mm_clmulepi64_si128(h, y, 0x10);
    __m128i t4 = _mm_clmulepi64_si128(h, y, 0x11);
    encIV = _mm_aesenc_si128(encIV, k[1]);
    t2 = _mm_xor_si128(t2, t3);
    t3 = _mm_slli_si128(t2, 8);
    encIV = _mm_aesenc_si128(encIV, k[2]);
    t2 = _mm_srli_si128(t2, 8);
    t1 = _mm_xor_si128(t1, t3);
    encIV = _mm_aesenc_si128(encIV, k[3]);
    t4 = _mm_xor_si128(t4, t2);
    // ... one-bit left shift of the 255-bit product ...
    __m128i t5 = _mm_srli_epi32(t1, 31);
    t1 = _mm_slli_epi32(t1, 1);
    __m128i t6 = _mm_srli_epi32(t4, 31);
    encIV = _mm_aesenc_si128(encIV, k[4]);
    t4 = _mm_slli_epi32(t4, 1);
    t3 = _mm_srli_si128(t5, 12);
    encIV = _mm_aesenc_si128(encIV, k[5]);
    t6 = _mm_slli_si128(t6, 4);
    t5 = _mm_slli_si128(t5, 4);
    encIV = _mm_aesenc_si128(encIV, k[6]);
    t1 = _mm_or_si128(t1, t5);
    t4 = _mm_or_si128(t4, t6);
    encIV = _mm_aesenc_si128(encIV, k[7]);
    t4 = _mm_or_si128(t4, t3);
    // ... and modular reduction back into 128 bits.
    t5 = _mm_slli_epi32(t1, 31);
    encIV = _mm_aesenc_si128(encIV, k[8]);
    t6 = _mm_slli_epi32(t1, 30);
    t3 = _mm_slli_epi32(t1, 25);
    encIV = _mm_aesenc_si128(encIV, k[9]);
    t5 = _mm_xor_si128(t5, t6);
    t5 = _mm_xor_si128(t5, t3);
    encIV = _mm_aesenc_si128(encIV, k[10]);
    t6 = _mm_srli_si128(t5, 4);
    t4 = _mm_xor_si128(t4, t6);
    encIV = _mm_aesenc_si128(encIV, k[11]);
    t5 = _mm_slli_si128(t5, 12);
    t1 = _mm_xor_si128(t1, t5);
    t4 = _mm_xor_si128(t4, t1);
    t5 = _mm_srli_epi32(t1, 1);
    encIV = _mm_aesenc_si128(encIV, k[12]);
    t2 = _mm_srli_epi32(t1, 2);
    t3 = _mm_srli_epi32(t1, 7);
    encIV = _mm_aesenc_si128(encIV, k[13]);
    t4 = _mm_xor_si128(t4, t2);
    t4 = _mm_xor_si128(t4, t3);
    encIV = _mm_aesenclast_si128(encIV, k[14]);
    t4 = _mm_xor_si128(t4, t5);
    // tag = byte-swapped GHASH result XOR encrypted IV.
    _mm_storeu_si128(reinterpret_cast<__m128i *>(tag), _mm_xor_si128(_mm_shuffle_epi8(t4, s_sseSwapBytes), encIV));
}
#ifdef __GNUC__
__attribute__((__target__("sse,sse2,aes,pclmul")))
__attribute__((optimize("unroll-all-loops")))
#endif
// AES-256 CTR encrypt/decrypt (AES-NI path): XORs len bytes of keystream with
// in, writing to out. The 128-bit big-endian counter is kept as _ctr[0]
// (static high half, already big-endian) and _ctr[1] (incrementing low half).
// A trailing partial block is left in the output buffer and completed by the
// next call (tracked via _len not being a multiple of 16).
void AES::CTR::p_aesNICrypt(const uint8_t *in, uint8_t *out, unsigned int len) noexcept
{
    uint64_t c0 = _ctr[0], c1 = Utils::ntoh(_ctr[1]);

    const __m128i k0 = _aes.p_k.ni.k[0];
    const __m128i k1 = _aes.p_k.ni.k[1];
    const __m128i k2 = _aes.p_k.ni.k[2];
    const __m128i k3 = _aes.p_k.ni.k[3];
    const __m128i k4 = _aes.p_k.ni.k[4];
    const __m128i k5 = _aes.p_k.ni.k[5];
    const __m128i k6 = _aes.p_k.ni.k[6];
    const __m128i k7 = _aes.p_k.ni.k[7];
    const __m128i k8 = _aes.p_k.ni.k[8];
    const __m128i k9 = _aes.p_k.ni.k[9];
    const __m128i k10 = _aes.p_k.ni.k[10];
    const __m128i k11 = _aes.p_k.ni.k[11];
    const __m128i k12 = _aes.p_k.ni.k[12];
    const __m128i k13 = _aes.p_k.ni.k[13];
    const __m128i k14 = _aes.p_k.ni.k[14];

    // Complete any unfinished blocks from previous calls to crypt().
    unsigned int totalLen = _len;
    if ((totalLen & 15U)) {
        for (;;) {
            if (unlikely(!len)) {
                // Still not a full block: save counter and length, done for now.
                _ctr[1] = Utils::hton(c1);
                _len = totalLen;
                return;
            }
            --len;
            out[totalLen++] = *(in++);
            if (!(totalLen & 15U)) {
                // Buffered block is now complete: encrypt one counter block and
                // XOR it over the plaintext accumulated in the output buffer.
                __m128i d0 = _mm_set_epi64x((long long)Utils::hton(c1++), (long long)c0);
                d0 = _mm_xor_si128(d0, k0);
                d0 = _mm_aesenc_si128(d0, k1);
                d0 = _mm_aesenc_si128(d0, k2);
                d0 = _mm_aesenc_si128(d0, k3);
                d0 = _mm_aesenc_si128(d0, k4);
                d0 = _mm_aesenc_si128(d0, k5);
                d0 = _mm_aesenc_si128(d0, k6);
                d0 = _mm_aesenc_si128(d0, k7);
                d0 = _mm_aesenc_si128(d0, k8);
                d0 = _mm_aesenc_si128(d0, k9);
                d0 = _mm_aesenc_si128(d0, k10);
                __m128i *const outblk = reinterpret_cast<__m128i *>(out + (totalLen - 16));
                d0 = _mm_aesenc_si128(d0, k11);
                const __m128i p0 = _mm_loadu_si128(outblk);
                d0 = _mm_aesenc_si128(d0, k12);
                d0 = _mm_aesenc_si128(d0, k13);
                d0 = _mm_aesenclast_si128(d0, k14);
                _mm_storeu_si128(outblk, _mm_xor_si128(p0, d0));
                break;
            }
        }
    }

    out += totalLen;
    _len = totalLen + len;

    if (likely(len >= 64)) {
#if defined(ZT_AES_VAES512) && defined(ZT_AES_VAES256)
        if (Utils::CPUID.avx512f) {
            p_aesCtrInnerVAES512(len, _ctr[0], c1, in, out, _aes.p_k.ni.k);
        }
        else if (Utils::CPUID.vaes) {
            p_aesCtrInnerVAES256(len, _ctr[0], c1, in, out, _aes.p_k.ni.k);
        }
        else {
#endif
#if !defined(ZT_AES_VAES512) && defined(ZT_AES_VAES256)
            // BUGFIX: this branch previously passed an undeclared variable 'k' and
            // jumped to a nonexistent label 'skip_conventional_aesni_64', so it
            // could not compile if only ZT_AES_VAES256 were defined. It now
            // mirrors the structure of the VAES512+VAES256 branch above.
            if (Utils::CPUID.vaes) {
                p_aesCtrInnerVAES256(len, _ctr[0], c1, in, out, _aes.p_k.ni.k);
            }
            else {
#endif
                // Conventional AES-NI path: four independent counter blocks per
                // iteration, rounds interleaved to hide aesenc latency.
                const uint8_t *const eof64 = in + (len & ~((unsigned int)63));
                len &= 63;
                _mm_prefetch(in, _MM_HINT_T0);
                do {
                    __m128i d0 = _mm_set_epi64x((long long)Utils::hton(c1), (long long)c0);
                    __m128i d1 = _mm_set_epi64x((long long)Utils::hton(c1 + 1ULL), (long long)c0);
                    __m128i d2 = _mm_set_epi64x((long long)Utils::hton(c1 + 2ULL), (long long)c0);
                    __m128i d3 = _mm_set_epi64x((long long)Utils::hton(c1 + 3ULL), (long long)c0);
                    c1 += 4;
                    d0 = _mm_xor_si128(d0, k0);
                    d1 = _mm_xor_si128(d1, k0);
                    d2 = _mm_xor_si128(d2, k0);
                    d3 = _mm_xor_si128(d3, k0);
                    d0 = _mm_aesenc_si128(d0, k1);
                    d1 = _mm_aesenc_si128(d1, k1);
                    d2 = _mm_aesenc_si128(d2, k1);
                    d3 = _mm_aesenc_si128(d3, k1);
                    d0 = _mm_aesenc_si128(d0, k2);
                    d1 = _mm_aesenc_si128(d1, k2);
                    d2 = _mm_aesenc_si128(d2, k2);
                    d3 = _mm_aesenc_si128(d3, k2);
                    d0 = _mm_aesenc_si128(d0, k3);
                    d1 = _mm_aesenc_si128(d1, k3);
                    d2 = _mm_aesenc_si128(d2, k3);
                    d3 = _mm_aesenc_si128(d3, k3);
                    d0 = _mm_aesenc_si128(d0, k4);
                    d1 = _mm_aesenc_si128(d1, k4);
                    d2 = _mm_aesenc_si128(d2, k4);
                    d3 = _mm_aesenc_si128(d3, k4);
                    d0 = _mm_aesenc_si128(d0, k5);
                    d1 = _mm_aesenc_si128(d1, k5);
                    d2 = _mm_aesenc_si128(d2, k5);
                    d3 = _mm_aesenc_si128(d3, k5);
                    d0 = _mm_aesenc_si128(d0, k6);
                    d1 = _mm_aesenc_si128(d1, k6);
                    d2 = _mm_aesenc_si128(d2, k6);
                    d3 = _mm_aesenc_si128(d3, k6);
                    d0 = _mm_aesenc_si128(d0, k7);
                    d1 = _mm_aesenc_si128(d1, k7);
                    d2 = _mm_aesenc_si128(d2, k7);
                    d3 = _mm_aesenc_si128(d3, k7);
                    d0 = _mm_aesenc_si128(d0, k8);
                    d1 = _mm_aesenc_si128(d1, k8);
                    d2 = _mm_aesenc_si128(d2, k8);
                    d3 = _mm_aesenc_si128(d3, k8);
                    d0 = _mm_aesenc_si128(d0, k9);
                    d1 = _mm_aesenc_si128(d1, k9);
                    d2 = _mm_aesenc_si128(d2, k9);
                    d3 = _mm_aesenc_si128(d3, k9);
                    d0 = _mm_aesenc_si128(d0, k10);
                    d1 = _mm_aesenc_si128(d1, k10);
                    d2 = _mm_aesenc_si128(d2, k10);
                    d3 = _mm_aesenc_si128(d3, k10);
                    d0 = _mm_aesenc_si128(d0, k11);
                    d1 = _mm_aesenc_si128(d1, k11);
                    d2 = _mm_aesenc_si128(d2, k11);
                    d3 = _mm_aesenc_si128(d3, k11);
                    d0 = _mm_aesenc_si128(d0, k12);
                    d1 = _mm_aesenc_si128(d1, k12);
                    d2 = _mm_aesenc_si128(d2, k12);
                    d3 = _mm_aesenc_si128(d3, k12);
                    d0 = _mm_aesenc_si128(d0, k13);
                    d1 = _mm_aesenc_si128(d1, k13);
                    d2 = _mm_aesenc_si128(d2, k13);
                    d3 = _mm_aesenc_si128(d3, k13);
                    d0 = _mm_xor_si128(_mm_aesenclast_si128(d0, k14), _mm_loadu_si128(reinterpret_cast<const __m128i *>(in)));
                    d1 = _mm_xor_si128(_mm_aesenclast_si128(d1, k14), _mm_loadu_si128(reinterpret_cast<const __m128i *>(in + 16)));
                    d2 = _mm_xor_si128(_mm_aesenclast_si128(d2, k14), _mm_loadu_si128(reinterpret_cast<const __m128i *>(in + 32)));
                    d3 = _mm_xor_si128(_mm_aesenclast_si128(d3, k14), _mm_loadu_si128(reinterpret_cast<const __m128i *>(in + 48)));
                    _mm_storeu_si128(reinterpret_cast<__m128i *>(out), d0);
                    _mm_storeu_si128(reinterpret_cast<__m128i *>(out + 16), d1);
                    _mm_storeu_si128(reinterpret_cast<__m128i *>(out + 32), d2);
                    _mm_storeu_si128(reinterpret_cast<__m128i *>(out + 48), d3);
                    in += 64;
                    out += 64;
                    _mm_prefetch(in, _MM_HINT_T0);
                } while (likely(in != eof64));
#if defined(ZT_AES_VAES512) || defined(ZT_AES_VAES256)
        }
#endif
    }

    // Remaining whole 16-byte blocks, one at a time.
    while (len >= 16) {
        __m128i d0 = _mm_set_epi64x((long long)Utils::hton(c1++), (long long)c0);
        d0 = _mm_xor_si128(d0, k0);
        d0 = _mm_aesenc_si128(d0, k1);
        d0 = _mm_aesenc_si128(d0, k2);
        d0 = _mm_aesenc_si128(d0, k3);
        d0 = _mm_aesenc_si128(d0, k4);
        d0 = _mm_aesenc_si128(d0, k5);
        d0 = _mm_aesenc_si128(d0, k6);
        d0 = _mm_aesenc_si128(d0, k7);
        d0 = _mm_aesenc_si128(d0, k8);
        d0 = _mm_aesenc_si128(d0, k9);
        d0 = _mm_aesenc_si128(d0, k10);
        d0 = _mm_aesenc_si128(d0, k11);
        d0 = _mm_aesenc_si128(d0, k12);
        d0 = _mm_aesenc_si128(d0, k13);
        _mm_storeu_si128(reinterpret_cast<__m128i *>(out), _mm_xor_si128(_mm_aesenclast_si128(d0, k14), _mm_loadu_si128(reinterpret_cast<const __m128i *>(in))));
        in += 16;
        len -= 16;
        out += 16;
    }

    // Any remaining input is placed in _out. This will be picked up and crypted
    // on subsequent calls to crypt() or finish() as it'll mean _len will not be
    // an even multiple of 16.
    for (unsigned int i = 0; i < len; ++i)
        out[i] = in[i];

    _ctr[1] = Utils::hton(c1);
}
#ifdef __GNUC__
__attribute__((__target__("sse,sse2,ssse3,aes,pclmul")))
#endif
// AES-256 key setup (AES-NI path). Expands the 32-byte key into 15 encryption
// round keys k[0..14] and 13 decryption round keys k[15..27] (InvMixColumns of
// the middle encryption keys, in reverse order), then precomputes the GHASH
// key H = E_k(0) and its powers H^2..H^4 (byte-swapped, in h[0..3]) plus the
// XORed 64-bit halves of each power (h2[0..3]) used for the middle partial
// products in p_aesNIUpdate().
void
AES::p_init_aesni(const uint8_t *key) noexcept
{
    __m128i t1, t2, k1, k2, k3, k4, k5, k6, k7, k8, k9, k10, k11, k12, k13;
    // Standard AES-256 key expansion via AESKEYGENASSIST.
    p_k.ni.k[0] = t1 = _mm_loadu_si128((const __m128i *)key);
    p_k.ni.k[1] = k1 = t2 = _mm_loadu_si128((const __m128i *)(key + 16));
    p_k.ni.k[2] = k2 = t1 = p_init256_1_aesni(t1, _mm_aeskeygenassist_si128(t2, 0x01));
    p_k.ni.k[3] = k3 = t2 = p_init256_2_aesni(t1, t2);
    p_k.ni.k[4] = k4 = t1 = p_init256_1_aesni(t1, _mm_aeskeygenassist_si128(t2, 0x02));
    p_k.ni.k[5] = k5 = t2 = p_init256_2_aesni(t1, t2);
    p_k.ni.k[6] = k6 = t1 = p_init256_1_aesni(t1, _mm_aeskeygenassist_si128(t2, 0x04));
    p_k.ni.k[7] = k7 = t2 = p_init256_2_aesni(t1, t2);
    p_k.ni.k[8] = k8 = t1 = p_init256_1_aesni(t1, _mm_aeskeygenassist_si128(t2, 0x08));
    p_k.ni.k[9] = k9 = t2 = p_init256_2_aesni(t1, t2);
    p_k.ni.k[10] = k10 = t1 = p_init256_1_aesni(t1, _mm_aeskeygenassist_si128(t2, 0x10));
    p_k.ni.k[11] = k11 = t2 = p_init256_2_aesni(t1, t2);
    p_k.ni.k[12] = k12 = t1 = p_init256_1_aesni(t1, _mm_aeskeygenassist_si128(t2, 0x20));
    p_k.ni.k[13] = k13 = t2 = p_init256_2_aesni(t1, t2);
    p_k.ni.k[14] = p_init256_1_aesni(t1, _mm_aeskeygenassist_si128(t2, 0x40));
    // Decryption keys: InvMixColumns of rounds 13..1, stored for equivalent
    // inverse cipher use by p_decrypt_aesni().
    p_k.ni.k[15] = _mm_aesimc_si128(k13);
    p_k.ni.k[16] = _mm_aesimc_si128(k12);
    p_k.ni.k[17] = _mm_aesimc_si128(k11);
    p_k.ni.k[18] = _mm_aesimc_si128(k10);
    p_k.ni.k[19] = _mm_aesimc_si128(k9);
    p_k.ni.k[20] = _mm_aesimc_si128(k8);
    p_k.ni.k[21] = _mm_aesimc_si128(k7);
    p_k.ni.k[22] = _mm_aesimc_si128(k6);
    p_k.ni.k[23] = _mm_aesimc_si128(k5);
    p_k.ni.k[24] = _mm_aesimc_si128(k4);
    p_k.ni.k[25] = _mm_aesimc_si128(k3);
    p_k.ni.k[26] = _mm_aesimc_si128(k2);
    p_k.ni.k[27] = _mm_aesimc_si128(k1);
    // H = E_k(0): the zero block XOR k[0] is just k[0], then the full rounds.
    __m128i h = p_k.ni.k[0]; // _mm_xor_si128(_mm_setzero_si128(),_k.ni.k[0]);
    h = _mm_aesenc_si128(h, k1);
    h = _mm_aesenc_si128(h, k2);
    h = _mm_aesenc_si128(h, k3);
    h = _mm_aesenc_si128(h, k4);
    h = _mm_aesenc_si128(h, k5);
    h = _mm_aesenc_si128(h, k6);
    h = _mm_aesenc_si128(h, k7);
    h = _mm_aesenc_si128(h, k8);
    h = _mm_aesenc_si128(h, k9);
    h = _mm_aesenc_si128(h, k10);
    h = _mm_aesenc_si128(h, k11);
    h = _mm_aesenc_si128(h, k12);
    h = _mm_aesenc_si128(h, k13);
    h = _mm_aesenclast_si128(h, p_k.ni.k[14]);
    // Powers of H for the 4-way GHASH in p_aesNIUpdate().
    __m128i hswap = _mm_shuffle_epi8(h, s_sseSwapBytes);
    __m128i hh = p_gmacPCLMUL128(hswap, h);
    __m128i hhh = p_gmacPCLMUL128(hswap, hh);
    __m128i hhhh = p_gmacPCLMUL128(hswap, hhh);
    p_k.ni.h[0] = hswap;
    p_k.ni.h[1] = hh = _mm_shuffle_epi8(hh, s_sseSwapBytes);
    p_k.ni.h[2] = hhh = _mm_shuffle_epi8(hhh, s_sseSwapBytes);
    p_k.ni.h[3] = hhhh = _mm_shuffle_epi8(hhhh, s_sseSwapBytes);
    // XOR of the two 64-bit halves of each power (shuffle 78 swaps halves).
    p_k.ni.h2[0] = _mm_xor_si128(_mm_shuffle_epi32(hswap, 78), hswap);
    p_k.ni.h2[1] = _mm_xor_si128(_mm_shuffle_epi32(hh, 78), hh);
    p_k.ni.h2[2] = _mm_xor_si128(_mm_shuffle_epi32(hhh, 78), hhh);
    p_k.ni.h2[3] = _mm_xor_si128(_mm_shuffle_epi32(hhhh, 78), hhhh);
}
#ifdef __GNUC__
__attribute__((__target__("sse,sse2,aes")))
#endif
// Single-block AES-256 ECB encryption using AES-NI: initial whitening XOR,
// thirteen full rounds, then the final round without MixColumns.
void
AES::p_encrypt_aesni(const void *const in, void *const out) const noexcept
{
    __m128i state = _mm_xor_si128(_mm_loadu_si128(reinterpret_cast<const __m128i *>(in)), p_k.ni.k[0]);
    for (int r = 1; r <= 13; ++r)
        state = _mm_aesenc_si128(state, p_k.ni.k[r]);
    _mm_storeu_si128(reinterpret_cast<__m128i *>(out), _mm_aesenclast_si128(state, p_k.ni.k[14]));
}
#ifdef __GNUC__
__attribute__((__target__("sse,sse2,aes")))
#endif
// Single-block AES-256 ECB decryption using AES-NI's equivalent inverse
// cipher: whitening with the last encryption key, thirteen inverse rounds
// using the InvMixColumns'd keys k[15..27], then the final inverse round
// with the first encryption key.
void
AES::p_decrypt_aesni(const void *in, void *out) const noexcept
{
    __m128i state = _mm_xor_si128(_mm_loadu_si128(reinterpret_cast<const __m128i *>(in)), p_k.ni.k[14]);
    for (int r = 15; r <= 27; ++r)
        state = _mm_aesdec_si128(state, p_k.ni.k[r]);
    _mm_storeu_si128(reinterpret_cast<__m128i *>(out), _mm_aesdeclast_si128(state, p_k.ni.k[0]));
}
} // namespace ZeroTier
#endif // ZT_AES_AESNI

View file

@ -1,392 +0,0 @@
/*
* Copyright (c)2013-2021 ZeroTier, Inc.
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
*/
/****/
// AES for ARM crypto extensions and NEON.
#include "AES.hpp"
#include "Constants.hpp"
#ifdef ZT_AES_NEON
namespace ZeroTier {
namespace {
// One GHASH step on ARMv8 crypto extensions: folds the 16-byte block at b into
// accumulator y and multiplies by h in GF(2^128) using PMULL/PMULL2, including
// the reduction. vrbitq_u8 handles GHASH's reflected bit order; the PMULL
// instructions are issued via inline asm.
ZT_INLINE uint8x16_t s_clmul_armneon_crypto(uint8x16_t h, uint8x16_t y, const uint8_t b[16]) noexcept
{
    uint8x16_t r0, r1, t0, t1;
    r0 = vld1q_u8(b);
    const uint8x16_t z = veorq_u8(h, h); // all-zero vector
    y = veorq_u8(r0, y);                 // fold block into accumulator
    y = vrbitq_u8(y);                    // to reflected (GHASH) bit order
    // p holds the reduction constant for the GHASH polynomial.
    const uint8x16_t p = vreinterpretq_u8_u64(vdupq_n_u64(0x0000000000000087));
    t0 = vextq_u8(y, y, 8);
    // Four 64x64 carry-less partial products of h and y.
    __asm__ __volatile__("pmull %0.1q, %1.1d, %2.1d \n\t" : "=w"(r0) : "w"(h), "w"(y));
    __asm__ __volatile__("pmull2 %0.1q, %1.2d, %2.2d \n\t" : "=w"(r1) : "w"(h), "w"(y));
    __asm__ __volatile__("pmull %0.1q, %1.1d, %2.1d \n\t" : "=w"(t1) : "w"(h), "w"(t0));
    __asm__ __volatile__("pmull2 %0.1q, %1.2d, %2.2d \n\t" : "=w"(t0) : "w"(h), "w"(t0));
    // Fold the middle products into the low (r0) and high (r1) halves.
    t0 = veorq_u8(t0, t1);
    t1 = vextq_u8(z, t0, 8);
    r0 = veorq_u8(r0, t1);
    t1 = vextq_u8(t0, z, 8);
    r1 = veorq_u8(r1, t1);
    // Two-step reduction of the high half using the polynomial constant.
    __asm__ __volatile__("pmull2 %0.1q, %1.2d, %2.2d \n\t" : "=w"(t0) : "w"(r1), "w"(p));
    t1 = vextq_u8(t0, z, 8);
    r1 = veorq_u8(r1, t1);
    t1 = vextq_u8(z, t0, 8);
    r0 = veorq_u8(r0, t1);
    __asm__ __volatile__("pmull %0.1q, %1.1d, %2.1d \n\t" : "=w"(t0) : "w"(r1), "w"(p));
    return vrbitq_u8(veorq_u8(r0, t0)); // back to normal bit order
}
} // anonymous namespace
// GMAC incremental update (ARM crypto path): folds len bytes of input into the
// running GHASH accumulator _y. Input need not be block-aligned; a partial
// trailing block is buffered in _r/_rp and completed on the next update() or
// by finish().
void AES::GMAC::p_armUpdate(const uint8_t *in, unsigned int len) noexcept
{
    const uint8x16_t h = _aes.p_k.neon.h;
    uint8x16_t y = vld1q_u8(reinterpret_cast<const uint8_t *>(_y));

    // Top up a partial block buffered by a previous call, if any.
    if (_rp) {
        while (_rp != 16) {
            if (!len)
                return; // still short of a full block; state unchanged
            _r[_rp++] = *(in++);
            --len;
        }
        y = s_clmul_armneon_crypto(h, y, _r);
    }

    // Absorb all remaining whole 16-byte blocks.
    unsigned int whole = len >> 4U;
    len &= 15U;
    while (whole != 0) {
        y = s_clmul_armneon_crypto(h, y, in);
        in += 16;
        --whole;
    }

    vst1q_u8(reinterpret_cast<uint8_t *>(_y), y);

    // Buffer the tail (< 16 bytes) for a later update() or finish().
    for (unsigned int i = 0; i < len; ++i)
        _r[i] = in[i];
    _rp = len; // len is always less than 16 here
}
// GMAC finalization (ARM crypto path): pads and absorbs any buffered partial
// block, folds in the bit length, encrypts the block IV || 0x00000001 with the
// cipher, and XORs it with the GHASH result to produce the 16-byte tag.
void AES::GMAC::p_armFinish(uint8_t tag[16]) noexcept
{
    uint64_t tmp[2];
    uint8x16_t y = vld1q_u8(reinterpret_cast<const uint8_t *>(_y));
    const uint8x16_t h = _aes.p_k.neon.h;

    // Zero-pad and absorb any remaining buffered bytes.
    if (_rp) {
        while (_rp < 16)
            _r[_rp++] = 0;
        y = s_clmul_armneon_crypto(h, y, _r);
    }

    // Fold in the total message length in bits (big-endian, high half).
    tmp[0] = Utils::hton((uint64_t)_len << 3U);
    tmp[1] = 0;
    y = s_clmul_armneon_crypto(h, y, reinterpret_cast<const uint8_t *>(tmp));

    // Build the 16-byte counter block: 96-bit IV followed by 32-bit 1
    // (big-endian on the wire, hence the byte-order split below).
    Utils::copy<12>(tmp, _iv);
#if __BYTE_ORDER == __BIG_ENDIAN
    reinterpret_cast<uint32_t *>(tmp)[3] = 0x00000001;
#else
    reinterpret_cast<uint32_t *>(tmp)[3] = 0x01000000;
#endif
    _aes.encrypt(tmp, tmp);

    // tag = E_k(IV||1) XOR GHASH.
    uint8x16_t yy = y;
    Utils::storeMachineEndian<uint64_t>(tag, tmp[0] ^ reinterpret_cast<const uint64_t *>(&yy)[0]);
    Utils::storeMachineEndian<uint64_t>(tag + 8, tmp[1] ^ reinterpret_cast<const uint64_t *>(&yy)[1]);
}
// AES-256 CTR encrypt/decrypt (ARM crypto path): XORs len bytes of keystream
// with in, writing to out. The counter is kept in dd in reversed (host) word
// order so it can be incremented with a vector add; vrev32q_u8 restores wire
// order before encryption. A trailing partial block is left in the output and
// completed by the next call (tracked via _len not being a multiple of 16).
void AES::CTR::p_armCrypt(const uint8_t *in, uint8_t *out, unsigned int len) noexcept
{
    uint8x16_t dd = vrev32q_u8(vld1q_u8(reinterpret_cast<uint8_t *>(_ctr)));
    const uint32x4_t one = { 0, 0, 0, 1 };

    uint8x16_t k0 = _aes.p_k.neon.ek[0];
    uint8x16_t k1 = _aes.p_k.neon.ek[1];
    uint8x16_t k2 = _aes.p_k.neon.ek[2];
    uint8x16_t k3 = _aes.p_k.neon.ek[3];
    uint8x16_t k4 = _aes.p_k.neon.ek[4];
    uint8x16_t k5 = _aes.p_k.neon.ek[5];
    uint8x16_t k6 = _aes.p_k.neon.ek[6];
    uint8x16_t k7 = _aes.p_k.neon.ek[7];
    uint8x16_t k8 = _aes.p_k.neon.ek[8];
    uint8x16_t k9 = _aes.p_k.neon.ek[9];
    uint8x16_t k10 = _aes.p_k.neon.ek[10];
    uint8x16_t k11 = _aes.p_k.neon.ek[11];
    uint8x16_t k12 = _aes.p_k.neon.ek[12];
    uint8x16_t k13 = _aes.p_k.neon.ek[13];
    uint8x16_t k14 = _aes.p_k.neon.ek[14];

    // Complete any unfinished block from a previous call.
    unsigned int totalLen = _len;
    if ((totalLen & 15U) != 0) {
        for (;;) {
            if (unlikely(!len)) {
                // Still not a full block: save counter and length, done for now.
                vst1q_u8(reinterpret_cast<uint8_t *>(_ctr), vrev32q_u8(dd));
                _len = totalLen;
                return;
            }
            --len;
            out[totalLen++] = *(in++);
            if ((totalLen & 15U) == 0) {
                // Buffered block is now complete: encrypt one counter block and
                // XOR it over the plaintext accumulated in the output buffer.
                uint8_t *const otmp = out + (totalLen - 16);
                uint8x16_t d0 = vrev32q_u8(dd);
                uint8x16_t pt = vld1q_u8(otmp);
                d0 = vaesmcq_u8(vaeseq_u8(d0, k0));
                d0 = vaesmcq_u8(vaeseq_u8(d0, k1));
                d0 = vaesmcq_u8(vaeseq_u8(d0, k2));
                d0 = vaesmcq_u8(vaeseq_u8(d0, k3));
                d0 = vaesmcq_u8(vaeseq_u8(d0, k4));
                d0 = vaesmcq_u8(vaeseq_u8(d0, k5));
                d0 = vaesmcq_u8(vaeseq_u8(d0, k6));
                d0 = vaesmcq_u8(vaeseq_u8(d0, k7));
                d0 = vaesmcq_u8(vaeseq_u8(d0, k8));
                d0 = vaesmcq_u8(vaeseq_u8(d0, k9));
                d0 = vaesmcq_u8(vaeseq_u8(d0, k10));
                d0 = vaesmcq_u8(vaeseq_u8(d0, k11));
                d0 = vaesmcq_u8(vaeseq_u8(d0, k12));
                d0 = veorq_u8(vaeseq_u8(d0, k13), k14);
                vst1q_u8(otmp, veorq_u8(pt, d0));
                dd = (uint8x16_t)vaddq_u32((uint32x4_t)dd, one);
                break;
            }
        }
    }

    out += totalLen;
    _len = totalLen + len;

    // Fast path: four independent counter blocks per iteration, rounds
    // interleaved to hide AES instruction latency.
    if (likely(len >= 64)) {
        const uint32x4_t four = vshlq_n_u32(one, 2);
        uint8x16_t dd1 = (uint8x16_t)vaddq_u32((uint32x4_t)dd, one);
        uint8x16_t dd2 = (uint8x16_t)vaddq_u32((uint32x4_t)dd1, one);
        uint8x16_t dd3 = (uint8x16_t)vaddq_u32((uint32x4_t)dd2, one);
        for (;;) {
            len -= 64;
            uint8x16_t d0 = vrev32q_u8(dd);
            uint8x16_t d1 = vrev32q_u8(dd1);
            uint8x16_t d2 = vrev32q_u8(dd2);
            uint8x16_t d3 = vrev32q_u8(dd3);
            uint8x16_t pt0 = vld1q_u8(in);
            uint8x16_t pt1 = vld1q_u8(in + 16);
            uint8x16_t pt2 = vld1q_u8(in + 32);
            uint8x16_t pt3 = vld1q_u8(in + 48);
            d0 = vaesmcq_u8(vaeseq_u8(d0, k0));
            d1 = vaesmcq_u8(vaeseq_u8(d1, k0));
            d2 = vaesmcq_u8(vaeseq_u8(d2, k0));
            d3 = vaesmcq_u8(vaeseq_u8(d3, k0));
            d0 = vaesmcq_u8(vaeseq_u8(d0, k1));
            d1 = vaesmcq_u8(vaeseq_u8(d1, k1));
            d2 = vaesmcq_u8(vaeseq_u8(d2, k1));
            d3 = vaesmcq_u8(vaeseq_u8(d3, k1));
            d0 = vaesmcq_u8(vaeseq_u8(d0, k2));
            d1 = vaesmcq_u8(vaeseq_u8(d1, k2));
            d2 = vaesmcq_u8(vaeseq_u8(d2, k2));
            d3 = vaesmcq_u8(vaeseq_u8(d3, k2));
            d0 = vaesmcq_u8(vaeseq_u8(d0, k3));
            d1 = vaesmcq_u8(vaeseq_u8(d1, k3));
            d2 = vaesmcq_u8(vaeseq_u8(d2, k3));
            d3 = vaesmcq_u8(vaeseq_u8(d3, k3));
            d0 = vaesmcq_u8(vaeseq_u8(d0, k4));
            d1 = vaesmcq_u8(vaeseq_u8(d1, k4));
            d2 = vaesmcq_u8(vaeseq_u8(d2, k4));
            d3 = vaesmcq_u8(vaeseq_u8(d3, k4));
            d0 = vaesmcq_u8(vaeseq_u8(d0, k5));
            d1 = vaesmcq_u8(vaeseq_u8(d1, k5));
            d2 = vaesmcq_u8(vaeseq_u8(d2, k5));
            d3 = vaesmcq_u8(vaeseq_u8(d3, k5));
            d0 = vaesmcq_u8(vaeseq_u8(d0, k6));
            d1 = vaesmcq_u8(vaeseq_u8(d1, k6));
            d2 = vaesmcq_u8(vaeseq_u8(d2, k6));
            d3 = vaesmcq_u8(vaeseq_u8(d3, k6));
            d0 = vaesmcq_u8(vaeseq_u8(d0, k7));
            d1 = vaesmcq_u8(vaeseq_u8(d1, k7));
            d2 = vaesmcq_u8(vaeseq_u8(d2, k7));
            d3 = vaesmcq_u8(vaeseq_u8(d3, k7));
            d0 = vaesmcq_u8(vaeseq_u8(d0, k8));
            d1 = vaesmcq_u8(vaeseq_u8(d1, k8));
            d2 = vaesmcq_u8(vaeseq_u8(d2, k8));
            d3 = vaesmcq_u8(vaeseq_u8(d3, k8));
            d0 = vaesmcq_u8(vaeseq_u8(d0, k9));
            d1 = vaesmcq_u8(vaeseq_u8(d1, k9));
            d2 = vaesmcq_u8(vaeseq_u8(d2, k9));
            d3 = vaesmcq_u8(vaeseq_u8(d3, k9));
            d0 = vaesmcq_u8(vaeseq_u8(d0, k10));
            d1 = vaesmcq_u8(vaeseq_u8(d1, k10));
            d2 = vaesmcq_u8(vaeseq_u8(d2, k10));
            d3 = vaesmcq_u8(vaeseq_u8(d3, k10));
            d0 = vaesmcq_u8(vaeseq_u8(d0, k11));
            d1 = vaesmcq_u8(vaeseq_u8(d1, k11));
            d2 = vaesmcq_u8(vaeseq_u8(d2, k11));
            d3 = vaesmcq_u8(vaeseq_u8(d3, k11));
            d0 = vaesmcq_u8(vaeseq_u8(d0, k12));
            d1 = vaesmcq_u8(vaeseq_u8(d1, k12));
            d2 = vaesmcq_u8(vaeseq_u8(d2, k12));
            d3 = vaesmcq_u8(vaeseq_u8(d3, k12));
            d0 = veorq_u8(vaeseq_u8(d0, k13), k14);
            d1 = veorq_u8(vaeseq_u8(d1, k13), k14);
            d2 = veorq_u8(vaeseq_u8(d2, k13), k14);
            d3 = veorq_u8(vaeseq_u8(d3, k13), k14);
            d0 = veorq_u8(pt0, d0);
            d1 = veorq_u8(pt1, d1);
            d2 = veorq_u8(pt2, d2);
            d3 = veorq_u8(pt3, d3);
            vst1q_u8(out, d0);
            vst1q_u8(out + 16, d1);
            vst1q_u8(out + 32, d2);
            vst1q_u8(out + 48, d3);
            out += 64;
            in += 64;
            // Only dd is needed after the loop exits, so dd1..dd3 are not
            // advanced on the final iteration.
            dd = (uint8x16_t)vaddq_u32((uint32x4_t)dd, four);
            if (unlikely(len < 64))
                break;
            dd1 = (uint8x16_t)vaddq_u32((uint32x4_t)dd1, four);
            dd2 = (uint8x16_t)vaddq_u32((uint32x4_t)dd2, four);
            dd3 = (uint8x16_t)vaddq_u32((uint32x4_t)dd3, four);
        }
    }

    // Remaining whole 16-byte blocks, one at a time.
    while (len >= 16) {
        len -= 16;
        uint8x16_t d0 = vrev32q_u8(dd);
        uint8x16_t pt = vld1q_u8(in);
        in += 16;
        dd = (uint8x16_t)vaddq_u32((uint32x4_t)dd, one);
        d0 = vaesmcq_u8(vaeseq_u8(d0, k0));
        d0 = vaesmcq_u8(vaeseq_u8(d0, k1));
        d0 = vaesmcq_u8(vaeseq_u8(d0, k2));
        d0 = vaesmcq_u8(vaeseq_u8(d0, k3));
        d0 = vaesmcq_u8(vaeseq_u8(d0, k4));
        d0 = vaesmcq_u8(vaeseq_u8(d0, k5));
        d0 = vaesmcq_u8(vaeseq_u8(d0, k6));
        d0 = vaesmcq_u8(vaeseq_u8(d0, k7));
        d0 = vaesmcq_u8(vaeseq_u8(d0, k8));
        d0 = vaesmcq_u8(vaeseq_u8(d0, k9));
        d0 = vaesmcq_u8(vaeseq_u8(d0, k10));
        d0 = vaesmcq_u8(vaeseq_u8(d0, k11));
        d0 = vaesmcq_u8(vaeseq_u8(d0, k12));
        d0 = veorq_u8(vaeseq_u8(d0, k13), k14);
        vst1q_u8(out, veorq_u8(pt, d0));
        out += 16;
    }

    // Any remaining input is placed in _out. This will be picked up and crypted
    // on subsequent calls to crypt() or finish() as it'll mean _len will not be
    // an even multiple of 16.
    for (unsigned int i = 0; i < len; ++i)
        out[i] = in[i];

    vst1q_u8(reinterpret_cast<uint8_t *>(_ctr), vrev32q_u8(dd));
}
// Helpers for the scalar AES key schedule below, in FIPS-197 notation:
// SUBWORD applies the S-box to each byte of a 32-bit word, ROTWORD rotates
// the word left by one byte; NK = key words (8), NB = block words (4) and
// NR = rounds (14) correspond to AES-256.
#define ZT_INIT_ARMNEON_CRYPTO_SUBWORD(w) ((uint32_t)s_sbox[w & 0xffU] + ((uint32_t)s_sbox[(w >> 8U) & 0xffU] << 8U) + ((uint32_t)s_sbox[(w >> 16U) & 0xffU] << 16U) + ((uint32_t)s_sbox[(w >> 24U) & 0xffU] << 24U))
#define ZT_INIT_ARMNEON_CRYPTO_ROTWORD(w) (((w) << 8U) | ((w) >> 24U))
#define ZT_INIT_ARMNEON_CRYPTO_NK 8
#define ZT_INIT_ARMNEON_CRYPTO_NB 4
#define ZT_INIT_ARMNEON_CRYPTO_NR 14
// Initialize the ARMv8 crypto-extension key state from a raw 256-bit AES key.
// Performs software key expansion (FIPS-197), derives the decryption round key
// schedule, and precomputes the GHASH subkey H = AES-K(0^128).
void AES::p_init_armneon_crypto(const uint8_t *key) noexcept
{
// AES S-box, used here only for software key expansion; the round functions
// themselves use the hardware AESE/AESD/AESMC/AESIMC instructions.
static const uint8_t s_sbox[256] = { 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76, 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0, 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15, 0x04, 0xc7, 0x23, 0xc3,
0x18, 0x96, 0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75, 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84, 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf, 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85,
0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8, 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2, 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73, 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14,
0xde, 0x5e, 0x0b, 0xdb, 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c, 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79, 0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08, 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a,
0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e, 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf, 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16 };
uint64_t h[2];
// Write the expanded key schedule directly into the NEON encryption keys
// (15 round keys of 4 words each: Nk=8, Nb=4, Nr=14 => AES-256).
uint32_t *const w = reinterpret_cast<uint32_t *>(p_k.neon.ek);
// First Nk words are the raw key, assembled in big-endian word order.
for (unsigned int i = 0; i < ZT_INIT_ARMNEON_CRYPTO_NK; ++i) {
const unsigned int j = i * 4;
w[i] = ((uint32_t)key[j] << 24U) | ((uint32_t)key[j + 1] << 16U) | ((uint32_t)key[j + 2] << 8U) | (uint32_t)key[j + 3];
}
// Remaining words follow the standard FIPS-197 schedule. NK is a power of
// two, so (i & (NK-1)) == (i % NK).
for (unsigned int i = ZT_INIT_ARMNEON_CRYPTO_NK; i < (ZT_INIT_ARMNEON_CRYPTO_NB * (ZT_INIT_ARMNEON_CRYPTO_NR + 1)); ++i) {
uint32_t t = w[i - 1];
const unsigned int imod = i & (ZT_INIT_ARMNEON_CRYPTO_NK - 1);
if (imod == 0) {
// 'rcon' is the AES round-constant table (defined elsewhere in this file).
t = ZT_INIT_ARMNEON_CRYPTO_SUBWORD(ZT_INIT_ARMNEON_CRYPTO_ROTWORD(t)) ^ rcon[(i - 1) / ZT_INIT_ARMNEON_CRYPTO_NK];
}
else if (imod == 4) {
// Extra SubWord step applied for Nk > 6 (i.e. AES-256 only).
t = ZT_INIT_ARMNEON_CRYPTO_SUBWORD(t);
}
w[i] = w[i - ZT_INIT_ARMNEON_CRYPTO_NK] ^ t;
}
// Convert each schedule word to big-endian (wire) byte order, since the
// NEON AES instructions operate on byte vectors rather than host words.
for (unsigned int i = 0; i < (ZT_INIT_ARMNEON_CRYPTO_NB * (ZT_INIT_ARMNEON_CRYPTO_NR + 1)); ++i)
w[i] = Utils::hton(w[i]);
// Decryption schedule: encryption round keys in reverse order, with the
// inverse-MixColumns transform (vaesimcq) applied to the 13 middle keys as
// required by the AESD/AESIMC equivalent inverse cipher structure.
p_k.neon.dk[0] = p_k.neon.ek[14];
for (int i = 1; i < 14; ++i)
p_k.neon.dk[i] = vaesimcq_u8(p_k.neon.ek[14 - i]);
p_k.neon.dk[14] = p_k.neon.ek[0];
// GHASH subkey H = AES-K(0^128). The NEON copy is bit-reversed within each
// byte (vrbitq) -- presumably for the PMULL-based GHASH path; TODO confirm.
// The software copy is stored as two host-order 64-bit words for the
// portable GHASH fallback.
p_encrypt_armneon_crypto(Utils::ZERO256, h);
Utils::copy<16>(&(p_k.neon.h), h);
p_k.neon.h = vrbitq_u8(p_k.neon.h);
p_k.sw.h[0] = Utils::ntoh(h[0]);
p_k.sw.h[1] = Utils::ntoh(h[1]);
}
void AES::p_encrypt_armneon_crypto(const void *const in, void *const out) const noexcept
{
uint8x16_t tmp = vld1q_u8(reinterpret_cast<const uint8_t *>(in));
tmp = vaesmcq_u8(vaeseq_u8(tmp, p_k.neon.ek[0]));
tmp = vaesmcq_u8(vaeseq_u8(tmp, p_k.neon.ek[1]));
tmp = vaesmcq_u8(vaeseq_u8(tmp, p_k.neon.ek[2]));
tmp = vaesmcq_u8(vaeseq_u8(tmp, p_k.neon.ek[3]));
tmp = vaesmcq_u8(vaeseq_u8(tmp, p_k.neon.ek[4]));
tmp = vaesmcq_u8(vaeseq_u8(tmp, p_k.neon.ek[5]));
tmp = vaesmcq_u8(vaeseq_u8(tmp, p_k.neon.ek[6]));
tmp = vaesmcq_u8(vaeseq_u8(tmp, p_k.neon.ek[7]));
tmp = vaesmcq_u8(vaeseq_u8(tmp, p_k.neon.ek[8]));
tmp = vaesmcq_u8(vaeseq_u8(tmp, p_k.neon.ek[9]));
tmp = vaesmcq_u8(vaeseq_u8(tmp, p_k.neon.ek[10]));
tmp = vaesmcq_u8(vaeseq_u8(tmp, p_k.neon.ek[11]));
tmp = vaesmcq_u8(vaeseq_u8(tmp, p_k.neon.ek[12]));
tmp = veorq_u8(vaeseq_u8(tmp, p_k.neon.ek[13]), p_k.neon.ek[14]);
vst1q_u8(reinterpret_cast<uint8_t *>(out), tmp);
}
void AES::p_decrypt_armneon_crypto(const void *const in, void *const out) const noexcept
{
uint8x16_t tmp = vld1q_u8(reinterpret_cast<const uint8_t *>(in));
tmp = vaesimcq_u8(vaesdq_u8(tmp, p_k.neon.dk[0]));
tmp = vaesimcq_u8(vaesdq_u8(tmp, p_k.neon.dk[1]));
tmp = vaesimcq_u8(vaesdq_u8(tmp, p_k.neon.dk[2]));
tmp = vaesimcq_u8(vaesdq_u8(tmp, p_k.neon.dk[3]));
tmp = vaesimcq_u8(vaesdq_u8(tmp, p_k.neon.dk[4]));
tmp = vaesimcq_u8(vaesdq_u8(tmp, p_k.neon.dk[5]));
tmp = vaesimcq_u8(vaesdq_u8(tmp, p_k.neon.dk[6]));
tmp = vaesimcq_u8(vaesdq_u8(tmp, p_k.neon.dk[7]));
tmp = vaesimcq_u8(vaesdq_u8(tmp, p_k.neon.dk[8]));
tmp = vaesimcq_u8(vaesdq_u8(tmp, p_k.neon.dk[9]));
tmp = vaesimcq_u8(vaesdq_u8(tmp, p_k.neon.dk[10]));
tmp = vaesimcq_u8(vaesdq_u8(tmp, p_k.neon.dk[11]));
tmp = vaesimcq_u8(vaesdq_u8(tmp, p_k.neon.dk[12]));
tmp = veorq_u8(vaesdq_u8(tmp, p_k.neon.dk[13]), p_k.neon.dk[14]);
vst1q_u8(reinterpret_cast<uint8_t *>(out), tmp);
}
} // namespace ZeroTier
#endif // ZT_AES_NEON

View file

@ -1,143 +0,0 @@
/*
* Copyright (c)2013-2021 ZeroTier, Inc.
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
*/
/****/
#ifndef ZT_ADDRESS_HPP
#define ZT_ADDRESS_HPP
#include "Constants.hpp"
#include "Containers.hpp"
#include "TriviallyCopyable.hpp"
#include "Utils.hpp"
#define ZT_ADDRESS_STRING_SIZE_MAX (ZT_ADDRESS_LENGTH_HEX + 1)
namespace ZeroTier {
/**
* A ZeroTier address
*
* This is merely a 40-bit short address packed into a uint64_t and wrapped with methods.
*/
class Address : public TriviallyCopyable {
public:
    ZT_INLINE Address() noexcept : _a(0) {}

    ZT_INLINE Address(const uint64_t a) noexcept : _a(a) {}

    /**
     * @param b Raw address -- 5 bytes, big-endian byte order
     */
    explicit ZT_INLINE Address(const uint8_t b[5]) noexcept : _a(((uint64_t)b[0] << 32U) | ((uint64_t)b[1] << 24U) | ((uint64_t)b[2] << 16U) | ((uint64_t)b[3] << 8U) | (uint64_t)b[4]) {}

    ZT_INLINE Address &operator=(const uint64_t a) noexcept
    {
        _a = a;
        return *this;
    }

    /**
     * @param b Raw address -- 5 bytes, big-endian byte order
     */
    ZT_INLINE void setTo(const uint8_t b[5]) noexcept { _a = ((uint64_t)b[0] << 32U) | ((uint64_t)b[1] << 24U) | ((uint64_t)b[2] << 16U) | ((uint64_t)b[3] << 8U) | (uint64_t)b[4]; }

    /**
     * @param b Buffer to hold 5-byte address in big-endian byte order
     */
    ZT_INLINE void copyTo(uint8_t b[5]) const noexcept
    {
        const uint64_t a = _a;
        b[0] = (uint8_t)(a >> 32U);
        b[1] = (uint8_t)(a >> 24U);
        b[2] = (uint8_t)(a >> 16U);
        b[3] = (uint8_t)(a >> 8U);
        b[4] = (uint8_t)a;
    }

    /**
     * @return Integer containing address (0 to 2^40)
     */
    ZT_INLINE uint64_t toInt() const noexcept { return _a; }

    /**
     * Set address to zero/NIL
     */
    ZT_INLINE void zero() noexcept { _a = 0; }

    /**
     * @param s String with at least 11 characters of space available (10 + terminating NULL)
     * @return Hexadecimal string (always null-terminated)
     */
    ZT_INLINE char *toString(char s[ZT_ADDRESS_STRING_SIZE_MAX]) const noexcept
    {
        for (unsigned int i = 0; i < 10; ++i) {
            s[i] = Utils::HEXCHARS[(uintptr_t)(_a >> (36U - (i * 4U))) & 0xfU];
        }
        s[10] = 0; // FIX: the loop writes only s[0..9]; without this terminator
                   // toString()/String(s) would read uninitialized stack memory
        return s;
    }

    ZT_INLINE String toString() const
    {
        char s[ZT_ADDRESS_STRING_SIZE_MAX];
        toString(s);
        return String(s);
    }

    /**
     * Check if this address is reserved
     *
     * The all-zero null address and any address beginning with 0xff are
     * reserved. (0xff is reserved for future use to designate possibly
     * longer addresses, addresses based on IPv6 innards, etc.)
     *
     * @return True if address is reserved and may not be used
     */
    ZT_INLINE bool isReserved() const noexcept { return ((!_a) || ((_a >> 32U) == ZT_ADDRESS_RESERVED_PREFIX)); }

    ZT_INLINE unsigned long hashCode() const noexcept { return (unsigned long)_a; }

    ZT_INLINE operator bool() const noexcept { return (_a != 0); }
    ZT_INLINE operator uint64_t() const noexcept { return _a; }

    ZT_INLINE bool operator==(const Address &a) const noexcept { return _a == a._a; }
    ZT_INLINE bool operator!=(const Address &a) const noexcept { return _a != a._a; }
    ZT_INLINE bool operator>(const Address &a) const noexcept { return _a > a._a; }
    ZT_INLINE bool operator<(const Address &a) const noexcept { return _a < a._a; }
    ZT_INLINE bool operator>=(const Address &a) const noexcept { return _a >= a._a; }
    ZT_INLINE bool operator<=(const Address &a) const noexcept { return _a <= a._a; }
    ZT_INLINE bool operator==(const uint64_t a) const noexcept { return _a == a; }
    ZT_INLINE bool operator!=(const uint64_t a) const noexcept { return _a != a; }
    ZT_INLINE bool operator>(const uint64_t a) const noexcept { return _a > a; }
    ZT_INLINE bool operator<(const uint64_t a) const noexcept { return _a < a; }
    ZT_INLINE bool operator>=(const uint64_t a) const noexcept { return _a >= a; }
    ZT_INLINE bool operator<=(const uint64_t a) const noexcept { return _a <= a; }

private:
    uint64_t _a;
};
static_assert(sizeof(Address) == sizeof(uint64_t), "Address has unnecessary extra padding");
} // namespace ZeroTier
#endif

View file

@ -1,102 +0,0 @@
/*
* Copyright (c)2013-2021 ZeroTier, Inc.
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
*/
/****/
#include "Buf.hpp"
#include "Spinlock.hpp"
namespace ZeroTier {
static std::atomic<uintptr_t> s_pool(0);
static std::atomic<long> s_allocated(0);
// uintptr_max can never be a valid pointer, so use it to indicate that s_pool is locked (very short duration spinlock)
#define ZT_ATOMIC_PTR_LOCKED (~((uintptr_t)0))
// Allocate a Buf from the lock-free free list, falling back to malloc() when
// the pool is empty. Throws on allocation failure.
void *Buf::operator new(std::size_t sz)
{
uintptr_t bb;
for (;;) {
// Swap in the "locked" sentinel; if we got the sentinel back, another
// thread holds the list head and we must spin and retry.
bb = s_pool.exchange(ZT_ATOMIC_PTR_LOCKED, std::memory_order_acquire);
if (likely(bb != ZT_ATOMIC_PTR_LOCKED)) {
Buf *b;
if (likely(bb != 0)) {
// Pop the head of the free list and publish its successor.
b = reinterpret_cast<Buf *>(bb);
s_pool.store(b->__nextInPool, std::memory_order_release);
}
else {
// Pool empty: release the (empty) list before the potentially slow
// malloc so other threads are not blocked behind the sentinel.
s_pool.store(0, std::memory_order_release);
b = reinterpret_cast<Buf *>(malloc(sz));
if (!b)
throw Utils::BadAllocException; // project-defined exception object
s_allocated.fetch_add(1, std::memory_order_relaxed);
}
// Fresh buffers always start with a zero reference count.
b->__refCount.store(0, std::memory_order_relaxed);
return reinterpret_cast<void *>(b);
}
Spinlock::pause();
}
}
// Return a Buf to the lock-free pool, or actually free() it if the pool has
// grown past its sanity limit.
void Buf::operator delete(void *ptr)
{
if (likely(ptr != nullptr)) {
// Above the cap, free for real; the counter check is relaxed/approximate.
if (s_allocated.load(std::memory_order_relaxed) > ZT_BUF_MAX_POOL_SIZE) {
s_allocated.fetch_sub(1, std::memory_order_relaxed);
free(ptr);
}
else {
uintptr_t bb;
for (;;) {
// Acquire the list head, spinning while another thread holds the
// ZT_ATOMIC_PTR_LOCKED sentinel.
bb = s_pool.exchange(ZT_ATOMIC_PTR_LOCKED, std::memory_order_acquire);
if (likely(bb != ZT_ATOMIC_PTR_LOCKED)) {
// Link this buffer in as the new head of the free list.
reinterpret_cast<Buf *>(ptr)->__nextInPool = bb;
s_pool.store(reinterpret_cast<uintptr_t>(ptr), std::memory_order_release);
return;
}
Spinlock::pause();
}
}
}
}
// Free every buffer currently held in the pool. Outstanding buffers are
// unaffected and will still be returned to the (now empty) pool later.
void Buf::freePool() noexcept
{
uintptr_t bb;
for (;;) {
// Take exclusive ownership of the whole free list via the lock sentinel.
bb = s_pool.exchange(ZT_ATOMIC_PTR_LOCKED, std::memory_order_acquire);
if (likely(bb != ZT_ATOMIC_PTR_LOCKED)) {
// Publish an empty pool immediately, then walk and free the detached
// chain outside the "critical section."
s_pool.store(0, std::memory_order_release);
while (bb != 0) {
const uintptr_t next = reinterpret_cast<Buf *>(bb)->__nextInPool;
s_allocated.fetch_sub(1, std::memory_order_relaxed);
free(reinterpret_cast<void *>(bb));
bb = next;
}
return;
}
Spinlock::pause();
}
}
// Number of Buf objects currently allocated through the pool mechanism.
long Buf::poolAllocated() noexcept
{
    // Relaxed load: this is a statistics counter, not a synchronization point.
    const long count = s_allocated.load(std::memory_order_relaxed);
    return count;
}
} // namespace ZeroTier

View file

@ -1,791 +0,0 @@
/*
* Copyright (c)2013-2021 ZeroTier, Inc.
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
*/
/****/
#ifndef ZT_BUF_HPP
#define ZT_BUF_HPP
#include "Constants.hpp"
#include "FCV.hpp"
#include "Mutex.hpp"
#include "SharedPtr.hpp"
#include "TriviallyCopyable.hpp"
#include "Utils.hpp"
#include <algorithm>
#include <new>
#include <stdexcept>
#include <utility>
// Buffers are 16384 bytes in size because this is the smallest size that can hold any packet
// and is a power of two. It needs to be a power of two because masking is significantly faster
// than integer division modulus.
#define ZT_BUF_MEM_SIZE 0x00004000
#define ZT_BUF_MEM_MASK 0x00003fffU
// Sanity limit on maximum buffer pool size
#define ZT_BUF_MAX_POOL_SIZE 1024
namespace ZeroTier {
/**
* Buffer and methods for branch-free bounds-checked data assembly and parsing
*
* This implements an extremely fast buffer for packet assembly and parsing that avoids
* branching whenever possible. To be safe it must be used correctly!
*
* The read methods are prefixed by 'r', and write methods with 'w'. All methods take
* an iterator, which is just an int that should be initialized to 0 (or whatever starting
* position is desired). All read methods will advance the iterator regardless of outcome.
*
* Read and write methods fail silently in the event of overflow. They do not corrupt or
* access memory outside the bounds of Buf, but will otherwise produce undefined results.
*
* IT IS THE RESPONSIBILITY OF THE USER of this class to use the readOverflow() and
* writeOverflow() static methods to check the iterator for overflow after each series
* of reads and writes and BEFORE ANY PARSING or other decisions are made on the basis
* of the data obtained from a buffer. Failure to do so can result in bugs due
* to parsing and branching on undefined or corrupt data.
*
* ^^ THIS IS VERY IMPORTANT ^^
*
* A typical packet assembly consists of repeated calls to the write methods followed by
* a check to writeOverflow() before final packet armoring and transport. A typical packet
* disassembly and parsing consists of a series of read calls to obtain the packet's
* fields followed by a call to readOverflow() to check that these fields are valid. The
* packet is discarded if readOverflow() returns true. Some packet parsers may make
* additional reads and in this case readOverflow() must be checked after each set of
* reads to ensure that overflow did not occur.
*
* Buf uses a lock-free pool for extremely fast allocation and deallocation.
*
* Buf can optionally take a template parameter that will be placed in the 'data'
* union as 'fields.' This must be a basic plain data type and must be no larger than
* ZT_BUF_MEM_SIZE. It's typically a packed struct.
*
* Buf instances with different template parameters can freely be cast to one another
* as there is no actual difference in size or layout.
*
* @tparam U Type to overlap with data bytes in data union (can't be larger than ZT_BUF_MEM_SIZE)
*/
class Buf {
friend class SharedPtr<Buf>;
public:
// New and delete operators that allocate Buf instances from a shared lock-free memory pool.
static void *operator new(std::size_t sz);
static void operator delete(void *ptr);
/**
* Raw data held in buffer
*
* The additional eight bytes should not be used and should be considered undefined.
* They exist to allow reads and writes of integer types to silently overflow if a
* read or write is performed at the end of the buffer.
*/
uint8_t unsafeData[ZT_BUF_MEM_SIZE + 8];
/**
* Free all instances of Buf in shared pool.
*
* New buffers will be created and the pool repopulated if get() is called
* and outstanding buffers will still be returned to the pool. This just
* frees buffers currently held in reserve.
*/
static void freePool() noexcept;
/**
* @return Number of Buf objects currently allocated via pool mechanism
*/
static long poolAllocated() noexcept;
/**
* Slice is almost exactly like the built-in slice data structure in Go
*/
struct Slice : TriviallyCopyable {
// Construct a slice spanning bytes [s_, e_) of buffer b_.
ZT_INLINE Slice(const SharedPtr<Buf> &b_, const unsigned int s_, const unsigned int e_) noexcept : b(b_), s(s_), e(e_) {}
// Construct an empty (nil) slice referencing no buffer.
ZT_INLINE Slice() noexcept : b(), s(0), e(0) {}
// A slice is "true" if it references a buffer (it may still be zero length).
ZT_INLINE operator bool() const noexcept { return (b); }
// Number of bytes this slice covers.
ZT_INLINE unsigned int size() const noexcept { return (e - s); }
// Release the buffer reference and reset both bounds to zero.
ZT_INLINE void zero() noexcept
{
b.zero();
s = 0;
e = 0;
}
/**
* Buffer holding slice data
*/
SharedPtr<Buf> b;
/**
* Index of start of data in slice
*/
unsigned int s;
/**
* Index of end of data in slice (make sure it's greater than or equal to 's'!)
*/
unsigned int e;
};
/**
* A vector of slices making up a packet that might span more than one buffer.
*/
/**
 * A vector of slices making up a packet that might span more than one buffer.
 */
class PacketVector : public ZeroTier::FCV<Slice, ZT_MAX_PACKET_FRAGMENTS> {
public:
    ZT_INLINE PacketVector() : ZeroTier::FCV<Slice, ZT_MAX_PACKET_FRAGMENTS>() {}

    /**
     * @return Total number of payload bytes across all slices
     */
    ZT_INLINE unsigned int totalSize() const noexcept
    {
        unsigned int size = 0;
        for (PacketVector::const_iterator s(begin()); s != end(); ++s)
            size += s->e - s->s;
        return size;
    }

    /**
     * Merge this packet vector into a single destination buffer
     *
     * @param b Destination buffer
     * @return Size of data in destination or -1 on error (would exceed ZT_BUF_MEM_SIZE)
     */
    ZT_INLINE int mergeCopy(Buf &b) const noexcept
    {
        unsigned int size = 0;
        for (PacketVector::const_iterator s(begin()); s != end(); ++s) {
            const unsigned int start = s->s;
            const unsigned int rem = s->e - start;
            // Fail rather than truncate if the merged data would overflow.
            if (likely((size + rem) <= ZT_BUF_MEM_SIZE)) {
                Utils::copy(b.unsafeData + size, s->b->unsafeData + start, rem);
                size += rem;
            }
            else {
                return -1;
            }
        }
        return (int)size;
    }

    /**
     * Merge this packet vector into a single destination buffer with an arbitrary copy function
     *
     * This can be used to e.g. simultaneously merge and decrypt a packet.
     *
     * @param b Destination buffer
     * @param simpleCopyBefore Don't start using copyFunction until this index (0 to always use)
     * @param copyFunction Function to invoke with memcpy-like arguments: (dest, source, size)
     * @tparam F Type of copyFunction (typically inferred)
     * @return Size of data in destination or -1 on error
     */
    template <typename F> ZT_INLINE int mergeMap(Buf &b, const unsigned int simpleCopyBefore, F copyFunction) const noexcept
    {
        unsigned int size = 0;
        for (PacketVector::const_iterator s(begin()); s != end(); ++s) {
            unsigned int start = s->s;
            unsigned int rem = s->e - start;
            if (likely((size + rem) <= ZT_BUF_MEM_SIZE)) {
                // Plain-copy the portion of this slice that falls before
                // simpleCopyBefore, then hand the remainder to copyFunction.
                if (size < simpleCopyBefore) {
                    unsigned int sc = simpleCopyBefore - size;
                    if (unlikely(sc > rem))
                        sc = rem;
                    Utils::copy(b.unsafeData + size, s->b->unsafeData + start, sc);
                    start += sc;
                    size += sc; // FIX: without this the copyFunction below would
                                // overwrite the plain-copied bytes and the
                                // returned size would be short by 'sc'
                    rem -= sc;
                }
                if (likely(rem > 0)) {
                    copyFunction(b.unsafeData + size, s->b->unsafeData + start, rem);
                    size += rem;
                }
            }
            else {
                return -1;
            }
        }
        return (int)size;
    }
};
/**
 * Create a new uninitialized buffer with undefined contents (use clear() to zero if needed)
 */
ZT_INLINE Buf() noexcept : __nextInPool(0), __refCount(0) {}

/**
 * Create a new buffer and copy data into it
 *
 * @param data Source bytes
 * @param len Number of bytes to copy (must be <= ZT_BUF_MEM_SIZE)
 */
ZT_INLINE Buf(const void *const data, const unsigned int len) noexcept : __nextInPool(0), __refCount(0) { Utils::copy(unsafeData, data, len); } // FIX: initialize __nextInPool(0) for consistency with the other constructors

ZT_INLINE Buf(const Buf &b2) noexcept : __nextInPool(0), __refCount(0) { Utils::copy<ZT_BUF_MEM_SIZE>(unsafeData, b2.unsafeData); }

ZT_INLINE Buf &operator=(const Buf &b2) noexcept
{
    if (this != &b2)
        Utils::copy<ZT_BUF_MEM_SIZE>(unsafeData, b2.unsafeData);
    return *this;
}
/**
 * Check whether a write iterator has passed the buffer's capacity
 *
 * Call this after a series of writes and before using the assembled data:
 * writes past the end fail silently and leave the iterator beyond capacity.
 *
 * @param ii Iterator to check
 * @return True if iterator has written past the size of the buffer
 */
static ZT_INLINE bool writeOverflow(const int &ii) noexcept { return (ii > ZT_BUF_MEM_SIZE); }
/**
 * Check whether a read iterator has passed the end of the meaningful data
 *
 * Call this after a series of reads, with 'size' being the number of bytes
 * of real data present in the buffer, before acting on anything read.
 *
 * @param ii Iterator to check
 * @param size Size of data that should be in buffer
 * @return True if iterator has read past the size of the data
 */
static ZT_INLINE bool readOverflow(const int &ii, const unsigned int size) noexcept { return (ii > (int)size); }
/**
 * Zero the buffer's entire capacity (not just the used portion)
 */
ZT_INLINE void clear() noexcept
{
    Utils::zero<ZT_BUF_MEM_SIZE>(unsafeData);
}
/**
 * Read a single byte and advance the iterator
 *
 * @param ii Index value-result parameter (incremented by 1)
 * @return Byte (undefined on overflow)
 */
ZT_INLINE uint8_t rI8(int &ii) const noexcept
{
    const unsigned int idx = ((unsigned int)ii) & ZT_BUF_MEM_MASK;
    ++ii;
    return unsafeData[idx];
}
/**
 * Read a big-endian 16-bit integer and advance the iterator
 *
 * @param ii Index value-result parameter (incremented by 2)
 * @return Integer in host byte order (undefined on overflow)
 */
ZT_INLINE uint16_t rI16(int &ii) const noexcept
{
    const uint8_t *const p = unsafeData + (((unsigned int)ii) & ZT_BUF_MEM_MASK);
    ii += 2;
#ifdef ZT_NO_UNALIGNED_ACCESS
    return (uint16_t)(((uint16_t)p[0] << 8U) | (uint16_t)p[1]);
#else
    return Utils::ntoh(*reinterpret_cast<const uint16_t *>(p));
#endif
}
/**
 * Read a big-endian 32-bit integer and advance the iterator
 *
 * @param ii Index value-result parameter (incremented by 4)
 * @return Integer in host byte order (undefined on overflow)
 */
ZT_INLINE uint32_t rI32(int &ii) const noexcept
{
    const uint8_t *const p = unsafeData + (((unsigned int)ii) & ZT_BUF_MEM_MASK);
    ii += 4;
#ifdef ZT_NO_UNALIGNED_ACCESS
    return (((uint32_t)p[0] << 24U) | ((uint32_t)p[1] << 16U) | ((uint32_t)p[2] << 8U) | (uint32_t)p[3]);
#else
    return Utils::ntoh(*reinterpret_cast<const uint32_t *>(p));
#endif
}
/**
 * Read a big-endian 64-bit integer and advance the iterator
 *
 * @param ii Index value-result parameter (incremented by 8)
 * @return Integer in host byte order (undefined on overflow)
 */
ZT_INLINE uint64_t rI64(int &ii) const noexcept
{
    const uint8_t *const p = unsafeData + (((unsigned int)ii) & ZT_BUF_MEM_MASK);
    ii += 8;
#ifdef ZT_NO_UNALIGNED_ACCESS
    uint64_t v = (uint64_t)p[0] << 56U;
    v |= (uint64_t)p[1] << 48U;
    v |= (uint64_t)p[2] << 40U;
    v |= (uint64_t)p[3] << 32U;
    v |= (uint64_t)p[4] << 24U;
    v |= (uint64_t)p[5] << 16U;
    v |= (uint64_t)p[6] << 8U;
    v |= (uint64_t)p[7];
    return v;
#else
    return Utils::ntoh(*reinterpret_cast<const uint64_t *>(p));
#endif
}
/**
 * Read an object supporting the marshal/unmarshal interface
 *
 * If the return value is negative the object's state is undefined. A return value of
 * zero typically also indicates a problem, though this may depend on the object type.
 *
 * Since objects may be invalid even if there is no overflow, always check the
 * return value and discard invalid packets accordingly.
 *
 * @tparam T Object type
 * @param ii Index value-result parameter (incremented by object's size in bytes)
 * @param obj Object to read
 * @return Bytes read or a negative value on unmarshal error (passed from object) or overflow
 */
template <typename T> ZT_INLINE int rO(int &ii, T &obj) const noexcept
{
    if (unlikely(ii >= ZT_BUF_MEM_SIZE))
        return -1;
    const int ms = obj.unmarshal(unsafeData + ii, ZT_BUF_MEM_SIZE - ii);
    if (ms > 0)
        ii += ms;
    return ms;
}
/**
 * Read a C-style string from the buffer, making a copy and advancing the iterator
 *
 * Use this if the buffer's memory may get changed between reading and processing
 * what is read.
 *
 * @param ii Index value-result parameter (incremented by length of string)
 * @param buf Buffer to receive string
 * @param bufSize Capacity of buffer in bytes
 * @return Pointer to buf or NULL on overflow or error
 */
ZT_INLINE char *rS(int &ii, char *const buf, const unsigned int bufSize) const noexcept
{
    const int start = ii;
    const char *const src = (const char *)(unsafeData + start);
    for (;;) {
        if (ii >= ZT_BUF_MEM_SIZE)
            return nullptr; // unterminated string: overflow
        if (unsafeData[ii++] == 0)
            break;
    }
    const int strLen = ii - start; // includes the terminating zero
    if (unlikely((unsigned int)strLen > bufSize))
        return nullptr;
    Utils::copy(buf, src, strLen);
    return buf;
}
/**
 * Obtain a pointer to a C-style string in the buffer without copying and advance the iterator
 *
 * The iterator is advanced even on failure so that readOverflow() will report
 * the overflow. As with other reads, the string's contents are undefined if
 * readOverflow() returns true.
 *
 * This version avoids a copy and so is faster if the buffer won't be modified
 * between reading and processing.
 *
 * @param ii Index value-result parameter (incremented by length of string)
 * @return Pointer to null-terminated C-style string or NULL on overflow or error
 */
ZT_INLINE const char *rSnc(int &ii) const noexcept
{
    const char *const str = (const char *)(unsafeData + ii);
    bool terminated = false;
    while (ii < ZT_BUF_MEM_SIZE) {
        if (unsafeData[ii++] == 0) {
            terminated = true;
            break;
        }
    }
    return terminated ? str : nullptr;
}
/**
 * Read a byte array from the buffer, making a copy and advancing the iterator
 *
 * Use this if the buffer's memory may get changed between reading and processing
 * what is read.
 *
 * @param ii Index value-result parameter (incremented by len)
 * @param bytes Buffer to contain data to read
 * @param len Length of buffer
 * @return Pointer to data or NULL on overflow or error
 */
ZT_INLINE uint8_t *rB(int &ii, void *const bytes, const unsigned int len) const noexcept
{
    // FIX: save the start index before advancing. The original advanced 'ii'
    // first and then copied from unsafeData + ii, i.e. from the END of the
    // field (and potentially past the buffer) instead of its start. Compare
    // wB(), which saves the start index before advancing.
    const int s = ii;
    if (likely((ii += (int)len) <= ZT_BUF_MEM_SIZE)) {
        Utils::copy(bytes, unsafeData + s, len);
        return reinterpret_cast<uint8_t *>(bytes);
    }
    return nullptr;
}
/**
 * Obtain a pointer to a field in the buffer without copying and advance the iterator
 *
 * The iterator is advanced even on failure so that readOverflow() will report
 * the overflow.
 *
 * This version avoids a copy and so is faster if the buffer won't be modified
 * between reading and processing.
 *
 * @param ii Index value-result parameter (incremented by len)
 * @param len Length of data field to obtain a pointer to
 * @return Pointer to field or NULL on overflow
 */
ZT_INLINE const uint8_t *rBnc(int &ii, unsigned int len) const noexcept
{
    const uint8_t *const field = unsafeData + ii;
    ii += (int)len;
    return (ii <= ZT_BUF_MEM_SIZE) ? field : nullptr;
}
/**
 * Load a byte at a static index that is compile time checked against the buffer size
 *
 * @tparam I Static index
 * @return Value
 */
template <unsigned int I> ZT_INLINE uint8_t lI8() const noexcept
{
    static_assert(I < ZT_BUF_MEM_SIZE, "overflow");
    const uint8_t v = unsafeData[I];
    return v;
}
/**
 * Load a big-endian 16-bit value at an index that is compile time checked against the maximum buffer size
 *
 * @tparam I Static index
 * @return Value in host byte order
 */
template <unsigned int I> ZT_INLINE uint16_t lI16() const noexcept
{
    // FIX: return type was uint8_t, silently truncating the 16-bit result to
    // its low byte (the sibling lI16(int) correctly returns uint16_t).
    static_assert((I + 1) < ZT_BUF_MEM_SIZE, "overflow");
#ifdef ZT_NO_UNALIGNED_ACCESS
    return (((uint16_t)unsafeData[I] << 8U) | (uint16_t)unsafeData[I + 1]);
#else
    return Utils::ntoh(*reinterpret_cast<const uint16_t *>(unsafeData + I));
#endif
}
/**
 * Load a big-endian 32-bit value at an index that is compile time checked against the maximum buffer size
 *
 * @tparam I Static index
 * @return Value in host byte order
 */
template <unsigned int I> ZT_INLINE uint32_t lI32() const noexcept
{
    // FIX: return type was uint8_t, silently truncating the 32-bit result to
    // its low byte (the sibling lI32(int) correctly returns uint32_t).
    static_assert((I + 3) < ZT_BUF_MEM_SIZE, "overflow");
#ifdef ZT_NO_UNALIGNED_ACCESS
    return (((uint32_t)unsafeData[I] << 24U) | ((uint32_t)unsafeData[I + 1] << 16U) | ((uint32_t)unsafeData[I + 2] << 8U) | (uint32_t)unsafeData[I + 3]);
#else
    return Utils::ntoh(*reinterpret_cast<const uint32_t *>(unsafeData + I));
#endif
}
/**
 * Load a big-endian 64-bit value at an index that is compile time checked against the maximum buffer size
 *
 * @tparam I Static index
 * @return Value in host byte order
 */
template <unsigned int I> ZT_INLINE uint64_t lI64() const noexcept
{
    // FIX: return type was uint8_t, silently truncating the 64-bit result to
    // its low byte.
    static_assert((I + 7) < ZT_BUF_MEM_SIZE, "overflow");
#ifdef ZT_NO_UNALIGNED_ACCESS
    return (((uint64_t)unsafeData[I] << 56U) | ((uint64_t)unsafeData[I + 1] << 48U) | ((uint64_t)unsafeData[I + 2] << 40U) | ((uint64_t)unsafeData[I + 3] << 32U) | ((uint64_t)unsafeData[I + 4] << 24U) | ((uint64_t)unsafeData[I + 5] << 16U) | ((uint64_t)unsafeData[I + 6] << 8U) | (uint64_t)unsafeData[I + 7]);
#else
    return Utils::ntoh(*reinterpret_cast<const uint64_t *>(unsafeData + I));
#endif
}
/**
 * Load a byte at an index without advancing the index
 *
 * Unlike the rI??() methods this does not increment ii, so it will not cause
 * readOverflow() to report anything; the index is only soft bounds masked.
 */
ZT_INLINE uint8_t lI8(const int ii) const noexcept
{
    return unsafeData[((unsigned int)ii) & ZT_BUF_MEM_MASK];
}
/**
 * Load a big-endian 16-bit value at an index without advancing the index
 *
 * Unlike the rI??() methods this does not increment ii, so it will not cause
 * readOverflow() to report anything; the index is only soft bounds masked.
 */
ZT_INLINE uint16_t lI16(const int ii) const noexcept
{
    const uint8_t *const p = unsafeData + (((unsigned int)ii) & ZT_BUF_MEM_MASK);
#ifdef ZT_NO_UNALIGNED_ACCESS
    return (uint16_t)(((uint16_t)p[0] << 8U) | (uint16_t)p[1]);
#else
    return Utils::ntoh(*reinterpret_cast<const uint16_t *>(p));
#endif
}
/**
 * Load a big-endian 32-bit value at an index without advancing the index
 *
 * Unlike the rI??() methods this does not increment ii, so it will not cause
 * readOverflow() to report anything; the index is only soft bounds masked.
 */
ZT_INLINE uint32_t lI32(const int ii) const noexcept
{
    const uint8_t *const p = unsafeData + (((unsigned int)ii) & ZT_BUF_MEM_MASK);
#ifdef ZT_NO_UNALIGNED_ACCESS
    return (((uint32_t)p[0] << 24U) | ((uint32_t)p[1] << 16U) | ((uint32_t)p[2] << 8U) | (uint32_t)p[3]);
#else
    return Utils::ntoh(*reinterpret_cast<const uint32_t *>(p));
#endif
}
/**
 * Load a big-endian 64-bit value at an index without advancing the index
 *
 * Unlike the rI??() methods this does not increment ii, so it will not cause
 * readOverflow() to report anything; the index is only soft bounds masked.
 */
ZT_INLINE uint64_t lI64(const int ii) const noexcept
{
    // FIX: return type was uint8_t, silently truncating the assembled 64-bit
    // value to its low byte (lI16(int)/lI32(int) correctly return wide types).
    const unsigned int s = (unsigned int)ii & ZT_BUF_MEM_MASK;
#ifdef ZT_NO_UNALIGNED_ACCESS
    return (((uint64_t)unsafeData[s] << 56U) | ((uint64_t)unsafeData[s + 1] << 48U) | ((uint64_t)unsafeData[s + 2] << 40U) | ((uint64_t)unsafeData[s + 3] << 32U) | ((uint64_t)unsafeData[s + 4] << 24U) | ((uint64_t)unsafeData[s + 5] << 16U) | ((uint64_t)unsafeData[s + 6] << 8U) | (uint64_t)unsafeData[s + 7]);
#else
    return Utils::ntoh(*reinterpret_cast<const uint64_t *>(unsafeData + s));
#endif
}
/**
 * Write a single byte and advance the iterator
 *
 * @param ii Index value-result parameter (incremented by 1)
 * @param n Byte
 */
ZT_INLINE void wI8(int &ii, const uint8_t n) noexcept
{
    const unsigned int idx = ((unsigned int)ii) & ZT_BUF_MEM_MASK;
    ++ii;
    unsafeData[idx] = n;
}
/**
 * Write a 16-bit integer in big-endian byte order and advance the iterator
 *
 * @param ii Index value-result parameter (incremented by 2)
 * @param n Integer
 */
ZT_INLINE void wI16(int &ii, const uint16_t n) noexcept
{
    uint8_t *const p = unsafeData + (((unsigned int)ii) & ZT_BUF_MEM_MASK);
    ii += 2;
#ifdef ZT_NO_UNALIGNED_ACCESS
    p[0] = (uint8_t)(n >> 8U);
    p[1] = (uint8_t)n;
#else
    *reinterpret_cast<uint16_t *>(p) = Utils::hton(n);
#endif
}
/**
 * Write a 32-bit integer in big-endian byte order and advance the iterator
 *
 * @param ii Index value-result parameter (incremented by 4)
 * @param n Integer
 */
ZT_INLINE void wI32(int &ii, const uint32_t n) noexcept
{
    uint8_t *const p = unsafeData + (((unsigned int)ii) & ZT_BUF_MEM_MASK);
    ii += 4;
#ifdef ZT_NO_UNALIGNED_ACCESS
    p[0] = (uint8_t)(n >> 24U);
    p[1] = (uint8_t)(n >> 16U);
    p[2] = (uint8_t)(n >> 8U);
    p[3] = (uint8_t)n;
#else
    *reinterpret_cast<uint32_t *>(p) = Utils::hton(n);
#endif
}
/**
 * Write a 64-bit integer in big-endian byte order and advance the iterator
 *
 * @param ii Index value-result parameter (incremented by 8)
 * @param n Integer
 */
ZT_INLINE void wI64(int &ii, const uint64_t n) noexcept
{
    uint8_t *const p = unsafeData + (((unsigned int)ii) & ZT_BUF_MEM_MASK);
    ii += 8;
#ifdef ZT_NO_UNALIGNED_ACCESS
    p[0] = (uint8_t)(n >> 56U);
    p[1] = (uint8_t)(n >> 48U);
    p[2] = (uint8_t)(n >> 40U);
    p[3] = (uint8_t)(n >> 32U);
    p[4] = (uint8_t)(n >> 24U);
    p[5] = (uint8_t)(n >> 16U);
    p[6] = (uint8_t)(n >> 8U);
    p[7] = (uint8_t)n;
#else
    *reinterpret_cast<uint64_t *>(p) = Utils::hton(n);
#endif
}
/**
 * Write an object implementing the marshal interface
 *
 * @tparam T Object type
 * @param ii Index value-result parameter (incremented by size of object)
 * @param t Object to write
 */
template <typename T> ZT_INLINE void wO(int &ii, T &t) noexcept
{
    const int start = ii;
    if (unlikely((start + T::marshalSizeMax()) > ZT_BUF_MEM_SIZE)) {
        // Advance past capacity even though nothing was written so that
        // writeOverflow() will detect the failure.
        ii += T::marshalSizeMax();
        return;
    }
    const int ms = t.marshal(unsafeData + start);
    if (ms > 0)
        ii += ms;
}
/**
* Write a C-style null-terminated string (including the trailing zero)
*
* @param ii Index value-result parameter (incremented by length of string)
* @param s String to write (writes an empty string if this is NULL)
*/
ZT_INLINE void wS(int &ii, const char *s) noexcept
{
    // A NULL pointer is treated as an empty string: write just the
    // terminating zero byte.
    if (!s) {
        wI8(ii, 0);
        return;
    }
    // Copy every byte of the string including the trailing NUL.
    for (;;) {
        const char c = *(s++);
        wI8(ii, (uint8_t)c);
        if (!c)
            break;
    }
}
/**
* Write a byte array
*
* @param ii Index value-result parameter (incremented by len)
* @param bytes Bytes to write
* @param len Size of data in bytes
*/
ZT_INLINE void wB(int &ii, const void *const bytes, const unsigned int len) noexcept
{
    // Remember the start offset, advance the cursor, and copy only if the
    // whole write stays inside the buffer. On overflow the cursor is still
    // advanced so the caller's final bounds check catches it.
    const int at = ii;
    ii += (int)len;
    if (likely(ii <= ZT_BUF_MEM_SIZE))
        Utils::copy(unsafeData + at, bytes, len);
}
/**
* Write zeroes
*
* @param ii Index value-result parameter (incremented by len)
* @param len Number of zero bytes to write
*/
ZT_INLINE void wZ(int &ii, const unsigned int len) noexcept
{
    // Advance the cursor first; zero-fill only if the span fits. An
    // overflowing cursor is left past the end for the caller to detect.
    const int at = ii;
    ii += (int)len;
    if (likely(ii <= ZT_BUF_MEM_SIZE))
        Utils::zero(unsafeData + at, len);
}
/**
* Write secure random bytes
*
* @param ii Index value-result parameter (incremented by len)
* @param len Number of random bytes to write
*/
ZT_INLINE void wR(int &ii, const unsigned int len) noexcept
{
    // Advance the cursor first; fill with secure random bytes only if the
    // span fits. An overflowing cursor is left past the end for the caller
    // to detect.
    const int at = ii;
    ii += (int)len;
    if (likely(ii <= ZT_BUF_MEM_SIZE))
        Utils::getSecureRandom(unsafeData + at, len);
}
/**
* Store a byte without advancing the index
*/
ZT_INLINE void sI8(const int ii, const uint8_t n) noexcept { unsafeData[(unsigned int)ii & ZT_BUF_MEM_MASK] = n; } // masked store; cursor is NOT advanced
/**
* Store an integer without advancing the index
*/
ZT_INLINE void sI16(const int ii, const uint16_t n) noexcept
{
    // Store a 16-bit big-endian value at ii; the index is not advanced.
    const unsigned int at = ((unsigned int)ii) & ZT_BUF_MEM_MASK;
#ifdef ZT_NO_UNALIGNED_ACCESS
    unsafeData[at] = (uint8_t)(n >> 8U);
    unsafeData[at + 1] = (uint8_t)n;
#else
    *reinterpret_cast<uint16_t *>(unsafeData + at) = Utils::hton(n);
#endif
}
/**
* Store an integer without advancing the index
*/
ZT_INLINE void sI32(const int ii, const uint32_t n) noexcept
{
    // Store a 32-bit big-endian value at ii; the index is not advanced.
    const unsigned int at = ((unsigned int)ii) & ZT_BUF_MEM_MASK;
#ifdef ZT_NO_UNALIGNED_ACCESS
    for (unsigned int k = 0; k < 4; ++k)
        unsafeData[at + k] = (uint8_t)(n >> (24U - (k * 8U)));
#else
    *reinterpret_cast<uint32_t *>(unsafeData + at) = Utils::hton(n);
#endif
}
/**
* Store an integer without advancing the index
*/
ZT_INLINE void sI64(const int ii, const uint64_t n) noexcept
{
    // Store a 64-bit big-endian value at ii; the index is not advanced.
    const unsigned int at = ((unsigned int)ii) & ZT_BUF_MEM_MASK;
#ifdef ZT_NO_UNALIGNED_ACCESS
    for (unsigned int k = 0; k < 8; ++k)
        unsafeData[at + k] = (uint8_t)(n >> (56U - (k * 8U)));
#else
    *reinterpret_cast<uint64_t *>(unsafeData + at) = Utils::hton(n);
#endif
}
/**
* @return Capacity of this buffer (usable size of data.bytes)
*/
static constexpr unsigned int capacity() noexcept { return ZT_BUF_MEM_SIZE; } // compile-time constant: usable bytes in unsafeData
private:
volatile uintptr_t __nextInPool;
std::atomic<int> __refCount;
};
} // namespace ZeroTier
#endif

File diff suppressed because it is too large Load diff

View file

@ -1,129 +0,0 @@
/*
* Copyright (c)2013-2021 ZeroTier, Inc.
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
*/
/****/
/*
* The code in C25519.cpp is in the public domain rather than being under
* ZeroTier's license. Other than ZeroTier shims it contains public domain
* C25519/Ed25519 code by D. J. Bernstein and Matthew Dempsky.
*/
#ifndef ZT_C25519_HPP
#define ZT_C25519_HPP
#include "Constants.hpp"
#include "Utils.hpp"
namespace ZeroTier {
#define ZT_C25519_ECDH_PUBLIC_KEY_SIZE 32
#define ZT_C25519_ECDH_PRIVATE_KEY_SIZE 32
#define ZT_C25519_COMBINED_PUBLIC_KEY_SIZE 64
#define ZT_C25519_COMBINED_PRIVATE_KEY_SIZE 64
#define ZT_C25519_SIGNATURE_LEN 96
#define ZT_C25519_ECDH_SHARED_SECRET_SIZE 32
/**
* A combined Curve25519 ECDH and Ed25519 signature engine
*/
class C25519 {
  public:
    /**
     * Generate a set of two 25519 keys: a C25519 ECDH key pair and an Ed25519 EDDSA key pair.
     *
     * @param pub Buffer to receive the 64-byte combined public key (ECDH | Ed25519)
     * @param priv Buffer to receive the 64-byte combined private key (ECDH | Ed25519)
     */
    static void generateCombined(uint8_t pub[ZT_C25519_COMBINED_PUBLIC_KEY_SIZE], uint8_t priv[ZT_C25519_COMBINED_PRIVATE_KEY_SIZE]);

    /**
     * Generate a C25519 ECDH key pair only.
     */
    static void generateC25519(uint8_t pub[ZT_C25519_ECDH_PUBLIC_KEY_SIZE], uint8_t priv[ZT_C25519_ECDH_PRIVATE_KEY_SIZE]);

    /**
     * Generate a key pair satisfying a condition
     *
     * This begins with a random keypair from a random secret key and then
     * iteratively increments the random secret until cond(kp) returns true.
     * This is used to compute key pairs in which the public key, its hash
     * or some other aspect of it satisfies some condition, such as for a
     * hashcash criteria.
     *
     * @param cond Condition function or function object
     * @return Key pair where cond(kp) returns true
     * @tparam F Type of 'cond'
     */
    template <typename F> static ZT_INLINE void generateSatisfying(F cond, uint8_t pub[ZT_C25519_COMBINED_PUBLIC_KEY_SIZE], uint8_t priv[ZT_C25519_COMBINED_PRIVATE_KEY_SIZE])
    {
        Utils::getSecureRandom(priv, ZT_C25519_COMBINED_PRIVATE_KEY_SIZE);
        s_calcPubED(pub, priv); // do Ed25519 key -- bytes 32-63 of pub and priv
        do {
            // Perturb bytes 8-23 of the secret key via load/modify/store. The
            // previous code cast priv (uint8_t*) to uint64_t* and mutated it in
            // place, which is undefined behavior (alignment + strict aliasing)
            // on some platforms; Utils::copy performs the same native-endian
            // word update without the aliasing cast.
            uint64_t w1, w2;
            Utils::copy<8>(&w1, priv + 8);
            Utils::copy<8>(&w2, priv + 16);
            ++w1;
            --w2;
            Utils::copy<8>(priv + 8, &w1);
            Utils::copy<8>(priv + 16, &w2);
            s_calcPubDH(pub, priv); // keep regenerating bytes 0-31 until satisfied
        } while (!cond(pub));
    }

    /**
     * Perform C25519 ECC key agreement
     *
     * Actual key bytes are generated from one or more SHA-512 digests of
     * the raw result of key agreement.
     *
     * @param mine My private key
     * @param their Their public key
     * @param rawkey Buffer to receive raw (not hashed) agreed upon key
     */
    static void agree(const uint8_t mine[ZT_C25519_ECDH_PRIVATE_KEY_SIZE], const uint8_t their[ZT_C25519_ECDH_PUBLIC_KEY_SIZE], uint8_t rawkey[ZT_C25519_ECDH_SHARED_SECRET_SIZE]);

    /**
     * Sign a message with a sender's key pair
     *
     * LEGACY: ZeroTier's ed25519 signatures contain an extra 32 bytes which are the first
     * 32 bytes of SHA512(msg). These exist because an early version of the ZeroTier multicast
     * algorithm did a lot of signature verification and we wanted a way to skip the more
     * expensive ed25519 verification if the signature was obviously wrong.
     *
     * This verify() function will accept a 64 or 96 bit signature, checking the last 32
     * bytes only if present.
     *
     * @param myPrivate My private key
     * @param myPublic My public key
     * @param msg Message to sign
     * @param len Length of message in bytes
     * @param signature Buffer to fill with signature -- MUST be 96 bytes in length
     */
    static void sign(const uint8_t myPrivate[ZT_C25519_COMBINED_PRIVATE_KEY_SIZE], const uint8_t myPublic[ZT_C25519_COMBINED_PUBLIC_KEY_SIZE], const void *msg, unsigned int len, void *signature);

    /**
     * Verify a message's signature
     *
     * @param their Public key to verify against
     * @param msg Message to verify signature integrity against
     * @param len Length of message in bytes
     * @param signature Signature bytes
     * @param siglen Length of signature in bytes
     * @return True if signature is valid and the message is authentic and unmodified
     */
    static bool verify(const uint8_t their[ZT_C25519_COMBINED_PUBLIC_KEY_SIZE], const void *msg, unsigned int len, const void *signature, unsigned int siglen);

  private:
    // derive first 32 bytes of kp.pub from first 32 bytes of kp.priv
    // this is the ECDH key
    static void s_calcPubDH(uint8_t *pub, const uint8_t *priv);
    // derive 2nd 32 bytes of kp.pub from 2nd 32 bytes of kp.priv
    // this is the Ed25519 sign/verify key
    static void s_calcPubED(uint8_t *pub, const uint8_t *priv);
};
} // namespace ZeroTier
#endif

View file

@ -1,896 +0,0 @@
/*
* Copyright (c)2013-2021 ZeroTier, Inc.
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
*/
/****/
#include "CallContext.hpp"
#include "Certificate.hpp"
#include "Constants.hpp"
#include "ECC384.hpp"
#include "Identity.hpp"
#include "InetAddress.hpp"
#include "Locator.hpp"
#include "Node.hpp"
#include "VL1.hpp"
#include "VL2.hpp"
extern "C" {
/********************************************************************************************************************/
// These macros make the idiom of passing buffers to outside code via the API work properly even
// if the first address of Buf does not overlap with its data field, since the C++ standard does
// not absolutely guarantee this.
#define ZT_PTRTOBUF(p) ((ZeroTier::Buf *)(((uintptr_t)(p)) - ((uintptr_t) & (((ZeroTier::Buf *)0)->unsafeData[0]))))
#define ZT_BUFTOPTR(b) ((void *)(&((b)->unsafeData[0])))
// Hand a raw Buf to external code. Ownership transfers to the caller, who
// must return it via ZT_freeBuffer() or by passing it into a processX() call.
ZT_MAYBE_UNUSED void *ZT_getBuffer()
{
// When external code requests a Buf, grab one from the pool (or freshly allocated)
// and return it with its reference count left at zero. It's the responsibility of
// external code to bring it back via freeBuffer() or one of the processX() calls.
// When this occurs it's either sent back to the pool with Buf's delete operator or
// wrapped in a SharedPtr<> to be passed into the core.
try {
return ZT_BUFTOPTR(new ZeroTier::Buf());
}
catch (...) {
return nullptr; // can only happen on out of memory condition
}
}
// Return a buffer obtained from ZT_getBuffer(); Buf's delete operator pools it.
ZT_MAYBE_UNUSED void ZT_freeBuffer(void *b)
{
if (b)
delete ZT_PTRTOBUF(b);
}
// Every heap-allocated query result starts with a free function pointer so a
// single ZT_freeQueryResult() can release any of them.
struct p_queryResultBase {
void (*freeFunction)(const void *);
};
ZT_MAYBE_UNUSED void ZT_freeQueryResult(const void *qr)
{
if ((qr) && (reinterpret_cast<const p_queryResultBase *>(qr)->freeFunction))
reinterpret_cast<const p_queryResultBase *>(qr)->freeFunction(qr);
}
// Report compiled-in version components; any output pointer may be NULL.
ZT_MAYBE_UNUSED void ZT_version(int *major, int *minor, int *revision, int *build)
{
if (major)
*major = ZEROTIER_VERSION_MAJOR;
if (minor)
*minor = ZEROTIER_VERSION_MINOR;
if (revision)
*revision = ZEROTIER_VERSION_REVISION;
if (build)
*build = ZEROTIER_VERSION_BUILD;
}
/********************************************************************************************************************/
// Construct a Node; on success *node receives the handle, on failure it is NULL
// and an error code describes why.
ZT_MAYBE_UNUSED enum ZT_ResultCode ZT_Node_new(ZT_Node **node, int64_t clock, int64_t ticks, void *tptr, void *uptr, const struct ZT_Node_Callbacks *callbacks)
{
*node = nullptr;
try {
ZeroTier::CallContext cc(clock, ticks, tptr);
*node = reinterpret_cast<ZT_Node *>(new ZeroTier::Node(uptr, callbacks, cc));
return ZT_RESULT_OK;
}
catch (std::bad_alloc &exc) {
return ZT_RESULT_FATAL_ERROR_OUT_OF_MEMORY;
}
catch (std::runtime_error &exc) {
return ZT_RESULT_FATAL_ERROR_DATA_STORE_FAILED;
}
catch (...) {
return ZT_RESULT_ERROR_INTERNAL;
}
}
// Shut down and destroy a Node; all exceptions are swallowed since this is a
// C-facing destructor path.
ZT_MAYBE_UNUSED void ZT_Node_delete(ZT_Node *node, int64_t clock, int64_t ticks, void *tptr)
{
try {
ZeroTier::CallContext cc(clock, ticks, tptr);
reinterpret_cast<ZeroTier::Node *>(node)->shutdown(cc);
delete (reinterpret_cast<ZeroTier::Node *>(node));
}
catch (...) {
}
}
// Feed one received wire packet into VL1. If isZtBuffer the data pointer came
// from ZT_getBuffer() and ownership returns to the core here.
ZT_MAYBE_UNUSED enum ZT_ResultCode ZT_Node_processWirePacket(ZT_Node *node, int64_t clock, int64_t ticks, void *tptr, int64_t localSocket, const ZT_InetAddress *remoteAddress, const void *packetData, unsigned int packetLength, int isZtBuffer, volatile int64_t *)
{
try {
ZeroTier::CallContext cc(clock, ticks, tptr);
ZeroTier::SharedPtr<ZeroTier::Buf> buf((isZtBuffer) ? ZT_PTRTOBUF(packetData) : new ZeroTier::Buf(packetData, packetLength & ZT_BUF_MEM_MASK));
reinterpret_cast<ZeroTier::Node *>(node)->context().vl1->onRemotePacket(cc, localSocket, *ZeroTier::asInetAddress(remoteAddress), buf, packetLength);
}
catch (std::bad_alloc &exc) {
return ZT_RESULT_FATAL_ERROR_OUT_OF_MEMORY;
}
catch (...) {
// "OK" since invalid packets are simply dropped, but the system is still up.
// We should never make it here, but if we did that would be the interpretation.
}
return ZT_RESULT_OK;
}
// Feed one locally-originated Ethernet frame into VL2 for the given network.
ZT_MAYBE_UNUSED enum ZT_ResultCode ZT_Node_processVirtualNetworkFrame(ZT_Node *node, int64_t clock, int64_t ticks, void *tptr, uint64_t nwid, uint64_t sourceMac, uint64_t destMac, unsigned int etherType, unsigned int vlanId, const void *frameData, unsigned int frameLength, int isZtBuffer, volatile int64_t *)
{
try {
ZeroTier::CallContext cc(clock, ticks, tptr);
const ZeroTier::Context &ctx = reinterpret_cast<ZeroTier::Node *>(node)->context();
ZeroTier::SharedPtr<ZeroTier::Network> network(ctx.networks->get(nwid));
if (likely(network)) {
ZeroTier::SharedPtr<ZeroTier::Buf> buf((isZtBuffer) ? ZT_PTRTOBUF(frameData) : new ZeroTier::Buf(frameData, frameLength & ZT_BUF_MEM_MASK));
ctx.vl2->onLocalEthernet(cc, network, ZeroTier::MAC(sourceMac), ZeroTier::MAC(destMac), etherType, vlanId, buf, frameLength);
return ZT_RESULT_OK;
}
else {
return ZT_RESULT_ERROR_NETWORK_NOT_FOUND;
}
}
catch (std::bad_alloc &exc) {
return ZT_RESULT_FATAL_ERROR_OUT_OF_MEMORY;
}
catch (...) {
return ZT_RESULT_ERROR_INTERNAL;
}
}
// Run periodic housekeeping; updates *nextBackgroundTaskDeadline.
ZT_MAYBE_UNUSED enum ZT_ResultCode ZT_Node_processBackgroundTasks(ZT_Node *node, int64_t clock, int64_t ticks, void *tptr, volatile int64_t *nextBackgroundTaskDeadline)
{
try {
ZeroTier::CallContext cc(clock, ticks, tptr);
return reinterpret_cast<ZeroTier::Node *>(node)->processBackgroundTasks(cc, nextBackgroundTaskDeadline);
}
catch (std::bad_alloc &exc) {
return ZT_RESULT_FATAL_ERROR_OUT_OF_MEMORY;
}
catch (...) {
return ZT_RESULT_ERROR_INTERNAL;
}
}
// Join a network, optionally pinning the expected controller fingerprint.
ZT_MAYBE_UNUSED enum ZT_ResultCode ZT_Node_join(ZT_Node *node, int64_t clock, int64_t ticks, void *tptr, void *uptr, uint64_t nwid, const ZT_Fingerprint *controllerFingerprint)
{
try {
ZeroTier::CallContext cc(clock, ticks, tptr);
return reinterpret_cast<ZeroTier::Node *>(node)->join(nwid, controllerFingerprint, uptr, cc);
}
catch (std::bad_alloc &exc) {
return ZT_RESULT_FATAL_ERROR_OUT_OF_MEMORY;
}
catch (...) {
return ZT_RESULT_ERROR_INTERNAL;
}
}
// Leave a network; if uptr is non-NULL it receives the network's user pointer.
ZT_MAYBE_UNUSED enum ZT_ResultCode ZT_Node_leave(ZT_Node *node, int64_t clock, int64_t ticks, void *tptr, void **uptr, uint64_t nwid)
{
try {
ZeroTier::CallContext cc(clock, ticks, tptr);
return reinterpret_cast<ZeroTier::Node *>(node)->leave(nwid, uptr, cc);
}
catch (std::bad_alloc &exc) {
return ZT_RESULT_FATAL_ERROR_OUT_OF_MEMORY;
}
catch (...) {
return ZT_RESULT_ERROR_INTERNAL;
}
}
// Subscribe to a multicast group (group MAC + additional distinguishing info).
ZT_MAYBE_UNUSED enum ZT_ResultCode ZT_Node_multicastSubscribe(ZT_Node *node, int64_t clock, int64_t ticks, void *tptr, uint64_t nwid, uint64_t multicastGroup, unsigned long multicastAdi)
{
try {
ZeroTier::CallContext cc(clock, ticks, tptr);
return reinterpret_cast<ZeroTier::Node *>(node)->multicastSubscribe(cc, nwid, multicastGroup, multicastAdi);
}
catch (std::bad_alloc &exc) {
return ZT_RESULT_FATAL_ERROR_OUT_OF_MEMORY;
}
catch (...) {
return ZT_RESULT_ERROR_INTERNAL;
}
}
// Unsubscribe from a multicast group.
ZT_MAYBE_UNUSED enum ZT_ResultCode ZT_Node_multicastUnsubscribe(ZT_Node *node, int64_t clock, int64_t ticks, void *tptr, uint64_t nwid, uint64_t multicastGroup, unsigned long multicastAdi)
{
try {
ZeroTier::CallContext cc(clock, ticks, tptr);
return reinterpret_cast<ZeroTier::Node *>(node)->multicastUnsubscribe(cc, nwid, multicastGroup, multicastAdi);
}
catch (std::bad_alloc &exc) {
return ZT_RESULT_FATAL_ERROR_OUT_OF_MEMORY;
}
catch (...) {
return ZT_RESULT_ERROR_INTERNAL;
}
}
// This node's 40-bit ZeroTier address as an integer.
ZT_MAYBE_UNUSED uint64_t ZT_Node_address(ZT_Node *node) { return reinterpret_cast<ZeroTier::Node *>(node)->context().identity.address().toInt(); }
// Borrowed pointer to this node's identity; owned by the Node, do not free.
ZT_MAYBE_UNUSED const ZT_Identity *ZT_Node_identity(ZT_Node *node) { return (const ZT_Identity *)(&(reinterpret_cast<ZeroTier::Node *>(node)->identity())); }
// Fill in a caller-supplied status structure.
ZT_MAYBE_UNUSED void ZT_Node_status(ZT_Node *node, int64_t, int64_t, void *, ZT_NodeStatus *status)
{
try {
reinterpret_cast<ZeroTier::Node *>(node)->status(status);
}
catch (...) {
}
}
// Heap-allocated peer list; release with ZT_freeQueryResult(). NULL on error.
ZT_MAYBE_UNUSED ZT_PeerList *ZT_Node_peers(ZT_Node *node, int64_t clock, int64_t ticks, void *tptr)
{
try {
ZeroTier::CallContext cc(clock, ticks, tptr);
return reinterpret_cast<ZeroTier::Node *>(node)->peers(cc);
}
catch (...) {
return (ZT_PeerList *)0;
}
}
// Heap-allocated config for one network; release with ZT_freeQueryResult().
ZT_MAYBE_UNUSED ZT_VirtualNetworkConfig *ZT_Node_networkConfig(ZT_Node *node, int64_t, int64_t, void *, uint64_t nwid)
{
try {
return reinterpret_cast<ZeroTier::Node *>(node)->networkConfig(nwid);
}
catch (...) {
return (ZT_VirtualNetworkConfig *)0;
}
}
// Heap-allocated list of joined networks; release with ZT_freeQueryResult().
ZT_MAYBE_UNUSED ZT_VirtualNetworkList *ZT_Node_networks(ZT_Node *node)
{
try {
return reinterpret_cast<ZeroTier::Node *>(node)->networks();
}
catch (...) {
return (ZT_VirtualNetworkList *)0;
}
}
// Replace the opaque user pointer associated with a joined network.
ZT_MAYBE_UNUSED void ZT_Node_setNetworkUserPtr(ZT_Node *node, uint64_t nwid, void *ptr)
{
try {
reinterpret_cast<ZeroTier::Node *>(node)->setNetworkUserPtr(nwid, ptr);
}
catch (...) {
}
}
// Tell the core which local interface addresses exist (for path selection).
ZT_MAYBE_UNUSED void ZT_Node_setInterfaceAddresses(ZT_Node *node, int64_t, int64_t, void *, const ZT_InterfaceAddress *addrs, unsigned int addrCount)
{
try {
reinterpret_cast<ZeroTier::Node *>(node)->setInterfaceAddresses(addrs, addrCount);
}
catch (...) {
}
}
// Add a certificate (decoded struct and/or raw encoding) to the trust store.
ZT_MAYBE_UNUSED enum ZT_CertificateError ZT_Node_addCertificate(ZT_Node *node, int64_t clock, int64_t ticks, void *tptr, unsigned int localTrust, const ZT_Certificate *cert, const void *certData, unsigned int certSize)
{
try {
ZeroTier::CallContext cc(clock, ticks, tptr);
return reinterpret_cast<ZeroTier::Node *>(node)->addCertificate(cc, localTrust, cert, certData, certSize);
}
catch (...) {
return ZT_CERTIFICATE_ERROR_INVALID_FORMAT;
}
}
// Remove a certificate from the trust store by serial number.
ZT_MAYBE_UNUSED enum ZT_ResultCode ZT_Node_deleteCertificate(ZT_Node *node, int64_t clock, int64_t ticks, void *tptr, const void *serialNo)
{
try {
ZeroTier::CallContext cc(clock, ticks, tptr);
return reinterpret_cast<ZeroTier::Node *>(node)->deleteCertificate(cc, serialNo);
}
catch (...) {
return ZT_RESULT_ERROR_INTERNAL;
}
}
// Heap-allocated certificate list; release with ZT_freeQueryResult().
ZT_MAYBE_UNUSED ZT_CertificateList *ZT_Node_listCertificates(ZT_Node *node, int64_t, int64_t, void *)
{
try {
return reinterpret_cast<ZeroTier::Node *>(node)->listCertificates();
}
catch (...) {
return nullptr;
}
}
// Send a user-defined message to another node; returns nonzero on success.
ZT_MAYBE_UNUSED int ZT_Node_sendUserMessage(ZT_Node *node, int64_t clock, int64_t ticks, void *tptr, uint64_t dest, uint64_t typeId, const void *data, unsigned int len)
{
try {
ZeroTier::CallContext cc(clock, ticks, tptr);
return reinterpret_cast<ZeroTier::Node *>(node)->sendUserMessage(cc, dest, typeId, data, len);
}
catch (...) {
return 0;
}
}
// Attach (or detach with NULL) a local network controller implementation.
ZT_MAYBE_UNUSED void ZT_Node_setController(ZT_Node *node, void *networkControllerInstance)
{
try {
reinterpret_cast<ZeroTier::Node *>(node)->setController(networkControllerInstance);
}
catch (...) {
}
}
/********************************************************************************************************************/
// Build and sign a locator from endpoints; returns NULL on bad args or if
// signing fails. Caller frees with ZT_Locator_delete().
ZT_MAYBE_UNUSED ZT_Locator *ZT_Locator_create(int64_t rev, const ZT_Endpoint *endpoints, const ZT_EndpointAttributes *, unsigned int endpointCount, const ZT_Identity *signer)
{
try {
if ((!endpoints) || (endpointCount == 0) || (!signer))
return nullptr;
ZeroTier::Locator *loc = new ZeroTier::Locator();
for (unsigned int i = 0; i < endpointCount; ++i)
loc->add(reinterpret_cast<const ZeroTier::Endpoint *>(endpoints)[i], ZeroTier::Locator::EndpointAttributes::DEFAULT);
if (!loc->sign(rev, *reinterpret_cast<const ZeroTier::Identity *>(signer))) {
delete loc;
return nullptr;
}
return reinterpret_cast<ZT_Locator *>(loc);
}
catch (...) {
return nullptr;
}
}
// Parse a locator from its string form; NULL on parse failure.
ZT_MAYBE_UNUSED ZT_Locator *ZT_Locator_fromString(const char *str)
{
try {
if (!str)
return nullptr;
ZeroTier::Locator *loc = new ZeroTier::Locator();
if (!loc->fromString(str)) {
delete loc;
return nullptr;
}
return reinterpret_cast<ZT_Locator *>(loc);
}
catch (...) {
return nullptr;
}
}
// Deserialize a locator from its binary form; NULL on failure.
ZT_MAYBE_UNUSED ZT_Locator *ZT_Locator_unmarshal(const void *data, unsigned int len)
{
try {
if ((!data) || (len == 0))
return nullptr;
ZeroTier::Locator *loc = new ZeroTier::Locator();
if (loc->unmarshal(reinterpret_cast<const uint8_t *>(data), (int)len) <= 0) {
delete loc;
return nullptr;
}
return reinterpret_cast<ZT_Locator *>(loc);
}
catch (...) {
return nullptr;
}
}
// Serialize to binary; buffer must hold ZT_LOCATOR_MARSHAL_SIZE_MAX bytes.
// Returns bytes written or -1.
ZT_MAYBE_UNUSED int ZT_Locator_marshal(const ZT_Locator *loc, void *buf, unsigned int bufSize)
{
if ((!loc) || (bufSize < ZT_LOCATOR_MARSHAL_SIZE_MAX))
return -1;
return reinterpret_cast<const ZeroTier::Locator *>(loc)->marshal(reinterpret_cast<uint8_t *>(buf), false);
}
// Render as a string; buffer must hold ZT_LOCATOR_STRING_SIZE_MAX bytes.
ZT_MAYBE_UNUSED char *ZT_Locator_toString(const ZT_Locator *loc, char *buf, int capacity)
{
if ((!loc) || (capacity < ZT_LOCATOR_STRING_SIZE_MAX))
return nullptr;
return reinterpret_cast<const ZeroTier::Locator *>(loc)->toString(buf);
}
// Locator revision number (0 for NULL).
ZT_MAYBE_UNUSED int64_t ZT_Locator_revision(const ZT_Locator *loc)
{
if (!loc)
return 0;
return reinterpret_cast<const ZeroTier::Locator *>(loc)->revision();
}
// Address of the identity that signed this locator (0 for NULL).
ZT_MAYBE_UNUSED uint64_t ZT_Locator_signer(const ZT_Locator *loc)
{
if (!loc)
return 0;
return reinterpret_cast<const ZeroTier::Locator *>(loc)->signer().toInt();
}
// Equality test; two NULLs compare equal.
ZT_MAYBE_UNUSED int ZT_Locator_equals(const ZT_Locator *a, const ZT_Locator *b)
{
if (a) {
if (b) {
if (*reinterpret_cast<const ZeroTier::Locator *>(a) == *reinterpret_cast<const ZeroTier::Locator *>(b))
return 1;
}
}
else if (!b) {
return 1;
}
return 0;
}
// Number of endpoints in the locator (0 for NULL).
ZT_MAYBE_UNUSED unsigned int ZT_Locator_endpointCount(const ZT_Locator *loc) { return (loc) ? (unsigned int)(reinterpret_cast<const ZeroTier::Locator *>(loc)->endpoints().size()) : 0; }
// Borrowed pointer to endpoint at index ep, or NULL if out of range.
ZT_MAYBE_UNUSED const ZT_Endpoint *ZT_Locator_endpoint(const ZT_Locator *loc, const unsigned int ep)
{
if (!loc)
return nullptr;
if (ep >= (unsigned int)(reinterpret_cast<const ZeroTier::Locator *>(loc)->endpoints().size()))
return nullptr;
return reinterpret_cast<const ZT_Endpoint *>(&(reinterpret_cast<const ZeroTier::Locator *>(loc)->endpoints()[ep]));
}
// Verify the locator's signature against a signer identity; 1 if valid.
ZT_MAYBE_UNUSED int ZT_Locator_verify(const ZT_Locator *loc, const ZT_Identity *signer)
{
if ((!loc) || (!signer))
return 0;
return reinterpret_cast<const ZeroTier::Locator *>(loc)->verify(*reinterpret_cast<const ZeroTier::Identity *>(signer)) ? 1 : 0;
}
// Free a locator returned by the create/parse functions above.
ZT_MAYBE_UNUSED void ZT_Locator_delete(const ZT_Locator *loc)
{
if (loc)
delete reinterpret_cast<const ZeroTier::Locator *>(loc);
}
/********************************************************************************************************************/
// Generate a new identity of the given type; NULL on bad type or failure.
// Caller frees with ZT_Identity_delete().
ZT_MAYBE_UNUSED ZT_Identity *ZT_Identity_new(enum ZT_IdentityType type)
{
if ((type != ZT_IDENTITY_TYPE_C25519) && (type != ZT_IDENTITY_TYPE_P384))
return nullptr;
try {
ZeroTier::Identity *const id = new ZeroTier::Identity();
id->generate((ZeroTier::Identity::Type)type);
return reinterpret_cast<ZT_Identity *>(id);
}
catch (...) {
return nullptr;
}
}
// Deep copy an identity; NULL on NULL input or failure.
ZT_MAYBE_UNUSED ZT_Identity *ZT_Identity_clone(const ZT_Identity *id)
{
if (id) {
try {
return reinterpret_cast<ZT_Identity *>(new ZeroTier::Identity(*reinterpret_cast<const ZeroTier::Identity *>(id)));
}
catch (...) {
return nullptr;
}
}
return nullptr;
}
// Parse an identity from its string form; NULL on parse failure.
ZT_MAYBE_UNUSED ZT_Identity *ZT_Identity_fromString(const char *idStr)
{
if (!idStr)
return nullptr;
try {
ZeroTier::Identity *const id = new ZeroTier::Identity();
if (!id->fromString(idStr)) {
delete id;
return nullptr;
}
return reinterpret_cast<ZT_Identity *>(id);
}
catch (...) {
return nullptr;
}
}
// Locally validate an identity's proof of work; 1 if valid.
ZT_MAYBE_UNUSED int ZT_Identity_validate(const ZT_Identity *id)
{
if (!id)
return 0;
return reinterpret_cast<const ZeroTier::Identity *>(id)->locallyValidate() ? 1 : 0;
}
// Sign data; returns signature length or 0. Buffer must be at least
// ZT_SIGNATURE_BUFFER_SIZE bytes.
ZT_MAYBE_UNUSED unsigned int ZT_Identity_sign(const ZT_Identity *id, const void *data, unsigned int len, void *signature, unsigned int signatureBufferLength)
{
if (!id)
return 0;
if (signatureBufferLength < ZT_SIGNATURE_BUFFER_SIZE)
return 0;
return reinterpret_cast<const ZeroTier::Identity *>(id)->sign(data, len, signature, signatureBufferLength);
}
// Verify a signature made by this identity; 1 if valid.
ZT_MAYBE_UNUSED int ZT_Identity_verify(const ZT_Identity *id, const void *data, unsigned int len, const void *signature, unsigned int sigLen)
{
if ((!id) || (!signature) || (!sigLen))
return 0;
return reinterpret_cast<const ZeroTier::Identity *>(id)->verify(data, len, signature, sigLen) ? 1 : 0;
}
// The identity's key type (0 for NULL input).
ZT_MAYBE_UNUSED enum ZT_IdentityType ZT_Identity_type(const ZT_Identity *id)
{
if (!id)
return (ZT_IdentityType)0;
return (enum ZT_IdentityType) reinterpret_cast<const ZeroTier::Identity *>(id)->type();
}
// Render the identity as a string, optionally including the private key.
// Buffer must hold ZT_IDENTITY_STRING_BUFFER_LENGTH bytes.
ZT_MAYBE_UNUSED char *ZT_Identity_toString(const ZT_Identity *id, char *buf, int capacity, int includePrivate)
{
if ((!id) || (!buf) || (capacity < ZT_IDENTITY_STRING_BUFFER_LENGTH))
return nullptr;
reinterpret_cast<const ZeroTier::Identity *>(id)->toString(includePrivate != 0, buf);
return buf;
}
// 1 if the identity carries its private key.
ZT_MAYBE_UNUSED int ZT_Identity_hasPrivate(const ZT_Identity *id)
{
if (!id)
return 0;
return reinterpret_cast<const ZeroTier::Identity *>(id)->hasPrivate() ? 1 : 0;
}
// The identity's 40-bit address (0 for NULL input).
ZT_MAYBE_UNUSED uint64_t ZT_Identity_address(const ZT_Identity *id)
{
if (!id)
return 0;
return reinterpret_cast<const ZeroTier::Identity *>(id)->address();
}
// Borrowed pointer to the identity's fingerprint; owned by the identity.
ZT_MAYBE_UNUSED const ZT_Fingerprint *ZT_Identity_fingerprint(const ZT_Identity *id)
{
if (!id)
return nullptr;
return &(reinterpret_cast<const ZeroTier::Identity *>(id)->fingerprint());
}
// Three-way compare (-1/0/1); NULL sorts before any identity.
ZT_MAYBE_UNUSED int ZT_Identity_compare(const ZT_Identity *a, const ZT_Identity *b)
{
if (a) {
if (b) {
if (*reinterpret_cast<const ZeroTier::Identity *>(a) < *reinterpret_cast<const ZeroTier::Identity *>(b)) {
return -1;
}
else if (*reinterpret_cast<const ZeroTier::Identity *>(b) < *reinterpret_cast<const ZeroTier::Identity *>(a)) {
return 1;
}
else {
return 0;
}
}
else {
return 1;
}
}
else if (b) {
return -1;
}
else {
return 0;
}
}
// Free an identity returned by the new/clone/parse functions above.
ZT_MAYBE_UNUSED void ZT_Identity_delete(const ZT_Identity *id)
{
if (id)
delete reinterpret_cast<const ZeroTier::Identity *>(id);
}
/********************************************************************************************************************/
// Generate a certificate key pair of the requested algorithm; key sizes are
// written through the size pointers.
ZT_MAYBE_UNUSED int ZT_Certificate_newKeyPair(const enum ZT_CertificatePublicKeyAlgorithm type, uint8_t publicKey[ZT_CERTIFICATE_MAX_PUBLIC_KEY_SIZE], int *const publicKeySize, uint8_t privateKey[ZT_CERTIFICATE_MAX_PRIVATE_KEY_SIZE], int *const privateKeySize)
{
try {
return ZeroTier::Certificate::newKeyPair(type, publicKey, publicKeySize, privateKey, privateKeySize) ? ZT_RESULT_OK : ZT_RESULT_ERROR_BAD_PARAMETER;
}
catch (...) {
return ZT_RESULT_FATAL_ERROR_INTERNAL;
}
}
// Create a certificate signing request. On entry *csrSize is the capacity of
// csr; on success it is updated to the encoded CSR size. uniqueIdPrivateKey
// may be NULL/0 to omit a unique ID. Returns a ZT_ResultCode.
ZT_MAYBE_UNUSED int ZT_Certificate_newCSR(const ZT_Certificate_Subject *subject, const void *const certificatePrivateKey, const int certificatePrivateKeySize, const void *const uniqueIdPrivateKey, const int uniqueIdPrivateKeySize, void *const csr, int *const csrSize)
{
    try {
        // FIX: also reject NULL csr/csrSize; the previous code dereferenced
        // *csrSize and wrote to csr without checking them.
        if ((!subject) || (!certificatePrivateKey) || (certificatePrivateKeySize <= 0) || (!csr) || (!csrSize))
            return ZT_RESULT_ERROR_BAD_PARAMETER;
        const ZeroTier::Vector<uint8_t> csrV(ZeroTier::Certificate::createCSR(*subject, certificatePrivateKey, (unsigned int)certificatePrivateKeySize, uniqueIdPrivateKey, (unsigned int)uniqueIdPrivateKeySize));
        if (csrV.empty() || ((int)csrV.size() > *csrSize))
            return ZT_RESULT_ERROR_BAD_PARAMETER;
        ZeroTier::Utils::copy(csr, csrV.data(), (unsigned int)csrV.size());
        *csrSize = (int)csrV.size();
        return ZT_RESULT_OK;
    }
    catch (...) {
        return ZT_RESULT_FATAL_ERROR_INTERNAL;
    }
}
// Sign a copy of cert with the issuer's key; returns a new certificate the
// caller must free with ZT_Certificate_delete(), or NULL on failure.
ZT_MAYBE_UNUSED ZT_Certificate *ZT_Certificate_sign(const ZT_Certificate *cert, const uint8_t issuer[ZT_CERTIFICATE_HASH_SIZE], const void *issuerPrivateKey, int issuerPrivateKeySize)
{
try {
ZeroTier::Certificate *const c = new ZeroTier::Certificate(*cert);
if (c->sign(issuer, issuerPrivateKey, issuerPrivateKeySize)) {
return c;
}
else {
delete c;
}
}
catch (...) {
}
return nullptr;
}
// Decode a binary certificate, optionally verifying it; on success
// *decodedCert must be freed with ZT_Certificate_delete().
ZT_MAYBE_UNUSED enum ZT_CertificateError ZT_Certificate_decode(const ZT_Certificate **decodedCert, const void *cert, int certSize, int verify)
{
try {
if ((!decodedCert) || (!cert) || (certSize <= 0))
return ZT_CERTIFICATE_ERROR_INVALID_FORMAT;
*decodedCert = nullptr;
ZeroTier::Certificate *const c = new ZeroTier::Certificate();
if (!c->decode(cert, certSize)) {
delete c;
return ZT_CERTIFICATE_ERROR_INVALID_FORMAT;
}
if (verify) {
// -1 clock skips time-window checks; signature chain is still verified.
const ZT_CertificateError err = c->verify(-1, true);
if (err != ZT_CERTIFICATE_ERROR_NONE) {
delete c;
return err;
}
}
*decodedCert = c;
return ZT_CERTIFICATE_ERROR_NONE;
}
catch (...) {
return ZT_CERTIFICATE_ERROR_INVALID_FORMAT;
}
}
// Encode a certificate to binary. On entry *encodedSize is the capacity of
// encoded; on success it is updated to the encoded size.
ZT_MAYBE_UNUSED int ZT_Certificate_encode(const ZT_Certificate *cert, void *encoded, int *encodedSize)
{
try {
if ((!cert) || (!encoded) || (!encodedSize))
return ZT_RESULT_ERROR_BAD_PARAMETER;
ZeroTier::Certificate c(*cert);
ZeroTier::Vector<uint8_t> enc(c.encode());
if ((int)enc.size() > *encodedSize)
return ZT_RESULT_ERROR_BAD_PARAMETER;
ZeroTier::Utils::copy(encoded, enc.data(), (unsigned int)enc.size());
*encodedSize = (int)enc.size();
return ZT_RESULT_OK;
}
catch (...) {
return ZT_RESULT_FATAL_ERROR_INTERNAL;
}
}
// Fully verify a certificate (signatures plus time window at 'clock').
ZT_MAYBE_UNUSED enum ZT_CertificateError ZT_Certificate_verify(const ZT_Certificate *cert, int64_t clock)
{
try {
if (!cert)
return ZT_CERTIFICATE_ERROR_INVALID_FORMAT;
return ZeroTier::Certificate(*cert).verify(clock, true);
}
catch (...) {
return ZT_CERTIFICATE_ERROR_INVALID_FORMAT;
}
}
// Deep copy a certificate; caller frees with ZT_Certificate_delete().
ZT_MAYBE_UNUSED const ZT_Certificate *ZT_Certificate_clone(const ZT_Certificate *cert)
{
try {
if (!cert)
return nullptr;
return (const ZT_Certificate *)(new ZeroTier::Certificate(*cert));
}
catch (...) {
return nullptr;
}
}
// Free a certificate returned by sign/decode/clone.
ZT_MAYBE_UNUSED void ZT_Certificate_delete(const ZT_Certificate *cert)
{
try {
if (cert)
delete (const ZeroTier::Certificate *)(cert);
}
catch (...) {
}
}
/********************************************************************************************************************/
// Render an endpoint as a string; buffer must hold ZT_ENDPOINT_STRING_SIZE_MAX.
ZT_MAYBE_UNUSED char *ZT_Endpoint_toString(const ZT_Endpoint *ep, char *buf, int capacity)
{
if ((!ep) || (!buf) || (capacity < ZT_ENDPOINT_STRING_SIZE_MAX))
return nullptr;
return reinterpret_cast<const ZeroTier::Endpoint *>(ep)->toString(buf);
}
// Parse an endpoint from a string into *ep; returns a ZT_ResultCode.
ZT_MAYBE_UNUSED int ZT_Endpoint_fromString(ZT_Endpoint *ep, const char *str)
{
if ((!ep) || (!str))
return ZT_RESULT_ERROR_BAD_PARAMETER;
return reinterpret_cast<ZeroTier::Endpoint *>(ep)->fromString(str) ? ZT_RESULT_OK : ZT_RESULT_ERROR_BAD_PARAMETER;
}
// Deserialize an endpoint from binary into *ep; 0 on success.
ZT_MAYBE_UNUSED int ZT_Endpoint_fromBytes(ZT_Endpoint *ep, const void *bytes, unsigned int len)
{
if ((!ep) || (!bytes) || (!len))
return ZT_RESULT_ERROR_BAD_PARAMETER;
return (reinterpret_cast<ZeroTier::Endpoint *>(ep)->unmarshal(reinterpret_cast<const uint8_t *>(bytes), (int)len) > 0) ? 0 : ZT_RESULT_ERROR_BAD_PARAMETER;
}
/********************************************************************************************************************/
// Render a fingerprint as a string; buffer must hold
// ZT_FINGERPRINT_STRING_SIZE_MAX bytes. Returns buf or NULL on bad arguments.
ZT_MAYBE_UNUSED char *ZT_Fingerprint_toString(const ZT_Fingerprint *fp, char *buf, int capacity)
{
    // FIX: null-check fp and buf (consistent with ZT_Endpoint_toString); the
    // previous code dereferenced fp unconditionally once capacity sufficed.
    if ((!fp) || (!buf) || (capacity < ZT_FINGERPRINT_STRING_SIZE_MAX))
        return nullptr;
    return reinterpret_cast<const ZeroTier::Fingerprint *>(fp)->toString(buf);
}
// Parse a fingerprint from a string; *fp is only written on success (returns 1).
ZT_MAYBE_UNUSED int ZT_Fingerprint_fromString(ZT_Fingerprint *fp, const char *s)
{
if ((!fp) || (!s))
return 0;
ZeroTier::Fingerprint f;
if (f.fromString(s)) {
*fp = f;
return 1;
}
return 0;
}
/********************************************************************************************************************/
// Zero out an InetAddress structure (no-op on NULL).
ZT_MAYBE_UNUSED void ZT_InetAddress_clear(ZT_InetAddress *ia)
{
if (likely(ia != nullptr))
ZeroTier::Utils::zero<sizeof(ZT_InetAddress)>(ia);
}
// Render as "ip/port"; on insufficient capacity or NULL ia an empty string is
// written. Always returns buf.
ZT_MAYBE_UNUSED char *ZT_InetAddress_toString(const ZT_InetAddress *ia, char *buf, unsigned int cap)
{
if (likely((cap > 0) && (buf != nullptr))) {
if (likely((ia != nullptr) && (cap >= ZT_INETADDRESS_STRING_SIZE_MAX))) {
reinterpret_cast<const ZeroTier::InetAddress *>(ia)->toString(buf);
}
else {
buf[0] = 0;
}
}
return buf;
}
// Parse from a string; nonzero on success.
ZT_MAYBE_UNUSED int ZT_InetAddress_fromString(ZT_InetAddress *ia, const char *str)
{
if (likely((ia != nullptr) && (str != nullptr))) {
return (int)reinterpret_cast<ZeroTier::InetAddress *>(ia)->fromString(str);
}
return 0;
}
// Copy from a struct sockaddr / sockaddr_storage.
ZT_MAYBE_UNUSED void ZT_InetAddress_set(ZT_InetAddress *ia, const void *saddr)
{
if (likely(ia != nullptr))
(*reinterpret_cast<ZeroTier::InetAddress *>(ia)) = reinterpret_cast<const struct sockaddr *>(saddr);
}
// Set from raw IP bytes (4 for IPv4, 16 for IPv6) plus a port.
ZT_MAYBE_UNUSED void ZT_InetAddress_setIpBytes(ZT_InetAddress *ia, const void *ipBytes, unsigned int ipLen, unsigned int port)
{
if (likely(ia != nullptr))
reinterpret_cast<ZeroTier::InetAddress *>(ia)->set(ipBytes, ipLen, port);
}
// Change only the port.
ZT_MAYBE_UNUSED void ZT_InetAddress_setPort(ZT_InetAddress *ia, unsigned int port)
{
if (likely(ia != nullptr))
reinterpret_cast<ZeroTier::InetAddress *>(ia)->setPort(port);
}
// The port (0 for NULL input).
ZT_MAYBE_UNUSED unsigned int ZT_InetAddress_port(const ZT_InetAddress *ia)
{
if (likely(ia != nullptr))
return reinterpret_cast<const ZeroTier::InetAddress *>(ia)->port();
return 0;
}
// NOTE(review): despite the name this returns the result of InetAddress's
// boolean conversion, which reads as "is set" rather than "is nil" -- the
// sense looks inverted; confirm intended semantics against callers before
// changing anything here.
ZT_MAYBE_UNUSED int ZT_InetAddress_isNil(const ZT_InetAddress *ia)
{
if (!ia)
return 0;
return (int)((bool)(*reinterpret_cast<const ZeroTier::InetAddress *>(ia)));
}
// 1 if this is an IPv4 address.
ZT_MAYBE_UNUSED int ZT_InetAddress_isV4(const ZT_InetAddress *ia)
{
if (!ia)
return 0;
return (int)(reinterpret_cast<const ZeroTier::InetAddress *>(ia))->isV4();
}
// 1 if this is an IPv6 address.
ZT_MAYBE_UNUSED int ZT_InetAddress_isV6(const ZT_InetAddress *ia)
{
if (!ia)
return 0;
return (int)(reinterpret_cast<const ZeroTier::InetAddress *>(ia))->isV6();
}
// Copy the raw IP into buf; returns 4, 16, or 0 (unset/NULL). Caller must
// supply at least 16 bytes.
ZT_MAYBE_UNUSED unsigned int ZT_InetAddress_ipBytes(const ZT_InetAddress *ia, void *buf)
{
if (ia) {
switch (reinterpret_cast<const ZeroTier::InetAddress *>(ia)->as.sa.sa_family) {
case AF_INET: ZeroTier::Utils::copy<4>(buf, &(reinterpret_cast<const ZeroTier::InetAddress *>(ia)->as.sa_in.sin_addr.s_addr)); return 4;
case AF_INET6: ZeroTier::Utils::copy<16>(buf, reinterpret_cast<const ZeroTier::InetAddress *>(ia)->as.sa_in6.sin6_addr.s6_addr); return 16;
}
}
return 0;
}
// Classify the address scope (loopback, private, global, etc.).
ZT_MAYBE_UNUSED enum ZT_InetAddress_IpScope ZT_InetAddress_ipScope(const ZT_InetAddress *ia)
{
if (likely(ia != nullptr))
return reinterpret_cast<const ZeroTier::InetAddress *>(ia)->ipScope();
return ZT_IP_SCOPE_NONE;
}
// Three-way compare (-1/0/1); NULL sorts before any address.
ZT_MAYBE_UNUSED int ZT_InetAddress_compare(const ZT_InetAddress *a, const ZT_InetAddress *b)
{
if (a) {
if (b) {
if (*reinterpret_cast<const ZeroTier::InetAddress *>(a) < *reinterpret_cast<const ZeroTier::InetAddress *>(b)) {
return -1;
}
else if (*reinterpret_cast<const ZeroTier::InetAddress *>(b) < *reinterpret_cast<const ZeroTier::InetAddress *>(a)) {
return 1;
}
else {
return 0;
}
}
else {
return 1;
}
}
else if (b) {
return -1;
}
else {
return 0;
}
}
/********************************************************************************************************************/
// Decode a serialized Dictionary and invoke f(arg, key, keyLen, value,
// valueLen) for each entry; returns 1 on successful decode, 0 otherwise.
ZT_MAYBE_UNUSED int ZT_Dictionary_parse(const void *const dict, const unsigned int len, void *const arg, void (*f)(void *, const char *, unsigned int, const void *, unsigned int))
{
ZeroTier::Dictionary d;
if (d.decode(dict, len)) {
for (ZeroTier::Dictionary::const_iterator i(d.begin()); i != d.end(); ++i) {
f(arg, i->first.c_str(), (unsigned int)i->first.length(), i->second.data(), (unsigned int)i->second.size());
}
return 1;
}
return 0;
}
// Non-cryptographic fast PRNG convenience wrapper.
ZT_MAYBE_UNUSED uint64_t ZT_random() { return ZeroTier::Utils::random(); }
} // extern "C"

View file

@ -1,129 +0,0 @@
# Build script for the ZeroTier core static library (zt_core) and its
# standalone unit test executable (zt_core_tests).
cmake_minimum_required(VERSION 3.0)
project(zt_core)

# Generate version.h from its template so sources can embed the build version.
configure_file(
    version.h.in
    version.h
)

# Public/installed headers of the core engine.
set(core_headers
    zerotier.h
    Address.hpp
    Buf.hpp
    C25519.hpp
    CallContext.hpp
    CapabilityCredential.hpp
    Certificate.hpp
    Context.hpp
    Defaults.hpp
    MembershipCredential.hpp
    OwnershipCredential.hpp
    Constants.hpp
    Containers.hpp
    Credential.hpp
    Defragmenter.hpp
    Dictionary.hpp
    ECC384.hpp
    Expect.hpp
    FCV.hpp
    Fingerprint.hpp
    Identity.hpp
    InetAddress.hpp
    Locator.hpp
    LZ4.hpp
    MAC.hpp
    Member.hpp
    MIMC52.hpp
    MulticastGroup.hpp
    Mutex.hpp
    Network.hpp
    NetworkConfig.hpp
    Node.hpp
    OS.hpp
    Path.hpp
    Peer.hpp
    Poly1305.hpp
    Protocol.hpp
    Salsa20.hpp
    ScopedPtr.hpp
    SelfAwareness.hpp
    SHA512.hpp
    SharedPtr.hpp
    Spinlock.hpp
    Store.hpp
    SymmetricKey.hpp
    TagCredential.hpp
    TinyMap.hpp
    Topology.hpp
    Trace.hpp
    TriviallyCopyable.hpp
    TrustStore.hpp
    Utils.hpp
    VL1.hpp
    VL2.hpp
)

# Translation units compiled into the library (includes per-arch AES variants).
set(core_src
    AES.cpp
    AES_aesni.cpp
    AES_armcrypto.cpp
    Buf.cpp
    C25519.cpp
    CAPI.cpp
    CapabilityCredential.cpp
    Certificate.cpp
    Defaults.cpp
    MembershipCredential.cpp
    OwnershipCredential.cpp
    Credential.cpp
    Dictionary.cpp
    ECC384.cpp
    Endpoint.cpp
    Identity.cpp
    InetAddress.cpp
    Locator.cpp
    LZ4.cpp
    Member.cpp
    MIMC52.cpp
    Network.cpp
    NetworkConfig.cpp
    Node.cpp
    Path.cpp
    Peer.cpp
    Poly1305.cpp
    RevocationCredential.cpp
    Salsa20.cpp
    SelfAwareness.cpp
    SHA512.cpp
    TagCredential.cpp
    Topology.cpp
    Trace.cpp
    TrustStore.cpp
    Utils.cpp
    VL1.cpp
    VL2.cpp
)

add_library(${PROJECT_NAME} STATIC ${core_src} ${core_headers})
# The binary dir is where configure_file() wrote version.h.
target_include_directories(${PROJECT_NAME} PRIVATE ${CMAKE_BINARY_DIR})

# Platform link libraries and language level: Windows and macOS build with
# C++17; other platforms only require C++11.
if (WIN32)
    set(libs ${libs} wsock32 ws2_32 rpcrt4 iphlpapi)
    target_compile_features(${PROJECT_NAME} PUBLIC cxx_std_17)
else (WIN32)
    set(libs ${libs} pthread)
    if (APPLE)
        target_compile_features(${PROJECT_NAME} PUBLIC cxx_std_17)
    else (APPLE)
        target_compile_features(${PROJECT_NAME} PUBLIC cxx_std_11)
    endif (APPLE)
endif (WIN32)

# Self-contained test binary; ZT_STANDALONE_TESTS compiles in its own main().
add_executable(zt_core_tests Tests.h Tests.cpp)
target_compile_definitions(zt_core_tests PRIVATE ZT_ENABLE_TESTS=1 ZT_STANDALONE_TESTS=1)
target_include_directories(
    ${PROJECT_NAME}
    PUBLIC
    ${CMAKE_BINARY_DIR}/core
)
target_link_libraries(zt_core_tests zt_core ${libs})

View file

@ -1,59 +0,0 @@
/*
* Copyright (c)2013-2021 ZeroTier, Inc.
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
*/
/****/
#ifndef ZT_CALLCONTEXT_HPP
#define ZT_CALLCONTEXT_HPP
#include "Constants.hpp"
namespace ZeroTier {
/**
* A per-API-call equivalent to the general context.
*
* This is created when external C API calls are made and follows the call
* graph around from function to function as needed. It's cleaner and probably
* faster than passing clock, ticks, and tPtr around everywhere.
*/
class CallContext {
  public:
    /**
     * @param c Real world clock (ms since epoch) or -1 if unknown
     * @param t Monotonic tick counter (ms); must always be valid
     * @param p Opaque caller-supplied pointer passed back through callbacks
     */
    ZT_INLINE CallContext(const int64_t c, const int64_t t, void *const p) : clock(c), ticks(t), tPtr(p) {}

    /**
     * Real world time in milliseconds since Unix epoch or -1 if unknown.
     *
     * This is used for things like checking certificate expiration. If it's
     * not known then the value may be inferred from peers/roots or some
     * features may be disabled.
     */
    const int64_t clock;

    /**
     * Monotonic process or system clock in milliseconds since an arbitrary point.
     *
     * This is never -1 or undefined and is used for most timings.
     */
    const int64_t ticks;

    /**
     * An arbitrary pointer users pass into calls that follows the call chain
     *
     * By passing this back to callbacks state can be kept by the caller using
     * a mechanism that is faster (on most platforms) than thread-local storage.
     */
    void *const tPtr;
};
} // namespace ZeroTier
#endif

View file

@ -1,438 +0,0 @@
/*
* Copyright (c)2013-2021 ZeroTier, Inc.
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
*/
/****/
#include "CapabilityCredential.hpp"
#include "Constants.hpp"
#include "MAC.hpp"
#include "Utils.hpp"
namespace ZeroTier {
// Construct a capability from a controller-supplied rule set. The rule count
// is clamped to ZT_MAX_CAPABILITY_RULES; signature fields start zeroed and are
// filled in later by sign().
CapabilityCredential::CapabilityCredential(const uint32_t id, const uint64_t nwid, const int64_t timestamp, const ZT_VirtualNetworkRule *const rules, const unsigned int ruleCount) noexcept : m_nwid(nwid), m_timestamp(timestamp), m_id(id), m_ruleCount((ruleCount < ZT_MAX_CAPABILITY_RULES) ? ruleCount : ZT_MAX_CAPABILITY_RULES), m_signatureLength(0)
{
    // Zero the whole rule array first so unused trailing entries (and any
    // struct padding) are deterministic, then copy only the used prefix.
    Utils::zero<sizeof(m_rules)>(m_rules);
    if (m_ruleCount > 0)
        Utils::copy(m_rules, rules, sizeof(ZT_VirtualNetworkRule) * m_ruleCount);
    Utils::zero<sizeof(m_signature)>(m_signature);
}
// Sign this capability with 'from' (which must hold a private key), marking it
// as issued to 'to'. The credential is marshaled in for-signing form (with
// padding sentinels) and the signature stored in m_signature.
// Returns true if signing produced a non-empty signature.
bool CapabilityCredential::sign(const Identity &from, const Address &to) noexcept
{
    uint8_t buf[ZT_CAPABILITY_MARSHAL_SIZE_MAX + 16];
    // issuedTo/signedBy must be set before marshaling so they are covered by
    // the signature.
    m_issuedTo = to;
    m_signedBy = from.address();
    m_signatureLength = from.sign(buf, (unsigned int)marshal(buf, true), m_signature, sizeof(m_signature));
    return m_signatureLength > 0;
}
// Serialize this capability into data, returning the number of bytes written.
// Wire format: nwid(8) timestamp(8) id(4) ruleCount(2) rules... maxChainLen(1)
// then (unless forSign) issuedTo(5) signedBy(5) reserved(1) sigLen(2) sig
// and a zero-address record terminator, followed by a 16-bit "additional
// fields" length (currently 0). When forSign is true the payload is wrapped
// in 0x7f sentinel bytes and the signature-related fields are omitted, since
// they cannot be part of what is being signed.
int CapabilityCredential::marshal(uint8_t data[ZT_CAPABILITY_MARSHAL_SIZE_MAX], const bool forSign) const noexcept
{
    int p = 0;
    if (forSign) {
        // Leading signing sentinel.
        for (int k = 0; k < 8; ++k)
            data[p++] = 0x7f;
    }
    Utils::storeBigEndian<uint64_t>(data + p, m_nwid);
    Utils::storeBigEndian<uint64_t>(data + p + 8, (uint64_t)m_timestamp);
    Utils::storeBigEndian<uint32_t>(data + p + 16, m_id);
    p += 20;
    Utils::storeBigEndian<uint16_t>(data + p, (uint16_t)m_ruleCount);
    p += 2;
    p += CapabilityCredential::marshalVirtualNetworkRules(data + p, m_rules, m_ruleCount);
    // LEGACY: older versions supported multiple records with this being a maximum custody
    // chain length. This is deprecated so set the max chain length to one.
    data[p++] = (uint8_t)1;
    if (!forSign) {
        m_issuedTo.copyTo(data + p);
        m_signedBy.copyTo(data + p + ZT_ADDRESS_LENGTH);
        p += ZT_ADDRESS_LENGTH + ZT_ADDRESS_LENGTH;
        data[p++] = 1; // LEGACY: old versions require a reserved byte here
        Utils::storeBigEndian<uint16_t>(data + p, (uint16_t)m_signatureLength);
        p += 2;
        Utils::copy(data + p, m_signature, m_signatureLength);
        p += (int)m_signatureLength;
        // LEGACY: older versions supported more than one record terminated by a zero address.
        for (int k = 0; k < ZT_ADDRESS_LENGTH; ++k)
            data[p++] = 0;
    }
    data[p++] = 0;
    data[p++] = 0; // uint16_t size of additional fields, currently 0
    if (forSign) {
        // Trailing signing sentinel.
        for (int k = 0; k < 8; ++k)
            data[p++] = 0x7f;
    }
    return p;
}
// Deserialize a capability from data (at most len bytes). Returns the number
// of bytes consumed or -1 on malformed input. Mirrors the format produced by
// marshal(..., false).
int CapabilityCredential::unmarshal(const uint8_t *data, int len) noexcept
{
    // Fixed header is nwid(8) + timestamp(8) + id(4) + ruleCount(2) = 22 bytes.
    if (len < 22)
        return -1;
    m_nwid = Utils::loadBigEndian<uint64_t>(data);
    m_timestamp = (int64_t)Utils::loadBigEndian<uint64_t>(data + 8);
    m_id = Utils::loadBigEndian<uint32_t>(data + 16);
    const unsigned int rc = Utils::loadBigEndian<uint16_t>(data + 20);
    if (rc > ZT_MAX_CAPABILITY_RULES)
        return -1;
    const int rulesLen = unmarshalVirtualNetworkRules(data + 22, len - 22, m_rules, m_ruleCount, rc);
    if (rulesLen < 0)
        return rulesLen;
    int p = 22 + rulesLen;
    if (p >= len)
        return -1;
    ++p; // LEGACY: skip old max record count
    // LEGACY: since it was once supported to have multiple records, scan them all. Since
    // this feature was never used, just set the signature and issued to and other related
    // fields each time and we should only ever see one. If there's more than one and the
    // last is not the controller, this credential will just fail validity check.
    for (unsigned int i = 0;; ++i) {
        if ((p + ZT_ADDRESS_LENGTH) > len)
            return -1;
        const Address to(data + p);
        p += ZT_ADDRESS_LENGTH;
        // An all-zero address is the record-list terminator.
        if (!to)
            break;
        m_issuedTo = to;
        if ((p + ZT_ADDRESS_LENGTH) > len)
            return -1;
        m_signedBy.setTo(data + p);
        p += ZT_ADDRESS_LENGTH + 1; // LEGACY: +1 to skip reserved field
        if ((p + 2) > len)
            return -1;
        m_signatureLength = Utils::loadBigEndian<uint16_t>(data + p);
        p += 2;
        if ((m_signatureLength > sizeof(m_signature)) || ((p + (int)m_signatureLength) > len))
            return -1;
        Utils::copy(m_signature, data + p, m_signatureLength);
        p += (int)m_signatureLength;
    }
    // Skip the trailing "additional fields" extension area (length-prefixed,
    // currently always empty when produced by marshal()).
    if ((p + 2) > len)
        return -1;
    p += 2 + Utils::loadBigEndian<uint16_t>(data + p);
    if (p > len)
        return -1;
    return p;
}
// Serialize an array of network rules. Per-rule wire format: one type byte,
// one field-length byte, then exactly that many bytes of type-specific field
// data (multi-byte integers big-endian). Returns total bytes written.
int CapabilityCredential::marshalVirtualNetworkRules(uint8_t *data, const ZT_VirtualNetworkRule *const rules, const unsigned int ruleCount) noexcept
{
    int p = 0;
    for (unsigned int i = 0; i < ruleCount; ++i) {
        data[p++] = rules[i].t;
        // Only the low 6 bits select the rule type; high bits are flags.
        switch ((ZT_VirtualNetworkRuleType)(rules[i].t & 0x3fU)) {
            // Unknown/valueless rule types carry an empty (zero-length) field.
            default: data[p++] = 0; break;
            case ZT_NETWORK_RULE_ACTION_TEE:
            case ZT_NETWORK_RULE_ACTION_WATCH:
            case ZT_NETWORK_RULE_ACTION_REDIRECT:
                // Forward target: address(8) + flags(4) + length(2).
                data[p++] = 14;
                Utils::storeBigEndian<uint64_t>(data + p, rules[i].v.fwd.address);
                p += 8;
                Utils::storeBigEndian<uint32_t>(data + p, rules[i].v.fwd.flags);
                p += 4;
                Utils::storeBigEndian<uint16_t>(data + p, rules[i].v.fwd.length);
                p += 2;
                break;
            case ZT_NETWORK_RULE_MATCH_SOURCE_ZEROTIER_ADDRESS:
            case ZT_NETWORK_RULE_MATCH_DEST_ZEROTIER_ADDRESS:
                // 40-bit ZeroTier address.
                data[p++] = 5;
                Address(rules[i].v.zt).copyTo(data + p);
                p += ZT_ADDRESS_LENGTH;
                break;
            case ZT_NETWORK_RULE_MATCH_VLAN_ID:
                data[p++] = 2;
                Utils::storeBigEndian<uint16_t>(data + p, rules[i].v.vlanId);
                p += 2;
                break;
            case ZT_NETWORK_RULE_MATCH_VLAN_PCP:
                data[p++] = 1;
                data[p++] = rules[i].v.vlanPcp;
                break;
            case ZT_NETWORK_RULE_MATCH_VLAN_DEI:
                data[p++] = 1;
                data[p++] = rules[i].v.vlanDei;
                break;
            case ZT_NETWORK_RULE_MATCH_MAC_SOURCE:
            case ZT_NETWORK_RULE_MATCH_MAC_DEST:
                data[p++] = 6;
                MAC(rules[i].v.mac).copyTo(data + p);
                p += 6;
                break;
            case ZT_NETWORK_RULE_MATCH_IPV4_SOURCE:
            case ZT_NETWORK_RULE_MATCH_IPV4_DEST:
                // IPv4 address bytes are emitted in stored (network) order,
                // followed by the CIDR mask length.
                data[p++] = 5;
                data[p++] = reinterpret_cast<const uint8_t *>(&(rules[i].v.ipv4.ip))[0];
                data[p++] = reinterpret_cast<const uint8_t *>(&(rules[i].v.ipv4.ip))[1];
                data[p++] = reinterpret_cast<const uint8_t *>(&(rules[i].v.ipv4.ip))[2];
                data[p++] = reinterpret_cast<const uint8_t *>(&(rules[i].v.ipv4.ip))[3];
                data[p++] = rules[i].v.ipv4.mask;
                break;
            case ZT_NETWORK_RULE_MATCH_IPV6_SOURCE:
            case ZT_NETWORK_RULE_MATCH_IPV6_DEST:
                data[p++] = 17;
                Utils::copy<16>(data + p, rules[i].v.ipv6.ip);
                p += 16;
                data[p++] = rules[i].v.ipv6.mask;
                break;
            case ZT_NETWORK_RULE_MATCH_IP_TOS:
                data[p++] = 3;
                data[p++] = rules[i].v.ipTos.mask;
                data[p++] = rules[i].v.ipTos.value[0];
                data[p++] = rules[i].v.ipTos.value[1];
                break;
            case ZT_NETWORK_RULE_MATCH_IP_PROTOCOL:
                data[p++] = 1;
                data[p++] = rules[i].v.ipProtocol;
                break;
            case ZT_NETWORK_RULE_MATCH_ETHERTYPE:
                data[p++] = 2;
                Utils::storeBigEndian<uint16_t>(data + p, rules[i].v.etherType);
                p += 2;
                break;
            case ZT_NETWORK_RULE_MATCH_ICMP:
                data[p++] = 3;
                data[p++] = rules[i].v.icmp.type;
                data[p++] = rules[i].v.icmp.code;
                data[p++] = rules[i].v.icmp.flags;
                break;
            case ZT_NETWORK_RULE_MATCH_IP_SOURCE_PORT_RANGE:
            case ZT_NETWORK_RULE_MATCH_IP_DEST_PORT_RANGE:
                // Inclusive port range [start, end].
                data[p++] = 4;
                Utils::storeBigEndian<uint16_t>(data + p, rules[i].v.port[0]);
                p += 2;
                Utils::storeBigEndian<uint16_t>(data + p, rules[i].v.port[1]);
                p += 2;
                break;
            case ZT_NETWORK_RULE_MATCH_CHARACTERISTICS:
                data[p++] = 8;
                Utils::storeBigEndian<uint64_t>(data + p, rules[i].v.characteristics);
                p += 8;
                break;
            case ZT_NETWORK_RULE_MATCH_FRAME_SIZE_RANGE:
                data[p++] = 4;
                Utils::storeBigEndian<uint16_t>(data + p, rules[i].v.frameSize[0]);
                p += 2;
                Utils::storeBigEndian<uint16_t>(data + p, rules[i].v.frameSize[1]);
                p += 2;
                break;
            case ZT_NETWORK_RULE_MATCH_RANDOM:
                data[p++] = 4;
                Utils::storeBigEndian<uint32_t>(data + p, rules[i].v.randomProbability);
                p += 4;
                break;
            case ZT_NETWORK_RULE_MATCH_TAGS_DIFFERENCE:
            case ZT_NETWORK_RULE_MATCH_TAGS_BITWISE_AND:
            case ZT_NETWORK_RULE_MATCH_TAGS_BITWISE_OR:
            case ZT_NETWORK_RULE_MATCH_TAGS_BITWISE_XOR:
            case ZT_NETWORK_RULE_MATCH_TAGS_EQUAL:
            case ZT_NETWORK_RULE_MATCH_TAG_SENDER:
            case ZT_NETWORK_RULE_MATCH_TAG_RECEIVER:
                data[p++] = 8;
                Utils::storeBigEndian<uint32_t>(data + p, rules[i].v.tag.id);
                p += 4;
                Utils::storeBigEndian<uint32_t>(data + p, rules[i].v.tag.value);
                p += 4;
                break;
            case ZT_NETWORK_RULE_MATCH_INTEGER_RANGE:
                // Range is encoded as absolute [start, start+end] endpoints,
                // then the field index and format byte.
                data[p++] = 19;
                Utils::storeBigEndian<uint64_t>(data + p, rules[i].v.intRange.start);
                p += 8;
                Utils::storeBigEndian<uint64_t>(data + p, rules[i].v.intRange.start + (uint64_t)rules[i].v.intRange.end);
                p += 8;
                Utils::storeBigEndian<uint16_t>(data + p, rules[i].v.intRange.idx);
                p += 2;
                data[p++] = rules[i].v.intRange.format;
                break;
        }
    }
    return p;
}
int CapabilityCredential::unmarshalVirtualNetworkRules(const uint8_t *const data, const int len, ZT_VirtualNetworkRule *const rules, unsigned int &ruleCount, const unsigned int maxRuleCount) noexcept
{
int p = 0;
unsigned int rc = 0;
while (rc < maxRuleCount) {
if (p >= len)
return -1;
rules[ruleCount].t = data[p++];
const int fieldLen = (int)data[p++];
if ((p + fieldLen) > len)
return -1;
switch ((ZT_VirtualNetworkRuleType)(rules[ruleCount].t & 0x3fU)) {
default: break;
case ZT_NETWORK_RULE_ACTION_TEE:
case ZT_NETWORK_RULE_ACTION_WATCH:
case ZT_NETWORK_RULE_ACTION_REDIRECT:
if ((p + 14) > len)
return -1;
rules[ruleCount].v.fwd.address = Utils::loadBigEndian<uint64_t>(data + p);
p += 8;
rules[ruleCount].v.fwd.flags = Utils::loadBigEndian<uint32_t>(data + p);
p += 4;
rules[ruleCount].v.fwd.length = Utils::loadBigEndian<uint16_t>(data + p);
p += 2;
break;
case ZT_NETWORK_RULE_MATCH_SOURCE_ZEROTIER_ADDRESS:
case ZT_NETWORK_RULE_MATCH_DEST_ZEROTIER_ADDRESS:
if ((p + ZT_ADDRESS_LENGTH) > len)
return -1;
rules[ruleCount].v.zt = Address(data + p).toInt();
p += ZT_ADDRESS_LENGTH;
break;
case ZT_NETWORK_RULE_MATCH_VLAN_ID:
if ((p + 2) > len)
return -1;
rules[ruleCount].v.vlanId = Utils::loadBigEndian<uint16_t>(data + p);
p += 2;
break;
case ZT_NETWORK_RULE_MATCH_VLAN_PCP:
if ((p + 1) > len)
return -1;
rules[ruleCount].v.vlanPcp = data[p++];
break;
case ZT_NETWORK_RULE_MATCH_VLAN_DEI:
if ((p + 1) > len)
return -1;
rules[ruleCount].v.vlanDei = data[p++];
break;
case ZT_NETWORK_RULE_MATCH_MAC_SOURCE:
case ZT_NETWORK_RULE_MATCH_MAC_DEST:
if ((p + 6) > len)
return -1;
Utils::copy<6>(rules[ruleCount].v.mac, data + p);
p += 6;
break;
case ZT_NETWORK_RULE_MATCH_IPV4_SOURCE:
case ZT_NETWORK_RULE_MATCH_IPV4_DEST:
if ((p + 5) > len)
return -1;
Utils::copy<4>(&(rules[ruleCount].v.ipv4.ip), data + p);
p += 4;
rules[ruleCount].v.ipv4.mask = data[p++];
break;
case ZT_NETWORK_RULE_MATCH_IPV6_SOURCE:
case ZT_NETWORK_RULE_MATCH_IPV6_DEST:
if ((p + 17) > len)
return -1;
Utils::copy<16>(rules[ruleCount].v.ipv6.ip, data + p);
p += 16;
rules[ruleCount].v.ipv6.mask = data[p++];
break;
case ZT_NETWORK_RULE_MATCH_IP_TOS:
if ((p + 3) > len)
return -1;
rules[ruleCount].v.ipTos.mask = data[p++];
rules[ruleCount].v.ipTos.value[0] = data[p++];
rules[ruleCount].v.ipTos.value[1] = data[p++];
break;
case ZT_NETWORK_RULE_MATCH_IP_PROTOCOL:
if ((p + 1) > len)
return -1;
rules[ruleCount].v.ipProtocol = data[p++];
break;
case ZT_NETWORK_RULE_MATCH_ETHERTYPE:
if ((p + 2) > len)
return -1;
rules[ruleCount].v.etherType = Utils::loadBigEndian<uint16_t>(data + p);
p += 2;
break;
case ZT_NETWORK_RULE_MATCH_ICMP:
if ((p + 3) > len)
return -1;
rules[ruleCount].v.icmp.type = data[p++];
rules[ruleCount].v.icmp.code = data[p++];
rules[ruleCount].v.icmp.flags = data[p++];
break;
case ZT_NETWORK_RULE_MATCH_IP_SOURCE_PORT_RANGE:
case ZT_NETWORK_RULE_MATCH_IP_DEST_PORT_RANGE:
if ((p + 4) > len)
return -1;
rules[ruleCount].v.port[0] = Utils::loadBigEndian<uint16_t>(data + p);
p += 2;
rules[ruleCount].v.port[1] = Utils::loadBigEndian<uint16_t>(data + p);
p += 2;
break;
case ZT_NETWORK_RULE_MATCH_CHARACTERISTICS:
if ((p + 8) > len)
return -1;
rules[ruleCount].v.characteristics = Utils::loadBigEndian<uint64_t>(data + p);
p += 8;
break;
case ZT_NETWORK_RULE_MATCH_FRAME_SIZE_RANGE:
if ((p + 4) > len)
return -1;
rules[ruleCount].v.frameSize[0] = Utils::loadBigEndian<uint16_t>(data + p);
p += 2;
rules[ruleCount].v.frameSize[1] = Utils::loadBigEndian<uint16_t>(data + p);
p += 2;
break;
case ZT_NETWORK_RULE_MATCH_RANDOM:
if ((p + 4) > len)
return -1;
rules[ruleCount].v.randomProbability = Utils::loadBigEndian<uint32_t>(data + p);
p += 4;
break;
case ZT_NETWORK_RULE_MATCH_TAGS_DIFFERENCE:
case ZT_NETWORK_RULE_MATCH_TAGS_BITWISE_AND:
case ZT_NETWORK_RULE_MATCH_TAGS_BITWISE_OR:
case ZT_NETWORK_RULE_MATCH_TAGS_BITWISE_XOR:
case ZT_NETWORK_RULE_MATCH_TAGS_EQUAL:
case ZT_NETWORK_RULE_MATCH_TAG_SENDER:
case ZT_NETWORK_RULE_MATCH_TAG_RECEIVER:
if ((p + 4) > len)
return -1;
rules[ruleCount].v.tag.id = Utils::loadBigEndian<uint32_t>(data + p);
p += 4;
rules[ruleCount].v.tag.value = Utils::loadBigEndian<uint32_t>(data + p);
p += 4;
break;
case ZT_NETWORK_RULE_MATCH_INTEGER_RANGE:
if ((p + 19) > len)
return -1;
rules[ruleCount].v.intRange.start = Utils::loadBigEndian<uint64_t>(data + p);
p += 8;
rules[ruleCount].v.intRange.end = (uint32_t)(Utils::loadBigEndian<uint64_t>(data + p) - rules[ruleCount].v.intRange.start);
p += 8;
rules[ruleCount].v.intRange.idx = Utils::loadBigEndian<uint16_t>(data + p);
p += 2;
rules[ruleCount].v.intRange.format = data[p++];
break;
}
p += fieldLen;
++rc;
}
ruleCount = rc;
return p;
}
} // namespace ZeroTier

View file

@ -1,163 +0,0 @@
/*
* Copyright (c)2013-2021 ZeroTier, Inc.
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
*/
/****/
#ifndef ZT_CAPABILITY_HPP
#define ZT_CAPABILITY_HPP
#include "Address.hpp"
#include "C25519.hpp"
#include "Constants.hpp"
#include "Credential.hpp"
#include "Identity.hpp"
#include "Utils.hpp"
#define ZT_VIRTUALNETWORKRULE_MARSHAL_SIZE_MAX 21
#define ZT_CAPABILITY_MARSHAL_SIZE_MAX (8 + 8 + 4 + 1 + 2 + (ZT_VIRTUALNETWORKRULE_MARSHAL_SIZE_MAX * ZT_MAX_CAPABILITY_RULES) + 2 + (5 + 5 + 1 + 2 + ZT_SIGNATURE_BUFFER_SIZE))
namespace ZeroTier {
class Context;
/**
* A set of grouped and signed network flow rules for a specific member.
*
* On the sending side the sender does the following for each packet:
*
* (1) Evaluates its capabilities in ascending order of ID to determine
* which capability allows it to transmit this packet.
* (2) If it has not done so lately, it then sends this capability to the
* receiving peer ("presents" it).
* (3) The sender then sends the packet.
*
* On the receiving side the receiver evaluates the capabilities presented
* by the sender. If any valid un-expired capability allows this packet it
* is accepted.
*
* Note that this is after evaluation of network scope rules and only if
* network scope rules do not deliver an explicit match.
*/
class CapabilityCredential : public Credential {
    friend class Credential;

  public:
    // Type tag used by the generic Credential machinery.
    static constexpr ZT_CredentialType credentialType() noexcept { return ZT_CREDENTIAL_TYPE_CAPABILITY; }

    // Default instance is fully zeroed (all fields, including padding).
    ZT_INLINE CapabilityCredential() noexcept { memoryZero(this); }

    /**
     * @param id Capability ID
     * @param nwid Network ID
     * @param timestamp Timestamp (at controller)
     * @param rules Network flow rules for this capability (count is clamped to ZT_MAX_CAPABILITY_RULES)
     * @param ruleCount Number of flow rules
     */
    CapabilityCredential(const uint32_t id, const uint64_t nwid, const int64_t timestamp, const ZT_VirtualNetworkRule *const rules, const unsigned int ruleCount) noexcept;

    /**
     * @return Rules -- see ruleCount() for size of array
     */
    ZT_INLINE const ZT_VirtualNetworkRule *rules() const noexcept { return m_rules; }

    /**
     * @return Number of rules in rules()
     */
    ZT_INLINE unsigned int ruleCount() const noexcept { return m_ruleCount; }

    ZT_INLINE uint32_t id() const noexcept { return m_id; }

    ZT_INLINE uint64_t networkId() const noexcept { return m_nwid; }

    ZT_INLINE int64_t timestamp() const noexcept { return m_timestamp; }

    // The timestamp doubles as the revision number for credential comparison.
    ZT_INLINE int64_t revision() const noexcept { return m_timestamp; }

    ZT_INLINE const Address &issuedTo() const noexcept { return m_issuedTo; }

    ZT_INLINE const Address &signer() const noexcept { return m_signedBy; }

    ZT_INLINE const uint8_t *signature() const noexcept { return m_signature; }

    ZT_INLINE unsigned int signatureLength() const noexcept { return m_signatureLength; }

    /**
     * Sign this capability and add signature to its chain of custody
     *
     * If this returns false, this object should be considered to be
     * in an undefined state and should be discarded. False can be returned
     * if there is no more room for signatures (max chain length reached)
     * or if the 'from' identity does not include a secret key to allow
     * it to sign anything.
     *
     * @param from Signing identity (must have secret)
     * @param to Recipient of this signature
     * @return True if signature successful and chain of custody appended
     */
    bool sign(const Identity &from, const Address &to) noexcept;

    /**
     * Verify this capability's chain of custody and signatures
     *
     * @param RR Runtime environment to provide for peer lookup, etc.
     */
    ZT_INLINE Credential::VerifyResult verify(const Context &ctx, const CallContext &cc) const noexcept { return s_verify(ctx, cc, *this); }

    static constexpr int marshalSizeMax() noexcept { return ZT_CAPABILITY_MARSHAL_SIZE_MAX; }

    int marshal(uint8_t data[ZT_CAPABILITY_MARSHAL_SIZE_MAX], bool forSign = false) const noexcept;

    int unmarshal(const uint8_t *data, int len) noexcept;

    /**
     * Marshal a set of virtual network rules
     *
     * @param data Buffer to store rules (must be at least ruleCount * ZT_VIRTUALNETWORKRULE_MARSHAL_SIZE_MAX)
     * @param rules Network rules
     * @param ruleCount Number of rules
     * @return Number of bytes written or -1 on error
     */
    static int marshalVirtualNetworkRules(uint8_t *data, const ZT_VirtualNetworkRule *rules, unsigned int ruleCount) noexcept;

    /**
     * Unmarshal a set of virtual network rules
     *
     * @param data Rule set to unmarshal
     * @param len Length of data
     * @param rules Buffer to store rules
     * @param ruleCount Result parameter to set to the number of rules decoded
     * @param maxRuleCount Capacity of rules buffer
     * @return Number of bytes unmarshaled or -1 on error
     */
    static int unmarshalVirtualNetworkRules(const uint8_t *data, int len, ZT_VirtualNetworkRule *rules, unsigned int &ruleCount, unsigned int maxRuleCount) noexcept;

    // Provides natural sort order by ID
    ZT_INLINE bool operator<(const CapabilityCredential &c) const noexcept { return (m_id < c.m_id); }

    // NOTE(review): equality is raw memcmp over the whole object, which
    // includes struct padding — this presumably relies on instances being
    // fully zeroed at construction (memoryZero / Utils::zero); confirm all
    // construction paths guarantee that.
    ZT_INLINE bool operator==(const CapabilityCredential &c) const noexcept { return (memcmp(this, &c, sizeof(CapabilityCredential)) == 0); }

    ZT_INLINE bool operator!=(const CapabilityCredential &c) const noexcept { return (memcmp(this, &c, sizeof(CapabilityCredential)) != 0); }

  private:
    uint64_t m_nwid;
    int64_t m_timestamp;
    uint32_t m_id;
    unsigned int m_ruleCount;
    ZT_VirtualNetworkRule m_rules[ZT_MAX_CAPABILITY_RULES];
    Address m_issuedTo;
    Address m_signedBy;
    unsigned int m_signatureLength;
    uint8_t m_signature[ZT_SIGNATURE_BUFFER_SIZE];
};
} // namespace ZeroTier
#endif

View file

@ -1,717 +0,0 @@
/*
* Copyright (c)2013-2021 ZeroTier, Inc.
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
*/
/****/
#include "Certificate.hpp"
#include "ECC384.hpp"
#include "SHA512.hpp"
#include "ScopedPtr.hpp"
namespace ZeroTier {
Certificate::Certificate() noexcept
{
    // Zero the C-layer base struct this class derives from. The explicit
    // upcast restricts the zeroing to the ZT_Certificate portion only.
    Utils::zero<sizeof(ZT_Certificate)>(static_cast<ZT_Certificate *>(this));
}
// Construct from the raw C API struct: zero-init then deep copy via operator=.
Certificate::Certificate(const ZT_Certificate &apiCert) : Certificate() { *this = apiCert; }
// Copy construction also goes through assignment so all pointer arrays are
// re-pointed at this object's own internal storage rather than the source's.
Certificate::Certificate(const Certificate &cert) : Certificate() { *this = cert; }
// Out-of-line (empty) destructor; members clean up via their own destructors.
Certificate::~Certificate() {}
// Deep copy from the C API struct. Scalar/array fields are copied directly;
// pointer-based collections (identities, networks, update URLs, extended
// attributes) are duplicated into this object's internal containers so the
// result owns all of its memory and remains valid after 'cert' is freed.
Certificate &Certificate::operator=(const ZT_Certificate &cert)
{
    m_clear();

    Utils::copy<sizeof(this->serialNo)>(this->serialNo, cert.serialNo);
    this->usageFlags = cert.usageFlags;
    this->timestamp = cert.timestamp;
    this->validity[0] = cert.validity[0];
    this->validity[1] = cert.validity[1];

    this->subject.timestamp = cert.subject.timestamp;

    // Null entries are skipped, so the addSubject*() helpers below maintain
    // identityCount/networkCount/updateURLCount to match what was actually
    // copied. (The previous code overwrote these counts with the source
    // struct's values afterward, which could overstate the populated array
    // sizes when null entries were skipped and lead to reads of unset slots.)
    if (cert.subject.identities != nullptr) {
        for (unsigned int i = 0; i < cert.subject.identityCount; ++i) {
            if (cert.subject.identities[i].identity) {
                if (cert.subject.identities[i].locator) {
                    addSubjectIdentity(*reinterpret_cast<const Identity *>(cert.subject.identities[i].identity), *reinterpret_cast<const Locator *>(cert.subject.identities[i].locator));
                }
                else {
                    addSubjectIdentity(*reinterpret_cast<const Identity *>(cert.subject.identities[i].identity));
                }
            }
        }
    }

    if (cert.subject.networks != nullptr) {
        for (unsigned int i = 0; i < cert.subject.networkCount; ++i) {
            if (cert.subject.networks[i].id) {
                addSubjectNetwork(cert.subject.networks[i].id, cert.subject.networks[i].controller);
            }
        }
    }

    if (cert.subject.updateURLs != nullptr) {
        for (unsigned int i = 0; i < cert.subject.updateURLCount; ++i) {
            if (cert.subject.updateURLs[i]) {
                addSubjectUpdateUrl(cert.subject.updateURLs[i]);
            }
        }
    }

    Utils::copy<sizeof(ZT_Certificate_Name)>(&(this->subject.name), &(cert.subject.name));

    Utils::copy<sizeof(this->subject.uniqueId)>(this->subject.uniqueId, cert.subject.uniqueId);
    Utils::copy<sizeof(this->subject.uniqueIdSignature)>(this->subject.uniqueIdSignature, cert.subject.uniqueIdSignature);
    this->subject.uniqueIdSize = cert.subject.uniqueIdSize;
    this->subject.uniqueIdSignatureSize = cert.subject.uniqueIdSignatureSize;

    Utils::copy<sizeof(this->issuer)>(this->issuer, cert.issuer);
    Utils::copy<sizeof(this->issuerPublicKey)>(this->issuerPublicKey, cert.issuerPublicKey);
    Utils::copy<sizeof(this->publicKey)>(this->publicKey, cert.publicKey);
    this->issuerPublicKeySize = cert.issuerPublicKeySize;
    this->publicKeySize = cert.publicKeySize;

    // FIX: the subject signature was previously not copied at all, so
    // assignment from a signed certificate silently dropped it (encode()
    // serializes "sS" from these fields).
    Utils::copy<sizeof(this->subjectSignature)>(this->subjectSignature, cert.subjectSignature);
    this->subjectSignatureSize = cert.subjectSignatureSize;

    if ((cert.extendedAttributes != nullptr) && (cert.extendedAttributesSize > 0)) {
        m_extendedAttributes.assign(cert.extendedAttributes, cert.extendedAttributes + cert.extendedAttributesSize);
        this->extendedAttributes = m_extendedAttributes.data();
        this->extendedAttributesSize = (unsigned int)m_extendedAttributes.size();
    }

    Utils::copy<sizeof(this->signature)>(this->signature, cert.signature);
    this->signatureSize = cert.signatureSize;

    this->maxPathLength = cert.maxPathLength;

    return *this;
}
ZT_Certificate_Identity *Certificate::addSubjectIdentity(const Identity &id)
{
    // Keep a privately owned copy of the identity (with any private key
    // stripped); the C-visible struct below points at this stored copy.
    m_identities.push_front(id);
    m_identities.front().erasePrivateKey();

    // Append a new C struct entry, then refresh the C-visible pointer and
    // count since the underlying array may have been reallocated.
    m_subjectIdentities.push_back(ZT_Certificate_Identity());
    ZT_Certificate_Identity &entry = m_subjectIdentities.back();
    entry.identity = &(m_identities.front());
    entry.locator = nullptr;
    this->subject.identities = m_subjectIdentities.data();
    this->subject.identityCount = (unsigned int)m_subjectIdentities.size();
    return &entry;
}
ZT_Certificate_Identity *Certificate::addSubjectIdentity(const Identity &id, const Locator &loc)
{
    // Delegate to the locator-less overload for the identity itself, then
    // attach a privately held copy of the locator to the new entry.
    ZT_Certificate_Identity *const entry = addSubjectIdentity(id);
    m_locators.push_front(loc);
    entry->locator = &(m_locators.front());
    return entry;
}
ZT_Certificate_Network *Certificate::addSubjectNetwork(const uint64_t id, const ZT_Fingerprint &controller)
{
    // Bump the C-visible count, grow the backing array to match, and refresh
    // the C-visible pointer (the array may have been reallocated).
    const unsigned int newCount = ++this->subject.networkCount;
    m_subjectNetworks.resize(newCount);
    this->subject.networks = m_subjectNetworks.data();

    // Populate the freshly added entry.
    ZT_Certificate_Network &nw = m_subjectNetworks.back();
    nw.id = id;
    Utils::copy<sizeof(ZT_Fingerprint)>(&(nw.controller), &controller);
    return &nw;
}
void Certificate::addSubjectUpdateUrl(const char *url)
{
    // Null or empty URLs are silently ignored.
    if ((url == nullptr) || (url[0] == 0))
        return;

    // Keep a privately owned copy of the string, expose its c_str() via the
    // C-visible pointer array, and refresh pointer/count since the array may
    // have been reallocated.
    m_strings.push_front(url);
    m_updateUrls.push_back(m_strings.front().c_str());
    this->subject.updateURLs = m_updateUrls.data();
    this->subject.updateURLCount = (unsigned int)m_updateUrls.size();
}
// Serialize this certificate to bytes. When omitSignature is true the "si"
// field is left out, producing the exact byte sequence that gets signed.
Vector<uint8_t> Certificate::encode(const bool omitSignature) const
{
    Vector<uint8_t> enc;
    Dictionary d;

    /*
     * A Dictionary is used to encode certificates as it's a common and extensible
     * format. Custom packed formats are used for credentials as these are smaller
     * and faster to marshal/unmarshal.
     *
     * We use the slower actually-insert-keys method of building a dictionary
     * instead of the faster append method because for signing and verification
     * purposes the keys must be always be in order.
     */

    // Zero/empty values are omitted entirely; decode() treats missing keys as
    // zero, so this keeps the encoding minimal and canonical.
    if (this->usageFlags != 0)
        d.add("f", this->usageFlags);
    if (this->timestamp > 0)
        d.add("t", (uint64_t)this->timestamp);
    if (this->validity[0] > 0)
        d.add("v#0", (uint64_t)this->validity[0]);
    if (this->validity[1] > 0)
        d.add("v#1", (uint64_t)this->validity[1]);
    m_encodeSubject(this->subject, d, false);
    if (!Utils::allZero(this->issuer, sizeof(this->issuer)))
        d.add("i", this->issuer, sizeof(this->issuer));
    if (this->issuerPublicKeySize > 0)
        d.add("iPK", this->issuerPublicKey, this->issuerPublicKeySize);
    if (this->publicKeySize > 0)
        d.add("pK", this->publicKey, this->publicKeySize);
    if (this->subjectSignatureSize > 0)
        d.add("sS", this->subjectSignature, this->subjectSignatureSize);
    if ((this->extendedAttributes != nullptr) && (this->extendedAttributesSize > 0))
        d["x"].assign(this->extendedAttributes, this->extendedAttributes + this->extendedAttributesSize);
    // The overall signature is omitted when encoding the to-be-signed form.
    if ((!omitSignature) && (this->signatureSize > 0))
        d["si"].assign(this->signature, this->signature + this->signatureSize);
    if (this->maxPathLength > 0)
        d.add("l", (uint64_t)this->maxPathLength);
    d.encode(enc);
    return enc;
}
bool Certificate::decode(const void *const data, const unsigned int len)
{
char tmp[32], tmp2[ZT_CERTIFICATE_MAX_STRING_LENGTH + 1];
Dictionary d;
if (!d.decode(data, len))
return false;
m_clear();
this->usageFlags = d.getUI("f");
this->timestamp = (int64_t)d.getUI("t");
this->validity[0] = (int64_t)d.getUI("v#0");
this->validity[1] = (int64_t)d.getUI("v#1");
this->subject.timestamp = (int64_t)d.getUI("s.t");
unsigned int cnt = (unsigned int)d.getUI("s.i$");
for (unsigned int i = 0; i < cnt; ++i) {
const Vector<uint8_t> &identityData = d[Dictionary::arraySubscript(tmp, sizeof(tmp), "s.i$.i", i)];
const Vector<uint8_t> &locatorData = d[Dictionary::arraySubscript(tmp, sizeof(tmp), "s.i$.l", i)];
if (identityData.empty())
return false;
Identity id;
if (id.unmarshal(identityData.data(), (unsigned int)identityData.size()) <= 0)
return false;
if (locatorData.empty()) {
this->addSubjectIdentity(id);
}
else {
Locator loc;
if (loc.unmarshal(locatorData.data(), (unsigned int)locatorData.size()) <= 0)
return false;
this->addSubjectIdentity(id, loc);
}
}
cnt = (unsigned int)d.getUI("s.nw$");
for (unsigned int i = 0; i < cnt; ++i) {
const uint64_t nwid = d.getUI(Dictionary::arraySubscript(tmp, sizeof(tmp), "s.nw$.i", i));
const Vector<uint8_t> &fingerprintData = d[Dictionary::arraySubscript(tmp, sizeof(tmp), "s.nw$.c", i)];
if ((nwid == 0) || (fingerprintData.empty()))
return false;
Fingerprint fp;
if (fp.unmarshal(fingerprintData.data(), (unsigned int)fingerprintData.size()) <= 0)
return false;
this->addSubjectNetwork(nwid, fp);
}
cnt = (unsigned int)d.getUI("s.u$");
for (unsigned int i = 0; i < cnt; ++i)
addSubjectUpdateUrl(d.getS(Dictionary::arraySubscript(tmp, sizeof(tmp), "s.u$", i), tmp2, sizeof(tmp2)));
d.getS("s.n.sN", this->subject.name.serialNo, sizeof(this->subject.name.serialNo));
d.getS("s.n.cN", this->subject.name.commonName, sizeof(this->subject.name.commonName));
d.getS("s.n.c", this->subject.name.country, sizeof(this->subject.name.country));
d.getS("s.n.o", this->subject.name.organization, sizeof(this->subject.name.organization));
d.getS("s.n.u", this->subject.name.unit, sizeof(this->subject.name.unit));
d.getS("s.n.l", this->subject.name.locality, sizeof(this->subject.name.locality));
d.getS("s.n.p", this->subject.name.province, sizeof(this->subject.name.province));
d.getS("s.n.sA", this->subject.name.streetAddress, sizeof(this->subject.name.streetAddress));
d.getS("s.n.pC", this->subject.name.postalCode, sizeof(this->subject.name.postalCode));
d.getS("s.n.e", this->subject.name.email, sizeof(this->subject.name.email));
d.getS("s.n.ur", this->subject.name.url, sizeof(this->subject.name.url));
d.getS("s.n.h", this->subject.name.host, sizeof(this->subject.name.host));
const Vector<uint8_t> &uniqueId = d["s.uI"];
if ((!uniqueId.empty()) && (uniqueId.size() <= sizeof(this->subject.uniqueId))) {
Utils::copy(this->subject.uniqueId, uniqueId.data(), uniqueId.size());
this->subject.uniqueIdSize = (unsigned int)uniqueId.size();
}
const Vector<uint8_t> &uniqueIdSignature = d["s.uS"];
if ((!uniqueIdSignature.empty()) && (uniqueIdSignature.size() <= sizeof(this->subject.uniqueIdSignature))) {
Utils::copy(this->subject.uniqueIdSignature, uniqueIdSignature.data(), uniqueIdSignature.size());
this->subject.uniqueIdSignatureSize = (unsigned int)uniqueIdSignature.size();
}
const Vector<uint8_t> &issuerData = d["i"];
if (issuerData.size() == sizeof(this->issuer)) {
Utils::copy<sizeof(this->issuer)>(this->issuer, issuerData.data());
}
const Vector<uint8_t> &issuerPublicKey = d["iPK"];
if ((!issuerPublicKey.empty()) && (issuerPublicKey.size() <= sizeof(this->issuerPublicKey))) {
Utils::copy(this->issuerPublicKey, issuerPublicKey.data(), issuerPublicKey.size());
this->issuerPublicKeySize = (unsigned int)issuerPublicKey.size();
}
const Vector<uint8_t> &publicKey = d["pK"];
if ((!publicKey.empty()) && (publicKey.size() <= sizeof(this->publicKey))) {
Utils::copy(this->publicKey, publicKey.data(), publicKey.size());
this->publicKeySize = (unsigned int)publicKey.size();
}
const Vector<uint8_t> &subjectSignature = d["sS"];
if ((!subjectSignature.empty()) && (subjectSignature.size() <= sizeof(this->subjectSignature))) {
Utils::copy(this->subjectSignature, subjectSignature.data(), subjectSignature.size());
this->subjectSignatureSize = (unsigned int)subjectSignature.size();
}
m_extendedAttributes = d["x"];
if (!m_extendedAttributes.empty()) {
this->extendedAttributes = m_extendedAttributes.data();
this->extendedAttributesSize = (unsigned int)m_extendedAttributes.size();
}
const Vector<uint8_t> &signature = d["si"];
if ((!signature.empty()) && (signature.size() <= sizeof(this->signature))) {
Utils::copy(this->signature, signature.data(), signature.size());
this->signatureSize = (unsigned int)signature.size();
}
this->maxPathLength = (unsigned int)d.getUI("l");
const Vector<uint8_t> enc(encode(true));
SHA384(this->serialNo, enc.data(), (unsigned int)enc.size());
return true;
}
bool Certificate::sign(const uint8_t issuer[ZT_CERTIFICATE_HASH_SIZE], const void *const issuerPrivateKey, const unsigned int issuerPrivateKeySize)
{
    // Sign this certificate with the given issuer private key. On success this
    // sets the issuer, issuerPublicKey, serialNo, and signature fields.
    //
    // @param issuer Issuer hash, or NULL to create a self-signed certificate
    // @param issuerPrivateKey Private key blob; first byte is the algorithm ID,
    //        followed by the public key and then the private key material
    // @param issuerPrivateKeySize Size of the private key blob in bytes
    // @return True on success
    if ((!issuerPrivateKey) || (issuerPrivateKeySize == 0))
        return false;
    // The first byte of the key blob selects the algorithm.
    switch (reinterpret_cast<const uint8_t *>(issuerPrivateKey)[0]) {
        default: return false;
        case ZT_CERTIFICATE_PUBLIC_KEY_ALGORITHM_ECDSA_NIST_P_384:
            if (issuerPrivateKeySize == (1 + ZT_ECC384_PUBLIC_KEY_SIZE + ZT_ECC384_PRIVATE_KEY_SIZE)) {
                if ((!issuer) || ((this->publicKeySize == (ZT_ECC384_PUBLIC_KEY_SIZE + 1)) && (memcmp(issuerPrivateKey, this->publicKey, ZT_ECC384_PUBLIC_KEY_SIZE + 1) == 0))) {
                    // If public key and issuer public key match, this is a self-signed certificate.
                    // This can also be specified by signing with issuer set to NULL.
                    // Self-signed certs are marked by an all-0xff issuer hash (see verify()).
                    Utils::fill<sizeof(this->issuer), 0xff>(this->issuer);
                    this->issuerPublicKeySize = 0;
                }
                else {
                    // Otherwise set the issuer and issuer public key.
                    Utils::copy<sizeof(this->issuer)>(this->issuer, issuer);
                    Utils::copy<1 + ZT_ECC384_PUBLIC_KEY_SIZE>(this->issuerPublicKey,
                        issuerPrivateKey); // private is prefixed with public
                    this->issuerPublicKeySize = 1 + ZT_ECC384_PUBLIC_KEY_SIZE;
                }
                // The serial number is the SHA384 of the certificate encoded without
                // its signature; the ECDSA signature is computed over that hash.
                const Vector<uint8_t> enc(encode(true));
                SHA384(this->serialNo, enc.data(), (unsigned int)enc.size());
                ECC384ECDSASign(reinterpret_cast<const uint8_t *>(issuerPrivateKey) + 1 + ZT_ECC384_PUBLIC_KEY_SIZE, this->serialNo, this->signature);
                this->signatureSize = ZT_ECC384_SIGNATURE_SIZE;
                return true;
            }
            break;
    }
    return false;
}
ZT_CertificateError Certificate::verify(const int64_t clock, const bool checkSignatures) const
{
    // Verify the internal consistency of this certificate: structural sanity,
    // self-contained signatures (if checkSignatures), and the validity time
    // window (if clock >= 0). This does NOT verify any chain of trust back to
    // a CA; it only checks what the certificate itself contains.
    //
    // @param clock If non-negative, also check the validity time window
    // @param checkSignatures If true, perform (more expensive) signature checks
    // @return ZT_CERTIFICATE_ERROR_NONE or an error describing the failure
    try {
        // Validity window must be ordered [not-before, not-after].
        if (this->validity[0] > this->validity[1]) {
            return ZT_CERTIFICATE_ERROR_INVALID_FORMAT;
        }
        if (this->subject.identityCount > 0) {
            if (this->subject.identities) {
                for (unsigned int i = 0; i < this->subject.identityCount; ++i) {
                    // Every subject identity entry must have an identity; the locator is optional.
                    if (!this->subject.identities[i].identity) {
                        return ZT_CERTIFICATE_ERROR_INVALID_FORMAT;
                    }
                    if (checkSignatures) {
                        if (!reinterpret_cast<const Identity *>(this->subject.identities[i].identity)->locallyValidate()) {
                            return ZT_CERTIFICATE_ERROR_INVALID_IDENTITY;
                        }
                        // If a locator is present it must be signed by its identity.
                        if ((this->subject.identities[i].locator) && (!reinterpret_cast<const Locator *>(this->subject.identities[i].locator)->verify(*reinterpret_cast<const Identity *>(this->subject.identities[i].identity)))) {
                            return ZT_CERTIFICATE_ERROR_INVALID_COMPONENT_SIGNATURE;
                        }
                    }
                }
            }
            else {
                // Non-zero count with a NULL array is malformed.
                return ZT_CERTIFICATE_ERROR_INVALID_FORMAT;
            }
        }
        if (this->subject.networkCount > 0) {
            if (this->subject.networks) {
                for (unsigned int i = 0; i < this->subject.networkCount; ++i) {
                    if (!this->subject.networks[i].id) {
                        return ZT_CERTIFICATE_ERROR_MISSING_REQUIRED_FIELDS;
                    }
                }
            }
            else {
                return ZT_CERTIFICATE_ERROR_INVALID_FORMAT;
            }
        }
        if (this->subject.updateURLCount > 0) {
            if (this->subject.updateURLs) {
                for (unsigned int i = 0; i < this->subject.updateURLCount; ++i) {
                    if (!this->subject.updateURLs[i])
                        return ZT_CERTIFICATE_ERROR_MISSING_REQUIRED_FIELDS;
                }
            }
            else {
                return ZT_CERTIFICATE_ERROR_MISSING_REQUIRED_FIELDS;
            }
        }
        // All variable-length fields must fit within their fixed-size buffers.
        if ((this->subject.uniqueIdSize > sizeof(this->subject.uniqueId)) || (this->subject.uniqueIdSignatureSize > sizeof(this->subject.uniqueIdSignature)) || (this->issuerPublicKeySize > sizeof(this->issuerPublicKey)) || (this->publicKeySize > sizeof(this->publicKey)) || (this->subjectSignatureSize > sizeof(this->subjectSignature))) {
            return ZT_CERTIFICATE_ERROR_INVALID_FORMAT;
        }
        if ((this->extendedAttributesSize > 0) && (!this->extendedAttributes)) {
            return ZT_CERTIFICATE_ERROR_INVALID_FORMAT;
        }
        if (this->signatureSize > sizeof(this->signature)) {
            return ZT_CERTIFICATE_ERROR_INVALID_FORMAT;
        }
        if (checkSignatures) {
            Dictionary d;
            Vector<uint8_t> enc;
            // If the issuer is all 1's (0xffffff...) then this cert is self-signed.
            bool selfSigned = true;
            for (unsigned int i = 0; i < ZT_CERTIFICATE_HASH_SIZE; ++i) {
                if (this->issuer[i] != 0xff) {
                    selfSigned = false;
                    break;
                }
            }
            if (!selfSigned) {
                // Regular certs have an issuer signature and a self-signature
                // of their subject to ensure CSR integrity.
                if (this->issuerPublicKeySize > 0) {
                    switch (this->issuerPublicKey[0]) {
                        case ZT_CERTIFICATE_PUBLIC_KEY_ALGORITHM_ECDSA_NIST_P_384:
                            // Issuer signature is over serialNo (itself a SHA384 of the cert).
                            if ((this->issuerPublicKeySize == (ZT_ECC384_PUBLIC_KEY_SIZE + 1)) && (this->signatureSize == ZT_ECC384_SIGNATURE_SIZE)) {
                                if (!ECC384ECDSAVerify(this->issuerPublicKey + 1, this->serialNo, this->signature)) {
                                    return ZT_CERTIFICATE_ERROR_INVALID_PRIMARY_SIGNATURE;
                                }
                            }
                            else {
                                return ZT_CERTIFICATE_ERROR_INVALID_PRIMARY_SIGNATURE;
                            }
                            break;
                        default: return ZT_CERTIFICATE_ERROR_INVALID_PRIMARY_SIGNATURE;
                    }
                }
                else {
                    return ZT_CERTIFICATE_ERROR_INVALID_PRIMARY_SIGNATURE;
                }
                if (this->publicKeySize > 0) {
                    // The subject self-signature covers the encoded subject
                    // (without the unique ID proof omitted flag set to true).
                    d.clear();
                    m_encodeSubject(this->subject, d, false);
                    d.encode(enc);
                    switch (this->publicKey[0]) {
                        case ZT_CERTIFICATE_PUBLIC_KEY_ALGORITHM_ECDSA_NIST_P_384:
                            if ((this->publicKeySize == (ZT_ECC384_PUBLIC_KEY_SIZE + 1)) && (this->subjectSignatureSize == ZT_ECC384_SIGNATURE_SIZE)) {
                                uint8_t h[ZT_SHA384_DIGEST_SIZE];
                                SHA384(h, enc.data(), (unsigned int)enc.size());
                                if (!ECC384ECDSAVerify(this->publicKey + 1, h, this->subjectSignature)) {
                                    return ZT_CERTIFICATE_ERROR_INVALID_COMPONENT_SIGNATURE;
                                }
                            }
                            else {
                                return ZT_CERTIFICATE_ERROR_INVALID_COMPONENT_SIGNATURE;
                            }
                            break;
                        default: return ZT_CERTIFICATE_ERROR_INVALID_COMPONENT_SIGNATURE;
                    }
                }
                else {
                    return ZT_CERTIFICATE_ERROR_INVALID_COMPONENT_SIGNATURE;
                }
            }
            else {
                // Self-signed certs are just signed by their own public keys.
                // The issuer public key and subject self-signature are ignored
                // and can be empty.
                if (this->publicKeySize > 0) {
                    switch (this->publicKey[0]) {
                        case ZT_CERTIFICATE_PUBLIC_KEY_ALGORITHM_ECDSA_NIST_P_384:
                            if ((this->publicKeySize == (ZT_ECC384_PUBLIC_KEY_SIZE + 1)) && (this->signatureSize == ZT_ECC384_SIGNATURE_SIZE)) {
                                if (!ECC384ECDSAVerify(this->publicKey + 1, this->serialNo, this->signature)) {
                                    return ZT_CERTIFICATE_ERROR_INVALID_PRIMARY_SIGNATURE;
                                }
                            }
                            else {
                                return ZT_CERTIFICATE_ERROR_INVALID_PRIMARY_SIGNATURE;
                            }
                            break;
                        default: return ZT_CERTIFICATE_ERROR_INVALID_PRIMARY_SIGNATURE;
                    }
                }
                else {
                    return ZT_CERTIFICATE_ERROR_INVALID_PRIMARY_SIGNATURE;
                }
            }
            // Subject unique ID signatures are optional, so this only fails if it
            // is present and invalid. A unique ID with type ALGORITHM_NONE is also
            // allowed, but this means its signature is not checked.
            if (this->subject.uniqueIdSize > 0) {
                if (this->subject.uniqueIdSize <= (unsigned int)sizeof(this->subject.uniqueId)) {
                    switch (this->subject.uniqueId[0]) {
                        case ZT_CERTIFICATE_PUBLIC_KEY_ALGORITHM_NONE: break;
                        case ZT_CERTIFICATE_PUBLIC_KEY_ALGORITHM_ECDSA_NIST_P_384:
                            if ((this->subject.uniqueIdSize == (ZT_ECC384_PUBLIC_KEY_SIZE + 1)) && (this->subject.uniqueIdSignatureSize == ZT_ECC384_SIGNATURE_SIZE)) {
                                // Proof signature covers the subject encoded WITHOUT the
                                // proof signature itself (omitUniqueIdProofSignature=true).
                                d.clear();
                                m_encodeSubject(this->subject, d, true);
                                d.encode(enc);
                                static_assert(ZT_ECC384_SIGNATURE_HASH_SIZE == ZT_SHA384_DIGEST_SIZE, "ECC384 should take 384-bit hash");
                                uint8_t h[ZT_SHA384_DIGEST_SIZE];
                                SHA384(h, enc.data(), (unsigned int)enc.size());
                                if (!ECC384ECDSAVerify(this->subject.uniqueId + 1, h, this->subject.uniqueIdSignature)) {
                                    return ZT_CERTIFICATE_ERROR_INVALID_UNIQUE_ID_PROOF;
                                }
                            }
                            else {
                                return ZT_CERTIFICATE_ERROR_INVALID_UNIQUE_ID_PROOF;
                            }
                            break;
                        default: return ZT_CERTIFICATE_ERROR_INVALID_UNIQUE_ID_PROOF;
                    }
                }
                else {
                    return ZT_CERTIFICATE_ERROR_INVALID_UNIQUE_ID_PROOF;
                }
            }
        }
        if (clock >= 0) {
            if (!this->verifyTimeWindow(clock))
                return ZT_CERTIFICATE_ERROR_OUT_OF_VALID_TIME_WINDOW;
        }
    }
    catch (...) {
        // Treat any unexpected exception as a malformed certificate rather
        // than letting it propagate to the caller.
        return ZT_CERTIFICATE_ERROR_INVALID_FORMAT;
    }
    return ZT_CERTIFICATE_ERROR_NONE;
}
bool Certificate::newKeyPair(const ZT_CertificatePublicKeyAlgorithm type, uint8_t publicKey[ZT_CERTIFICATE_MAX_PUBLIC_KEY_SIZE], int *const publicKeySize, uint8_t privateKey[ZT_CERTIFICATE_MAX_PRIVATE_KEY_SIZE], int *const privateKeySize)
{
    // Create a new certificate key pair of the requested algorithm type.
    // Only NIST P-384 ECDSA is currently supported. Both output blobs start
    // with a one byte algorithm ID, and the private key blob additionally
    // carries a copy of the public key before the private key material.
    if (type != ZT_CERTIFICATE_PUBLIC_KEY_ALGORITHM_ECDSA_NIST_P_384)
        return false;
    publicKey[0] = (uint8_t)ZT_CERTIFICATE_PUBLIC_KEY_ALGORITHM_ECDSA_NIST_P_384;
    ZeroTier::ECC384GenerateKey(publicKey + 1, privateKey + ZT_ECC384_PUBLIC_KEY_SIZE + 1);
    // Prefix the private key with the full (type byte + public key) blob.
    ZeroTier::Utils::copy<ZT_ECC384_PUBLIC_KEY_SIZE + 1>(privateKey, publicKey);
    *publicKeySize = ZT_ECC384_PUBLIC_KEY_SIZE + 1;
    *privateKeySize = 1 + ZT_ECC384_PUBLIC_KEY_SIZE + ZT_ECC384_PRIVATE_KEY_SIZE;
    return true;
}
Vector<uint8_t> Certificate::createCSR(const ZT_Certificate_Subject &s, const void *const certificatePrivateKey, const unsigned int certificatePrivateKeySize, const void *uniqueIdPrivate, unsigned int uniqueIdPrivateSize)
{
    // Create a CSR: the encoded subject plus the certificate public key ("pK")
    // and a self-signature of the subject ("sS"). Returns an empty vector on
    // any error. Only ECDSA NIST P-384 certificate keys are accepted.
    Vector<uint8_t> enc;
    ZT_Certificate_Subject sc;
    // NOTE(review): this is a shallow struct copy; sc shares any pointer
    // fields (identities, networks, URLs) with the caller's subject.
    Utils::copy<sizeof(ZT_Certificate_Subject)>(&sc, &s);
    if ((!certificatePrivateKey) || (certificatePrivateKeySize != (1 + ZT_ECC384_PUBLIC_KEY_SIZE + ZT_ECC384_PRIVATE_KEY_SIZE)) || (reinterpret_cast<const uint8_t *>(certificatePrivateKey)[0] != ZT_CERTIFICATE_PUBLIC_KEY_ALGORITHM_ECDSA_NIST_P_384))
        return enc;
    // m_setSubjectUniqueId() also succeeds (clearing the fields) when
    // uniqueIdPrivateSize is zero, so a CSR without a unique ID is allowed.
    if (m_setSubjectUniqueId(sc, uniqueIdPrivate, uniqueIdPrivateSize)) {
        Dictionary d;
        m_encodeSubject(sc, d, false);
        d.encode(enc);
        uint8_t subjectHash[ZT_SHA384_DIGEST_SIZE], subjectSig[ZT_ECC384_SIGNATURE_SIZE];
        // Sign the SHA384 of the encoded subject with the certificate key
        // (the private key material follows the type byte and public key).
        SHA384(subjectHash, enc.data(), (unsigned int)enc.size());
        ECC384ECDSASign(reinterpret_cast<const uint8_t *>(certificatePrivateKey) + 1 + ZT_ECC384_PUBLIC_KEY_SIZE, subjectHash, subjectSig);
        d.add("pK", reinterpret_cast<const uint8_t *>(certificatePrivateKey), (1 + ZT_ECC384_PUBLIC_KEY_SIZE));
        d.add("sS", subjectSig, ZT_ECC384_SIGNATURE_SIZE);
        // Re-encode with pK and sS included; assumes Dictionary::encode()
        // replaces (not appends to) the output vector -- TODO confirm.
        d.encode(enc);
    }
    return enc;
}
void Certificate::m_clear()
{
ZT_Certificate *const sup = this;
Utils::zero<sizeof(ZT_Certificate)>(sup);
m_identities.clear();
m_locators.clear();
m_strings.clear();
m_subjectIdentities.clear();
m_subjectNetworks.clear();
m_updateUrls.clear();
m_extendedAttributes.clear();
}
bool Certificate::m_setSubjectUniqueId(ZT_Certificate_Subject &s, const void *uniqueIdPrivate, unsigned int uniqueIdPrivateSize)
{
    // Set the subject's unique ID and its proof signature from the given
    // unique ID private key, or clear both fields if no key is supplied.
    //
    // @param s Subject to modify in place
    // @param uniqueIdPrivate Unique ID private key blob (type byte + public + private), or NULL
    // @param uniqueIdPrivateSize Size of the key blob; 0 clears the unique ID
    // @return True on success (including the "clear" case), false on a malformed key
    if (uniqueIdPrivateSize > 0) {
        if ((uniqueIdPrivate != nullptr) && (uniqueIdPrivateSize == (1 + ZT_ECC384_PUBLIC_KEY_SIZE + ZT_ECC384_PRIVATE_KEY_SIZE)) && (reinterpret_cast<const uint8_t *>(uniqueIdPrivate)[0] == (uint8_t)ZT_CERTIFICATE_PUBLIC_KEY_ALGORITHM_ECDSA_NIST_P_384)) {
            Utils::copy<1 + ZT_ECC384_PUBLIC_KEY_SIZE>(s.uniqueId, uniqueIdPrivate);
            s.uniqueIdSize = 1 + ZT_ECC384_PUBLIC_KEY_SIZE; // private is prefixed with public
            // The proof signature is over the SHA384 of the subject encoded
            // WITHOUT the proof signature field itself (true flag below),
            // matching the check performed in verify().
            Vector<uint8_t> enc;
            Dictionary d;
            m_encodeSubject(s, d, true);
            d.encode(enc);
            uint8_t h[ZT_SHA384_DIGEST_SIZE];
            SHA384(h, enc.data(), (unsigned int)enc.size());
            ECC384ECDSASign(reinterpret_cast<const uint8_t *>(uniqueIdPrivate) + 1 + ZT_ECC384_PUBLIC_KEY_SIZE, h, s.uniqueIdSignature);
            s.uniqueIdSignatureSize = ZT_ECC384_SIGNATURE_SIZE;
        }
        else {
            // Non-empty but malformed key: wrong size or unsupported algorithm.
            return false;
        }
    }
    else {
        // No key given: clear any existing unique ID and signature.
        Utils::zero<sizeof(s.uniqueId)>(s.uniqueId);
        s.uniqueIdSize = 0;
        Utils::zero<sizeof(s.uniqueIdSignature)>(s.uniqueIdSignature);
        s.uniqueIdSignatureSize = 0;
    }
    return true;
}
void Certificate::m_encodeSubject(const ZT_Certificate_Subject &s, Dictionary &d, bool omitUniqueIdProofSignature)
{
    // Encode a subject into dictionary form. The short key strings ("s.i$",
    // "s.n.c", etc.) are part of the wire format and must not change.
    //
    // @param s Subject to encode
    // @param d Dictionary to fill
    // @param omitUniqueIdProofSignature If true, leave out "s.uS" (used when
    //        computing or checking the unique ID proof signature itself)
    char tmp[32];
    d.add("s.t", (uint64_t)s.timestamp);
    if (s.identities) {
        // "s.i$" holds the count; each entry gets "s.i$.i"/"s.i$.l" subscripts.
        d.add("s.i$", (uint64_t)s.identityCount);
        for (unsigned int i = 0; i < s.identityCount; ++i) {
            if (s.identities[i].identity)
                d.addO(Dictionary::arraySubscript(tmp, sizeof(tmp), "s.i$.i", i), *reinterpret_cast<const Identity *>(s.identities[i].identity));
            if (s.identities[i].locator)
                d.addO(Dictionary::arraySubscript(tmp, sizeof(tmp), "s.i$.l", i), *reinterpret_cast<const Locator *>(s.identities[i].locator));
        }
    }
    if (s.networks) {
        d.add("s.nw$", (uint64_t)s.networkCount);
        for (unsigned int i = 0; i < s.networkCount; ++i) {
            d.add(Dictionary::arraySubscript(tmp, sizeof(tmp), "s.nw$.i", i), s.networks[i].id);
            // Controller is stored as a fingerprint object.
            Fingerprint fp(s.networks[i].controller);
            d.addO(Dictionary::arraySubscript(tmp, sizeof(tmp), "s.nw$.c", i), fp);
        }
    }
    if (s.updateURLs) {
        d.add("s.u$", (uint64_t)s.updateURLCount);
        for (unsigned int i = 0; i < s.updateURLCount; ++i)
            d.add(Dictionary::arraySubscript(tmp, sizeof(tmp), "s.u$", i), s.updateURLs[i]);
    }
    // Name fields are only stored when non-empty.
    if (s.name.country[0])
        d.add("s.n.c", s.name.country);
    if (s.name.organization[0])
        d.add("s.n.o", s.name.organization);
    if (s.name.unit[0])
        d.add("s.n.u", s.name.unit);
    if (s.name.locality[0])
        d.add("s.n.l", s.name.locality);
    if (s.name.province[0])
        d.add("s.n.p", s.name.province);
    if (s.name.streetAddress[0])
        d.add("s.n.sA", s.name.streetAddress);
    if (s.name.postalCode[0])
        d.add("s.n.pC", s.name.postalCode);
    if (s.name.commonName[0])
        d.add("s.n.cN", s.name.commonName);
    if (s.name.serialNo[0])
        d.add("s.n.sN", s.name.serialNo);
    if (s.name.email[0])
        d.add("s.n.e", s.name.email);
    if (s.name.url[0])
        d.add("s.n.ur", s.name.url);
    if (s.name.host[0])
        d.add("s.n.h", s.name.host);
    if (s.uniqueIdSize > 0)
        d["s.uI"].assign(s.uniqueId, s.uniqueId + s.uniqueIdSize);
    if ((!omitUniqueIdProofSignature) && (s.uniqueIdSignatureSize > 0))
        d["s.uS"].assign(s.uniqueIdSignature, s.uniqueIdSignature + s.uniqueIdSignatureSize);
}
} // namespace ZeroTier

View file

@ -1,234 +0,0 @@
/*
* Copyright (c)2013-2021 ZeroTier, Inc.
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
*/
/****/
#ifndef ZT_CERTIFICATE_HPP
#define ZT_CERTIFICATE_HPP
#include "C25519.hpp"
#include "Constants.hpp"
#include "Containers.hpp"
#include "Dictionary.hpp"
#include "ECC384.hpp"
#include "Identity.hpp"
#include "Locator.hpp"
#include "SHA512.hpp"
#include "Utils.hpp"
namespace ZeroTier {
/**
* Certificate describing and grouping a set of objects.
*
* This is a wrapper around the straight C ZT_IdentificationCertificate and
* handles allocating memory for objects added via addXXX() and disposing of
* them on delete. If pointers in the underlying C struct are set manually,
* their memory is not freed on delete. Use the addXXX() methods to fill
* out this structure in C++ code.
*
* The serialNo field is filled in automatically by sign() and decode(), so
* it can be left undefined when building certificates. It contains a SHA384
* hash of the certificate marshalled without the signature field.
*
* The hashCode() method and comparison operators compare the serial number
* field, so these will not work correctly before sign() or decode() is
* called.
*/
class Certificate : public ZT_Certificate {
  public:
    Certificate() noexcept;
    explicit Certificate(const ZT_Certificate &apiCert);
    Certificate(const Certificate &cert);
    ~Certificate();

    // Deep-copies an API certificate struct into this wrapper.
    Certificate &operator=(const ZT_Certificate &cert);

    // Copy assignment delegates to the ZT_Certificate overload (self-assignment safe).
    ZT_INLINE Certificate &operator=(const Certificate &cert) noexcept
    {
        if (likely(&cert != this)) {
            const ZT_Certificate *const sup = &cert;
            *this = *sup;
        }
        return *this;
    }

    /**
     * @return Serial number in a H384 object
     */
    ZT_INLINE H384 getSerialNo() const noexcept { return H384(this->serialNo); }

    /**
     * @return True if this is a self-signed certificate
     */
    ZT_INLINE bool isSelfSigned() const noexcept
    {
        // Self-signed certs are marked by an all-0xff issuer hash (set by sign()).
        for (unsigned int i = 0; i < ZT_CERTIFICATE_HASH_SIZE; ++i) {
            if (this->issuer[i] != 0xff)
                return false;
        }
        return true;
    }

    /**
     * Add a subject node/identity without a locator
     *
     * @param id Identity
     * @return Pointer to C struct
     */
    ZT_Certificate_Identity *addSubjectIdentity(const Identity &id);

    /**
     * Add a subject node/identity with a locator
     *
     * @param id Identity
     * @param loc Locator signed by identity (signature is NOT checked here)
     * @return Pointer to C struct
     */
    ZT_Certificate_Identity *addSubjectIdentity(const Identity &id, const Locator &loc);

    /**
     * Add a subject network
     *
     * @param id Network ID
     * @param controller Network controller's full fingerprint
     * @return Pointer to C struct
     */
    ZT_Certificate_Network *addSubjectNetwork(uint64_t id, const ZT_Fingerprint &controller);

    /**
     * Add an update URL to the updateUrls list
     *
     * @param url Update URL
     */
    void addSubjectUpdateUrl(const char *url);

    /**
     * Sign subject with unique ID private key and set.
     *
     * This is done when you createCSR but can also be done explicitly here. This
     * is mostly for testing purposes.
     *
     * @param uniqueIdPrivate Unique ID private key (includes public)
     * @param uniqueIdPrivateSize Size of private key
     * @return True on success
     */
    ZT_INLINE bool setSubjectUniqueId(const void *uniqueIdPrivate, unsigned int uniqueIdPrivateSize) { return m_setSubjectUniqueId(this->subject, uniqueIdPrivate, uniqueIdPrivateSize); }

    /**
     * Marshal this certificate in binary form
     *
     * The internal encoding used here is Dictionary to permit easy
     * extensibility.
     *
     * @param omitSignature If true omit the signature field (for signing and verification, default is false)
     * @return Marshaled certificate
     */
    Vector<uint8_t> encode(bool omitSignature = false) const;

    /**
     * Decode this certificate from marshaled bytes.
     *
     * @param data Marshalled certificate
     * @param len Length of marshalled certificate
     * @return True if input is valid and was unmarshalled (signature is NOT checked)
     */
    bool decode(const void *data, unsigned int len);

    /**
     * Sign this certificate.
     *
     * This sets serialNo, issuer, issuerPublicKey, and signature.
     *
     * @return True on success
     */
    bool sign(const uint8_t issuer[ZT_CERTIFICATE_HASH_SIZE], const void *issuerPrivateKey, unsigned int issuerPrivateKeySize);

    /**
     * Verify self-contained signatures and validity of certificate structure
     *
     * This cannot check the chain of trust back to a CA, only the internal validity
     * of this certificate.
     *
     * @param clock If non-negative, also do verifyTimeWindow()
     * @param checkSignatures If true, perform full signature check (which is more expensive than other checks)
     * @return OK (0) or error code indicating why certificate failed verification.
     */
    ZT_CertificateError verify(int64_t clock, bool checkSignatures) const;

    /**
     * Check this certificate's expiration status
     *
     * @param clock Current real world time in milliseconds since epoch
     * @return True if certificate is not expired or outside window
     */
    ZT_INLINE bool verifyTimeWindow(int64_t clock) const noexcept { return ((clock >= this->validity[0]) && (clock <= this->validity[1]) && (this->validity[0] <= this->validity[1])); }

    /**
     * Create a new certificate public/private key pair
     *
     * @param type Key pair type to create
     * @param publicKey Buffer to fill with public key
     * @param publicKeySize Result parameter: set to size of public key
     * @param privateKey Buffer to fill with private key
     * @param privateKeySize Result parameter: set to size of private key
     * @return True on success
     */
    static bool newKeyPair(const ZT_CertificatePublicKeyAlgorithm type, uint8_t publicKey[ZT_CERTIFICATE_MAX_PUBLIC_KEY_SIZE], int *const publicKeySize, uint8_t privateKey[ZT_CERTIFICATE_MAX_PRIVATE_KEY_SIZE], int *const privateKeySize);

    /**
     * Create a CSR that encodes the subject of this certificate
     *
     * @param s Subject to encode
     * @param certificatePrivateKey Private key for certificate (includes public)
     * @param certificatePrivateKeySize Size of private
     * @param uniqueIdPrivate Unique ID private key for proof signature or NULL if none
     * @param uniqueIdPrivateSize Size of unique ID private key
     * @return Encoded subject (without any unique ID fields) or empty vector on error
     */
    static Vector<uint8_t> createCSR(const ZT_Certificate_Subject &s, const void *certificatePrivateKey, unsigned int certificatePrivateKeySize, const void *uniqueIdPrivate, unsigned int uniqueIdPrivateSize);

    // Hashing and comparison operate on the serial number, which is only set
    // after sign() or decode() (see class comment above).
    ZT_INLINE unsigned long hashCode() const noexcept { return (unsigned long)Utils::loadMachineEndian<uint32_t>(this->serialNo); }

    ZT_INLINE bool operator==(const ZT_Certificate &c) const noexcept { return memcmp(this->serialNo, c.serialNo, ZT_SHA384_DIGEST_SIZE) == 0; }
    ZT_INLINE bool operator!=(const ZT_Certificate &c) const noexcept { return memcmp(this->serialNo, c.serialNo, ZT_SHA384_DIGEST_SIZE) != 0; }
    ZT_INLINE bool operator<(const ZT_Certificate &c) const noexcept { return memcmp(this->serialNo, c.serialNo, ZT_SHA384_DIGEST_SIZE) < 0; }
    ZT_INLINE bool operator<=(const ZT_Certificate &c) const noexcept { return memcmp(this->serialNo, c.serialNo, ZT_SHA384_DIGEST_SIZE) <= 0; }
    ZT_INLINE bool operator>(const ZT_Certificate &c) const noexcept { return memcmp(this->serialNo, c.serialNo, ZT_SHA384_DIGEST_SIZE) > 0; }
    ZT_INLINE bool operator>=(const ZT_Certificate &c) const noexcept { return memcmp(this->serialNo, c.serialNo, ZT_SHA384_DIGEST_SIZE) >= 0; }

  private:
    void m_clear();
    static bool m_setSubjectUniqueId(ZT_Certificate_Subject &s, const void *uniqueIdPrivate, unsigned int uniqueIdPrivateSize);
    static void m_encodeSubject(const ZT_Certificate_Subject &s, Dictionary &d, bool omitUniqueIdProofSignature);

    // These hold any identity or locator objects that are owned by and should
    // be deleted with this certificate. Lists are used so the pointers never
    // change.
    ForwardList<Identity> m_identities;
    ForwardList<Locator> m_locators;
    ForwardList<String> m_strings;

    // These are stored in a vector because the memory needs to be contiguous.
    Vector<ZT_Certificate_Identity> m_subjectIdentities;
    Vector<ZT_Certificate_Network> m_subjectNetworks;
    Vector<const char *> m_updateUrls;
    Vector<uint8_t> m_extendedAttributes;
};
} // namespace ZeroTier
#endif

View file

@ -1,216 +0,0 @@
/*
* Copyright (c)2013-2021 ZeroTier, Inc.
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
*/
/****/
#ifndef ZT_CONSTANTS_HPP
#define ZT_CONSTANTS_HPP

/**
 * Indicates to some parts of zerotier.h that we are building the core
 */
#define ZT_CORE 1

#include "OS.hpp"
#include "version.h"
#include "zerotier.h"

/**
 * Length of a ZeroTier address in bytes
 */
#define ZT_ADDRESS_LENGTH 5

/**
 * Length of a ZeroTier address in digits
 */
#define ZT_ADDRESS_LENGTH_HEX 10

/**
 * Addresses beginning with this byte are reserved for the joy of in-band signaling
 */
#define ZT_ADDRESS_RESERVED_PREFIX 0xff

/**
 * Bit mask for addresses against a uint64_t
 */
#define ZT_ADDRESS_MASK 0xffffffffffULL

/**
 * Size of an identity fingerprint hash (SHA384) in bytes
 */
#define ZT_FINGERPRINT_HASH_SIZE 48

/**
 * Default virtual network MTU (not physical)
 */
#define ZT_DEFAULT_MTU 2800

/**
 * Maximum number of packet fragments we'll support (11 is the maximum that will fit in a Buf)
 */
#define ZT_MAX_PACKET_FRAGMENTS 11

/**
 * Anti-DOS limit on the maximum incoming fragments per path
 */
#define ZT_MAX_INCOMING_FRAGMENTS_PER_PATH 16

/**
 * Sanity limit on the maximum size of a network config object
 */
#define ZT_MAX_NETWORK_CONFIG_BYTES 131072

/**
 * Length of symmetric keys
 */
#define ZT_SYMMETRIC_KEY_SIZE 48

/**
 * Time limit for ephemeral keys: 30 minutes.
 */
#define ZT_SYMMETRIC_KEY_TTL 1800000

/**
 * Maximum number of messages per symmetric key.
 *
 * (2^31; the literal exceeds a 32-bit int and promotes to a wider type.)
 */
#define ZT_SYMMETRIC_KEY_TTL_MESSAGES 2147483648

/**
 * Normal delay between processBackgroundTasks calls.
 */
#define ZT_TIMER_TASK_INTERVAL 2000

/**
 * How often most internal cleanup and housekeeping tasks are performed
 */
#define ZT_HOUSEKEEPING_PERIOD 300000

/**
 * How often network housekeeping is performed
 *
 * Note that this affects how frequently we re-request network configurations
 * from network controllers if we haven't received one yet.
 */
#define ZT_NETWORK_HOUSEKEEPING_PERIOD 30000

/**
 * Period between calls to update() in TrustStore
 */
#define ZT_TRUSTSTORE_UPDATE_PERIOD 300000

/**
 * Delay between WHOIS retries in ms
 */
#define ZT_WHOIS_RETRY_DELAY 500

/**
 * Maximum number of ZT hops allowed (this is not IP hops/TTL)
 *
 * The protocol allows up to 7, but we limit it to something smaller.
 */
#define ZT_RELAY_MAX_HOPS 4

/**
 * Period between keepalives sent to paths if no other traffic has been sent.
 *
 * The average NAT timeout is 60-120s, but there exist NATs in the wild with timeouts
 * as short as 30s. Come in just under 30s and we should be fine.
 */
#define ZT_PATH_KEEPALIVE_PERIOD 28000

/**
 * Timeout for path alive-ness (measured from last receive)
 */
#define ZT_PATH_ALIVE_TIMEOUT ((ZT_PATH_KEEPALIVE_PERIOD * 2) + 5000)

/**
 * Maximum number of queued endpoints to try per "pulse."
 */
#define ZT_NAT_T_PORT_SCAN_MAX 16

/**
 * Minimum interval between attempts to reach a given physical endpoint
 */
#define ZT_PATH_MIN_TRY_INTERVAL ZT_PATH_KEEPALIVE_PERIOD

/**
 * Delay between calls to the pulse() method in Peer for each peer
 */
#define ZT_PEER_PULSE_INTERVAL 10000

/**
 * Interval between HELLOs to peers.
 */
#define ZT_PEER_HELLO_INTERVAL 120000

/**
 * Timeout for peers being alive
 */
#define ZT_PEER_ALIVE_TIMEOUT ((ZT_PEER_HELLO_INTERVAL * 2) + 5000)

/**
 * Global timeout for peers in milliseconds
 *
 * This is global as in "entire world," and this value is 30 days. In this
 * code the global timeout is used to determine when to ignore cached
 * peers and their identity<>address mappings.
 */
#define ZT_PEER_GLOBAL_TIMEOUT 2592000000LL

/**
 * Delay between requests for updated network autoconf information
 *
 * Don't lengthen this as it affects things like QoS / uptime monitoring
 * via ZeroTier Central. This is the heartbeat, basically.
 */
#define ZT_NETWORK_AUTOCONF_DELAY 60000

/**
 * Sanity limit on maximum bridge routes
 *
 * If the number of bridge routes exceeds this, we cull routes from the
 * bridges with the most MACs behind them until it doesn't. This is a
 * sanity limit to prevent memory-filling DOS attacks, nothing more. No
 * physical LAN has anywhere even close to this many nodes. Note that this
 * does not limit the size of ZT virtual LANs, only bridge routing.
 */
#define ZT_MAX_BRIDGE_ROUTES 16777216

/**
 * WHOIS rate limit (we allow these to be pretty fast)
 */
#define ZT_PEER_WHOIS_RATE_LIMIT 100

/**
 * General rate limit for other kinds of rate-limited packets (HELLO, credential request, etc.) both inbound and
 * outbound
 */
#define ZT_PEER_GENERAL_RATE_LIMIT 500

/**
 * Rate limit for responses to short probes to prevent amplification attacks
 */
#define ZT_PEER_PROBE_RESPONSE_RATE_LIMIT 5000

/**
 * Size of a buffer to store either a C25519 or an ECC P-384 signature
 *
 * This must be large enough to hold all signature types, which right now is
 * Curve25519 EDDSA and NIST P-384 ECDSA.
 */
#define ZT_SIGNATURE_BUFFER_SIZE 96

/* Ethernet frame types that might be relevant to us */
#define ZT_ETHERTYPE_IPV4 0x0800
#define ZT_ETHERTYPE_ARP 0x0806
#define ZT_ETHERTYPE_IPV6 0x86dd

#endif

View file

@ -1,185 +0,0 @@
/*
* Copyright (c)2013-2021 ZeroTier, Inc.
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
*/
/****/
#ifndef ZT_CONTAINERS_HPP
#define ZT_CONTAINERS_HPP
/* This defines a Map, SortedMap, Vector, etc. based on STL templates. */
#include "Constants.hpp"
#include "Utils.hpp"
#include <algorithm>
#include <list>
#include <map>
#include <set>
#include <string>
#include <vector>
#ifdef __CPP11__
#include <atomic>
#include <forward_list>
#include <unordered_map>
#endif
namespace ZeroTier {
// Thin wrappers over STL containers so the rest of the core never names
// std:: containers directly. On C++11+ the hash-based containers use
// intl_MapHasher, which keys off each type's hashCode() where available.
template <typename V> class Vector : public std::vector<V> {
  public:
    ZT_INLINE Vector() : std::vector<V>() {}
    template <typename I> ZT_INLINE Vector(I begin, I end) : std::vector<V>(begin, end) {}
};

template <typename V> class List : public std::list<V> {
};

#ifdef __CPP11__
// Hash functor for Map/MultiMap: objects supply hashCode(); byte vectors use
// FNV-1a; integers are mixed with a per-process nonce (Utils::s_mapNonce)
// to resist hash-flooding with attacker-chosen keys.
struct intl_MapHasher {
    template <typename O> std::size_t operator()(const O &obj) const noexcept { return (std::size_t)obj.hashCode(); }
    std::size_t operator()(const Vector<uint8_t> &bytes) const noexcept { return (std::size_t)Utils::fnv1a32(bytes.data(), (unsigned int)bytes.size()); }
    std::size_t operator()(const uint64_t i) const noexcept { return (std::size_t)Utils::hash64(i ^ Utils::s_mapNonce); }
    std::size_t operator()(const int64_t i) const noexcept { return (std::size_t)Utils::hash64((uint64_t)i ^ Utils::s_mapNonce); }
    std::size_t operator()(const uint32_t i) const noexcept { return (std::size_t)Utils::hash32(i ^ (uint32_t)Utils::s_mapNonce); }
    std::size_t operator()(const int32_t i) const noexcept { return (std::size_t)Utils::hash32((uint32_t)i ^ (uint32_t)Utils::s_mapNonce); }
};

template <typename K, typename V> class Map : public std::unordered_map<K, V, intl_MapHasher> {
};

template <typename K, typename V> class MultiMap : public std::unordered_multimap<K, V, intl_MapHasher, std::equal_to<K>> {
};
#else
// Pre-C++11 fallback: ordered (tree) maps.
template <typename K, typename V> class Map : public std::map<K, V> {
};

template <typename K, typename V> class MultiMap : public std::multimap<K, V> {
};
#endif

template <typename K, typename V> class SortedMap : public std::map<K, V> {
};

#ifdef __CPP11__
template <typename V> class ForwardList : public std::forward_list<V> {
};
#else
// Pre-C++11 fallback: doubly-linked list stands in for forward_list.
template <typename V> class ForwardList : public std::list<V> {
};
#endif

template <typename V> class Set : public std::set<V, std::less<V>> {
};

typedef std::string String;
/**
 * A 384-bit hash
 *
 * Stored as six native-endian 64-bit words; hashCode() and bool conversion
 * operate on those words directly.
 */
struct H384 {
    uint64_t data[6];

    ZT_INLINE H384() noexcept { Utils::zero<sizeof(data)>(data); }
    ZT_INLINE H384(const H384 &b) noexcept { Utils::copy<48>(data, b.data); }
    explicit ZT_INLINE H384(const void *const d) noexcept { Utils::copy<48>(data, d); }

    ZT_INLINE H384 &operator=(const H384 &b) noexcept
    {
        Utils::copy<48>(data, b.data);
        return *this;
    }

    ZT_INLINE unsigned long hashCode() const noexcept { return (unsigned long)data[0]; }

    // True if the hash is non-zero. This must OR the words together: the
    // previous &&-joined form returned false for any hash that merely
    // contained one all-zero 64-bit word, misclassifying valid hashes.
    ZT_INLINE operator bool() const noexcept { return ((data[0] != 0) || (data[1] != 0) || (data[2] != 0) || (data[3] != 0) || (data[4] != 0) || (data[5] != 0)); }

    ZT_INLINE bool operator==(const H384 &b) const noexcept { return ((data[0] == b.data[0]) && (data[1] == b.data[1]) && (data[2] == b.data[2]) && (data[3] == b.data[3]) && (data[4] == b.data[4]) && (data[5] == b.data[5])); }
    ZT_INLINE bool operator!=(const H384 &b) const noexcept { return !(*this == b); }
    ZT_INLINE bool operator<(const H384 &b) const noexcept { return std::lexicographical_compare(data, data + 6, b.data, b.data + 6); }
    ZT_INLINE bool operator<=(const H384 &b) const noexcept { return !(b < *this); }
    ZT_INLINE bool operator>(const H384 &b) const noexcept { return (b < *this); }
    ZT_INLINE bool operator>=(const H384 &b) const noexcept { return !(*this < b); }
};

static_assert(sizeof(H384) == 48, "H384 contains unnecessary padding");
/**
* A fixed size byte array
*
* @tparam S Size in bytes
*/
/**
 * A fixed size byte array
 *
 * Trivially copyable; default construction zeroes the contents.
 *
 * @tparam S Size in bytes
 */
template <unsigned long S> struct Blob {
    uint8_t data[S];

    ZT_INLINE Blob() noexcept { Utils::zero<S>(data); }

    ZT_INLINE Blob(const Blob &b) noexcept { Utils::copy<S>(data, b.data); }

    // Construct from exactly S bytes (caller must supply >= S bytes).
    explicit ZT_INLINE Blob(const void *const d) noexcept { Utils::copy<S>(data, d); }

    // Construct from up to l bytes; any remainder of the array is zero-filled,
    // and input beyond S bytes is ignored.
    explicit ZT_INLINE Blob(const void *const d, const unsigned int l) noexcept
    {
        Utils::copy(data, d, (l > (unsigned int)S) ? (unsigned int)S : l);
        if (l < S) {
            Utils::zero(data + l, S - l);
        }
    }

    ZT_INLINE Blob &operator=(const Blob &b) noexcept
    {
        Utils::copy<S>(data, b.data);
        return *this;
    }

    ZT_INLINE unsigned long hashCode() const noexcept { return Utils::fnv1a32(data, (unsigned int)S); }

    // True if the blob contains any nonzero byte. FIX: previously returned
    // Utils::allZero(...) directly, i.e. the inverted sense (an all-zero blob
    // tested true), contradicting the convention established by H384.
    ZT_INLINE operator bool() const noexcept { return !Utils::allZero(data, (unsigned int)S); }

    ZT_INLINE bool operator==(const Blob &b) const noexcept { return (memcmp(data, b.data, S) == 0); }
    ZT_INLINE bool operator!=(const Blob &b) const noexcept { return (memcmp(data, b.data, S) != 0); }
    ZT_INLINE bool operator<(const Blob &b) const noexcept { return (memcmp(data, b.data, S) < 0); }
    ZT_INLINE bool operator<=(const Blob &b) const noexcept { return (memcmp(data, b.data, S) <= 0); }
    ZT_INLINE bool operator>(const Blob &b) const noexcept { return (memcmp(data, b.data, S) > 0); }
    ZT_INLINE bool operator>=(const Blob &b) const noexcept { return (memcmp(data, b.data, S) >= 0); }
};
} // namespace ZeroTier
#endif

View file

@ -1,90 +0,0 @@
/*
* Copyright (c)2013-2021 ZeroTier, Inc.
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
*/
/****/
#ifndef ZT_RUNTIMEENVIRONMENT_HPP
#define ZT_RUNTIMEENVIRONMENT_HPP
#include "AES.hpp"
#include "Constants.hpp"
#include "Identity.hpp"
#include "SharedPtr.hpp"
#include "TinyMap.hpp"
#include "Utils.hpp"
namespace ZeroTier {
class VL1;
class VL2;
class Topology;
class Node;
class NetworkController;
class SelfAwareness;
class Trace;
class Expect;
class TrustStore;
class Store;
class Network;
/**
* Node instance context
*/
class Context {
public:
// Construct with a random instance ID and all subsystem pointers null; the
// identity string buffers start empty. Subsystems are wired up externally.
ZT_INLINE Context(Node *const n) noexcept : instanceId(Utils::getSecureRandomU64()), node(n), uPtr(nullptr), localNetworkController(nullptr), store(nullptr), networks(nullptr), t(nullptr), expect(nullptr), vl2(nullptr), vl1(nullptr), topology(nullptr), sa(nullptr), ts(nullptr)
{
publicIdentityStr[0] = 0;
secretIdentityStr[0] = 0;
}
// Scrub the secret identity string from memory on destruction.
ZT_INLINE ~Context() noexcept { Utils::burn(secretIdentityStr, sizeof(secretIdentityStr)); }
// Unique ID generated on startup
const uint64_t instanceId;
// Node instance that owns this RuntimeEnvironment
Node *const restrict node;
// Callbacks specified by caller who created node
ZT_Node_Callbacks cb;
// User pointer specified by external code via API
void *restrict uPtr;
// This is set externally to an instance of this base class
NetworkController *restrict localNetworkController;
// Persistent object store (set externally; may be null until wired up)
Store *restrict store;
// Joined networks keyed by network ID
TinyMap<SharedPtr<Network>> *restrict networks;
// Tracing/debug output sink
Trace *restrict t;
// Tracker of expected OK replies to sent packets
Expect *restrict expect;
// Layer-2 (virtual Ethernet) packet handling
VL2 *restrict vl2;
// Layer-1 (ZeroTier transport) packet handling
VL1 *restrict vl1;
// Peer/path database
Topology *restrict topology;
// External address awareness (how others see us)
SelfAwareness *restrict sa;
// Certificate trust store
TrustStore *restrict ts;
// This node's identity and string representations thereof
Identity identity;
char publicIdentityStr[ZT_IDENTITY_STRING_BUFFER_LENGTH];
char secretIdentityStr[ZT_IDENTITY_STRING_BUFFER_LENGTH];
// Symmetric key for encrypting secrets at rest on this system.
AES localSecretCipher;
// Privileged ports from 1 to 1023 in a random order (for IPv4 NAT traversal)
uint16_t randomPrivilegedPortOrder[1023];
};
} // namespace ZeroTier
#endif

View file

@ -1,96 +0,0 @@
/*
* Copyright (c)2013-2021 ZeroTier, Inc.
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
*/
/****/
#include "Credential.hpp"
#include "CapabilityCredential.hpp"
#include "Constants.hpp"
#include "Context.hpp"
#include "MembershipCredential.hpp"
#include "Network.hpp"
#include "OwnershipCredential.hpp"
#include "RevocationCredential.hpp"
#include "TagCredential.hpp"
#include "Topology.hpp"
// These are compile-time asserts to make sure temporary marshal buffers here and
// also in NtworkConfig.cpp are always large enough to marshal all credential types.
#if ZT_TAG_MARSHAL_SIZE_MAX > ZT_BUF_MEM_SIZE
#error ZT_TAG_MARSHAL_SIZE_MAX exceeds maximum buffer size
#endif
#if ZT_CAPABILITY_MARSHAL_SIZE_MAX > ZT_BUF_MEM_SIZE
#error ZT_CAPABILITY_MARSHAL_SIZE_MAX exceeds maximum buffer size
#endif
#if ZT_REVOCATION_MARSHAL_SIZE_MAX > ZT_BUF_MEM_SIZE
#error ZT_REVOCATION_MARSHAL_SIZE_MAX exceeds maximum buffer size
#endif
#if ZT_CERTIFICATEOFOWNERSHIP_MARSHAL_SIZE_MAX > ZT_BUF_MEM_SIZE
#error ZT_CERTIFICATEOFOWNERSHIP_MARSHAL_SIZE_MAX exceeds maximum buffer size
#endif
#if ZT_MEMBERSHIP_CREDENTIAL_MARSHAL_SIZE_MAX > ZT_BUF_MEM_SIZE
#error ZT_MEMBERSHIP_CREDENTIAL_MARSHAL_SIZE_MAX exceeds maximum buffer size
#endif
namespace ZeroTier {
// Shared verification logic for the simple credential types: the credential
// must be signed by its network's controller, and the signature must check
// out against a freshly re-marshaled (signing-form) copy of the credential.
template <typename CRED> static ZT_INLINE Credential::VerifyResult p_credVerify(const Context &ctx, const CallContext &cc, CRED credential)
{
    const Address controller(credential.signer());
    if ((!controller) || (controller != Network::controllerFor(credential.networkId())))
        return Credential::VERIFY_BAD_SIGNATURE;

    // We cannot check the signature without the signer's identity; ask for it.
    const SharedPtr<Peer> signerPeer(ctx.topology->peer(cc, controller));
    if (!signerPeer)
        return Credential::VERIFY_NEED_IDENTITY;

    try {
        uint8_t signingBuf[ZT_BUF_MEM_SIZE + 16];
        const int len = credential.marshal(signingBuf, true);
        if (len > 0) {
            if (signerPeer->identity().verify(signingBuf, (unsigned int)len, credential.signature(), credential.signatureLength()))
                return Credential::VERIFY_OK;
        }
    }
    catch (...) {
    }
    return Credential::VERIFY_BAD_SIGNATURE;
}
// Type-dispatch wrappers: these four credential types all share the generic
// controller-signature check implemented by p_credVerify() above.
Credential::VerifyResult Credential::s_verify(const Context &ctx, const CallContext &cc, const RevocationCredential &credential) { return p_credVerify(ctx, cc, credential); }
Credential::VerifyResult Credential::s_verify(const Context &ctx, const CallContext &cc, const TagCredential &credential) { return p_credVerify(ctx, cc, credential); }
Credential::VerifyResult Credential::s_verify(const Context &ctx, const CallContext &cc, const CapabilityCredential &credential) { return p_credVerify(ctx, cc, credential); }
Credential::VerifyResult Credential::s_verify(const Context &ctx, const CallContext &cc, const OwnershipCredential &credential) { return p_credVerify(ctx, cc, credential); }
// Membership credentials are verified against a dedicated signing buffer
// (m_fillSigningBuf) rather than a full marshal, so they get their own path.
Credential::VerifyResult Credential::s_verify(const Context &ctx, const CallContext &cc, const MembershipCredential &credential)
{
    // The credential must claim to be signed by the controller of its network.
    if ((!credential.m_signedBy) || (credential.m_signedBy != Network::controllerFor(credential.m_networkId)))
        return Credential::VERIFY_BAD_SIGNATURE;

    // Without the signer's identity we cannot check the signature; request it.
    const SharedPtr<Peer> controller(ctx.topology->peer(cc, credential.m_signedBy));
    if (!controller)
        return Credential::VERIFY_NEED_IDENTITY;

    // Rebuild the exact byte sequence that was signed and verify against it.
    uint64_t signingBuf[ZT_MEMBERSHIP_CREDENTIAL_MARSHAL_SIZE_MAX / 8];
    const unsigned int signingLen = credential.m_fillSigningBuf(signingBuf);
    if (controller->identity().verify(signingBuf, signingLen, credential.m_signature, credential.m_signatureLength))
        return Credential::VERIFY_OK;
    return Credential::VERIFY_BAD_SIGNATURE;
}
} // namespace ZeroTier

View file

@ -1,55 +0,0 @@
/*
* Copyright (c)2013-2021 ZeroTier, Inc.
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
*/
/****/
#ifndef ZT_CREDENTIAL_HPP
#define ZT_CREDENTIAL_HPP
#include "CallContext.hpp"
#include "Constants.hpp"
#include "TriviallyCopyable.hpp"
namespace ZeroTier {
class CapabilityCredential;
class RevocationCredential;
class TagCredential;
class MembershipCredential;
class OwnershipCredential;
class Context;
/**
* Base class for credentials
*
* Note that all credentials are and must be trivially copyable.
*
* All credential verification methods are implemented in Credential.cpp as they share a lot
* of common code and logic and grouping them makes auditing easier.
*/
class Credential : public TriviallyCopyable {
public:
/**
 * Result of verify() operations
 */
// VERIFY_OK: signature checks out. VERIFY_BAD_SIGNATURE: malformed or not
// signed by the network controller. VERIFY_NEED_IDENTITY: the signer's
// identity is not yet known locally, so verification must be retried later.
enum VerifyResult { VERIFY_OK = 0, VERIFY_BAD_SIGNATURE = 1, VERIFY_NEED_IDENTITY = 2 };
protected:
// One overload per credential type; all are implemented together in
// Credential.cpp so the shared verification logic can be audited in one place.
static VerifyResult s_verify(const Context &ctx, const CallContext &cc, const MembershipCredential &credential);
static VerifyResult s_verify(const Context &ctx, const CallContext &cc, const RevocationCredential &credential);
static VerifyResult s_verify(const Context &ctx, const CallContext &cc, const TagCredential &credential);
static VerifyResult s_verify(const Context &ctx, const CallContext &cc, const OwnershipCredential &credential);
static VerifyResult s_verify(const Context &ctx, const CallContext &cc, const CapabilityCredential &credential);
};
} // namespace ZeroTier
#endif

View file

@ -1,24 +0,0 @@
/*
* Copyright (c)2013-2021 ZeroTier, Inc.
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
*/
/****/
#include "Defaults.hpp"
namespace ZeroTier {
namespace Defaults {
// Built-in default root certificates (currently none).
// NOTE(review): with DEFAULT_CERTIFICATE_COUNT == 0 these are zero-length
// arrays, which are a GCC/Clang extension and ill-formed in standard C++ —
// confirm all target compilers accept this or add a guard for the zero case.
const uint8_t *CERTIFICATE[DEFAULT_CERTIFICATE_COUNT] = {};
unsigned int CERTIFICATE_SIZE[DEFAULT_CERTIFICATE_COUNT] = {};
} // namespace Defaults
} // namespace ZeroTier

View file

@ -1,30 +0,0 @@
/*
* Copyright (c)2013-2021 ZeroTier, Inc.
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
*/
/****/
#ifndef ZT_DEFAULTS_HPP
#define ZT_DEFAULTS_HPP
#include "Constants.hpp"
namespace ZeroTier {
namespace Defaults {
// Number of built-in default certificates compiled into the binary.
#define DEFAULT_CERTIFICATE_COUNT 0
// Pointers to and sizes of the built-in certificates (defined in Defaults.cpp).
// NOTE(review): a count of 0 makes these zero-length array declarations, a
// GCC/Clang extension that is not standard C++ — verify compiler support.
extern const uint8_t *CERTIFICATE[DEFAULT_CERTIFICATE_COUNT];
extern unsigned int CERTIFICATE_SIZE[DEFAULT_CERTIFICATE_COUNT];
} // namespace Defaults
} // namespace ZeroTier
#endif

View file

@ -1,330 +0,0 @@
/*
* Copyright (c)2013-2021 ZeroTier, Inc.
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
*/
/****/
#ifndef ZT_DEFRAGMENTER_HPP
#define ZT_DEFRAGMENTER_HPP
#include "Buf.hpp"
#include "Constants.hpp"
#include "Containers.hpp"
#include "FCV.hpp"
#include "Mutex.hpp"
#include "Path.hpp"
#include "SharedPtr.hpp"
namespace ZeroTier {
/**
* Generalized putter back together-er for fragmented messages
*
* This is used both for packet fragment assembly and multi-chunk network config
* assembly. This is abstracted out of the code that uses it because it's a bit of
* a hairy and difficult thing to get both correct and fast, and because its
* hairiness makes it very desirable to be able to test and fuzz this code
* independently.
*
* This class is thread-safe and handles locking internally.
*
* Templating is so that this class can be placed in a test harness and tested
* without dependencies on external code. The default template parameters are
* the ones used throughout the ZeroTier core.
*
* @tparam MF Maximum number of fragments that each message can possess (default: ZT_MAX_PACKET_FRAGMENTS)
* @tparam MFP Maximum number of incoming fragments per path (if paths are specified) (default: ZT_MAX_INCOMING_FRAGMENTS_PER_PATH)
* @tparam GCS Garbage collection target size for the incoming message queue (default: ZT_MAX_PACKET_FRAGMENTS * 2)
* @tparam GCT Garbage collection trigger threshold, usually 2X GCS (default: ZT_MAX_PACKET_FRAGMENTS * 4)
* @tparam P Type for pointer to a path object (default: SharedPtr<Path>)
*/
template <unsigned int MF = ZT_MAX_PACKET_FRAGMENTS, unsigned int MFP = ZT_MAX_INCOMING_FRAGMENTS_PER_PATH, unsigned int GCS = (ZT_MAX_PACKET_FRAGMENTS * 2), unsigned int GCT = (ZT_MAX_PACKET_FRAGMENTS * 4), typename P = SharedPtr<Path>> class Defragmenter {
public:
/**
 * Return values from assemble()
 */
enum ResultCode {
/**
 * No error occurred, fragment accepted
 */
OK,
/**
 * Message fully assembled and placed in message vector
 */
COMPLETE,
/**
 * We already have this fragment number or the message is complete
 */
ERR_DUPLICATE_FRAGMENT,
/**
 * The fragment is invalid, such as e.g. having a fragment number beyond the expected count.
 */
ERR_INVALID_FRAGMENT,
/**
 * Too many fragments are in flight for this path
 *
 * The message will be marked as if it's done (all fragments received) but will
 * be abandoned. Subsequent fragments will generate a DUPLICATE_FRAGMENT error.
 *
 * This is an anti-denial-of-service feature to limit the number of inbound
 * fragments that can be in flight over a given physical network path.
 */
ERR_TOO_MANY_FRAGMENTS_FOR_PATH,
/**
 * Memory (or some other limit) exhausted
 */
ERR_OUT_OF_MEMORY
};
// Construct an empty defragmenter with no in-flight messages.
ZT_INLINE Defragmenter() {}
/**
 * Process a fragment of a multi-part message
 *
 * The message ID is arbitrary but must be something that can uniquely
 * group fragments for a given final message. The total fragments
 * value is expected to be the same for all fragments in a message. Results
 * are undefined and probably wrong if this value changes across a message.
 * Fragment numbers must be sequential starting with 0 and going up to
 * one minus total fragments expected (non-inclusive range).
 *
 * Fragments can arrive in any order. Duplicates are dropped and ignored.
 *
 * It's the responsibility of the caller to do whatever validation needs to
 * be done before considering a fragment valid and to make sure the fragment
 * data index and size parameters are valid.
 *
 * The fragment supplied to this function is kept and held under the supplied
 * message ID until or unless (1) the message is fully assembled, (2) the
 * message is orphaned and its entry is taken by a new message, or (3) the
 * clear() function is called to forget all incoming messages. The pointer
 * at the 'fragment' reference will be zeroed since this pointer is handed
 * off, so the SharedPtr<> passed in as 'fragment' will be NULL after this
 * function is called.
 *
 * The 'via' parameter causes this fragment to be registered with a path and
 * unregistered when done or abandoned. It's only used the first time it's
 * supplied (the first non-NULL) for a given message ID. This is a mitigation
 * against memory exhausting DOS attacks.
 *
 * @tparam X Template parameter type for Buf<> containing fragment (inferred)
 * @param messageId Message ID (a unique ID identifying this message)
 * @param message Fixed capacity vector that will be filled with the result if result code is DONE
 * @param fragment Buffer containing fragment that will be filed under this message's ID
 * @param fragmentDataIndex Index of data in fragment's data.bytes (fragment's data.fields type is ignored)
 * @param fragmentDataSize Length of data in fragment's data.bytes (fragment's data.fields type is ignored)
 * @param fragmentNo Number of fragment (0..totalFragmentsExpected, non-inclusive)
 * @param totalFragmentsExpected Total number of expected fragments in this message or 0 to use cached value
 * @param ts Current time
 * @param via If non-NULL this is the path on which this message fragment was received
 * @return Result code
 */
ZT_INLINE ResultCode assemble(const uint64_t messageId, FCV<Buf::Slice, MF> &message, SharedPtr<Buf> &fragment, const unsigned int fragmentDataIndex, const unsigned int fragmentDataSize, const unsigned int fragmentNo, const unsigned int totalFragmentsExpected, const int64_t ts, const P &via)
{
// Sanity checks for malformed fragments or invalid input parameters.
if ((fragmentNo >= totalFragmentsExpected) || (totalFragmentsExpected > MF) || (totalFragmentsExpected == 0))
return ERR_INVALID_FRAGMENT;
// We hold the read lock on _messages unless we need to add a new entry or do GC.
RWMutex::RMaybeWLock ml(m_messages_l);
// Check message hash table size and perform GC if necessary.
// GC runs only when size >= GCT (the trigger), and trims down to GCS (the
// target), so the subtraction below cannot underflow given GCT > GCS.
if (m_messages.size() >= GCT) {
try {
// Scan messages with read lock still locked first and make a sorted list of
// message entries by last modified time. Then lock for writing and delete
// the oldest entries to bring the size of the messages hash table down to
// under the target size. This tries to minimize the amount of time the write
// lock is held since many threads can hold the read lock but all threads must
// wait if someone holds the write lock.
std::vector<std::pair<int64_t, uint64_t>> messagesByLastUsedTime;
messagesByLastUsedTime.reserve(m_messages.size());
for (typename Map<uint64_t, p_E>::const_iterator i(m_messages.begin()); i != m_messages.end(); ++i)
messagesByLastUsedTime.push_back(std::pair<int64_t, uint64_t>(i->second.lastUsed, i->first));
std::sort(messagesByLastUsedTime.begin(), messagesByLastUsedTime.end());
ml.writing(); // acquire write lock on _messages
for (unsigned long x = 0, y = (messagesByLastUsedTime.size() - GCS); x <= y; ++x)
m_messages.erase(messagesByLastUsedTime[x].second);
}
catch (...) {
return ERR_OUT_OF_MEMORY;
}
}
// Get or create message fragment.
Defragmenter<MF, MFP, GCS, GCT, P>::p_E *e;
{
typename Map<uint64_t, Defragmenter<MF, MFP, GCS, GCT, P>::p_E>::iterator ee(m_messages.find(messageId));
if (ee == m_messages.end()) {
ml.writing(); // acquire write lock on _messages if not already
try {
e = &(m_messages[messageId]);
}
catch (...) {
return ERR_OUT_OF_MEMORY;
}
e->id = messageId;
}
else {
e = &(ee->second);
}
}
// Switch back to holding only the read lock on _messages if we have locked for write
ml.reading();
// Acquire lock on entry itself
Mutex::Lock el(e->lock);
// This magic value means this message has already been assembled and is done.
if (e->lastUsed < 0)
return ERR_DUPLICATE_FRAGMENT;
// Update last-activity timestamp for this entry, delaying GC.
e->lastUsed = ts;
// Learn total fragments expected if a value is given. Otherwise the cached
// value gets used. This is to support the implementation of fragmentation
// in the ZT protocol where only fragments carry the total.
if (totalFragmentsExpected > 0)
e->totalFragmentsExpected = totalFragmentsExpected;
// If there is a path associated with this fragment make sure we've registered
// ourselves as in flight, check the limit, and abort if exceeded.
if ((via) && (!e->via)) {
e->via = via;
bool tooManyPerPath = false;
via->m_inboundFragmentedMessages_l.lock();
try {
if (via->m_inboundFragmentedMessages.size() < MFP) {
via->m_inboundFragmentedMessages.insert(messageId);
}
else {
tooManyPerPath = true;
}
}
catch (...) {
// This would indicate something like bad_alloc thrown by the set. Treat
// it as limit exceeded.
tooManyPerPath = true;
}
via->m_inboundFragmentedMessages_l.unlock();
if (tooManyPerPath)
return ERR_TOO_MANY_FRAGMENTS_FOR_PATH;
}
// If we already have fragment number X, abort. Note that we do not
// actually compare data here. Two same-numbered fragments with different
// data would just mean the transfer is corrupt and would be detected
// later e.g. by packet MAC check. Other use cases of this code like
// network configs check each fragment so this basically can't happen.
Buf::Slice &s = e->message.at(fragmentNo);
if (s.b)
return ERR_DUPLICATE_FRAGMENT;
// Take ownership of fragment, setting 'fragment' pointer to NULL. The simple
// transfer of the pointer avoids a synchronized increment/decrement of the object's
// reference count.
s.b.move(fragment);
s.s = fragmentDataIndex;
s.e = fragmentDataIndex + fragmentDataSize;
++e->fragmentsReceived;
// If we now have all fragments then assemble them.
if ((e->fragmentsReceived >= e->totalFragmentsExpected) && (e->totalFragmentsExpected > 0)) {
// This message is done so de-register it with its path if one is associated.
if (e->via) {
e->via->m_inboundFragmentedMessages_l.lock();
e->via->m_inboundFragmentedMessages.erase(messageId);
e->via->m_inboundFragmentedMessages_l.unlock();
e->via.zero();
}
// Slices are TriviallyCopyable and so may be raw copied from e->message to
// the result parameter. This is fast.
e->message.unsafeMoveTo(message);
e->lastUsed = -1; // mark as "done" and force GC to collect
return COMPLETE;
}
return OK;
}
/**
 * Erase all message entries in the internal queue
 */
ZT_INLINE void clear()
{
RWMutex::Lock ml(m_messages_l);
m_messages.clear();
}
/**
 * @return Number of entries currently in message defragmentation cache
 */
ZT_INLINE unsigned int cacheSize() noexcept
{
RWMutex::RLock ml(m_messages_l);
return m_messages.size();
}
private:
// p_E is an entry in the message queue.
struct p_E {
ZT_INLINE p_E() noexcept : id(0), lastUsed(0), totalFragmentsExpected(0), fragmentsReceived(0) {}
// Copy construction deliberately does not copy the entry's mutex; a fresh
// (unlocked) mutex is created for the copy.
ZT_INLINE p_E(const p_E &e) noexcept : id(e.id), lastUsed(e.lastUsed), totalFragmentsExpected(e.totalFragmentsExpected), fragmentsReceived(e.fragmentsReceived), via(e.via), message(e.message), lock() {}
// On destruction, de-register this message from its path (if any) so the
// per-path in-flight count stays accurate.
ZT_INLINE ~p_E()
{
if (via) {
via->m_inboundFragmentedMessages_l.lock();
via->m_inboundFragmentedMessages.erase(id);
via->m_inboundFragmentedMessages_l.unlock();
}
}
ZT_INLINE p_E &operator=(const p_E &e)
{
if (this != &e) {
id = e.id;
lastUsed = e.lastUsed;
totalFragmentsExpected = e.totalFragmentsExpected;
fragmentsReceived = e.fragmentsReceived;
via = e.via;
message = e.message;
}
return *this;
}
// Message ID this entry belongs to
uint64_t id;
// Timestamp of last activity, or negative to mark a completed/abandoned entry
int64_t lastUsed;
// Total fragment count learned from fragments (0 until known)
unsigned int totalFragmentsExpected;
// Number of distinct fragments received so far
unsigned int fragmentsReceived;
// Path this message is registered with (may be nil)
P via;
// Received fragment slices indexed by fragment number
FCV<Buf::Slice, MF> message;
// Per-entry lock; held while mutating this entry under the table's read lock
Mutex lock;
};
// In-flight messages keyed by message ID, guarded by m_messages_l.
Map<uint64_t, Defragmenter<MF, MFP, GCS, GCT, P>::p_E> m_messages;
RWMutex m_messages_l;
};
} // namespace ZeroTier
#endif

View file

@ -1,166 +0,0 @@
/*
* Copyright (c)2013-2021 ZeroTier, Inc.
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
*/
/****/
#include "Dictionary.hpp"
namespace ZeroTier {
// Mutable lookup: inserts and returns an empty value if the key is absent.
Vector<uint8_t> &Dictionary::operator[](const char *const k) { return m_entries[k]; }
// Read-only lookup: returns a shared static empty vector if the key is absent
// (never inserts into the map).
const Vector<uint8_t> &Dictionary::operator[](const char *const k) const
{
static const Vector<uint8_t> s_emptyEntry;
const SortedMap<String, Vector<uint8_t>>::const_iterator e(m_entries.find(k));
return (e == m_entries.end()) ? s_emptyEntry : e->second;
}
// Store a ZeroTier address as its 10-digit hex string representation.
void Dictionary::add(const char *k, const Address &v)
{
char tmp[ZT_ADDRESS_STRING_SIZE_MAX];
v.toString(tmp);
add(k, tmp);
}
void Dictionary::add(const char *k, const char *v)
{
Vector<uint8_t> &e = (*this)[k];
e.clear();
if (v) {
while (*v)
e.push_back((uint8_t) * (v++));
}
}
void Dictionary::add(const char *k, const void *data, unsigned int len)
{
Vector<uint8_t> &e = (*this)[k];
if (likely(len != 0)) {
e.assign((const uint8_t *)data, (const uint8_t *)data + len);
}
else {
e.clear();
}
}
// Fetch a value previously stored as a hexadecimal string and decode it to an
// unsigned integer. Returns dfl if the key is missing or its value is empty.
uint64_t Dictionary::getUI(const char *k, uint64_t dfl) const
{
    char buf[32];
    getS(k, buf, sizeof(buf));
    return (buf[0] != 0) ? Utils::unhex(buf) : dfl;
}
// Copy a value out as a C string, truncating to fit. The output buffer is
// always null-terminated (unless cap == 0, in which case it is untouched).
// Copying stops early at the first null byte in the stored value.
char *Dictionary::getS(const char *k, char *v, const unsigned int cap) const
{
    if (cap == 0) // nothing can be written, not even a terminator
        return v;
    const Vector<uint8_t> &e = (*this)[k];
    unsigned int limit = (unsigned int)e.size();
    if (limit > (cap - 1))
        limit = cap - 1;
    unsigned int i = 0;
    while (i < limit) {
        const char c = (char)e[i];
        if (c == 0)
            break;
        v[i++] = c;
    }
    v[i] = 0;
    return v;
}
// Remove all key/value pairs.
void Dictionary::clear() { m_entries.clear(); }
// Serialize to the line-oriented key=value wire format: each key is appended
// via s_appendKey, each value byte escaped via s_appendValueByte, and each
// pair terminated with '\n'. Iteration over the sorted map makes the output
// deterministic.
void Dictionary::encode(Vector<uint8_t> &out) const
{
out.clear();
for (SortedMap<String, Vector<uint8_t>>::const_iterator ti(m_entries.begin()); ti != m_entries.end(); ++ti) {
s_appendKey(out, ti->first.data());
for (Vector<uint8_t>::const_iterator i(ti->second.begin()); i != ti->second.end(); ++i)
s_appendValueByte(out, *i);
out.push_back((uint8_t)'\n');
}
}
// Parse the key=value line format produced by encode(). Parsing stops at the
// first zero byte (the input may be a C string shorter than len). Always
// returns true; unparseable trailing data is simply left unconsumed.
bool Dictionary::decode(const void *data, unsigned int len)
{
clear();
String k;
// v is null while reading a key, and points at the value being filled once
// '=' has been seen.
Vector<uint8_t> *v = nullptr;
bool escape = false;
for (unsigned int di = 0; di < len; ++di) {
const uint8_t c = reinterpret_cast<const uint8_t *>(data)[di];
if (c) {
if (v) {
if (escape) {
// Decode the escape sequences written by the encoder:
// \0 -> 0x00, \e -> '=', \n -> LF, \r -> CR; anything else literal.
escape = false;
switch (c) {
case 48: v->push_back(0); break;
case 101: v->push_back(61); break;
case 110: v->push_back(10); break;
case 114: v->push_back(13); break;
default: v->push_back(c); break;
}
}
else {
if (c == (uint8_t)'\n') {
// End of this key=value pair; start reading the next key.
k.clear();
v = nullptr;
}
else if (c == 92) { // backslash
escape = true;
}
else {
v->push_back(c);
}
}
}
else {
if (c == (uint8_t)'=') {
// Key complete; subsequent bytes go into its value.
// Note: a repeated key appends to the previously decoded value.
v = &m_entries[k];
}
else {
k.push_back(c);
}
}
}
else {
break;
}
}
return true;
}
// Build the key "name#<16-digit-hex-sub>" in buf for array-style entries.
// 17 bytes are reserved at the end of the buffer for '#' plus the 16 hex
// digits written by Utils::hex plus the terminating null; if the buffer is
// too small (or name does not fit) an empty string is returned instead.
char *Dictionary::arraySubscript(char *buf, unsigned int bufSize, const char *name, const unsigned long sub) noexcept
{
if (bufSize < 17) { // sanity check
buf[0] = 0;
return buf;
}
for (unsigned int i = 0; i < (bufSize - 17); ++i) {
if ((buf[i] = name[i]) == 0) {
buf[i++] = '#';
Utils::hex(sub, buf + i);
return buf;
}
}
buf[0] = 0;
return buf;
}
} // namespace ZeroTier

View file

@ -1,421 +0,0 @@
/*
* Copyright (c)2013-2021 ZeroTier, Inc.
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
*/
/****/
#ifndef ZT_DICTIONARY_HPP
#define ZT_DICTIONARY_HPP
#include "Address.hpp"
#include "Buf.hpp"
#include "Constants.hpp"
#include "Containers.hpp"
#include "Utils.hpp"
namespace ZeroTier {
class Identity;
/**
* A simple key-value store for short keys
*
* This data structure is used for network configurations, node meta-data,
* and other open-definition protocol objects.
*
* This is technically a binary encoding, but with delimiters chosen so that
* it looks like a series of key=value lines of the keys and values are
* human-readable strings.
*
* The fastest way to build a dictionary to send is to use the append
* static functions, not to populate and then encode a Dictionary.
*/
class Dictionary {
public:
// Iterator over key -> raw-bytes entries in sorted key order.
typedef SortedMap<String, Vector<uint8_t>>::const_iterator const_iterator;
// Construct an empty dictionary.
ZT_INLINE Dictionary() {}
ZT_INLINE ~Dictionary() {}
/*
ZT_INLINE void dump() const
{
printf("\n--\n");
for (const_iterator e(begin()); e != end(); ++e) {
printf("%.8x %s=", Utils::fnv1a32(e->second.data(), (unsigned int)e->second.size()), e->first.c_str());
bool binary = false;
for (Vector< uint8_t >::const_iterator c(e->second.begin()); c != e->second.end(); ++c) {
if ((*c < 33) || (*c > 126)) {
binary = true;
break;
}
}
if (binary) {
for (Vector< uint8_t >::const_iterator c(e->second.begin()); c != e->second.end(); ++c)
printf("%.2x", (unsigned int)*c);
} else {
Vector< uint8_t > s(e->second);
s.push_back(0);
printf("%s", s.data());
}
printf("\n");
}
printf("--\n");
}
*/
/**
* Get a reference to a value
*
* @param k Key to look up
* @return Reference to value
*/
Vector<uint8_t> &operator[](const char *k);
/**
* Get a const reference to a value
*
* @param k Key to look up
* @return Reference to value or to empty vector if not found
*/
const Vector<uint8_t> &operator[](const char *k) const;
/**
* @return Start of key->value pairs
*/
ZT_INLINE const_iterator begin() const noexcept { return m_entries.begin(); }
/**
* @return End of key->value pairs
*/
ZT_INLINE const_iterator end() const noexcept { return m_entries.end(); }
/**
* Add an integer as a hexadecimal string value
*
* @param k Key to set
* @param v Integer to set, will be cast to uint64_t and stored as hex
*/
// Store v as a hexadecimal string value under key k.
ZT_INLINE void add(const char *const k, const uint64_t v)
{
    char tmp[24];
    add(k, Utils::hex(v, tmp));
}
/**
* Add an integer as a hexadecimal string value
*
* @param k Key to set
* @param v Integer to set, will be cast to uint64_t and stored as hex
*/
// Store v as a hexadecimal string value under key k (reinterpreted as unsigned).
ZT_INLINE void add(const char *const k, const int64_t v)
{
    char tmp[24];
    add(k, Utils::hex((uint64_t)v, tmp));
}
/**
* Add an address in 10-digit hex string format
*/
void add(const char *k, const Address &v);
/**
* Add a C string as a value
*/
void add(const char *k, const char *v);
/**
* Add a binary blob as a value
*/
void add(const char *k, const void *data, unsigned int len);
/**
* Get an integer
*
* @param k Key to look up
* @param dfl Default value (default: 0)
* @return Value of key or default if not found
*/
uint64_t getUI(const char *k, uint64_t dfl = 0) const;
/**
* Get a C string
*
* If the buffer is too small the string will be truncated, but the
* buffer will always end in a terminating null no matter what.
*
* @param k Key to look up
* @param v Buffer to hold string
* @param cap Maximum size of string (including terminating null)
*/
char *getS(const char *k, char *v, unsigned int cap) const;
/**
* Get an object supporting the marshal/unmarshal interface pattern
*
* @tparam T Object type (inferred)
* @param k Key to look up
* @param obj Object to unmarshal() into
* @return True if unmarshal was successful
*/
// Unmarshal the value stored under k into obj; false if the value is absent,
// empty, or obj.unmarshal() rejects it.
template <typename T> ZT_INLINE bool getO(const char *k, T &obj) const
{
    const Vector<uint8_t> &blob = (*this)[k];
    return (!blob.empty()) && (obj.unmarshal(blob.data(), (unsigned int)blob.size()) > 0);
}
/**
* Add an object supporting the marshal/unmarshal interface pattern
*
* @tparam T Object type (inferred)
* @param k Key to add
* @param obj Object to marshal() into vector
* @return True if successful
*/
// Marshal obj into the value stored under k. On failure the entry is left
// present but empty and false is returned.
template <typename T> ZT_INLINE bool addO(const char *k, T &obj)
{
    Vector<uint8_t> &d = (*this)[k];
    d.resize(T::marshalSizeMax());
    const int written = obj.marshal(d.data());
    if (written <= 0) {
        d.clear();
        return false;
    }
    d.resize(written);
    return true;
}
/**
* Erase all entries in dictionary
*/
void clear();
/**
* @return Number of entries
*/
ZT_INLINE unsigned int size() const noexcept { return (unsigned int)m_entries.size(); }
/**
* @return True if dictionary is not empty
*/
ZT_INLINE bool empty() const noexcept { return m_entries.empty(); }
/**
* Encode to a string in the supplied vector
*
* @param out String encoded dictionary
*/
void encode(Vector<uint8_t> &out) const;
/**
* Decode a string encoded dictionary
*
* This will decode up to 'len' but will also abort if it finds a
* null/zero as this could be a C string.
*
* @param data Data to decode
* @param len Length of data
* @return True if dictionary was formatted correctly and valid, false on error
*/
bool decode(const void *data, unsigned int len);
/**
* Append a key=value pair to a buffer (vector or FCV)
*
* @param out Buffer
* @param k Key (must be <= 8 characters)
* @param v Value
*/
// Append "k=1\n" or "k=0\n" directly to a wire buffer (vector or FCV).
template <typename V> ZT_INLINE static void append(V &out, const char *const k, const bool v)
{
    s_appendKey(out, k);
    const uint8_t digit = v ? (uint8_t)'1' : (uint8_t)'0';
    out.push_back(digit);
    out.push_back((uint8_t)'\n');
}
/**
* Append a key=value pair to a buffer (vector or FCV)
*
* @param out Buffer
* @param k Key (must be <= 8 characters)
* @param v Value
*/
// Append "k=<10 hex digits>\n" for a ZeroTier address, most significant
// nibble first, directly to a wire buffer (vector or FCV).
template <typename V> ZT_INLINE static void append(V &out, const char *const k, const Address v)
{
    s_appendKey(out, k);
    const uint64_t a = v.toInt();
    static_assert(ZT_ADDRESS_LENGTH_HEX == 10, "this must be rewritten for any change in address length");
    for (int sh = 36; sh >= 0; sh -= 4)
        out.push_back((uint8_t)Utils::HEXCHARS[(a >> (unsigned int)sh) & 0xfU]);
    out.push_back((uint8_t)'\n');
}
/**
* Append a key=value pair to a buffer (vector or FCV)
*
* @param out Buffer
* @param k Key (must be <= 8 characters)
* @param v Value
*/
// Unsigned integers encode in hexadecimal via Utils::hex().
// buf[17] fits up to 16 hex digits plus the NUL terminator.
template <typename V> ZT_INLINE static void append(V &out, const char *const k, const uint64_t v)
{
    s_appendKey(out, k);
    char buf[17];
    Utils::hex(v, buf); // assumes Utils::hex NUL-terminates — consistent with the loop below
    unsigned int i = 0;
    while (buf[i])
        out.push_back((uint8_t)buf[i++]);
    out.push_back((uint8_t)'\n');
}
// All other integer widths (and signedness) funnel through the uint64_t encoder above;
// signed values are bit-cast, so negative numbers appear as their two's-complement hex.
template <typename V> ZT_INLINE static void append(V &out, const char *const k, const int64_t v) { append(out, k, (uint64_t)v); }
template <typename V> ZT_INLINE static void append(V &out, const char *const k, const uint32_t v) { append(out, k, (uint64_t)v); }
template <typename V> ZT_INLINE static void append(V &out, const char *const k, const int32_t v) { append(out, k, (uint64_t)v); }
template <typename V> ZT_INLINE static void append(V &out, const char *const k, const uint16_t v) { append(out, k, (uint64_t)v); }
template <typename V> ZT_INLINE static void append(V &out, const char *const k, const int16_t v) { append(out, k, (uint64_t)v); }
template <typename V> ZT_INLINE static void append(V &out, const char *const k, const uint8_t v) { append(out, k, (uint64_t)v); }
template <typename V> ZT_INLINE static void append(V &out, const char *const k, const int8_t v) { append(out, k, (uint64_t)v); }
/**
* Append a key=value pair to a buffer (vector or FCV)
*
* @param out Buffer
* @param k Key (must be <= 8 characters)
* @param v Value
*/
// C-string values are written byte-escaped; a NULL or empty value appends
// nothing at all (the key is omitted entirely, not written with an empty value).
template <typename V> ZT_INLINE static void append(V &out, const char *const k, const char *v)
{
    if ((v) && (*v)) {
        s_appendKey(out, k);
        while (*v)
            s_appendValueByte(out, (uint8_t) * (v++)); // escapes NUL/newline/CR/'='/backslash
        out.push_back((uint8_t)'\n');
    }
}
/**
* Append a key=value pair to a buffer (vector or FCV)
*
* @param out Buffer
* @param k Key (must be <= 8 characters)
* @param v Value
* @param vlen Value length in bytes
*/
// Raw byte blobs are written with the same per-byte escaping as strings,
// so arbitrary binary (including NUL and newline bytes) round-trips safely.
template <typename V> ZT_INLINE static void append(V &out, const char *const k, const void *const v, const unsigned int vlen)
{
    s_appendKey(out, k);
    for (unsigned int i = 0; i < vlen; ++i)
        s_appendValueByte(out, reinterpret_cast<const uint8_t *>(v)[i]);
    out.push_back((uint8_t)'\n');
}
/**
* Append a packet ID as raw bytes in the provided byte order
*
* @param out Buffer
* @param k Key (must be <= 8 characters)
* @param pid Packet ID
*/
// Writes the 8 bytes of pid exactly as they sit in memory (host byte order),
// since it is passed by pointer to the raw-bytes append overload above.
template <typename V> static ZT_INLINE void appendPacketId(V &out, const char *const k, const uint64_t pid) { append(out, k, &pid, 8); }
/**
* Append key=value with any object implementing the correct marshal interface
*
* @param out Buffer
* @param k Key (must be <= 8 characters)
* @param v Marshal-able object
* @return Bytes appended or negative on error (return value of marshal())
*/
template <typename V, typename T> static ZT_INLINE int appendObject(V &out, const char *const k, const T &v)
{
    uint8_t tmp[2048]; // large enough for any current object
    if (T::marshalSizeMax() > sizeof(tmp))
        return -1; // refuse rather than overflow the stack buffer
    const int mlen = v.marshal(tmp);
    if (mlen > 0)
        append(out, k, tmp, (unsigned int)mlen); // escaped raw-bytes append
    return mlen; // <= 0 means marshal failed and nothing was appended
}
/**
* Append #sub where sub is a hexadecimal string to 'name' and store in 'buf'
*
* @param buf Buffer to store subscript key
* @param name Root name
* @param sub Subscript index
* @return Pointer to 'buf'
*/
static char *arraySubscript(char *buf, unsigned int bufSize, const char *name, const unsigned long sub) noexcept;
private:
// Append one value byte, escaping the five characters that would break the
// line-oriented key=value encoding. Escape sequences (92 == '\'):
//   NUL (0)  -> "\0"   LF (10) -> "\n"   CR (13) -> "\r"
//   '=' (61) -> "\e"   '\' (92) -> "\\"
template <typename V> ZT_INLINE static void s_appendValueByte(V &out, const uint8_t c)
{
    switch (c) {
        case 0: // NUL -> backslash, '0'
            out.push_back(92);
            out.push_back(48);
            break;
        case 10: // LF -> backslash, 'n'
            out.push_back(92);
            out.push_back(110);
            break;
        case 13: // CR -> backslash, 'r'
            out.push_back(92);
            out.push_back(114);
            break;
        case 61: // '=' -> backslash, 'e'
            out.push_back(92);
            out.push_back(101);
            break;
        case 92: // '\' -> backslash, backslash
            out.push_back(92);
            out.push_back(92);
            break;
        default: out.push_back(c); break;
    }
}
// Append "key=" verbatim. No escaping or length checking is done here, so the
// caller must supply keys (documented as <= 8 chars) containing no '=' or '\n'.
template <typename V> ZT_INLINE static void s_appendKey(V &out, const char *k)
{
    for (;;) {
        const char c = *(k++);
        if (c == 0)
            break;
        out.push_back((uint8_t)c);
    }
    out.push_back((uint8_t)'=');
}
// Dictionary maps need to be sorted so that they always encode in the same order
// to yield blobs that can be hashed and signed reproducibly. Other than for areas
// where dictionaries are signed and verified the order doesn't matter.
SortedMap<String, Vector<uint8_t>> m_entries;
};
} // namespace ZeroTier
#endif

View file

@ -1,739 +0,0 @@
/*
* ECC384 code is based on EASY-ECC by Kenneth MacKay with only minor changes.
*
* It can be found in various places online such as: https://github.com/jestan/easy-ecc
*
* This file is under the BSD 2-clause license since that was the license under which
* the original ECC code was distributed.
*/
#include "ECC384.hpp"
#include "Constants.hpp"
#include "Utils.hpp"
namespace ZeroTier {
namespace {
#define ECC_CURVE_BYTES 48
#define ECC_CURVE_DIGITS (ECC_CURVE_BYTES / 8)
#define ECC_CREATE_KEY_MAX_ATTEMPTS 4096
#define vli_clear(p) std::fill((p), (p) + ECC_CURVE_DIGITS, 0ULL)
#define vli_set(dest, src) std::copy((src), (src) + ECC_CURVE_DIGITS, (dest))
#define vli_isEven(vli) ((vli[0] & 1ULL) == 0ULL)
#define vli_isZero(p) std::all_of((p), (p) + ECC_CURVE_DIGITS, [](const uint64_t i) { return i == 0; })
#define vli_testBit(p, b) ((p)[(unsigned int)(b) >> 6U] & (1ULL << ((unsigned int)(b)&63U)))
#ifndef ZT_HAVE_UINT128
// Minimal two-limb stand-in used when the compiler has no native 128-bit integer.
struct uint128_t {
    uint64_t m_low, m_high;
};
#endif
// Affine curve point; each coordinate is a little-endian array of 64-bit limbs.
struct EccPoint {
    uint64_t x[ECC_CURVE_DIGITS], y[ECC_CURVE_DIGITS];
};
// ECC curve NIST P-384
// NIST P-384 domain parameters, little-endian 64-bit limbs (limb 0 first):
// field prime p, curve coefficient b (curve is y^2 = x^3 - 3x + b), base point G, group order n.
const uint64_t curve_p[ECC_CURVE_DIGITS] = { 0x00000000FFFFFFFF, 0xFFFFFFFF00000000, 0xFFFFFFFFFFFFFFFE, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF };
const uint64_t curve_b[ECC_CURVE_DIGITS] = { 0x2A85C8EDD3EC2AEF, 0xC656398D8A2ED19D, 0x0314088F5013875A, 0x181D9C6EFE814112, 0x988E056BE3F82D19, 0xB3312FA7E23EE7E4 };
const EccPoint curve_G = { { 0x3A545E3872760AB7, 0x5502F25DBF55296C, 0x59F741E082542A38, 0x6E1D3B628BA79B98, 0x8EB1C71EF320AD74, 0xAA87CA22BE8B0537 }, { 0x7A431D7C90EA0E5F, 0x0A60B1CE1D7E819D, 0xE9DA3113B5F0B8C0, 0xF8F41DBD289A147C, 0x5D9E98BF9292DC29, 0x3617DE4A96262C6F } };
const uint64_t curve_n[ECC_CURVE_DIGITS] = { 0xECEC196ACCC52973, 0x581A0DB248B0A77A, 0xC7634D81F4372DDF, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF };
// Number of significant bits in the big integer (0 for zero).
ZT_INLINE unsigned int vli_numBits(const uint64_t *const p_vli)
{
    // Find the highest non-zero limb...
    int l_numDigits = ECC_CURVE_DIGITS - 1;
    for (; l_numDigits >= 0 && p_vli[l_numDigits] == 0; --l_numDigits) {}
    if (likely(l_numDigits > -1)) {
        // ...then count the bits used inside it.
        uint64_t l_digit = p_vli[l_numDigits];
        int i = 0;
        for (; l_digit; ++i) {
            l_digit >>= 1;
        }
        return (unsigned int)((l_numDigits * 64) + i);
    }
    return 0;
}
// Three-way compare: 1 if left > right, -1 if left < right, 0 if equal.
// Written branch-reduced over all limbs as an attempt at constant-time behavior.
ZT_INLINE int vli_cmp(const uint64_t *const p_left, const uint64_t *const p_right)
{
    int comp = 0;
    for (int i = ECC_CURVE_DIGITS - 1; i >= 0; --i) {
        // Only the first (most significant) differing limb changes comp.
        comp += (int)((p_left[i] > p_right[i]) && (comp == 0)); // should be constant time
        comp -= (int)((p_left[i] < p_right[i]) && (comp == 0));
    }
    return comp;
}
// Left shift by p_shift bits, returning the bits shifted out of the top limb.
// NOTE(review): p_shift must be in 1..63 — p_shift == 0 would compute a shift
// by 64 (undefined behavior). All call sites in this file pass 32.
ZT_INLINE uint64_t vli_lshift(uint64_t *const p_result, const uint64_t *const p_in, const unsigned int p_shift)
{
    uint64_t l_carry = 0;
    for (unsigned int i = 0, p_shift2 = (64U - p_shift); i < ECC_CURVE_DIGITS; ++i) {
        uint64_t l_temp = p_in[i];
        p_result[i] = (l_temp << p_shift) | l_carry;
        l_carry = l_temp >> p_shift2; // bits carried into the next limb
    }
    return l_carry;
}
// In-place right shift by one bit (divide by 2, discarding the low bit).
// Walks from the most significant limb down, carrying the dropped bit.
ZT_INLINE void vli_rshift1(uint64_t *p_vli)
{
    uint64_t *const l_end = p_vli, l_carry = 0;
    p_vli += ECC_CURVE_DIGITS;
    while (p_vli-- > l_end) {
        const uint64_t l_temp = *p_vli;
        *p_vli = (l_temp >> 1U) | l_carry;
        l_carry = l_temp << 63U; // low bit becomes the next limb's high bit
    }
}
// Multi-limb addition; returns the final carry (0 or 1). May alias result/left.
ZT_INLINE uint64_t vli_add(uint64_t *const p_result, const uint64_t *const p_left, const uint64_t *const p_right)
{
    uint64_t l_carry = 0;
    for (unsigned int i = 0; i < ECC_CURVE_DIGITS; ++i) {
        uint64_t l_sum = p_left[i] + p_right[i] + l_carry;
        // If sum == left then right+carry wrapped to exactly 0 or 2^64; in both
        // cases the previous carry value is already correct, so only update otherwise.
        if (l_sum != p_left[i]) {
            l_carry = (l_sum < p_left[i]);
        }
        p_result[i] = l_sum;
    }
    return l_carry;
}
// Multi-limb subtraction; returns the final borrow (0 or 1). May alias result/left.
ZT_INLINE uint64_t vli_sub(uint64_t *const p_result, const uint64_t *const p_left, const uint64_t *const p_right)
{
    uint64_t l_borrow = 0;
    for (unsigned int i = 0; i < ECC_CURVE_DIGITS; ++i) {
        uint64_t l_diff = p_left[i] - p_right[i] - l_borrow;
        // Mirror of vli_add's carry trick: when diff == left the subtrahend
        // wrapped to 0 and the existing borrow remains correct.
        if (l_diff != p_left[i]) {
            l_borrow = (l_diff > p_left[i]);
        }
        p_result[i] = l_diff;
    }
    return l_borrow;
}
#ifdef ZT_HAVE_UINT128
// Schoolbook multiply producing a double-width (2 * ECC_CURVE_DIGITS limb) product.
// Computes one output limb per outer iteration, accumulating partial products in
// a 128-bit + 64-bit (r01, r2) running sum.
void vli_mult(uint64_t *const p_result, const uint64_t *const p_left, const uint64_t *const p_right)
{
    uint128_t r01 = 0;
    uint64_t r2 = 0;
    for (int k = 0; k < ECC_CURVE_DIGITS * 2 - 1; ++k) {
        // Sum all limb pairs (i, k-i) contributing to output column k.
        for (int i = (k < ECC_CURVE_DIGITS ? 0 : (k + 1) - ECC_CURVE_DIGITS); i <= k && i < ECC_CURVE_DIGITS; ++i) {
            uint128_t l_product = (uint128_t)p_left[i] * p_right[k - i];
            r01 += l_product;
            r2 += (r01 < l_product); // overflow of the 128-bit accumulator
        }
        p_result[k] = (uint64_t)r01;
        r01 = (r01 >> 64U) | (((uint128_t)r2) << 64U); // shift accumulator down one limb
        r2 = 0;
    }
    p_result[ECC_CURVE_DIGITS * 2 - 1] = (uint64_t)r01;
}
// Squaring variant of vli_mult: exploits symmetry by computing each cross
// product (i, k-i) once and doubling it, roughly halving the multiplications.
ZT_INLINE void vli_square(uint64_t *const p_result, const uint64_t *const p_left)
{
    uint128_t r01 = 0;
    uint64_t r2 = 0;
    for (int k = 0; k < ECC_CURVE_DIGITS * 2 - 1; ++k) {
        for (int i = (k < ECC_CURVE_DIGITS ? 0 : (k + 1) - ECC_CURVE_DIGITS); i <= k && i <= k - i; ++i) {
            uint128_t l_product = (uint128_t)p_left[i] * p_left[k - i];
            if (i < (k - i)) {
                // Off-diagonal term appears twice; capture the bit lost by doubling.
                r2 += (uint64_t)(l_product >> 127U);
                l_product *= 2;
            }
            r01 += l_product;
            r2 += (r01 < l_product);
        }
        p_result[k] = (uint64_t)r01;
        r01 = (r01 >> 64U) | (((uint128_t)r2) << 64U);
        r2 = 0;
    }
    p_result[ECC_CURVE_DIGITS * 2 - 1] = (uint64_t)r01;
}
#else /* ZT_HAVE_UINT128 */
// 64x64 -> 128-bit multiply built from four 32x32 partial products,
// for platforms without a native 128-bit integer type.
uint128_t mul_64_64(uint64_t p_left, uint64_t p_right)
{
    uint64_t a0 = p_left & 0xffffffffull, a1 = p_left >> 32, b0 = p_right & 0xffffffffull, b1 = p_right >> 32, m0 = a0 * b0, m1 = a0 * b1, m2 = a1 * b0, m3 = a1 * b1;
    uint128_t l_result;
    m2 += (m0 >> 32);
    m2 += m1;
    if (m2 < m1) { // overflow
        m3 += 0x100000000ull;
    }
    l_result.m_low = (m0 & 0xffffffffull) | (m2 << 32);
    l_result.m_high = m3 + (m2 >> 32);
    return l_result;
}
// 128-bit addition for the emulated uint128_t; carries low into high.
ZT_INLINE uint128_t add_128_128(uint128_t a, uint128_t b)
{
    uint128_t l_result;
    l_result.m_low = a.m_low + b.m_low;
    l_result.m_high = a.m_high + b.m_high + (l_result.m_low < a.m_low); // carry from low half
    return l_result;
}
// Portable (no native 128-bit) version of the double-width schoolbook multiply;
// same column-wise algorithm as the uint128 variant above.
void vli_mult(uint64_t *const p_result, uint64_t *const p_left, const uint64_t *const p_right)
{
    uint128_t r01 = { 0, 0 };
    uint64_t r2 = 0;
    /* Compute each digit of p_result in sequence, maintaining the carries. */
    for (int k = 0; k < ECC_CURVE_DIGITS * 2 - 1; ++k) {
        for (int i = (k < ECC_CURVE_DIGITS ? 0 : (k + 1) - ECC_CURVE_DIGITS); i <= k && i < ECC_CURVE_DIGITS; ++i) {
            uint128_t l_product = mul_64_64(p_left[i], p_right[k - i]);
            r01 = add_128_128(r01, l_product);
            r2 += (r01.m_high < l_product.m_high); // accumulator overflow into third limb
        }
        p_result[k] = r01.m_low;
        // Shift the (r01, r2) accumulator down one limb for the next column.
        r01.m_low = r01.m_high;
        r01.m_high = r2;
        r2 = 0;
    }
    p_result[ECC_CURVE_DIGITS * 2 - 1] = r01.m_low;
}
// Portable squaring variant: doubles each off-diagonal cross product once,
// mirroring the uint128 version above.
ZT_INLINE void vli_square(uint64_t *const p_result, uint64_t *const p_left)
{
    uint128_t r01 = { 0, 0 };
    uint64_t r2 = 0;
    for (int k = 0; k < ECC_CURVE_DIGITS * 2 - 1; ++k) {
        for (int i = (k < ECC_CURVE_DIGITS ? 0 : (k + 1) - ECC_CURVE_DIGITS); i <= k && i <= k - i; ++i) {
            uint128_t l_product = mul_64_64(p_left[i], p_left[k - i]);
            if (i < k - i) {
                // Doubling a 128-bit value by hand: save the top bit, shift left one.
                r2 += l_product.m_high >> 63;
                l_product.m_high = (l_product.m_high << 1) | (l_product.m_low >> 63);
                l_product.m_low <<= 1;
            }
            r01 = add_128_128(r01, l_product);
            r2 += (r01.m_high < l_product.m_high);
        }
        p_result[k] = r01.m_low;
        r01.m_low = r01.m_high;
        r01.m_high = r2;
        r2 = 0;
    }
    p_result[ECC_CURVE_DIGITS * 2 - 1] = r01.m_low;
}
#endif /* ZT_HAVE_UINT128 */
// result = (left + right) mod mod. Assumes left and right are already < mod,
// so at most one subtraction of the modulus is needed.
void vli_modAdd(uint64_t *const p_result, uint64_t *const p_left, const uint64_t *const p_right, const uint64_t *const p_mod)
{
    if ((vli_add(p_result, p_left, p_right) != 0ULL) || vli_cmp(p_result, p_mod) >= 0) {
        vli_sub(p_result, p_result, p_mod);
    }
}
// result = (left - right) mod mod. A borrow means the result went negative,
// which is corrected by adding the modulus back.
void vli_modSub(uint64_t *const p_result, uint64_t *const p_left, const uint64_t *const p_right, const uint64_t *const p_mod)
{
    if (vli_sub(p_result, p_left, p_right) != 0ULL) {
        vli_add(p_result, p_result, p_mod);
    }
}
// Multiply by omega, where 2^384 == omega (mod p) for P-384's prime:
// p = 2^384 - 2^128 - 2^96 + 2^32 - 1, hence omega = 2^128 + 2^96 - 2^32 + 1.
// Used by vli_mmod_fast to fold the high half of a double-width product.
// result must have room for ECC_CURVE_DIGITS + 3 limbs.
ZT_INLINE void omega_mult(uint64_t *const p_result, const uint64_t *const p_right)
{
    uint64_t l_tmp[ECC_CURVE_DIGITS];
    vli_set(p_result, p_right); // the "* 1" term
    uint64_t l_carry = vli_lshift(l_tmp, p_right, 32); // tmp = right * 2^32
    p_result[1 + ECC_CURVE_DIGITS] = l_carry + vli_add(p_result + 1, p_result + 1, l_tmp); // += right * 2^96
    p_result[2 + ECC_CURVE_DIGITS] = vli_add(p_result + 2, p_result + 2, p_right); // += right * 2^128
    // -= right * 2^32, propagating the borrow through the upper limbs by hand.
    uint64_t l_diff = p_result[ECC_CURVE_DIGITS] - (l_carry + vli_sub(p_result, p_result, l_tmp));
    if (l_diff > p_result[ECC_CURVE_DIGITS]) {
        for (unsigned int i = 1 + ECC_CURVE_DIGITS;; ++i) {
            if (likely(--p_result[i] != (uint64_t)-1)) {
                break;
            }
        }
    }
    p_result[ECC_CURVE_DIGITS] = l_diff;
}
// Fast reduction of a double-width product modulo curve_p, exploiting the
// special form of the P-384 prime: repeatedly replace the high half H with
// H * omega (since 2^384 == omega mod p) and add it into the low half, then
// finish with conditional subtractions. Destroys p_product.
void vli_mmod_fast(uint64_t *const p_result, uint64_t *const p_product)
{
    uint64_t l_tmp[2 * ECC_CURVE_DIGITS];
    while (!vli_isZero(p_product + ECC_CURVE_DIGITS)) {
        uint64_t l_carry = 0;
        std::fill(l_tmp, l_tmp + (2 * ECC_CURVE_DIGITS), 0ULL);
        omega_mult(l_tmp, p_product + ECC_CURVE_DIGITS); // tmp = high-half * omega
        vli_clear(p_product + ECC_CURVE_DIGITS);
        // Add tmp (which spans at most ECC_CURVE_DIGITS + 3 limbs) into the product.
        for (unsigned int i = 0; i < ECC_CURVE_DIGITS + 3; ++i) {
            uint64_t l_sum = p_product[i] + l_tmp[i] + l_carry;
            if (l_sum != p_product[i]) {
                l_carry = (l_sum < p_product[i]);
            }
            p_product[i] = l_sum;
        }
    }
    // Final canonicalization into [0, p).
    while (vli_cmp(p_product, curve_p) > 0) {
        vli_sub(p_product, p_product, curve_p);
    }
    vli_set(p_result, p_product);
}
// result = (left * right) mod curve_p using the specialized fast reduction.
ZT_INLINE void vli_modMult_fast(uint64_t *const p_result, uint64_t *const p_left, const uint64_t *const p_right)
{
    uint64_t l_product[2 * ECC_CURVE_DIGITS];
    vli_mult(l_product, p_left, p_right);
    vli_mmod_fast(p_result, l_product);
}
// result = left^2 mod curve_p using the specialized fast reduction.
ZT_INLINE void vli_modSquare_fast(uint64_t *const p_result, uint64_t *const p_left)
{
    uint64_t l_product[2 * ECC_CURVE_DIGITS];
    vli_square(l_product, p_left);
    vli_mmod_fast(p_result, l_product);
}
// Modular inverse via the binary extended Euclidean algorithm:
// maintains the invariants a*X == u (mod mod) and b*X == v (mod mod) while
// reducing (a, b) toward equality; when a == b (== gcd) the inverse is in u.
// mod must be odd (true for both curve_p and curve_n). Input 0 yields 0.
// Divisions by 2 are done as shift-with-conditional-add-of-mod to stay exact.
void vli_modInv(uint64_t *const p_result, uint64_t *const p_input, const uint64_t *const p_mod)
{
    if (likely(!vli_isZero(p_input))) {
        uint64_t a[ECC_CURVE_DIGITS], b[ECC_CURVE_DIGITS], u[ECC_CURVE_DIGITS], v[ECC_CURVE_DIGITS], l_carry;
        vli_set(a, p_input);
        vli_set(b, p_mod);
        u[0] = 1; // u = 1, v = 0
        std::fill(u + 1, u + ECC_CURVE_DIGITS, 0ULL);
        vli_clear(v);
        int l_cmpResult;
        while ((l_cmpResult = vli_cmp(a, b)) != 0) {
            l_carry = 0;
            if (vli_isEven(a)) {
                // a /= 2; u = u/2 (mod mod)
                vli_rshift1(a);
                if (!vli_isEven(u)) {
                    l_carry = vli_add(u, u, p_mod); // make u even before halving
                }
                vli_rshift1(u);
                if (l_carry) {
                    u[ECC_CURVE_DIGITS - 1] |= 0x8000000000000000ULL; // restore carried-out bit
                }
            }
            else if (vli_isEven(b)) {
                // b /= 2; v = v/2 (mod mod)
                vli_rshift1(b);
                if (!vli_isEven(v)) {
                    l_carry = vli_add(v, v, p_mod);
                }
                vli_rshift1(v);
                if (l_carry) {
                    v[ECC_CURVE_DIGITS - 1] |= 0x8000000000000000ULL;
                }
            }
            else if (l_cmpResult > 0) {
                // a = (a - b)/2; u = (u - v)/2 (mod mod)
                vli_sub(a, a, b);
                vli_rshift1(a);
                if (vli_cmp(u, v) < 0) {
                    vli_add(u, u, p_mod); // avoid going negative
                }
                vli_sub(u, u, v);
                if (!vli_isEven(u)) {
                    l_carry = vli_add(u, u, p_mod);
                }
                vli_rshift1(u);
                if (l_carry) {
                    u[ECC_CURVE_DIGITS - 1] |= 0x8000000000000000ULL;
                }
            }
            else {
                // Symmetric case: b = (b - a)/2; v = (v - u)/2 (mod mod)
                vli_sub(b, b, a);
                vli_rshift1(b);
                if (vli_cmp(v, u) < 0) {
                    vli_add(v, v, p_mod);
                }
                vli_sub(v, v, u);
                if (!vli_isEven(v)) {
                    l_carry = vli_add(v, v, p_mod);
                }
                vli_rshift1(v);
                if (l_carry) {
                    v[ECC_CURVE_DIGITS - 1] |= 0x8000000000000000ULL;
                }
            }
        }
        vli_set(p_result, u);
    }
    else {
        vli_clear(p_result); // define inv(0) := 0
    }
}
// True for the identity element (easy-ecc represents the point at infinity as (0, 0)).
ZT_INLINE bool EccPoint_isZero(const EccPoint *const p_point) { return (vli_isZero(p_point->x) && vli_isZero(p_point->y)); }
// In-place point doubling in Jacobian coordinates (X, Y, Z), specialized for
// a = -3 curves like P-384 (easy-ecc's formulas). Z == 0 (identity) is a no-op.
// All arithmetic is mod curve_p; t4/t5 are scratch registers from the original.
void EccPoint_double_jacobian(uint64_t *const X1, uint64_t *const Y1, uint64_t *const Z1)
{
    if (likely(!vli_isZero(Z1))) {
        uint64_t t4[ECC_CURVE_DIGITS], t5[ECC_CURVE_DIGITS];
        vli_modSquare_fast(t4, Y1);   // t4 = Y^2
        vli_modMult_fast(t5, X1, t4); // t5 = X * Y^2
        vli_modSquare_fast(t4, t4);   // t4 = Y^4
        vli_modMult_fast(Y1, Y1, Z1); // Y = Y * Z
        vli_modSquare_fast(Z1, Z1);   // Z = Z^2
        // Compute 3 * (X - Z^2) * (X + Z^2) / 2 — the a = -3 shortcut.
        vli_modAdd(X1, X1, Z1, curve_p);
        vli_modAdd(Z1, Z1, Z1, curve_p);
        vli_modSub(Z1, X1, Z1, curve_p);
        vli_modMult_fast(X1, X1, Z1);
        vli_modAdd(Z1, X1, X1, curve_p);
        vli_modAdd(X1, X1, Z1, curve_p);
        // Exact division by 2 mod p: if odd, add p first so the shift is lossless.
        if (vli_testBit(X1, 0)) {
            const uint64_t l_carry = vli_add(X1, X1, curve_p);
            vli_rshift1(X1);
            X1[ECC_CURVE_DIGITS - 1] |= l_carry << 63U;
        }
        else {
            vli_rshift1(X1);
        }
        vli_modSquare_fast(Z1, X1);
        vli_modSub(Z1, Z1, t5, curve_p);
        vli_modSub(Z1, Z1, t5, curve_p);     // Z now holds the new X
        vli_modSub(t5, t5, Z1, curve_p);
        vli_modMult_fast(X1, X1, t5);
        vli_modSub(t4, X1, t4, curve_p);     // t4 now holds the new Y
        // Rotate results into place: X <- Z, Z <- Y, Y <- t4.
        vli_set(X1, Z1);
        vli_set(Z1, Y1);
        vli_set(Y1, t4);
    }
}
// Convert to co-Z form: X *= Z^2, Y *= Z^3 (mod p). With Z = inverse of the
// shared Z coordinate this recovers affine coordinates.
ZT_INLINE void apply_z(uint64_t *const X1, uint64_t *const Y1, uint64_t *const Z)
{
    uint64_t t1[ECC_CURVE_DIGITS];
    vli_modSquare_fast(t1, Z);    // t1 = Z^2
    vli_modMult_fast(X1, X1, t1); // X = X * Z^2
    vli_modMult_fast(t1, t1, Z);  // t1 = Z^3
    vli_modMult_fast(Y1, Y1, t1); // Y = Y * Z^3
}
// Initial step of the co-Z Montgomery ladder: given P in (X1, Y1), produce
// (X1, Y1) = 2P and (X2, Y2) = P sharing the same implicit Z coordinate.
// p_initialZ, if non-NULL, supplies a random starting Z (coordinate blinding
// used by ECDH); otherwise Z = 1.
// Fix: the original assigned z[0] = 1 twice (before and after the std::fill);
// the redundant second assignment is removed.
void XYcZ_initial_double(uint64_t *const X1, uint64_t *const Y1, uint64_t *const X2, uint64_t *const Y2, uint64_t *const p_initialZ)
{
    uint64_t z[ECC_CURVE_DIGITS];
    vli_set(X2, X1);
    vli_set(Y2, Y1);
    z[0] = 1; // z = 1 (default when no blinding Z is supplied)
    std::fill(z + 1, z + ECC_CURVE_DIGITS, 0ULL);
    if (p_initialZ) {
        vli_set(z, p_initialZ);
    }
    apply_z(X1, Y1, z);
    EccPoint_double_jacobian(X1, Y1, z);
    apply_z(X2, Y2, z);
}
// Co-Z point addition: given P = (X1, Y1) and Q = (X2, Y2) with the same Z,
// computes (X1, Y1) <- P' (P re-expressed) and (X2, Y2) <- P + Q, again co-Z.
// All arithmetic mod curve_p; t5 is scratch. Formulas from easy-ecc.
void XYcZ_add(uint64_t *const X1, uint64_t *const Y1, uint64_t *const X2, uint64_t *const Y2)
{
    uint64_t t5[ECC_CURVE_DIGITS];
    vli_modSub(t5, X2, X1, curve_p);  // t5 = x2 - x1
    vli_modSquare_fast(t5, t5);       // t5 = (x2 - x1)^2 = A
    vli_modMult_fast(X1, X1, t5);     // X1 = x1 * A = B
    vli_modMult_fast(X2, X2, t5);     // X2 = x2 * A = C
    vli_modSub(Y2, Y2, Y1, curve_p);  // Y2 = y2 - y1
    vli_modSquare_fast(t5, Y2);       // t5 = (y2 - y1)^2 = D
    vli_modSub(t5, t5, X1, curve_p);
    vli_modSub(t5, t5, X2, curve_p);  // t5 = D - B - C = x3
    vli_modSub(X2, X2, X1, curve_p);  // X2 = C - B
    vli_modMult_fast(Y1, Y1, X2);     // Y1 = y1 * (C - B)
    vli_modSub(X2, X1, t5, curve_p);  // X2 = B - x3
    vli_modMult_fast(Y2, Y2, X2);     // Y2 = (y2 - y1) * (B - x3)
    vli_modSub(Y2, Y2, Y1, curve_p);  // Y2 = y3
    vli_set(X2, t5);
}
// Conjugate co-Z addition: from co-Z P and Q computes both P + Q and P - Q
// (into (X2, Y2) and (X1, Y1) respectively), still sharing a Z coordinate.
// This pair of outputs is what makes the regular Montgomery ladder work.
void XYcZ_addC(uint64_t *const X1, uint64_t *const Y1, uint64_t *const X2, uint64_t *const Y2)
{
    uint64_t t5[ECC_CURVE_DIGITS], t6[ECC_CURVE_DIGITS], t7[ECC_CURVE_DIGITS];
    vli_modSub(t5, X2, X1, curve_p);  // t5 = x2 - x1
    vli_modSquare_fast(t5, t5);       // t5 = (x2 - x1)^2 = A
    vli_modMult_fast(X1, X1, t5);     // X1 = x1 * A = B
    vli_modMult_fast(X2, X2, t5);     // X2 = x2 * A = C
    vli_modAdd(t5, Y2, Y1, curve_p);  // t5 = y2 + y1
    vli_modSub(Y2, Y2, Y1, curve_p);  // Y2 = y2 - y1
    vli_modSub(t6, X2, X1, curve_p);  // t6 = C - B
    vli_modMult_fast(Y1, Y1, t6);     // Y1 = y1 * (C - B)
    vli_modAdd(t6, X1, X2, curve_p);  // t6 = B + C
    vli_modSquare_fast(X2, Y2);       // X2 = (y2 - y1)^2
    vli_modSub(X2, X2, t6, curve_p);  // X2 = x3 (of P + Q)
    vli_modSub(t7, X1, X2, curve_p);  // t7 = B - x3
    vli_modMult_fast(Y2, Y2, t7);     // Y2 = (y2 - y1) * (B - x3)
    vli_modSub(Y2, Y2, Y1, curve_p);  // Y2 = y3 (of P + Q)
    vli_modSquare_fast(t7, t5);       // t7 = (y2 + y1)^2 = F
    vli_modSub(t7, t7, t6, curve_p);  // t7 = x3' (of P - Q)
    vli_modSub(t6, t7, X1, curve_p);  // t6 = x3' - B
    vli_modMult_fast(t6, t6, t5);     // t6 = (y2 + y1) * (x3' - B)
    vli_modSub(Y1, t6, Y1, curve_p);  // Y1 = y3' (of P - Q)
    vli_set(X1, t7);
}
// Scalar multiplication result = scalar * point using the co-Z Montgomery
// ladder (one addC + add per bit, regardless of bit value — the regular
// structure is easy-ecc's side-channel mitigation; note the scalar-dependent
// indexing still remains). p_initialZ optionally randomizes the projective
// representation. Scalar must be nonzero and have >= 2 significant bits.
void EccPoint_mult(EccPoint *const p_result, const EccPoint *const p_point, uint64_t *const p_scalar, uint64_t *const p_initialZ)
{
    uint64_t Rx[2][ECC_CURVE_DIGITS], Ry[2][ECC_CURVE_DIGITS], z[ECC_CURVE_DIGITS];
    vli_set(Rx[1], p_point->x);
    vli_set(Ry[1], p_point->y);
    // R[1] = 2P, R[0] = P, sharing a Z coordinate.
    XYcZ_initial_double(Rx[1], Ry[1], Rx[0], Ry[0], p_initialZ);
    for (int i = (int)vli_numBits(p_scalar) - 2; i > 0; --i) {
        int nb = (int)!vli_testBit(p_scalar, i);
        XYcZ_addC(Rx[1 - nb], Ry[1 - nb], Rx[nb], Ry[nb]);
        XYcZ_add(Rx[nb], Ry[nb], Rx[1 - nb], Ry[1 - nb]);
    }
    // Final ladder step, then recover the shared Z coordinate algebraically
    // so the result can be converted back to affine via apply_z.
    int nb = (int)!vli_testBit(p_scalar, 0);
    XYcZ_addC(Rx[1 - nb], Ry[1 - nb], Rx[nb], Ry[nb]);
    vli_modSub(z, Rx[1], Rx[0], curve_p);
    vli_modMult_fast(z, z, Ry[1 - nb]);
    vli_modMult_fast(z, z, p_point->x);
    vli_modInv(z, z, curve_p);
    vli_modMult_fast(z, z, p_point->y);
    vli_modMult_fast(z, z, Rx[1 - nb]); // z = 1/Z
    XYcZ_add(Rx[nb], Ry[nb], Rx[1 - nb], Ry[1 - nb]);
    apply_z(Rx[0], Ry[0], z);
    vli_set(p_result->x, Rx[0]);
    vli_set(p_result->y, Ry[0]);
}
// Convert a 48-byte big-endian byte string into little-endian 64-bit limbs.
ZT_INLINE void ECC_CURVE_BYTES2native(uint64_t p_native[ECC_CURVE_DIGITS], const uint8_t p_bytes[ECC_CURVE_BYTES])
{
    for (unsigned int i = 0; i < ECC_CURVE_DIGITS; ++i) {
        // Limb i comes from the i-th 8-byte group counted from the END of the buffer.
        const uint8_t *const p_digit = p_bytes + 8 * (ECC_CURVE_DIGITS - 1 - i);
        p_native[i] = ((uint64_t)p_digit[0] << 56) | ((uint64_t)p_digit[1] << 48) | ((uint64_t)p_digit[2] << 40) | ((uint64_t)p_digit[3] << 32) | ((uint64_t)p_digit[4] << 24) | ((uint64_t)p_digit[5] << 16) | ((uint64_t)p_digit[6] << 8) | (uint64_t)p_digit[7];
    }
}
// Inverse of ECC_CURVE_BYTES2native: little-endian limbs -> 48-byte big-endian string.
ZT_INLINE void ecc_native2bytes(uint8_t p_bytes[ECC_CURVE_BYTES], const uint64_t p_native[ECC_CURVE_DIGITS])
{
    for (unsigned int i = 0; i < ECC_CURVE_DIGITS; ++i) {
        uint8_t *p_digit = p_bytes + 8 * (ECC_CURVE_DIGITS - 1 - i);
        p_digit[0] = p_native[i] >> 56;
        p_digit[1] = p_native[i] >> 48;
        p_digit[2] = p_native[i] >> 40;
        p_digit[3] = p_native[i] >> 32;
        p_digit[4] = p_native[i] >> 24;
        p_digit[5] = p_native[i] >> 16;
        p_digit[6] = p_native[i] >> 8;
        p_digit[7] = p_native[i];
    }
}
// In-place modular square root: a = a^((p+1)/4) mod p, which is a square root
// of a whenever one exists, because the P-384 prime satisfies p == 3 (mod 4).
// Implemented as square-and-multiply over the bits of p + 1.
void mod_sqrt(uint64_t a[ECC_CURVE_DIGITS])
{
    uint64_t l_result[ECC_CURVE_DIGITS] = { 1 }, p1[ECC_CURVE_DIGITS] = { 1 };
    vli_add(p1, curve_p, p1); // p1 = p + 1
    // Exponentiate by (p + 1) / 4: skip the two low bits (both zero since 4 | p+1).
    for (int i = (int)vli_numBits(p1) - 1; i > 1; --i) {
        vli_modSquare_fast(l_result, l_result);
        if (vli_testBit(p1, i)) {
            vli_modMult_fast(l_result, l_result, a);
        }
    }
    vli_set(a, l_result);
}
// Decompress a SEC1 point-compressed public key (0x02/0x03 prefix + X):
// recover y from y^2 = x^3 - 3x + b, then pick the root whose parity matches
// the prefix's low bit. Does not validate that the input is on the curve.
void ecc_point_decompress(EccPoint *p_point, const uint8_t p_compressed[ECC_CURVE_BYTES + 1])
{
    static const uint64_t _3[ECC_CURVE_DIGITS] = { 3 }; // a = -3 coefficient
    ECC_CURVE_BYTES2native(p_point->x, p_compressed + 1);
    vli_modSquare_fast(p_point->y, p_point->x);          // y = x^2
    vli_modSub(p_point->y, p_point->y, _3, curve_p);     // y = x^2 - 3
    vli_modMult_fast(p_point->y, p_point->y, p_point->x); // y = x^3 - 3x
    vli_modAdd(p_point->y, p_point->y, curve_b, curve_p); // y = x^3 - 3x + b
    mod_sqrt(p_point->y);
    // If the computed root's parity disagrees with the prefix, use p - y instead.
    if ((p_point->y[0] & 0x01) != (p_compressed[0] & 0x01)) {
        vli_sub(p_point->y, curve_p, p_point->y);
    }
}
// Generate a P-384 key pair: p_privateKey gets the 48-byte big-endian scalar,
// p_publicKey gets the point-compressed public key (0x02/0x03 parity prefix + X).
// Returns false only if the RNG repeatedly fails to produce a usable scalar
// (ECC_CREATE_KEY_MAX_ATTEMPTS exhausted).
// Fix: l_public is now zero-initialized. In the original, if the very first
// random scalar happened to be zero the point multiply was skipped and the
// do/while condition read l_public uninitialized (undefined behavior).
ZT_INLINE bool ecc_make_key(uint8_t p_publicKey[ECC_CURVE_BYTES + 1], uint8_t p_privateKey[ECC_CURVE_BYTES])
{
    uint64_t l_private[ECC_CURVE_DIGITS];
    EccPoint l_public = {}; // zero point == identity, forcing a retry if the multiply is skipped
    unsigned int l_tries = 0;
    do {
        if (unlikely(l_tries++ >= ECC_CREATE_KEY_MAX_ATTEMPTS))
            return false;
        Utils::getSecureRandom(l_private, ECC_CURVE_BYTES);
        if (likely(!vli_isZero(l_private))) {
            // Reduce into [1, n): if private >= n, subtract n once.
            if (vli_cmp(curve_n, l_private) != 1)
                vli_sub(l_private, l_private, curve_n);
            EccPoint_mult(&l_public, &curve_G, l_private, NULL);
        }
    } while (EccPoint_isZero(&l_public));
    ecc_native2bytes(p_privateKey, l_private);
    ecc_native2bytes(p_publicKey + 1, l_public.x);
    p_publicKey[0] = 0x02 + (l_public.y[0] & 0x01); // SEC1 prefix encodes Y's parity
    return true;
}
// ECDH: secret = X coordinate of (ourPrivate * theirPublic).
// A fresh random value is used as the initial projective Z coordinate to
// blind the scalar multiplication against side-channel observation.
// Returns false if the result is the identity (degenerate input).
ZT_INLINE bool ecdh_shared_secret(const uint8_t p_publicKey[ECC_CURVE_BYTES + 1], const uint8_t p_privateKey[ECC_CURVE_BYTES], uint8_t p_secret[ECC_CURVE_BYTES])
{
    EccPoint l_public;
    uint64_t l_private[ECC_CURVE_DIGITS];
    uint64_t l_random[ECC_CURVE_DIGITS];
    Utils::getSecureRandom(l_random, ECC_CURVE_BYTES);
    ecc_point_decompress(&l_public, p_publicKey);
    ECC_CURVE_BYTES2native(l_private, p_privateKey);
    EccPoint l_product;
    EccPoint_mult(&l_product, &l_public, l_private, l_random);
    ecc_native2bytes(p_secret, l_product.x); // only X is used as the raw shared secret
    return !EccPoint_isZero(&l_product);
}
// General (slow) modular multiply for an arbitrary odd modulus — used for
// arithmetic mod curve_n, where the fast p-specific reduction does not apply.
// Reduces the double-width product by repeated aligned shift-and-subtract.
void vli_modMult(uint64_t *const p_result, uint64_t *const p_left, uint64_t *const p_right, const uint64_t *const p_mod)
{
    uint64_t l_product[2 * ECC_CURVE_DIGITS], l_modMultiple[2 * ECC_CURVE_DIGITS];
    unsigned int l_digitShift, l_bitShift, l_productBits, l_modBits = vli_numBits(p_mod);
    vli_mult(l_product, p_left, p_right);
    // Total significant bits of the double-width product.
    l_productBits = vli_numBits(l_product + ECC_CURVE_DIGITS);
    if (l_productBits) {
        l_productBits += ECC_CURVE_DIGITS * 64;
    }
    else {
        l_productBits = vli_numBits(l_product);
    }
    if (l_productBits < l_modBits) {
        vli_set(p_result, l_product); // already reduced
        return;
    }
    // Align the modulus with the product's top bit, then repeatedly subtract
    // and shift right one bit at a time.
    vli_clear(l_modMultiple);
    vli_clear(l_modMultiple + ECC_CURVE_DIGITS);
    l_digitShift = (l_productBits - l_modBits) / 64;
    l_bitShift = (l_productBits - l_modBits) % 64;
    if (l_bitShift) {
        l_modMultiple[l_digitShift + ECC_CURVE_DIGITS] = vli_lshift(l_modMultiple + l_digitShift, p_mod, l_bitShift);
    }
    else {
        vli_set(l_modMultiple + l_digitShift, p_mod);
    }
    // p_result is temporarily used as the constant 1 for high-half borrow propagation.
    vli_clear(p_result);
    p_result[0] = 1;
    while (l_productBits > ECC_CURVE_DIGITS * 64 || vli_cmp(l_modMultiple, p_mod) >= 0) {
        int l_cmp = vli_cmp(l_modMultiple + ECC_CURVE_DIGITS, l_product + ECC_CURVE_DIGITS);
        if (l_cmp < 0 || (l_cmp == 0 && vli_cmp(l_modMultiple, l_product) <= 0)) {
            if (vli_sub(l_product, l_product, l_modMultiple)) { /* borrow */
                vli_sub(l_product + ECC_CURVE_DIGITS, l_product + ECC_CURVE_DIGITS, p_result);
            }
            vli_sub(l_product + ECC_CURVE_DIGITS, l_product + ECC_CURVE_DIGITS, l_modMultiple + ECC_CURVE_DIGITS);
        }
        // Shift the aligned modulus right one bit across its two halves.
        uint64_t l_carry = (l_modMultiple[ECC_CURVE_DIGITS] & 0x01) << 63;
        vli_rshift1(l_modMultiple + ECC_CURVE_DIGITS);
        vli_rshift1(l_modMultiple);
        l_modMultiple[ECC_CURVE_DIGITS - 1] |= l_carry;
        --l_productBits;
    }
    vli_set(p_result, l_product);
}
// ECDSA sign: signature = (r, s) with r = (k*G).x mod n and
// s = (e + r*d) / k mod n, each serialized as 48 big-endian bytes.
// Returns false only if the RNG repeatedly fails to yield a usable nonce k.
// Fix: p is now zero-initialized. In the original, if the very first random k
// was zero the point multiply was skipped and the do/while condition read
// p.x uninitialized (undefined behavior).
ZT_INLINE bool ecdsa_sign(const uint8_t p_privateKey[ECC_CURVE_BYTES], const uint8_t p_hash[ECC_CURVE_BYTES], uint8_t p_signature[ECC_CURVE_BYTES * 2])
{
    uint64_t k[ECC_CURVE_DIGITS], l_tmp[ECC_CURVE_DIGITS], l_s[ECC_CURVE_DIGITS];
    EccPoint p = {}; // zero x forces a retry if the multiply below is skipped
    unsigned int l_tries = 0;
    do {
        if (unlikely(l_tries++ >= ECC_CREATE_KEY_MAX_ATTEMPTS)) {
            return false;
        }
        Utils::getSecureRandom(k, ECC_CURVE_BYTES);
        if (likely(!vli_isZero(k))) {
            // Reduce nonce into [1, n), then compute r = (k*G).x mod n.
            if (vli_cmp(curve_n, k) != 1) {
                vli_sub(k, k, curve_n);
            }
            EccPoint_mult(&p, &curve_G, k, NULL);
            if (vli_cmp(curve_n, p.x) != 1) {
                vli_sub(p.x, p.x, curve_n);
            }
        }
    } while (vli_isZero(p.x)); // r must be nonzero
    ecc_native2bytes(p_signature, p.x);
    ECC_CURVE_BYTES2native(l_tmp, p_privateKey);
    vli_modMult(l_s, p.x, l_tmp, curve_n); /* s = r*d */
    ECC_CURVE_BYTES2native(l_tmp, p_hash);
    vli_modAdd(l_s, l_tmp, l_s, curve_n); /* s = e + r*d */
    vli_modInv(k, k, curve_n);            /* k = 1 / k */
    vli_modMult(l_s, l_s, k, curve_n);    /* s = (e + r*d) / k */
    ecc_native2bytes(p_signature + ECC_CURVE_BYTES, l_s);
    return true;
}
// ECDSA verify: accepts iff (u1*G + u2*Q).x mod n == r, where u1 = e/s and
// u2 = r/s (mod n). Uses Shamir's trick — G + Q is precomputed so the double
// scalar multiplication needs only one doubling per bit plus a table lookup
// from {identity, G, Q, G+Q} indexed by the current bits of u1 and u2.
ZT_INLINE bool ecdsa_verify(const uint8_t p_publicKey[ECC_CURVE_BYTES + 1], const uint8_t p_hash[ECC_CURVE_BYTES], const uint8_t p_signature[ECC_CURVE_BYTES * 2])
{
    uint64_t u1[ECC_CURVE_DIGITS], u2[ECC_CURVE_DIGITS], z[ECC_CURVE_DIGITS], rx[ECC_CURVE_DIGITS], ry[ECC_CURVE_DIGITS], tx[ECC_CURVE_DIGITS], ty[ECC_CURVE_DIGITS], tz[ECC_CURVE_DIGITS], l_r[ECC_CURVE_DIGITS], l_s[ECC_CURVE_DIGITS];
    EccPoint l_public, l_sum;
    ecc_point_decompress(&l_public, p_publicKey);
    ECC_CURVE_BYTES2native(l_r, p_signature);
    ECC_CURVE_BYTES2native(l_s, p_signature + ECC_CURVE_BYTES);
    // Reject r or s outside [1, n-1].
    if (unlikely(vli_isZero(l_r) || vli_isZero(l_s) || (vli_cmp(curve_n, l_r) != 1) || (vli_cmp(curve_n, l_s) != 1))) {
        return false;
    }
    vli_modInv(z, l_s, curve_n);        // z = 1/s mod n
    ECC_CURVE_BYTES2native(u1, p_hash);
    vli_modMult(u1, u1, z, curve_n);    // u1 = e/s
    vli_modMult(u2, l_r, z, curve_n);   // u2 = r/s
    // Precompute l_sum = G + Q in affine coordinates.
    vli_set(l_sum.x, l_public.x);
    vli_set(l_sum.y, l_public.y);
    vli_set(tx, curve_G.x);
    vli_set(ty, curve_G.y);
    vli_modSub(z, l_sum.x, tx, curve_p); // z = x_Q - x_G (the implicit co-Z denominator)
    XYcZ_add(tx, ty, l_sum.x, l_sum.y);
    vli_modInv(z, z, curve_p);
    apply_z(l_sum.x, l_sum.y, z);
    // Table indexed by (bit of u1) | (bit of u2) << 1.
    const EccPoint *const l_points[4] = { NULL, &curve_G, &l_public, &l_sum };
    unsigned int l_numBits = std::max(vli_numBits(u1), vli_numBits(u2));
    const EccPoint *const l_point = l_points[(!!vli_testBit(u1, l_numBits - 1)) | ((!!vli_testBit(u2, l_numBits - 1)) << 1)];
    vli_set(rx, l_point->x);
    vli_set(ry, l_point->y);
    vli_clear(z);
    z[0] = 1;
    for (int i = l_numBits - 2; i >= 0; --i) {
        EccPoint_double_jacobian(rx, ry, z);
        int l_index = (!!vli_testBit(u1, i)) | ((!!vli_testBit(u2, i)) << 1);
        const EccPoint *const l_point2 = l_points[l_index];
        if (l_point2) { // NULL entry means "add nothing" for this bit pair
            vli_set(tx, l_point2->x);
            vli_set(ty, l_point2->y);
            apply_z(tx, ty, z);
            vli_modSub(tz, rx, tx, curve_p);
            XYcZ_add(tx, ty, rx, ry);
            vli_modMult_fast(z, z, tz); // fold the addition's Z factor into z
        }
    }
    vli_modInv(z, z, curve_p);
    apply_z(rx, ry, z); // back to affine
    // Compare x mod n against r.
    if (vli_cmp(curve_n, rx) != 1) {
        vli_sub(rx, rx, curve_n);
    }
    return (vli_cmp(rx, l_r) == 0);
}
} // anonymous namespace
// Public API: generate a P-384 key pair, aborting the process on RNG failure
// (key generation failure is unrecoverable for the caller).
// Fix: the error message named the wrong function ("ecdsa_make_key"); the
// routine actually being called is ecc_make_key().
void ECC384GenerateKey(uint8_t pub[ZT_ECC384_PUBLIC_KEY_SIZE], uint8_t priv[ZT_ECC384_PRIVATE_KEY_SIZE])
{
    if (unlikely(!ecc_make_key(pub, priv))) {
        fprintf(stderr, "FATAL: ecc_make_key() failed!" ZT_EOL_S);
        abort();
    }
}
// Public API: sign a 48-byte hash, aborting on nonce-generation failure
// (which only occurs if the secure RNG is broken).
void ECC384ECDSASign(const uint8_t priv[ZT_ECC384_PRIVATE_KEY_SIZE], const uint8_t hash[ZT_ECC384_SIGNATURE_HASH_SIZE], uint8_t sig[ZT_ECC384_SIGNATURE_SIZE])
{
    if (unlikely(!ecdsa_sign(priv, hash, sig))) {
        fprintf(stderr, "FATAL: ecdsa_sign() failed!" ZT_EOL_S);
        abort();
    }
}
// Public API: verify an ECDSA signature over a 48-byte hash.
bool ECC384ECDSAVerify(const uint8_t pub[ZT_ECC384_PUBLIC_KEY_SIZE], const uint8_t hash[ZT_ECC384_SIGNATURE_HASH_SIZE], const uint8_t sig[ZT_ECC384_SIGNATURE_SIZE]) { return (ecdsa_verify(pub, hash, sig) != 0); }
// Public API: ECDH key agreement producing a raw 48-byte shared secret (hash before use).
bool ECC384ECDH(const uint8_t theirPub[ZT_ECC384_PUBLIC_KEY_SIZE], const uint8_t ourPriv[ZT_ECC384_PRIVATE_KEY_SIZE], uint8_t secret[ZT_ECC384_SHARED_SECRET_SIZE]) { return (ecdh_shared_secret(theirPub, ourPriv, secret) != 0); }
} // namespace ZeroTier

View file

@ -1,106 +0,0 @@
/*
* Copyright (c)2013-2021 ZeroTier, Inc.
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
*/
/****/
/*
* The contents of ECC384.cpp are third party code and are licensed under
* the BSD 2-clause license.
*
* The built-in implementation is easy-ecc by Kenneth MacKay and can be found
* here: https://github.com/esxgx/easy-ecc
*
* Our copy is trimmed down with unused stuff removed and also contains a few
* ZeroTier shims to implement these function interfaces. Otherwise it is
* unmodified from the original. It's a nice and fairly fast portable
* implementation that should build everywhere.
*
* For FIPS-compliant builds this will eventually link against FIPS-compliant
* crypto libraries instead of using the built-in version.
*/
#ifndef ZT_ECC384_HPP
#define ZT_ECC384_HPP
#include "Constants.hpp"
/**
* Size of a (point compressed) P-384 public key
*/
#define ZT_ECC384_PUBLIC_KEY_SIZE 49
/**
* Size of a P-384 private key
*/
#define ZT_ECC384_PRIVATE_KEY_SIZE 48
/**
* Size of the hash that should be signed using P-384
*/
#define ZT_ECC384_SIGNATURE_HASH_SIZE 48
/**
* Size of a P-384 signature
*/
#define ZT_ECC384_SIGNATURE_SIZE 96
/**
* Size of raw shared secret generated by ECDH key agreement
*/
#define ZT_ECC384_SHARED_SECRET_SIZE 48
namespace ZeroTier {
/**
* Generate a NIST P-384 key pair
*
* @param pub Buffer to receive point compressed public key
* @param priv Buffer to receiver private key
*/
void ECC384GenerateKey(uint8_t pub[ZT_ECC384_PUBLIC_KEY_SIZE], uint8_t priv[ZT_ECC384_PRIVATE_KEY_SIZE]);
/**
* Sign a hash with a NIST P-384 private key
*
* The hash must be 48 bytes in size. If it's longer only the first 48
* bytes are used.
*
* @param priv Private key
* @param hash 48-byte hash
* @param sig Buffer to receive signature
*/
void ECC384ECDSASign(const uint8_t priv[ZT_ECC384_PRIVATE_KEY_SIZE], const uint8_t hash[ZT_ECC384_SIGNATURE_HASH_SIZE], uint8_t sig[ZT_ECC384_SIGNATURE_SIZE]);
/**
* Verify a signature
*
* @param pub Public key
* @param hash 48-byte hash (usually first 48 bytes of SHA512(msg))
* @param sig Signature to check
* @return True if signature is valid
*/
bool ECC384ECDSAVerify(const uint8_t pub[ZT_ECC384_PUBLIC_KEY_SIZE], const uint8_t hash[ZT_ECC384_SIGNATURE_HASH_SIZE], const uint8_t sig[ZT_ECC384_SIGNATURE_SIZE]);
/**
* Perform ECDH key agreement
*
* The secret generated here is the raw 48-byte result of ECDH.
* It's typically hashed prior to use.
*
* @param theirPub Remote public key
* @param ourPriv Local private key
* @param secret Buffer to receive 48-byte secret
*/
bool ECC384ECDH(const uint8_t theirPub[ZT_ECC384_PUBLIC_KEY_SIZE], const uint8_t ourPriv[ZT_ECC384_PRIVATE_KEY_SIZE], uint8_t secret[ZT_ECC384_SHARED_SECRET_SIZE]);
} // namespace ZeroTier
#endif

View file

@ -1,256 +0,0 @@
/*
* Copyright (c)2013-2021 ZeroTier, Inc.
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
*/
/****/
#include "Endpoint.hpp"
#include "Utils.hpp"
namespace ZeroTier {
// Map an endpoint type to the single-character prefix used in the string form
// (see toString/fromString). Unknown/NIL types map to '0'.
static ZT_INLINE char s_endpointTypeChar(const ZT_EndpointType t)
{
    switch (t) {
        default: return '0';
        case ZT_ENDPOINT_TYPE_ZEROTIER: return 'z';
        case ZT_ENDPOINT_TYPE_ETHERNET: return 'e';
        case ZT_ENDPOINT_TYPE_WIFI_DIRECT: return 'd';
        case ZT_ENDPOINT_TYPE_BLUETOOTH: return 'b';
        case ZT_ENDPOINT_TYPE_IP: return 'i';
        case ZT_ENDPOINT_TYPE_IP_UDP: return 'u';
        case ZT_ENDPOINT_TYPE_IP_TCP: return 't';
        case ZT_ENDPOINT_TYPE_IP_TCP_WS: return 'w';
    }
}
// Inverse of s_endpointTypeChar: map a string prefix character back to an
// endpoint type (NIL for anything unrecognized).
static ZT_INLINE ZT_EndpointType s_charEndpointType(const char c)
{
    ZT_EndpointType t = ZT_ENDPOINT_TYPE_NIL;
    switch (c) {
        case 'z': t = ZT_ENDPOINT_TYPE_ZEROTIER; break;
        case 'e': t = ZT_ENDPOINT_TYPE_ETHERNET; break;
        case 'd': t = ZT_ENDPOINT_TYPE_WIFI_DIRECT; break;
        case 'b': t = ZT_ENDPOINT_TYPE_BLUETOOTH; break;
        case 'i': t = ZT_ENDPOINT_TYPE_IP; break;
        case 'u': t = ZT_ENDPOINT_TYPE_IP_UDP; break;
        case 't': t = ZT_ENDPOINT_TYPE_IP_TCP; break;
        case 'w': t = ZT_ENDPOINT_TYPE_IP_TCP_WS; break;
        default: break;
    }
    return t;
}
// Render this endpoint as "T/ADDRESS" where T is the single type character
// (see s_endpointTypeChar) and ADDRESS is the type-specific address string.
// NIL renders as just "0". Returns s for convenient chaining.
char *Endpoint::toString(char s[ZT_ENDPOINT_STRING_SIZE_MAX]) const noexcept
{
// Compile-time guarantee that the longest possible payload plus the "T/"
// prefix and terminator fits in the caller's buffer.
static_assert(ZT_ENDPOINT_STRING_SIZE_MAX > (ZT_INETADDRESS_STRING_SIZE_MAX + 4), "overflow");
static_assert(ZT_ENDPOINT_STRING_SIZE_MAX > (ZT_FINGERPRINT_STRING_SIZE_MAX + 4), "overflow");
s[0] = s_endpointTypeChar(this->type);
switch (this->type) {
default: // ZT_ENDPOINT_TYPE_NIL
s[1] = 0;
break;
case ZT_ENDPOINT_TYPE_ZEROTIER:
// ZeroTier endpoints render their identity fingerprint.
s[1] = '/';
zt().toString(s + 2);
break;
case ZT_ENDPOINT_TYPE_ETHERNET:
case ZT_ENDPOINT_TYPE_WIFI_DIRECT:
case ZT_ENDPOINT_TYPE_BLUETOOTH:
// MAC-addressed endpoint types render their Ethernet address.
s[1] = '/';
eth().toString(s + 2);
break;
case ZT_ENDPOINT_TYPE_IP:
case ZT_ENDPOINT_TYPE_IP_UDP:
case ZT_ENDPOINT_TYPE_IP_TCP:
case ZT_ENDPOINT_TYPE_IP_TCP_WS:
// IP-based endpoint types render their InetAddress (IP/port).
s[1] = '/';
ip().toString(s + 2);
break;
}
return s;
}
bool Endpoint::fromString(const char *s) noexcept
{
memoryZero(this);
if ((!s) || (!*s)) {
// Empty strings are considered NIL endpoints.
return true;
}
else if (s[1] == '/') {
// type/ADDRESS is a fully qualified endpoint.
this->type = s_charEndpointType(s[0]);
switch (this->type) {
case ZT_ENDPOINT_TYPE_NIL: break;
case ZT_ENDPOINT_TYPE_ZEROTIER:
if (!s[2])
return false;
break;
case ZT_ENDPOINT_TYPE_ETHERNET:
case ZT_ENDPOINT_TYPE_WIFI_DIRECT:
case ZT_ENDPOINT_TYPE_BLUETOOTH: {
if (!s[2])
return false;
MAC tmpmac;
tmpmac.fromString(s + 2);
this->value.mac = tmpmac.toInt();
} break;
case ZT_ENDPOINT_TYPE_IP:
case ZT_ENDPOINT_TYPE_IP_UDP:
case ZT_ENDPOINT_TYPE_IP_TCP:
case ZT_ENDPOINT_TYPE_IP_TCP_WS: {
if (!s[2])
return false;
if (!asInetAddress(this->value.ss).fromString(s + 2))
return false;
} break;
}
}
else if (strchr(s, '/') != nullptr) {
// IP/port is parsed as an IP_UDP endpoint for backward compatibility.
this->type = ZT_ENDPOINT_TYPE_IP_UDP;
return asInetAddress(this->value.ss).fromString(s);
}
return false;
}
// Serialize this endpoint. Type bytes are offset by 16 so new-style records
// can be distinguished from legacy serialized InetAddress fields (whose first
// byte is < 16); see unmarshal(). Returns the number of bytes written.
int Endpoint::marshal(uint8_t data[ZT_ENDPOINT_MARSHAL_SIZE_MAX]) const noexcept
{
switch (this->type) {
default: // ZT_ENDPOINT_TYPE_NIL
// NIL endpoints get serialized like NIL InetAddress instances.
data[0] = ZT_ENDPOINT_TYPE_NIL;
return 1;
case ZT_ENDPOINT_TYPE_ZEROTIER:
// One type byte, 5-byte address, then the full fingerprint hash.
data[0] = 16 + ZT_ENDPOINT_TYPE_ZEROTIER;
Address(this->value.fp.address).copyTo(data + 1);
Utils::copy<ZT_FINGERPRINT_HASH_SIZE>(data + 1 + ZT_ADDRESS_LENGTH, this->value.fp.hash);
return 1 + ZT_ADDRESS_LENGTH + ZT_FINGERPRINT_HASH_SIZE;
case ZT_ENDPOINT_TYPE_ETHERNET:
case ZT_ENDPOINT_TYPE_WIFI_DIRECT:
case ZT_ENDPOINT_TYPE_BLUETOOTH:
// One type byte plus a 6-byte MAC.
data[0] = 16 + (uint8_t)this->type;
MAC(this->value.mac).copyTo(data + 1);
return 7;
case ZT_ENDPOINT_TYPE_IP_UDP:
// Default UDP mode gets serialized to look exactly like an InetAddress.
return asInetAddress(this->value.ss).marshal(data);
case ZT_ENDPOINT_TYPE_IP:
case ZT_ENDPOINT_TYPE_IP_TCP:
case ZT_ENDPOINT_TYPE_IP_TCP_WS:
// Other IP types get serialized as new version Endpoint instances with type.
data[0] = 16 + (uint8_t)this->type;
return 1 + asInetAddress(this->value.ss).marshal(data + 1);
}
}
// Deserialize an endpoint written by marshal(). Returns the number of bytes
// consumed or -1 on error; this object is zeroed first in every case.
int Endpoint::unmarshal(const uint8_t *restrict data, int len) noexcept
{
memoryZero(this);
if (unlikely(len <= 0))
return -1;
// Serialized endpoints with type bytes less than 16 are passed through
// to the unmarshal method of InetAddress and considered UDP endpoints.
// This allows backward compatibility with old endpoint fields in the
// protocol that were serialized InetAddress instances.
if (data[0] < 16) {
switch (data[0]) {
case 0: return 1; // NIL / zero InetAddress
// 4 and 6 presumably mark serialized IPv4/IPv6 InetAddress records —
// confirm against InetAddress::marshal.
case 4:
case 6: this->type = ZT_ENDPOINT_TYPE_IP_UDP; return asInetAddress(this->value.ss).unmarshal(data, len);
}
return -1;
}
// New-style records: type byte is the endpoint type offset by 16.
switch ((this->type = (ZT_EndpointType)(data[0] - 16))) {
case ZT_ENDPOINT_TYPE_NIL: return 1;
case ZT_ENDPOINT_TYPE_ZEROTIER:
// 5-byte address followed by the fingerprint hash.
if (len >= (1 + ZT_ADDRESS_LENGTH + ZT_FINGERPRINT_HASH_SIZE)) {
this->value.fp.address = Address(data + 1).toInt();
Utils::copy<ZT_FINGERPRINT_HASH_SIZE>(this->value.fp.hash, data + 1 + ZT_ADDRESS_LENGTH);
return 1 + ZT_ADDRESS_LENGTH + ZT_FINGERPRINT_HASH_SIZE;
}
return -1;
case ZT_ENDPOINT_TYPE_ETHERNET:
case ZT_ENDPOINT_TYPE_WIFI_DIRECT:
case ZT_ENDPOINT_TYPE_BLUETOOTH:
// One type byte plus a 6-byte MAC.
if (len >= 7) {
MAC tmp;
tmp.setTo(data + 1);
this->value.mac = tmp.toInt();
return 7;
}
return -1;
case ZT_ENDPOINT_TYPE_IP:
case ZT_ENDPOINT_TYPE_IP_UDP:
case ZT_ENDPOINT_TYPE_IP_TCP:
case ZT_ENDPOINT_TYPE_IP_TCP_WS: return asInetAddress(this->value.ss).unmarshal(data + 1, len - 1);
default: break;
}
// Unrecognized types can still be passed over in a valid stream if they are
// prefixed by a 16-bit size. This allows forward compatibility with future
// endpoint types.
this->type = ZT_ENDPOINT_TYPE_NIL;
if (len < 3)
return -1;
const int unrecLen = 1 + (int)Utils::loadBigEndian<uint16_t>(data + 1);
return (unrecLen > len) ? -1 : unrecLen;
}
// Two endpoints are equal when their types match and their type-specific
// values match; NIL (and unknown) typed endpoints compare equal by type alone.
bool Endpoint::operator==(const Endpoint &other) const noexcept
{
    if (this->type != other.type)
        return false;
    switch (this->type) {
        case ZT_ENDPOINT_TYPE_ZEROTIER: return zt() == other.zt();
        case ZT_ENDPOINT_TYPE_ETHERNET:
        case ZT_ENDPOINT_TYPE_WIFI_DIRECT:
        case ZT_ENDPOINT_TYPE_BLUETOOTH: return this->value.mac == other.value.mac;
        case ZT_ENDPOINT_TYPE_IP:
        case ZT_ENDPOINT_TYPE_IP_UDP:
        case ZT_ENDPOINT_TYPE_IP_TCP:
        case ZT_ENDPOINT_TYPE_IP_TCP_WS: return ip() == other.ip();
        default: return true;
    }
}
// Total ordering: endpoints of different types order by their numeric type
// code; endpoints of the same type order by their type-specific value (NIL
// and unknown types are all considered equal, hence "not less").
bool Endpoint::operator<(const Endpoint &other) const noexcept
{
    if (this->type != other.type)
        return (int)this->type < (int)other.type;
    switch (this->type) {
        case ZT_ENDPOINT_TYPE_ZEROTIER: return zt() < other.zt();
        case ZT_ENDPOINT_TYPE_ETHERNET:
        case ZT_ENDPOINT_TYPE_WIFI_DIRECT:
        case ZT_ENDPOINT_TYPE_BLUETOOTH: return this->value.mac < other.value.mac;
        case ZT_ENDPOINT_TYPE_IP:
        case ZT_ENDPOINT_TYPE_IP_UDP:
        case ZT_ENDPOINT_TYPE_IP_TCP:
        case ZT_ENDPOINT_TYPE_IP_TCP_WS: return ip() < other.ip();
        default: return true;
    }
}
} // namespace ZeroTier

View file

@ -1,223 +0,0 @@
/*
* Copyright (c)2013-2021 ZeroTier, Inc.
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
*/
/****/
#ifndef ZT_ENDPOINT_HPP
#define ZT_ENDPOINT_HPP
#include "Address.hpp"
#include "Constants.hpp"
#include "Fingerprint.hpp"
#include "InetAddress.hpp"
#include "MAC.hpp"
#include "TriviallyCopyable.hpp"
#include "Utils.hpp"
#define ZT_ENDPOINT_STRING_SIZE_MAX 256
#define ZT_ENDPOINT_MARSHAL_SIZE_MAX 192
namespace ZeroTier {
static_assert((ZT_ENDPOINT_MARSHAL_SIZE_MAX - 1) > ZT_INETADDRESS_MARSHAL_SIZE_MAX, "ZT_ENDPOINT_MARSHAL_SIZE_MAX not large enough");
static_assert((ZT_ENDPOINT_MARSHAL_SIZE_MAX - 1) > sizeof(ZT_Fingerprint), "ZT_ENDPOINT_MARSHAL_SIZE_MAX not large enough");
static_assert((ZT_ENDPOINT_MARSHAL_SIZE_MAX - 1) > sizeof(InetAddress), "ZT_ENDPOINT_MARSHAL_SIZE_MAX not large enough");
static_assert((ZT_ENDPOINT_MARSHAL_SIZE_MAX - 1) > sizeof(MAC), "ZT_ENDPOINT_MARSHAL_SIZE_MAX not large enough");
static_assert((ZT_ENDPOINT_MARSHAL_SIZE_MAX - 1) > sizeof(Fingerprint), "ZT_ENDPOINT_MARSHAL_SIZE_MAX not large enough");
/**
* Endpoint variant specifying some form of network endpoint.
*
* This is sort of a superset of InetAddress and for the standard UDP
* protocol marshals and unmarshals to a compatible format. This makes
* it backward compatible with older node versions' protocol fields
* where InetAddress was used as long as only the UDP type is exchanged
* with those nodes.
*/
class Endpoint
: public ZT_Endpoint
, public TriviallyCopyable {
public:
/**
* Create a NIL/empty endpoint
*/
ZT_INLINE Endpoint() noexcept { memoryZero(this); }
// Copy-construct from the C API struct (raw byte copy; sizes are asserted
// identical below).
ZT_INLINE Endpoint(const ZT_Endpoint &ep) noexcept { Utils::copy<sizeof(ZT_Endpoint)>((ZT_Endpoint *)this, &ep); }
/**
* Create an endpoint for a type that uses an IP
*
* A NIL/false InetAddress yields a NIL endpoint regardless of 'et'.
*
* @param inaddr IP/port
* @param et Endpoint type (default: IP_UDP)
*/
ZT_INLINE Endpoint(const InetAddress &inaddr, const ZT_EndpointType et = ZT_ENDPOINT_TYPE_IP_UDP) noexcept
{
if (inaddr) {
this->type = et;
Utils::copy<sizeof(struct sockaddr_storage)>(&(this->value.ss), &(inaddr.as.ss));
}
else {
memoryZero(this);
}
}
/**
* Create an endpoint for ZeroTier relaying (ZEROTIER type)
*
* A NIL/false fingerprint yields a NIL endpoint.
*
* @param zt_ ZeroTier identity fingerprint
*/
ZT_INLINE Endpoint(const Fingerprint &zt_) noexcept
{
if (zt_) {
this->type = ZT_ENDPOINT_TYPE_ZEROTIER;
this->value.fp = zt_;
}
else {
memoryZero(this);
}
}
/**
* Create an endpoint for a type that uses a MAC address
*
* A zero/false MAC yields a NIL endpoint regardless of 'et'.
*
* @param eth_ Ethernet address
* @param et Endpoint type (default: ETHERNET)
*/
ZT_INLINE Endpoint(const MAC &eth_, const ZT_EndpointType et = ZT_ENDPOINT_TYPE_ETHERNET) noexcept
{
if (eth_) {
this->type = et;
this->value.mac = eth_.toInt();
}
else {
memoryZero(this);
}
}
/**
* @return True if endpoint type isn't NIL
*/
ZT_INLINE operator bool() const noexcept { return this->type != ZT_ENDPOINT_TYPE_NIL; }
/**
* @return True if this endpoint type has an InetAddress address type and thus ip() is valid
*/
ZT_INLINE bool isInetAddr() const noexcept
{
switch (this->type) {
case ZT_ENDPOINT_TYPE_IP:
case ZT_ENDPOINT_TYPE_IP_UDP:
case ZT_ENDPOINT_TYPE_IP_TCP:
case ZT_ENDPOINT_TYPE_IP_TCP_WS: return true;
default: return false;
}
}
/**
* Check whether this endpoint's address is the same as another.
*
* Right now this checks whether IPs are equal if both are IP based endpoints.
* Otherwise it checks for simple equality.
*
* @param ep Endpoint to check
* @return True if endpoints seem to refer to the same address/host
*/
ZT_INLINE bool isSameAddress(const Endpoint &ep) const noexcept
{
switch (this->type) {
case ZT_ENDPOINT_TYPE_IP:
case ZT_ENDPOINT_TYPE_IP_UDP:
case ZT_ENDPOINT_TYPE_IP_TCP:
case ZT_ENDPOINT_TYPE_IP_TCP_WS:
// Both are IP based: compare addresses only, ignoring port/protocol.
switch (ep.type) {
case ZT_ENDPOINT_TYPE_IP:
case ZT_ENDPOINT_TYPE_IP_UDP:
case ZT_ENDPOINT_TYPE_IP_TCP:
case ZT_ENDPOINT_TYPE_IP_TCP_WS: return ip().ipsEqual(ep.ip());
default: break;
}
break;
default: break;
}
return (*this) == ep;
}
/**
* Get InetAddress if this type uses IPv4 or IPv6 addresses (undefined otherwise)
*
* @return InetAddress instance
*/
ZT_INLINE const InetAddress &ip() const noexcept { return asInetAddress(this->value.ss); }
/**
* Get MAC if this is an Ethernet, WiFi direct, or Bluetooth type (undefined otherwise)
*
* @return Ethernet MAC
*/
ZT_INLINE MAC eth() const noexcept { return MAC(this->value.mac); }
/**
* Get fingerprint if this is a ZeroTier endpoint type (undefined otherwise)
*
* @return ZeroTier fingerprint
*/
ZT_INLINE Fingerprint zt() const noexcept { return Fingerprint(this->value.fp); }
// Hash bucket code derived from the type-specific value (1 for NIL/unknown).
ZT_INLINE unsigned long hashCode() const noexcept
{
switch (this->type) {
default: return 1;
case ZT_ENDPOINT_TYPE_ZEROTIER: return (unsigned long)this->value.fp.address;
case ZT_ENDPOINT_TYPE_ETHERNET:
case ZT_ENDPOINT_TYPE_WIFI_DIRECT:
case ZT_ENDPOINT_TYPE_BLUETOOTH: return (unsigned long)Utils::hash64(this->value.mac);
case ZT_ENDPOINT_TYPE_IP:
case ZT_ENDPOINT_TYPE_IP_UDP:
case ZT_ENDPOINT_TYPE_IP_TCP:
case ZT_ENDPOINT_TYPE_IP_TCP_WS: return ip().hashCode();
}
}
// Render as "T/ADDRESS" into the supplied buffer; returns the buffer.
char *toString(char s[ZT_ENDPOINT_STRING_SIZE_MAX]) const noexcept;
// Convenience overload returning a String.
ZT_INLINE String toString() const
{
char tmp[ZT_ENDPOINT_STRING_SIZE_MAX];
return String(toString(tmp));
}
// Parse the string form produced by toString(); returns true on success.
bool fromString(const char *s) noexcept;
static constexpr int marshalSizeMax() noexcept { return ZT_ENDPOINT_MARSHAL_SIZE_MAX; }
// Wire (de)serialization; see Endpoint.cpp for the format and the legacy
// InetAddress compatibility rules.
int marshal(uint8_t data[ZT_ENDPOINT_MARSHAL_SIZE_MAX]) const noexcept;
int unmarshal(const uint8_t *restrict data, int len) noexcept;
// Comparisons order first by type, then by type-specific value.
bool operator==(const Endpoint &ep) const noexcept;
ZT_INLINE bool operator!=(const Endpoint &ep) const noexcept { return !((*this) == ep); }
bool operator<(const Endpoint &ep) const noexcept;
ZT_INLINE bool operator>(const Endpoint &ep) const noexcept { return (ep < *this); }
ZT_INLINE bool operator<=(const Endpoint &ep) const noexcept { return !(ep < *this); }
ZT_INLINE bool operator>=(const Endpoint &ep) const noexcept { return !(*this < ep); }
};
static_assert(sizeof(Endpoint) == sizeof(ZT_Endpoint), "size mismatch");
} // namespace ZeroTier
#endif

View file

@ -1,71 +0,0 @@
/*
* Copyright (c)2013-2021 ZeroTier, Inc.
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
*/
/****/
#ifndef ZT_EXPECT_HPP
#define ZT_EXPECT_HPP
#include "Constants.hpp"
#include "Utils.hpp"
/**
* Number of buckets to use to maintain a list of expected replies.
*
* This must be a power of two. Memory consumed will be about this*4 bytes.
*/
#define ZT_EXPECT_BUCKETS 32768
/**
* 1/2 the TTL for expected replies in milliseconds
*
* This must be a power of two.
*/
#define ZT_EXPECT_TTL 4096LL
namespace ZeroTier {
/**
* Tracker for expected OK replies to packet IDs of sent packets
*/
class Expect {
public:
// Value-initializes the bucket array (all buckets start at zero).
ZT_INLINE Expect() : m_packetIdSent() {}
/**
* Called by other code when something is sending a packet that could potentially receive an OK response
*
* The packet ID is hashed (keyed with s_mapNonce — presumably so bucket
* indices can't be predicted externally; confirm in Utils) to pick a bucket,
* which is stamped with the current time in ZT_EXPECT_TTL units.
*
* @param packetId Packet ID of packet being sent (be sure it's post-armor())
* @param now Current time
*/
ZT_INLINE void sending(const uint64_t packetId, const int64_t now) noexcept { m_packetIdSent[Utils::hash64(packetId ^ Utils::s_mapNonce) % ZT_EXPECT_BUCKETS] = (uint32_t)(now / ZT_EXPECT_TTL); }
/**
* Check if an OK is expected and if so reset the corresponding bucket.
*
* This means this call mutates the state. If it returns true, it will
* subsequently return false. This is to filter OKs against replays or
* responses to queries we did not send.
*
* The exchange(0) atomically consumes the bucket; the reply is accepted if
* the stored stamp is within one ZT_EXPECT_TTL unit of now.
*
* @param inRePacketId In-re packet ID we're expecting
* @param now Current time
* @return True if we're expecting a reply (and a reset occurred)
*/
ZT_INLINE bool expecting(const uint64_t inRePacketId, const int64_t now) noexcept { return (((now / ZT_EXPECT_TTL) - (int64_t)m_packetIdSent[(unsigned long)Utils::hash64(inRePacketId ^ Utils::s_mapNonce) % ZT_EXPECT_BUCKETS].exchange(0)) <= 1); }
private:
// Each bucket contains a timestamp in units of the max expect duration.
std::atomic<uint32_t> m_packetIdSent[ZT_EXPECT_BUCKETS];
};
} // namespace ZeroTier
#endif

View file

@ -1,284 +0,0 @@
/*
* Copyright (c)2013-2021 ZeroTier, Inc.
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
*/
/****/
#ifndef ZT_FCV_HPP
#define ZT_FCV_HPP
#include "Constants.hpp"
#include "Utils.hpp"
#include <algorithm>
#include <iterator>
#include <memory>
#include <stdexcept>
namespace ZeroTier {
/**
* FCV is a Fixed Capacity Vector
*
* This doesn't implement everything in std::vector, just what we need. It
* also adds a few special things for use in ZT core code.
*
* @tparam T Type to contain
* @tparam C Maximum capacity of vector
*/
template <typename T, unsigned int C> class FCV {
  public:
    typedef T *iterator;
    typedef const T *const_iterator;

    ZT_INLINE FCV() noexcept : _s(0) {}
    ZT_INLINE FCV(const FCV &v) : _s(0) { *this = v; }

    /**
     * Construct from a pointer and element count.
     *
     * BUG FIX: _s was previously initialized to the raw 'len' even when
     * len > C, while only min(len, C) elements were constructed; clear() or
     * the destructor would then run destructors on raw storage. The stored
     * size is now clamped to capacity.
     */
    ZT_INLINE FCV(const T *const contents, const unsigned int len) : _s(std::min(len, C))
    {
        for (unsigned int i = 0; i < _s; ++i)
            new (reinterpret_cast<T *>(_m) + i) T(contents[i]);
    }

    // Construct from an iterator range (throws via push_back if the range
    // exceeds capacity).
    template <typename I> ZT_INLINE FCV(I i, I end) : _s(0)
    {
        while (i != end) {
            push_back(*i);
            ++i;
        }
    }

    ZT_INLINE ~FCV() { this->clear(); }

    ZT_INLINE FCV &operator=(const FCV &v)
    {
        if (likely(&v != this)) {
            this->clear();
            const unsigned int s = v._s;
            _s = s;
            for (unsigned int i = 0; i < s; ++i)
                new (reinterpret_cast<T *>(_m) + i) T(*(reinterpret_cast<const T *>(v._m) + i));
        }
        return *this;
    }

    /**
     * Clear this vector, destroying all content objects
     */
    ZT_INLINE void clear()
    {
        const unsigned int s = _s;
        _s = 0;
        for (unsigned int i = 0; i < s; ++i)
            (reinterpret_cast<T *>(_m) + i)->~T();
    }

    /**
     * Move contents from this vector to another and clear this vector.
     *
     * Unsafe: performs a raw byte copy and runs no destructors on the source,
     * so T must tolerate raw relocation.
     *
     * @param v Target vector
     */
    ZT_INLINE void unsafeMoveTo(FCV &v) noexcept
    {
        Utils::copy(v._m, _m, (v._s = _s) * sizeof(T));
        _s = 0;
    }

    ZT_INLINE iterator begin() noexcept { return reinterpret_cast<T *>(_m); }
    ZT_INLINE iterator end() noexcept { return reinterpret_cast<T *>(_m) + _s; }
    ZT_INLINE const_iterator begin() const noexcept { return reinterpret_cast<const T *>(_m); }
    ZT_INLINE const_iterator end() const noexcept { return reinterpret_cast<const T *>(_m) + _s; }

    // Bounds-checked element access; throws on an out-of-range index.
    ZT_INLINE T &operator[](const unsigned int i)
    {
        if (likely(i < _s))
            return reinterpret_cast<T *>(_m)[i];
        throw Utils::OutOfRangeException;
    }
    ZT_INLINE const T &operator[](const unsigned int i) const
    {
        if (likely(i < _s))
            return reinterpret_cast<const T *>(_m)[i];
        throw Utils::OutOfRangeException;
    }

    static constexpr unsigned int capacity() noexcept { return C; }
    ZT_INLINE unsigned int size() const noexcept { return _s; }
    ZT_INLINE bool empty() const noexcept { return (_s == 0); }
    ZT_INLINE T *data() noexcept { return reinterpret_cast<T *>(_m); }
    ZT_INLINE const T *data() const noexcept { return reinterpret_cast<const T *>(_m); }

    /**
     * Push a value onto the back of this vector
     *
     * Throws if the vector is already at capacity. (The previous comment
     * claimed this failed silently, which did not match the code.)
     *
     * @param v Value to push
     */
    ZT_INLINE void push_back(const T &v)
    {
        if (likely(_s < C))
            new (reinterpret_cast<T *>(_m) + _s++) T(v);
        else
            throw Utils::OutOfRangeException;
    }

    /**
     * Push new default value or return last in vector if full.
     *
     * @return Reference to new item
     */
    ZT_INLINE T &push()
    {
        if (likely(_s < C)) {
            return *(new (reinterpret_cast<T *>(_m) + _s++) T());
        }
        else {
            return *(reinterpret_cast<T *>(_m) + (C - 1));
        }
    }

    /**
     * Push new value or replace and return last in vector if full.
     *
     * @return Reference to new item
     */
    ZT_INLINE T &push(const T &v)
    {
        if (likely(_s < C)) {
            return *(new (reinterpret_cast<T *>(_m) + _s++) T(v));
        }
        else {
            T &tmp = *(reinterpret_cast<T *>(_m) + (C - 1));
            tmp = v;
            return tmp;
        }
    }

    /**
     * Remove the last element if this vector is not empty
     */
    ZT_INLINE void pop_back()
    {
        if (likely(_s != 0))
            (reinterpret_cast<T *>(_m) + --_s)->~T();
    }

    /**
     * Resize vector, default-constructing or destroying elements as needed
     *
     * @param ns New size (throws if larger than capacity C)
     */
    ZT_INLINE void resize(unsigned int ns)
    {
        if (unlikely(ns > C))
            throw Utils::OutOfRangeException;
        unsigned int s = _s;
        while (s < ns)
            new (reinterpret_cast<T *>(_m) + s++) T();
        while (s > ns)
            (reinterpret_cast<T *>(_m) + --s)->~T();
        _s = s;
    }

    /**
     * Set the size of this vector without otherwise changing anything
     *
     * Unsafe: the caller must ensure elements [0, ns) are actually constructed.
     *
     * @param ns New size
     */
    ZT_INLINE void unsafeSetSize(unsigned int ns) { _s = ns; }

    /**
     * This is a bounds checked auto-resizing variant of the [] operator
     *
     * If 'i' is out of bounds vs the current size of the vector, the vector is
     * resized. If that size would exceed C (capacity), 'i' is clipped to C-1.
     *
     * @param i Index to obtain as a reference, resizing if needed
     * @return Reference to value at this index
     */
    ZT_INLINE T &at(unsigned int i)
    {
        if (i >= _s) {
            if (unlikely(i >= C))
                i = C - 1;
            do {
                new (reinterpret_cast<T *>(_m) + _s++) T();
            } while (i >= _s);
        }
        return *(reinterpret_cast<T *>(_m) + i);
    }

    /**
     * Assign this vector's contents from a range of pointers or iterators
     *
     * If the range is larger than C it is truncated at C.
     *
     * @tparam X Inferred type of iterators or pointers
     * @param start Starting iterator
     * @param end Ending iterator (must be greater than start)
     */
    template <typename X> ZT_INLINE void assign(X start, const X &end)
    {
        const int l = std::min((int)std::distance(start, end), (int)C);
        if (l > 0) {
            this->resize((unsigned int)l);
            for (int i = 0; i < l; ++i)
                reinterpret_cast<T *>(_m)[i] = *(start++);
        }
        else {
            this->clear();
        }
    }

    ZT_INLINE bool operator==(const FCV &v) const noexcept
    {
        if (_s == v._s) {
            for (unsigned int i = 0; i < _s; ++i) {
                if (!(*(reinterpret_cast<const T *>(_m) + i) == *(reinterpret_cast<const T *>(v._m) + i)))
                    return false;
            }
            return true;
        }
        return false;
    }
    // BUG FIX: operator!= was "return *this != v;" — unconditional infinite
    // recursion. Defined in terms of operator== instead.
    ZT_INLINE bool operator!=(const FCV &v) const noexcept { return !(*this == v); }
    ZT_INLINE bool operator<(const FCV &v) const noexcept { return std::lexicographical_compare(begin(), end(), v.begin(), v.end()); }
    ZT_INLINE bool operator>(const FCV &v) const noexcept { return (v < *this); }
    // BUG FIX: operator>= was "return *this >= v;" (infinite recursion) and
    // operator<= delegated to it. Both now use the canonical forms in terms
    // of operator<.
    ZT_INLINE bool operator<=(const FCV &v) const noexcept { return !(v < *this); }
    ZT_INLINE bool operator>=(const FCV &v) const noexcept { return !(*this < v); }

  private:
#ifdef _MSC_VER
    uint8_t _m[sizeof(T) * C];
#else
    __attribute__((aligned(16))) uint8_t _m[sizeof(T) * C];
#endif
    unsigned int _s;
};
} // namespace ZeroTier
#endif

View file

@ -1,135 +0,0 @@
/*
* Copyright (c)2013-2021 ZeroTier, Inc.
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
*/
/****/
#ifndef ZT_FINGERPRINT_HPP
#define ZT_FINGERPRINT_HPP
#include "Address.hpp"
#include "Constants.hpp"
#include "TriviallyCopyable.hpp"
#include "Utils.hpp"
#define ZT_FINGERPRINT_STRING_SIZE_MAX 128
#define ZT_FINGERPRINT_MARSHAL_SIZE 53
namespace ZeroTier {
/**
* Address and full hash of an identity's public keys.
*
* This is the same size as ZT_Fingerprint and should be cast-able back and forth.
* This is checked in Tests.cpp.
*/
class Fingerprint
: public ZT_Fingerprint
, public TriviallyCopyable {
public:
// Zero-initialized (NIL) fingerprint.
ZT_INLINE Fingerprint() noexcept { memoryZero(this); }
// Copy-construct from the C API struct (raw byte copy; sizes are asserted
// identical below).
ZT_INLINE Fingerprint(const ZT_Fingerprint &fp) noexcept { Utils::copy<sizeof(ZT_Fingerprint)>(this, &fp); }
/**
* @return True if hash is not all zero (missing/unspecified)
*/
ZT_INLINE bool haveHash() const noexcept { return (!Utils::allZero(this->hash, ZT_FINGERPRINT_HASH_SIZE)); }
/**
* Get a base32-encoded representation of this fingerprint
*
* Format is the 10-hex-digit address, then (if a hash is present) '-' and
* the base32-encoded hash.
*
* @param s Base32 string
*/
ZT_INLINE char *toString(char s[ZT_FINGERPRINT_STRING_SIZE_MAX]) const noexcept
{
Address(this->address).toString(s);
if (haveHash()) {
s[ZT_ADDRESS_LENGTH_HEX] = '-';
Utils::b32e(this->hash, ZT_FINGERPRINT_HASH_SIZE, s + (ZT_ADDRESS_LENGTH_HEX + 1), ZT_FINGERPRINT_STRING_SIZE_MAX - (ZT_ADDRESS_LENGTH_HEX + 1));
}
return s;
}
// Convenience overload returning a String.
ZT_INLINE String toString() const
{
char tmp[ZT_FINGERPRINT_STRING_SIZE_MAX];
return String(toString(tmp));
}
/**
* Set this fingerprint to a base32-encoded string
*
* NOTE(review): the '-' separator character itself is never verified; any
* character at that position is accepted — confirm this leniency is intended.
*
* @param s String to decode
* @return True if string appears to be valid and of the proper length (no other checking is done)
*/
ZT_INLINE bool fromString(const char *const s) noexcept
{
if (!s)
return false;
const int l = (int)strlen(s);
if (l < ZT_ADDRESS_LENGTH_HEX)
return false;
// First 10 hex characters are the address.
char a[ZT_ADDRESS_LENGTH_HEX + 1];
Utils::copy<ZT_ADDRESS_LENGTH_HEX>(a, s);
a[ZT_ADDRESS_LENGTH_HEX] = 0;
this->address = Utils::hexStrToU64(a) & ZT_ADDRESS_MASK;
if (l > (ZT_ADDRESS_LENGTH_HEX + 1)) {
// Remainder after the separator must decode to exactly the hash size.
if (Utils::b32d(s + (ZT_ADDRESS_LENGTH_HEX + 1), this->hash, ZT_FINGERPRINT_HASH_SIZE) != ZT_FINGERPRINT_HASH_SIZE)
return false;
}
else {
// Address-only form: hash is left zeroed (haveHash() will be false).
Utils::zero<ZT_FINGERPRINT_HASH_SIZE>(this->hash);
}
return true;
}
ZT_INLINE void zero() noexcept { memoryZero(this); }
// Address alone is used as the hash bucket code.
ZT_INLINE unsigned long hashCode() const noexcept { return (unsigned long)this->address; }
ZT_INLINE operator bool() const noexcept { return this->address != 0; }
static constexpr int marshalSizeMax() noexcept { return ZT_FINGERPRINT_MARSHAL_SIZE; }
// Wire format: 5-byte address followed by the 48-byte hash.
ZT_INLINE int marshal(uint8_t data[ZT_FINGERPRINT_MARSHAL_SIZE]) const noexcept
{
Address(this->address).copyTo(data);
Utils::copy<ZT_FINGERPRINT_HASH_SIZE>(data + ZT_ADDRESS_LENGTH, this->hash);
return ZT_FINGERPRINT_MARSHAL_SIZE;
}
ZT_INLINE int unmarshal(const uint8_t *const data, int len) noexcept
{
if (unlikely(len < ZT_FINGERPRINT_MARSHAL_SIZE))
return -1;
this->address = Address(data);
Utils::copy<ZT_FINGERPRINT_HASH_SIZE>(hash, data + ZT_ADDRESS_LENGTH);
return ZT_FINGERPRINT_MARSHAL_SIZE;
}
// Ordering is by address first, then lexicographically by hash bytes.
ZT_INLINE bool operator==(const ZT_Fingerprint &h) const noexcept { return ((this->address == h.address) && (memcmp(this->hash, h.hash, ZT_FINGERPRINT_HASH_SIZE) == 0)); }
ZT_INLINE bool operator!=(const ZT_Fingerprint &h) const noexcept { return !(*this == h); }
ZT_INLINE bool operator<(const ZT_Fingerprint &h) const noexcept { return ((this->address < h.address) || ((this->address == h.address) && (memcmp(this->hash, h.hash, ZT_FINGERPRINT_HASH_SIZE) < 0))); }
ZT_INLINE bool operator>(const ZT_Fingerprint &h) const noexcept { return (*reinterpret_cast<const Fingerprint *>(&h) < *this); }
ZT_INLINE bool operator<=(const ZT_Fingerprint &h) const noexcept { return !(*reinterpret_cast<const Fingerprint *>(&h) < *this); }
ZT_INLINE bool operator>=(const ZT_Fingerprint &h) const noexcept { return !(*this < h); }
};
static_assert(sizeof(Fingerprint) == sizeof(ZT_Fingerprint), "size mismatch");
} // namespace ZeroTier
#endif

View file

@ -1,484 +0,0 @@
/*
* Copyright (c)2013-2021 ZeroTier, Inc.
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
*/
/****/
#include "Identity.hpp"
#include "Constants.hpp"
#include "Endpoint.hpp"
#include "MIMC52.hpp"
#include "SHA512.hpp"
#include "Salsa20.hpp"
#include "Utils.hpp"
#include <memory>
#include <utility>
namespace ZeroTier {
namespace {
// This is the memory-intensive hash function used to compute v0 identities from v0 public keys.
#define ZT_V0_IDENTITY_GEN_MEMORY 2097152
// Memory-hard hash for V0 identities: a SHA-512 of the public key drives a
// Salsa20 keystream that fills a 2MB scratch buffer sequentially, then the
// digest is permuted through random positions in that buffer. The statement
// order here is part of the hash definition and must not be changed.
void identityV0ProofOfWorkFrankenhash(const void *const restrict c25519CombinedPublicKey, void *const restrict digest, void *const restrict genmem) noexcept
{
// Digest publicKey[] to obtain initial digest
SHA512(digest, c25519CombinedPublicKey, ZT_C25519_COMBINED_PUBLIC_KEY_SIZE);
// Initialize genmem[] using Salsa20 in a CBC-like configuration since
// ordinary Salsa20 is randomly seek-able. This is good for a cipher
// but is not what we want for sequential memory-hardness.
Utils::zero<ZT_V0_IDENTITY_GEN_MEMORY>(genmem);
// Key/IV come from the first 32 bytes and next bytes of the SHA-512 digest.
Salsa20 s20(digest, (char *)digest + 32);
s20.crypt20((char *)genmem, (char *)genmem, 64);
for (unsigned long i = 64; i < ZT_V0_IDENTITY_GEN_MEMORY; i += 64) {
unsigned long k = i - 64;
// XOR-free "CBC": copy the previous 64-byte block, then encrypt in place,
// forcing strictly sequential generation of the whole buffer.
*((uint64_t *)((char *)genmem + i)) = *((uint64_t *)((char *)genmem + k));
*((uint64_t *)((char *)genmem + i + 8)) = *((uint64_t *)((char *)genmem + k + 8));
*((uint64_t *)((char *)genmem + i + 16)) = *((uint64_t *)((char *)genmem + k + 16));
*((uint64_t *)((char *)genmem + i + 24)) = *((uint64_t *)((char *)genmem + k + 24));
*((uint64_t *)((char *)genmem + i + 32)) = *((uint64_t *)((char *)genmem + k + 32));
*((uint64_t *)((char *)genmem + i + 40)) = *((uint64_t *)((char *)genmem + k + 40));
*((uint64_t *)((char *)genmem + i + 48)) = *((uint64_t *)((char *)genmem + k + 48));
*((uint64_t *)((char *)genmem + i + 56)) = *((uint64_t *)((char *)genmem + k + 56));
s20.crypt20((char *)genmem + i, (char *)genmem + i, 64);
}
// Render final digest using genmem as a lookup table
for (unsigned long i = 0; i < (ZT_V0_IDENTITY_GEN_MEMORY / sizeof(uint64_t));) {
// Swap a digest word with a pseudo-randomly chosen buffer word, then
// re-encrypt the digest; indices are derived from the buffer contents.
unsigned long idx1 = (unsigned long)(Utils::ntoh(((uint64_t *)genmem)[i++]) % (64 / sizeof(uint64_t)));
unsigned long idx2 = (unsigned long)(Utils::ntoh(((uint64_t *)genmem)[i++]) % (ZT_V0_IDENTITY_GEN_MEMORY / sizeof(uint64_t)));
uint64_t tmp = ((uint64_t *)genmem)[idx2];
((uint64_t *)genmem)[idx2] = ((uint64_t *)digest)[idx1];
((uint64_t *)digest)[idx1] = tmp;
s20.crypt20(digest, digest, 64);
}
}
// Acceptance functor used during V0 key generation: a candidate key pair is
// accepted when the first byte of its frankenhash is < 17 (a ~17/256
// "hashcash" acceptance rate). The digest and scratch buffers are borrowed
// from the caller and overwritten on each invocation.
struct identityV0ProofOfWorkCriteria {
ZT_INLINE identityV0ProofOfWorkCriteria(unsigned char *restrict sb, char *restrict gm) noexcept : digest(sb), genmem(gm) {}
ZT_INLINE bool operator()(const uint8_t pub[ZT_C25519_COMBINED_PUBLIC_KEY_SIZE]) const noexcept
{
identityV0ProofOfWorkFrankenhash(pub, digest, genmem);
return (digest[0] < 17);
}
unsigned char *restrict digest; // 64-byte SHA-512 output buffer (caller-owned)
char *restrict genmem;          // 2MB scratch buffer (caller-owned)
};
// Derive a 256-bit MIMC52 challenge from a V1 compound public key blob.
//
// @param pub Compound public key (first 7 bytes are the PoW field and are skipped)
// @param challenge Receives 4 x 64-bit challenge words
void v1ChallengeFromPub(const uint8_t pub[ZT_IDENTITY_P384_COMPOUND_PUBLIC_KEY_SIZE], uint64_t challenge[4])
{
// This builds a 256-bit challenge by XORing the two public keys together. This doesn't need to be
// a hash, just different for different public keys. Public keys are basically kind of hashes of
// private keys, so that's good enough. This is only used to seed a PRNG in MIMC52 for a proof of
// sequential work. It's not used for authentication beyond checking PoW.
// Seed with key bytes 7..38, then fold in the rest 8 bytes at a time.
Utils::copy<32>(challenge, pub + 7);
challenge[0] ^= Utils::loadMachineEndian<uint64_t>(pub + 40);
challenge[1] ^= Utils::loadMachineEndian<uint64_t>(pub + 48);
challenge[2] ^= Utils::loadMachineEndian<uint64_t>(pub + 56);
challenge[3] ^= Utils::loadMachineEndian<uint64_t>(pub + 64);
challenge[0] ^= Utils::loadMachineEndian<uint64_t>(pub + 72);
challenge[1] ^= Utils::loadMachineEndian<uint64_t>(pub + 80);
challenge[2] ^= Utils::loadMachineEndian<uint64_t>(pub + 88);
challenge[3] ^= Utils::loadMachineEndian<uint64_t>(pub + 96);
challenge[0] ^= Utils::loadMachineEndian<uint64_t>(pub + 104);
challenge[1] ^= Utils::loadMachineEndian<uint64_t>(pub + 112);
}
} // anonymous namespace
const Identity Identity::NIL;
// Generate a new identity of the given type, including its private key and
// proof of work. Returns false for unrecognized types. This can take a
// noticeable amount of CPU time due to the PoW search.
bool Identity::generate(const Type t)
{
    m_type = t;
    m_hasPrivate = true;
    switch (t) {
        case C25519: {
            // Generate C25519/Ed25519 key pair whose hash satisfies a "hashcash" criterion and generate the
            // address from the last 40 bits of this hash. This is different from the fingerprint hash for V0.
            uint8_t digest[64];
            // BUG FIX: the scratch buffer was previously a raw new[]/delete[]
            // pair and would leak if key generation threw; unique_ptr makes
            // release unconditional. (<memory> is already included.)
            std::unique_ptr<char[]> genmem(new char[ZT_V0_IDENTITY_GEN_MEMORY]);
            Address address;
            do {
                C25519::generateSatisfying(identityV0ProofOfWorkCriteria(digest, genmem.get()), m_pub, m_priv);
                address.setTo(digest + 59); // last 40 bits of the PoW hash
            } while (address.isReserved());
            m_fp.address = address; // address comes from PoW hash for type 0 identities
            m_computeHash();
        } break;
        case P384:
            for (;;) {
                // Compound key: 7-byte PoW field, then C25519, then P-384.
                C25519::generateCombined(m_pub + 7, m_priv);
                ECC384GenerateKey(m_pub + 7 + ZT_C25519_COMBINED_PUBLIC_KEY_SIZE, m_priv + ZT_C25519_COMBINED_PRIVATE_KEY_SIZE);
                uint64_t challenge[4];
                v1ChallengeFromPub(m_pub, challenge);
                // The 56-bit MIMC52 proof of sequential work is stored big-endian
                // in the first 7 bytes of the public blob.
                const uint64_t proof = MIMC52::delay(reinterpret_cast<const uint8_t *>(challenge), ZT_IDENTITY_TYPE1_MIMC52_ROUNDS);
                m_pub[0] = (uint8_t)(proof >> 48U);
                m_pub[1] = (uint8_t)(proof >> 40U);
                m_pub[2] = (uint8_t)(proof >> 32U);
                m_pub[3] = (uint8_t)(proof >> 24U);
                m_pub[4] = (uint8_t)(proof >> 16U);
                m_pub[5] = (uint8_t)(proof >> 8U);
                m_pub[6] = (uint8_t)proof;
                m_computeHash();
                // V1 addresses come from the fingerprint hash; retry until the
                // derived address is not reserved.
                const Address addr(m_fp.hash);
                if (!addr.isReserved()) {
                    m_fp.address = addr;
                    break;
                }
            }
            break;
        default: return false;
    }
    return true;
}
// Validate this identity's address and proof of work using only local
// computation (no network or external state). Never throws.
bool Identity::locallyValidate() const noexcept
{
    try {
        // A fingerprint with a zero or reserved address can never be valid.
        if ((!m_fp) || Address(m_fp.address).isReserved())
            return false;
        switch (m_type) {
            case C25519: {
                // Recompute the memory-hard V0 hash, then check both the
                // derived address and the hashcash criterion.
                uint8_t powDigest[64];
                char *const scratch = (char *)malloc(ZT_V0_IDENTITY_GEN_MEMORY);
                if (!scratch)
                    return false;
                identityV0ProofOfWorkFrankenhash(m_pub, powDigest, scratch);
                free(scratch);
                return ((Address(powDigest + 59) == m_fp.address) && (powDigest[0] < 17));
            }
            case P384: {
                // For V1 the address must match the fingerprint hash and the
                // 56-bit MIMC52 proof of sequential work must verify.
                if (!(Address(m_fp.hash) == m_fp.address))
                    return false;
                uint64_t challenge[4];
                v1ChallengeFromPub(m_pub, challenge);
                return MIMC52::verify(reinterpret_cast<const uint8_t *>(challenge), ZT_IDENTITY_TYPE1_MIMC52_ROUNDS, ((uint64_t)m_pub[0] << 48U) | ((uint64_t)m_pub[1] << 40U) | ((uint64_t)m_pub[2] << 32U) | ((uint64_t)m_pub[3] << 24U) | ((uint64_t)m_pub[4] << 16U) | ((uint64_t)m_pub[5] << 8U) | (uint64_t)m_pub[6]);
            }
        }
    }
    catch (...) {
    }
    return false;
}
void Identity::hashWithPrivate(uint8_t h[ZT_FINGERPRINT_HASH_SIZE]) const
{
    // Compute SHA384 over the public key followed by the private key.
    // If there is no private key (or the type is unrecognized) the output
    // buffer is filled with zeroes instead.
    if (!m_hasPrivate) {
        Utils::zero<ZT_FINGERPRINT_HASH_SIZE>(h);
        return;
    }
    if (m_type == C25519) {
        SHA384(h, m_pub, ZT_C25519_COMBINED_PUBLIC_KEY_SIZE, m_priv, ZT_C25519_COMBINED_PRIVATE_KEY_SIZE);
        return;
    }
    if (m_type == P384) {
        SHA384(h, m_pub, sizeof(m_pub), m_priv, sizeof(m_priv));
        return;
    }
    Utils::zero<ZT_FINGERPRINT_HASH_SIZE>(h);
}
unsigned int Identity::sign(const void *data, unsigned int len, void *sig, unsigned int siglen) const
{
if (m_hasPrivate) {
switch (m_type) {
case C25519:
if (siglen >= ZT_C25519_SIGNATURE_LEN) {
C25519::sign(m_priv, m_pub, data, len, sig);
return ZT_C25519_SIGNATURE_LEN;
}
break;
case P384:
if (siglen >= ZT_ECC384_SIGNATURE_SIZE) {
static_assert(ZT_ECC384_SIGNATURE_HASH_SIZE == ZT_SHA384_DIGEST_SIZE, "weird!");
uint8_t h[ZT_ECC384_SIGNATURE_HASH_SIZE];
SHA384(h, data, len, m_pub, ZT_IDENTITY_P384_COMPOUND_PUBLIC_KEY_SIZE);
ECC384ECDSASign(m_priv + ZT_C25519_COMBINED_PRIVATE_KEY_SIZE, h, (uint8_t *)sig);
return ZT_ECC384_SIGNATURE_SIZE;
}
break;
}
}
return 0;
}
bool Identity::verify(const void *data, unsigned int len, const void *sig, unsigned int siglen) const
{
switch (m_type) {
case C25519: return C25519::verify(m_pub, data, len, sig, siglen);
case P384:
if (siglen == ZT_ECC384_SIGNATURE_SIZE) {
uint8_t h[ZT_ECC384_SIGNATURE_HASH_SIZE];
SHA384(h, data, len, m_pub, ZT_IDENTITY_P384_COMPOUND_PUBLIC_KEY_SIZE);
return ECC384ECDSAVerify(m_pub + 7 + ZT_C25519_COMBINED_PUBLIC_KEY_SIZE, h, (const uint8_t *)sig);
}
break;
}
return false;
}
bool Identity::agree(const Identity &id, uint8_t key[ZT_SYMMETRIC_KEY_SIZE]) const
{
    // Perform key agreement with another identity, writing ZT_SYMMETRIC_KEY_SIZE
    // bytes of shared secret to 'key'. Requires a private key on this side.
    // Returns false if no private key is present or no compatible agreement
    // path exists between the two identity types.
    uint8_t rawkey[128], h[64];
    if (m_hasPrivate) {
        if ((m_type == C25519) || (id.m_type == C25519)) {
            // If we are a C25519 key we can agree with another C25519 key or with only the
            // C25519 portion of a type 1 P-384 key.
            // (The C25519 key material occupies the same leading region in both key layouts.)
            C25519::agree(m_priv, id.m_pub, rawkey);
            SHA512(h, rawkey, ZT_C25519_ECDH_SHARED_SECRET_SIZE);
            Utils::copy<ZT_SYMMETRIC_KEY_SIZE>(key, h);
            return true;
        }
        else if ((m_type == P384) && (id.m_type == P384)) {
            // For another P384 identity we execute DH agreement with BOTH keys and then
            // hash the results together. For those (cough FIPS cough) who only consider
            // P384 to be kosher, the C25519 secret can be considered a "salt"
            // or something. For those who don't trust P384 this means the privacy of
            // your traffic is also protected by C25519.
            // Order matters: C25519 secret first, then the ECDH secret appended after it.
            C25519::agree(m_priv, id.m_pub, rawkey);
            ECC384ECDH(id.m_pub + 7 + ZT_C25519_COMBINED_PUBLIC_KEY_SIZE, m_priv + ZT_C25519_COMBINED_PRIVATE_KEY_SIZE, rawkey + ZT_C25519_ECDH_SHARED_SECRET_SIZE);
            SHA384(key, rawkey, ZT_C25519_ECDH_SHARED_SECRET_SIZE + ZT_ECC384_SHARED_SECRET_SIZE);
            return true;
        }
    }
    return false;
}
char *Identity::toString(bool includePrivate, char buf[ZT_IDENTITY_STRING_BUFFER_LENGTH]) const
{
    // Render the canonical string form: <10-hex-digit address>:<type>:<public>[:<private>].
    // Type 0 keys are hex encoded; type 1 keys are base32 encoded.
    // Returns buf, or nullptr on encoding failure / unknown type.
    char *out = buf;
    Address(m_fp.address).toString(out);
    out += 10; // address is always exactly 10 hex characters
    *(out++) = ':';
    if (m_type == C25519) {
        *(out++) = '0';
        *(out++) = ':';
        Utils::hex(m_pub, ZT_C25519_COMBINED_PUBLIC_KEY_SIZE, out);
        out += ZT_C25519_COMBINED_PUBLIC_KEY_SIZE * 2;
        if (includePrivate && m_hasPrivate) {
            *(out++) = ':';
            Utils::hex(m_priv, ZT_C25519_COMBINED_PRIVATE_KEY_SIZE, out);
            out += ZT_C25519_COMBINED_PRIVATE_KEY_SIZE * 2;
        }
        *out = (char)0;
        return buf;
    }
    if (m_type == P384) {
        *(out++) = '1';
        *(out++) = ':';
        int written = Utils::b32e(m_pub, sizeof(m_pub), out, (int)(ZT_IDENTITY_STRING_BUFFER_LENGTH - (uintptr_t)(out - buf)));
        if (written <= 0)
            return nullptr;
        out += written;
        if (includePrivate && m_hasPrivate) {
            *(out++) = ':';
            written = Utils::b32e(m_priv, sizeof(m_priv), out, (int)(ZT_IDENTITY_STRING_BUFFER_LENGTH - (uintptr_t)(out - buf)));
            if (written <= 0)
                return nullptr;
            out += written;
        }
        *out = (char)0;
        return buf;
    }
    buf[0] = 0;
    return nullptr;
}
bool Identity::fromString(const char *str)
{
    // Parse the canonical "address:type:public[:private]" form produced by toString().
    // Only the format is validated here; locallyValidate() must be used to verify the
    // proof of work and address/key correspondence. On failure the identity is zeroed.
    char tmp[ZT_IDENTITY_STRING_BUFFER_LENGTH];
    memoryZero(this);
    if ((!str) || (!Utils::scopy(tmp, sizeof(tmp), str)))
        return false;
    int fno = 0;
    char *saveptr = nullptr;
    // Tokenize on ':' and dispatch on the field index; at most 4 fields are consumed.
    for (char *f = Utils::stok(tmp, ":", &saveptr); ((f) && (fno < 4)); f = Utils::stok(nullptr, ":", &saveptr)) {
        switch (fno++) {
            case 0:
                // Field 0: 40-bit address as hex.
                m_fp.address = Utils::hexStrToU64(f) & ZT_ADDRESS_MASK;
                if (Address(m_fp.address).isReserved())
                    return false;
                break;
            case 1:
                // Field 1: identity type, a single character '0' (C25519) or '1' (P384).
                if ((f[0] == '0') && (!f[1])) {
                    m_type = C25519;
                }
                else if ((f[0] == '1') && (!f[1])) {
                    m_type = P384;
                }
                else {
                    return false;
                }
                break;
            case 2:
                // Field 2: public key -- hex for type 0, base32 for type 1.
                switch (m_type) {
                    case C25519:
                        if (Utils::unhex(f, strlen(f), m_pub, ZT_C25519_COMBINED_PUBLIC_KEY_SIZE) != ZT_C25519_COMBINED_PUBLIC_KEY_SIZE)
                            return false;
                        break;
                    case P384:
                        if (Utils::b32d(f, m_pub, sizeof(m_pub)) != sizeof(m_pub))
                            return false;
                        break;
                }
                break;
            case 3:
                // Field 3 (optional): private key, same encoding as the public key.
                // Fields of length <= 1 are ignored (no private key present).
                if (strlen(f) > 1) {
                    switch (m_type) {
                        case C25519:
                            if (Utils::unhex(f, strlen(f), m_priv, ZT_C25519_COMBINED_PRIVATE_KEY_SIZE) != ZT_C25519_COMBINED_PRIVATE_KEY_SIZE) {
                                return false;
                            }
                            else {
                                m_hasPrivate = true;
                            }
                            break;
                        case P384:
                            if (Utils::b32d(f, m_priv, sizeof(m_priv)) != sizeof(m_priv)) {
                                return false;
                            }
                            else {
                                m_hasPrivate = true;
                            }
                            break;
                    }
                    break;
                }
        }
    }
    // Address, type, and public key are mandatory (private key is optional).
    if (fno < 3)
        return false;
    m_computeHash();
    // For P384 identities the address must equal the first 40 bits of the key hash.
    return !((m_type == P384) && (Address(m_fp.hash) != m_fp.address));
}
int Identity::marshal(uint8_t data[ZT_IDENTITY_MARSHAL_SIZE_MAX], const bool includePrivate) const noexcept
{
    // Wire format: [5-byte address][1-byte type][public key][1-byte private key length][private key].
    // A private key length of 0 means no private key follows. Returns bytes written or -1
    // for an unrecognized type.
    Address(m_fp.address).copyTo(data);
    int p = ZT_ADDRESS_LENGTH;
    if (m_type == C25519) {
        data[p++] = (uint8_t)C25519;
        Utils::copy<ZT_C25519_COMBINED_PUBLIC_KEY_SIZE>(data + p, m_pub);
        p += ZT_C25519_COMBINED_PUBLIC_KEY_SIZE;
        if (includePrivate && m_hasPrivate) {
            data[p++] = ZT_C25519_COMBINED_PRIVATE_KEY_SIZE;
            Utils::copy<ZT_C25519_COMBINED_PRIVATE_KEY_SIZE>(data + p, m_priv);
            return p + ZT_C25519_COMBINED_PRIVATE_KEY_SIZE;
        }
        data[p++] = 0;
        return p;
    }
    if (m_type == P384) {
        data[p++] = (uint8_t)P384;
        Utils::copy<ZT_IDENTITY_P384_COMPOUND_PUBLIC_KEY_SIZE>(data + p, m_pub);
        p += ZT_IDENTITY_P384_COMPOUND_PUBLIC_KEY_SIZE;
        if (includePrivate && m_hasPrivate) {
            data[p++] = ZT_IDENTITY_P384_COMPOUND_PRIVATE_KEY_SIZE;
            Utils::copy<ZT_IDENTITY_P384_COMPOUND_PRIVATE_KEY_SIZE>(data + p, m_priv);
            return p + ZT_IDENTITY_P384_COMPOUND_PRIVATE_KEY_SIZE;
        }
        data[p++] = 0;
        return p;
    }
    return -1;
}
int Identity::unmarshal(const uint8_t *data, const int len) noexcept
{
    // Inverse of marshal(): parse [address][type][public key][privlen][private key].
    // Returns the number of bytes consumed or -1 on truncation / bad private key
    // length / P384 address mismatch. The identity is zeroed before parsing.
    memoryZero(this);
    if (len < (1 + ZT_ADDRESS_LENGTH))
        return -1;
    m_fp.address = Address(data);
    unsigned int privlen;
    switch ((m_type = (Type)data[ZT_ADDRESS_LENGTH])) {
        case C25519:
            if (len < (ZT_ADDRESS_LENGTH + 1 + ZT_C25519_COMBINED_PUBLIC_KEY_SIZE + 1))
                return -1;
            Utils::copy<ZT_C25519_COMBINED_PUBLIC_KEY_SIZE>(m_pub, data + ZT_ADDRESS_LENGTH + 1);
            m_computeHash();
            // Private key length byte: either the exact private key size or 0 (absent).
            privlen = data[ZT_ADDRESS_LENGTH + 1 + ZT_C25519_COMBINED_PUBLIC_KEY_SIZE];
            if (privlen == ZT_C25519_COMBINED_PRIVATE_KEY_SIZE) {
                if (len < (ZT_ADDRESS_LENGTH + 1 + ZT_C25519_COMBINED_PUBLIC_KEY_SIZE + 1 + ZT_C25519_COMBINED_PRIVATE_KEY_SIZE))
                    return -1;
                m_hasPrivate = true;
                Utils::copy<ZT_C25519_COMBINED_PRIVATE_KEY_SIZE>(m_priv, data + ZT_ADDRESS_LENGTH + 1 + ZT_C25519_COMBINED_PUBLIC_KEY_SIZE + 1);
                return ZT_ADDRESS_LENGTH + 1 + ZT_C25519_COMBINED_PUBLIC_KEY_SIZE + 1 + ZT_C25519_COMBINED_PRIVATE_KEY_SIZE;
            }
            else if (privlen == 0) {
                m_hasPrivate = false;
                return ZT_ADDRESS_LENGTH + 1 + ZT_C25519_COMBINED_PUBLIC_KEY_SIZE + 1;
            }
            break;
        case P384:
            if (len < (ZT_ADDRESS_LENGTH + 1 + ZT_IDENTITY_P384_COMPOUND_PUBLIC_KEY_SIZE + 1))
                return -1;
            Utils::copy<ZT_IDENTITY_P384_COMPOUND_PUBLIC_KEY_SIZE>(m_pub, data + ZT_ADDRESS_LENGTH + 1);
            m_computeHash(); // this sets the address for P384
            if (Address(m_fp.hash) != m_fp.address) // this sanity check is possible with V1 identities
                return -1;
            privlen = data[ZT_ADDRESS_LENGTH + 1 + ZT_IDENTITY_P384_COMPOUND_PUBLIC_KEY_SIZE];
            if (privlen == 0) {
                m_hasPrivate = false;
                return ZT_ADDRESS_LENGTH + 1 + ZT_IDENTITY_P384_COMPOUND_PUBLIC_KEY_SIZE + 1;
            }
            else if (privlen == ZT_IDENTITY_P384_COMPOUND_PRIVATE_KEY_SIZE) {
                if (len < (ZT_ADDRESS_LENGTH + 1 + ZT_IDENTITY_P384_COMPOUND_PUBLIC_KEY_SIZE + 1 + ZT_IDENTITY_P384_COMPOUND_PRIVATE_KEY_SIZE))
                    return -1;
                m_hasPrivate = true;
                // Pass the array (decays to uint8_t *) like every other Utils::copy call
                // here; the original passed &m_priv (pointer-to-array), which has the same
                // address but an inconsistent type.
                Utils::copy<ZT_IDENTITY_P384_COMPOUND_PRIVATE_KEY_SIZE>(m_priv, data + ZT_ADDRESS_LENGTH + 1 + ZT_IDENTITY_P384_COMPOUND_PUBLIC_KEY_SIZE + 1);
                return ZT_ADDRESS_LENGTH + 1 + ZT_IDENTITY_P384_COMPOUND_PUBLIC_KEY_SIZE + 1 + ZT_IDENTITY_P384_COMPOUND_PRIVATE_KEY_SIZE;
            }
            break;
    }
    return -1;
}
void Identity::m_computeHash()
{
    // Recompute the fingerprint hash (SHA384 of the public key blob).
    // Unknown types zero the fingerprint instead.
    if (m_type == C25519)
        SHA384(m_fp.hash, m_pub, ZT_C25519_COMBINED_PUBLIC_KEY_SIZE);
    else if (m_type == P384)
        SHA384(m_fp.hash, m_pub, ZT_IDENTITY_P384_COMPOUND_PUBLIC_KEY_SIZE);
    else
        m_fp.zero();
}
} // namespace ZeroTier

View file

@ -1,251 +0,0 @@
/*
* Copyright (c)2013-2021 ZeroTier, Inc.
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
*/
/****/
#ifndef ZT_IDENTITY_HPP
#define ZT_IDENTITY_HPP
#include "Address.hpp"
#include "C25519.hpp"
#include "Constants.hpp"
#include "Containers.hpp"
#include "ECC384.hpp"
#include "Fingerprint.hpp"
#include "SHA512.hpp"
#include "TriviallyCopyable.hpp"
#include "Utils.hpp"
#define ZT_IDENTITY_STRING_BUFFER_LENGTH 1024
#define ZT_IDENTITY_P384_COMPOUND_PUBLIC_KEY_SIZE (7 + ZT_C25519_COMBINED_PUBLIC_KEY_SIZE + ZT_ECC384_PUBLIC_KEY_SIZE)
#define ZT_IDENTITY_P384_COMPOUND_PRIVATE_KEY_SIZE (ZT_C25519_COMBINED_PRIVATE_KEY_SIZE + ZT_ECC384_PRIVATE_KEY_SIZE)
#define ZT_IDENTITY_MARSHAL_SIZE_MAX (ZT_ADDRESS_LENGTH + 4 + ZT_IDENTITY_P384_COMPOUND_PUBLIC_KEY_SIZE + ZT_IDENTITY_P384_COMPOUND_PRIVATE_KEY_SIZE)
#define ZT_IDENTITY_TYPE1_MIMC52_ROUNDS 262144
namespace ZeroTier {
/**
* A ZeroTier identity
*
* Identities currently come in two types: type 0 identities based on just Curve25519
* and Ed25519 and type 1 identities that include both a 25519 key pair and a NIST P-384
* key pair. Type 1 identities use P-384 for signatures but use both key pairs at once
* (hashing both keys together) for key agreement with other type 1 identities, and can
* agree with type 0 identities using only Curve25519.
*
* Type 1 identities are better in many ways but type 0 will remain the default until
* 1.x nodes are pretty much dead in the wild.
*/
class Identity : public TriviallyCopyable {
  public:
    /**
     * Identity type -- numeric values of these enums are protocol constants
     */
    enum Type {
        C25519 = ZT_IDENTITY_TYPE_C25519, // Type 0 -- Curve25519 and Ed25519 (1.x and 2.x, default)
        P384 = ZT_IDENTITY_TYPE_P384      // Type 1 -- NIST P-384 with linked Curve25519/Ed25519 secondaries (2.x+)
    };

    /**
     * A nil/empty identity instance
     */
    static const Identity NIL;

    // Default construction yields the all-zero (nil) identity.
    ZT_INLINE Identity() noexcept { memoryZero(this); }

    // Trivially copyable: copy construction is a raw memory copy.
    ZT_INLINE Identity(const Identity &id) noexcept { Utils::copy<sizeof(Identity)>(this, &id); }

    /**
     * Construct identity from string
     *
     * If the identity is not basically valid (no deep checking is done) the result will
     * be a null identity.
     *
     * @param str Identity in canonical string format
     */
    explicit ZT_INLINE Identity(const char *str) { fromString(str); }

    // Destructor wipes the private key storage so secrets do not linger in memory.
    ZT_INLINE ~Identity() { Utils::burn(reinterpret_cast<void *>(&this->m_priv), sizeof(this->m_priv)); }

    ZT_INLINE Identity &operator=(const Identity &id) noexcept
    {
        if (likely(this != &id))
            Utils::copy<sizeof(Identity)>(this, &id);
        return *this;
    }

    // Zero-cost casts between the C API's ZT_Identity and this C++ type.
    static ZT_INLINE Identity *from(ZT_Identity *const id) noexcept { return reinterpret_cast<Identity *>(id); }
    static ZT_INLINE const Identity *from(const ZT_Identity *const id) noexcept { return reinterpret_cast<const Identity *>(id); }

    /**
     * Set identity to NIL value (all zero)
     */
    ZT_INLINE void zero() noexcept { memoryZero(this); }

    /**
     * @return Identity type (undefined if identity is null or invalid)
     */
    ZT_INLINE Type type() const noexcept { return m_type; }

    /**
     * Generate a new identity (address, key pair)
     *
     * This is a time consuming operation taking up to 5-10 seconds on some slower systems.
     *
     * @param t Type of identity to generate
     * @return False if there was an error such as type being an invalid value
     */
    bool generate(Type t);

    /**
     * Check the validity of this identity's address
     *
     * For type 0 identities this is slightly time consuming. For type 1 identities it's
     * instantaneous. It should be done when a new identity is accepted for the very first
     * time.
     *
     * @return True if validation check passes
     */
    bool locallyValidate() const noexcept;

    /**
     * @return True if this identity contains a private key
     */
    ZT_INLINE bool hasPrivate() const noexcept { return m_hasPrivate; }

    /**
     * @return This identity's address
     */
    ZT_INLINE Address address() const noexcept { return Address(m_fp.address); }

    /**
     * @return Full fingerprint of this identity (address plus SHA384 of keys)
     */
    ZT_INLINE const Fingerprint &fingerprint() const noexcept { return m_fp; }

    /**
     * Compute a hash of this identity's public and private keys.
     *
     * If there is no private key or the identity is NIL the buffer is filled with zero.
     *
     * @param h Buffer to store SHA384 hash
     */
    void hashWithPrivate(uint8_t h[ZT_FINGERPRINT_HASH_SIZE]) const;

    /**
     * Sign a message with this identity (private key required)
     *
     * The signature buffer should be large enough for the largest
     * signature, which is currently 96 bytes.
     *
     * @param data Data to sign
     * @param len Length of data
     * @param sig Buffer to receive signature
     * @param siglen Length of buffer
     * @return Number of bytes actually written to sig or 0 on error
     */
    unsigned int sign(const void *data, unsigned int len, void *sig, unsigned int siglen) const;

    /**
     * Verify a message signature against this identity
     *
     * @param data Data to check
     * @param len Length of data
     * @param signature Signature bytes
     * @param siglen Length of signature in bytes
     * @return True if signature validates and data integrity checks
     */
    bool verify(const void *data, unsigned int len, const void *sig, unsigned int siglen) const;

    /**
     * Shortcut method to perform key agreement with another identity
     *
     * This identity must have a private key. (Check hasPrivate())
     *
     * @param id Identity to agree with
     * @param key Result parameter to fill with key bytes
     * @return Was agreement successful?
     */
    bool agree(const Identity &id, uint8_t key[ZT_SYMMETRIC_KEY_SIZE]) const;

    /**
     * Serialize to a more human-friendly string
     *
     * @param includePrivate If true, include private key (if it exists)
     * @param buf Buffer to store string
     * @return ASCII string representation of identity (pointer to buf)
     */
    char *toString(bool includePrivate, char buf[ZT_IDENTITY_STRING_BUFFER_LENGTH]) const;

    // Convenience overload returning a String (private key omitted by default).
    ZT_INLINE String toString(const bool includePrivate = false) const
    {
        char buf[ZT_IDENTITY_STRING_BUFFER_LENGTH];
        toString(includePrivate, buf);
        return String(buf);
    }

    /**
     * Deserialize a human-friendly string
     *
     * Note: validation is for the format only. The locallyValidate() method
     * must be used to check signature and address/key correspondence.
     *
     * @param str String to deserialize
     * @return True if deserialization appears successful
     */
    bool fromString(const char *str);

    /**
     * Erase any private key in this identity object
     */
    ZT_INLINE void erasePrivateKey() noexcept
    {
        Utils::burn(m_priv, sizeof(m_priv));
        m_hasPrivate = false;
    }

    /**
     * @return True if this identity contains something
     */
    explicit ZT_INLINE operator bool() const noexcept { return (m_fp); }

    // Hashing and all comparisons delegate to the fingerprint (address + key hash).
    ZT_INLINE unsigned long hashCode() const noexcept { return m_fp.hashCode(); }
    ZT_INLINE bool operator==(const Identity &id) const noexcept { return (m_fp == id.m_fp); }
    ZT_INLINE bool operator!=(const Identity &id) const noexcept { return !(*this == id); }
    ZT_INLINE bool operator<(const Identity &id) const noexcept { return (m_fp < id.m_fp); }
    ZT_INLINE bool operator>(const Identity &id) const noexcept { return (id < *this); }
    ZT_INLINE bool operator<=(const Identity &id) const noexcept { return !(id < *this); }
    ZT_INLINE bool operator>=(const Identity &id) const noexcept { return !(*this < id); }

    // Binary (wire) serialization; see marshal()/unmarshal() in the .cpp for the format.
    static constexpr int marshalSizeMax() noexcept { return ZT_IDENTITY_MARSHAL_SIZE_MAX; }
    int marshal(uint8_t data[ZT_IDENTITY_MARSHAL_SIZE_MAX], bool includePrivate = false) const noexcept;
    int unmarshal(const uint8_t *data, int len) noexcept;

  private:
    // Recompute m_fp.hash from the public key after any key change.
    void m_computeHash();

    Fingerprint m_fp;                                          // address + SHA384 of public key
    uint8_t m_priv[ZT_IDENTITY_P384_COMPOUND_PRIVATE_KEY_SIZE]; // private key storage (largest type)
    uint8_t m_pub[ZT_IDENTITY_P384_COMPOUND_PUBLIC_KEY_SIZE];   // public key storage (largest type)
    Type m_type; // _type determines which fields in _priv and _pub are used
    bool m_hasPrivate;
};
} // namespace ZeroTier
#endif

View file

@ -1,465 +0,0 @@
/*
* Copyright (c)2013-2021 ZeroTier, Inc.
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
*/
/****/
#define _WIN32_WINNT 0x06010000
#include "InetAddress.hpp"
#include "Constants.hpp"
#include "Utils.hpp"
namespace ZeroTier {
// InetAddress must remain exactly overlayable on sockaddr_storage (and on the C API's
// ZT_InetAddress); these compile-time checks enforce that on every platform.
static_assert(ZT_SOCKADDR_STORAGE_SIZE == sizeof(sockaddr_storage), "ZT_SOCKADDR_STORAGE_SIZE is incorrect on this platform, must be size of sockaddr_storage");
static_assert(ZT_SOCKADDR_STORAGE_SIZE == sizeof(InetAddress), "ZT_SOCKADDR_STORAGE_SIZE should equal InetAddress, which must equal size of sockaddr_storage");
static_assert(ZT_SOCKADDR_STORAGE_SIZE == sizeof(ZT_InetAddress), "ZT_SOCKADDR_STORAGE_SIZE should equal ZT_InetAddress, which must equal size of sockaddr_storage");

// Well-known constants: IPv4 loopback, IPv6 loopback, and the nil (all-zero) address.
const InetAddress InetAddress::LO4((const void *)("\x7f\x00\x00\x01"), 4, 0);
const InetAddress InetAddress::LO6((const void *)("\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01"), 16, 0);
const InetAddress InetAddress::NIL;
InetAddress::IpScope InetAddress::ipScope() const noexcept
{
    // Classify this address into a scope (loopback, link-local, private, global, etc.).
    // Unknown address families return ZT_IP_SCOPE_NONE.
    switch (as.ss.ss_family) {
        case AF_INET: {
            // Classify IPv4 primarily by the top octet, with finer checks inside some /8s.
            const uint32_t ip = Utils::ntoh((uint32_t)as.sa_in.sin_addr.s_addr);
            switch (ip >> 24U) {
                case 0x00: return ZT_IP_SCOPE_NONE;          // 0.0.0.0/8 (reserved, never used)
                case 0x06: return ZT_IP_SCOPE_PSEUDOPRIVATE; // 6.0.0.0/8 (US Army)
                case 0x0a: return ZT_IP_SCOPE_PRIVATE;       // 10.0.0.0/8
                case 0x15:                                   // return IP_SCOPE_PSEUDOPRIVATE; // 21.0.0.0/8 (US DDN-RVN)
                case 0x16:                                   // return IP_SCOPE_PSEUDOPRIVATE; // 22.0.0.0/8 (US DISA)
                case 0x19:                                   // return IP_SCOPE_PSEUDOPRIVATE; // 25.0.0.0/8 (UK Ministry of
                                                             // Defense)
                case 0x1a:                                   // return IP_SCOPE_PSEUDOPRIVATE; // 26.0.0.0/8 (US DISA)
                case 0x1c:                                   // return IP_SCOPE_PSEUDOPRIVATE; // 28.0.0.0/8 (US DSI-North)
                case 0x1d:                                   // return IP_SCOPE_PSEUDOPRIVATE; // 29.0.0.0/8 (US DISA)
                case 0x1e:                                   // return IP_SCOPE_PSEUDOPRIVATE; // 30.0.0.0/8 (US DISA)
                case 0x33:                                   // return IP_SCOPE_PSEUDOPRIVATE; // 51.0.0.0/8 (UK Department of
                                                             // Social Security)
                case 0x37:                                   // return IP_SCOPE_PSEUDOPRIVATE; // 55.0.0.0/8 (US DoD)
                case 0x38:                                   // 56.0.0.0/8 (US Postal Service)
                    // All of the above are allocated-but-unrouted /8s, grouped into one fall-through.
                    return ZT_IP_SCOPE_PSEUDOPRIVATE;
                case 0x64:
                    if ((ip & 0xffc00000) == 0x64400000)
                        return ZT_IP_SCOPE_PRIVATE; // 100.64.0.0/10
                    break;
                case 0x7f: return ZT_IP_SCOPE_LOOPBACK; // 127.0.0.0/8
                case 0xa9:
                    if ((ip & 0xffff0000) == 0xa9fe0000)
                        return ZT_IP_SCOPE_LINK_LOCAL; // 169.254.0.0/16
                    break;
                case 0xac:
                    if ((ip & 0xfff00000) == 0xac100000)
                        return ZT_IP_SCOPE_PRIVATE; // 172.16.0.0/12
                    break;
                case 0xc0:
                    if ((ip & 0xffff0000) == 0xc0a80000)
                        return ZT_IP_SCOPE_PRIVATE; // 192.168.0.0/16
                    if ((ip & 0xffffff00) == 0xc0000200)
                        return ZT_IP_SCOPE_PRIVATE; // 192.0.2.0/24
                    break;
                case 0xc6:
                    if ((ip & 0xfffe0000) == 0xc6120000)
                        return ZT_IP_SCOPE_PRIVATE; // 198.18.0.0/15
                    if ((ip & 0xffffff00) == 0xc6336400)
                        return ZT_IP_SCOPE_PRIVATE; // 198.51.100.0/24
                    break;
                case 0xcb:
                    if ((ip & 0xffffff00) == 0xcb007100)
                        return ZT_IP_SCOPE_PRIVATE; // 203.0.113.0/24
                    break;
                case 0xff: return ZT_IP_SCOPE_NONE; // 255.0.0.0/8 (broadcast, or unused/unusable)
            }
            // Multicast and class E are checked by the top nibble.
            switch (ip >> 28U) {
                case 0xe: return ZT_IP_SCOPE_MULTICAST;     // 224.0.0.0/4
                case 0xf: return ZT_IP_SCOPE_PSEUDOPRIVATE; // 240.0.0.0/4 ("reserved," usually unusable)
            }
            return ZT_IP_SCOPE_GLOBAL;
        }
        case AF_INET6: {
            const uint8_t *const ip = as.sa_in6.sin6_addr.s6_addr;
            // Fast pre-check: all special prefixes handled here start with nibble 0xf.
            if ((ip[0] & 0xf0U) == 0xf0) {
                if (ip[0] == 0xff)
                    return ZT_IP_SCOPE_MULTICAST; // ff00::/8
                if ((ip[0] == 0xfe) && ((ip[1] & 0xc0U) == 0x80)) {
                    // Scan for fe80::1, which is treated as loopback rather than link-local.
                    unsigned int k = 2;
                    while ((!ip[k]) && (k < 15))
                        ++k;
                    if ((k == 15) && (ip[15] == 0x01))
                        return ZT_IP_SCOPE_LOOPBACK; // fe80::1/128
                    else
                        return ZT_IP_SCOPE_LINK_LOCAL; // fe80::/10
                }
                if ((ip[0] & 0xfeU) == 0xfc)
                    return ZT_IP_SCOPE_PRIVATE; // fc00::/7
            }
            // Detect ::1 (loopback) and :: (nil): all bytes zero except possibly the last.
            unsigned int k = 0;
            while ((!ip[k]) && (k < 15))
                ++k;
            if (k == 15) { // all 0's except last byte
                if (ip[15] == 0x01)
                    return ZT_IP_SCOPE_LOOPBACK; // ::1/128
                if (ip[15] == 0x00)
                    return ZT_IP_SCOPE_NONE; // ::/128
            }
            return ZT_IP_SCOPE_GLOBAL;
        }
    }
    return ZT_IP_SCOPE_NONE;
}
void InetAddress::set(const void *ipBytes, unsigned int ipLen, unsigned int port) noexcept
{
memoryZero(this);
if (ipLen == 4) {
as.sa_in.sin_family = AF_INET;
as.sa_in.sin_port = Utils::hton((uint16_t)port);
as.sa_in.sin_addr.s_addr = Utils::loadMachineEndian<uint32_t>(ipBytes);
}
else if (ipLen == 16) {
as.sa_in6.sin6_family = AF_INET6;
as.sa_in6.sin6_port = Utils::hton((uint16_t)port);
Utils::copy<16>(as.sa_in6.sin6_addr.s6_addr, ipBytes);
}
}
bool InetAddress::isDefaultRoute() const noexcept
{
    // A default route is 0.0.0.0/0 or ::/0: an all-zero address with a zero
    // port field (the port stores the prefix length in route usage).
    if (as.ss.ss_family == AF_INET)
        return ((as.sa_in.sin_port == 0) && (as.sa_in.sin_addr.s_addr == 0));
    if (as.ss.ss_family == AF_INET6) {
        if (as.sa_in6.sin6_port != 0)
            return false;
        for (unsigned int i = 0; i < 16; ++i) {
            if (as.sa_in6.sin6_addr.s6_addr[i] != 0)
                return false;
        }
        return true;
    }
    return false;
}
char *InetAddress::toString(char buf[ZT_INETADDRESS_STRING_SIZE_MAX]) const noexcept
{
    // Render "ip/port". An unrecognized family produces an empty string
    // (no slash or port is appended in that case).
    char *end = toIpString(buf);
    if (*end != 0) {
        while (*end != 0)
            ++end;
        *(end++) = '/';
        Utils::decimal(port(), end);
    }
    return buf;
}
char *InetAddress::toIpString(char buf[ZT_INETADDRESS_STRING_SIZE_MAX]) const noexcept
{
    // Render just the IP (no port) via inet_ntop(); unknown families yield "".
    buf[0] = (char)0;
    if (as.ss.ss_family == AF_INET)
        inet_ntop(AF_INET, &as.sa_in.sin_addr.s_addr, buf, INET_ADDRSTRLEN);
    else if (as.ss.ss_family == AF_INET6)
        inet_ntop(AF_INET6, as.sa_in6.sin6_addr.s6_addr, buf, INET6_ADDRSTRLEN);
    return buf;
}
bool InetAddress::fromString(const char *ipSlashPort) noexcept
{
    // Parse "ip/port" (port optional, defaults to 0). An empty string yields
    // the nil address and returns true. Returns false on malformed input,
    // leaving this as the nil address.
    char buf[64];
    memoryZero(this);
    if (!*ipSlashPort)
        return true;
    if (!Utils::scopy(buf, sizeof(buf), ipSlashPort))
        return false;
    // Split at the first '/' (if any); the remainder is the decimal port.
    char *portAt = buf;
    while ((*portAt) && (*portAt != '/'))
        ++portAt;
    unsigned int port = 0;
    if (*portAt) {
        *(portAt++) = (char)0;
        port = Utils::strToUInt(portAt) & 0xffffU;
    }
    // Any ':' means IPv6; otherwise a '.' means IPv4.
    if (strchr(buf, ':')) {
        as.sa_in6.sin6_family = AF_INET6;
        as.sa_in6.sin6_port = Utils::hton((uint16_t)port);
        // Check inet_pton()'s result: previously a malformed IPv6 string
        // silently produced a "successful" all-zero address.
        if (inet_pton(AF_INET6, buf, as.sa_in6.sin6_addr.s6_addr) != 1) {
            memoryZero(this);
            return false;
        }
        return true;
    }
    else if (strchr(buf, '.')) {
        as.sa_in.sin_family = AF_INET;
        as.sa_in.sin_port = Utils::hton((uint16_t)port);
        if (inet_pton(AF_INET, buf, &as.sa_in.sin_addr.s_addr) != 1) {
            memoryZero(this);
            return false;
        }
        return true;
    }
    return false;
}
InetAddress InetAddress::netmask() const noexcept
{
    // Interpret the port field as a CIDR prefix length and return the
    // corresponding netmask with the same family and port.
    InetAddress r(*this);
    switch (r.as.ss.ss_family) {
        case AF_INET: {
            // Guard the shifts: "x << 32" (bits == 0) is undefined behavior in C++,
            // and netmaskBits() is the raw stored port, which may exceed 32.
            const unsigned int bits = netmaskBits();
            if (bits == 0)
                r.as.sa_in.sin_addr.s_addr = 0;
            else if (bits >= 32)
                r.as.sa_in.sin_addr.s_addr = 0xffffffffU; // all-ones is endian-invariant
            else
                r.as.sa_in.sin_addr.s_addr = Utils::hton((uint32_t)(0xffffffffU << (32 - bits)));
        } break;
        case AF_INET6: {
            uint64_t nm[2];
            unsigned int bits = netmaskBits();
            if (bits > 128)
                bits = 128; // clamp out-of-range prefix lengths to avoid UB shifts
            if (bits) {
                nm[0] = Utils::hton((uint64_t)((bits >= 64) ? 0xffffffffffffffffULL : (0xffffffffffffffffULL << (64 - bits))));
                nm[1] = Utils::hton((uint64_t)((bits <= 64) ? 0ULL : (0xffffffffffffffffULL << (128 - bits))));
            }
            else {
                nm[0] = 0;
                nm[1] = 0;
            }
            Utils::copy<16>(r.as.sa_in6.sin6_addr.s6_addr, nm);
        } break;
    }
    return r;
}
InetAddress InetAddress::broadcast() const noexcept
{
    // Compute the IPv4 broadcast address for this address/prefix (set all host
    // bits). Only meaningful for IPv4; other families return the nil address.
    if (as.ss.ss_family == AF_INET) {
        InetAddress r(*this);
        // Guard the shift: "0xffffffffU >> bits" is undefined behavior for
        // bits >= 32, and a /32 (or wider) prefix has no host bits to set anyway.
        const unsigned int bits = netmaskBits();
        if (bits < 32)
            r.as.sa_in.sin_addr.s_addr |= Utils::hton((uint32_t)(0xffffffffU >> bits));
        return r;
    }
    return InetAddress();
}
InetAddress InetAddress::network() const noexcept
{
    // Return the network (prefix) portion of this address: the address with
    // all host bits cleared, preserving family and the prefix-length port.
    InetAddress r(*this);
    switch (r.as.ss.ss_family) {
        case AF_INET: {
            // Guard the shift: "x << 32" (bits == 0) is undefined behavior; a
            // prefix of 0 means the network is all zero, and >= 32 keeps all bits.
            const unsigned int bits = netmaskBits();
            if (bits == 0)
                r.as.sa_in.sin_addr.s_addr = 0;
            else if (bits < 32)
                r.as.sa_in.sin_addr.s_addr &= Utils::hton((uint32_t)(0xffffffffU << (32 - bits)));
        } break;
        case AF_INET6: {
            uint64_t nm[2];
            unsigned int bits = netmaskBits();
            if (bits > 128)
                bits = 128; // clamp out-of-range prefix lengths to avoid UB shifts
            Utils::copy<16>(nm, r.as.sa_in6.sin6_addr.s6_addr);
            // bits == 0 previously shifted by 64 (UB); the correct /0 network is all zero.
            nm[0] &= Utils::hton((uint64_t)((bits >= 64) ? 0xffffffffffffffffULL : (bits ? (0xffffffffffffffffULL << (64 - bits)) : 0ULL)));
            nm[1] &= Utils::hton((uint64_t)((bits <= 64) ? 0ULL : (0xffffffffffffffffULL << (128 - bits))));
            Utils::copy<16>(r.as.sa_in6.sin6_addr.s6_addr, nm);
        } break;
    }
    return r;
}
bool InetAddress::isEqualPrefix(const InetAddress &addr) const noexcept
{
    // Compare the masked (network) portions of two addresses, each under its
    // own netmask. Note: only IPv6 is handled here; all other families
    // (including IPv4) return false, matching the original behavior.
    if ((addr.as.ss.ss_family != as.ss.ss_family) || (as.ss.ss_family != AF_INET6))
        return false;
    const InetAddress myMask(netmask());
    InetAddress theirMask(addr.netmask());
    const uint8_t *const n = theirMask.as.sa_in6.sin6_addr.s6_addr;
    const uint8_t *const m = myMask.as.sa_in6.sin6_addr.s6_addr;
    const uint8_t *const a = addr.as.sa_in6.sin6_addr.s6_addr;
    const uint8_t *const b = as.sa_in6.sin6_addr.s6_addr;
    for (unsigned int i = 0; i < 16; ++i) {
        if ((a[i] & m[i]) != (b[i] & n[i]))
            return false;
    }
    return true;
}
bool InetAddress::containsAddress(const InetAddress &addr) const noexcept
{
    // Treat this object as a network (address + prefix length in the port
    // field) and test whether 'addr' falls inside it.
    if (addr.as.ss.ss_family == as.ss.ss_family) {
        switch (as.ss.ss_family) {
            case AF_INET: {
                const unsigned int bits = netmaskBits();
                if (bits == 0)
                    return true; // a /0 contains every address
                // Guard the shift: "x >> (32 - bits)" is undefined behavior when
                // bits > 32; clamp so /32 and out-of-range prefixes require an exact match.
                const unsigned int shift = (bits >= 32) ? 0 : (32 - bits);
                return ((Utils::ntoh((uint32_t)addr.as.sa_in.sin_addr.s_addr) >> shift) == (Utils::ntoh((uint32_t)as.sa_in.sin_addr.s_addr) >> shift));
            }
            case AF_INET6: {
                // Mask the candidate with our netmask and compare to our address bytes.
                const InetAddress mask(netmask());
                const uint8_t *const m = mask.as.sa_in6.sin6_addr.s6_addr;
                const uint8_t *const a = addr.as.sa_in6.sin6_addr.s6_addr;
                const uint8_t *const b = as.sa_in6.sin6_addr.s6_addr;
                for (unsigned int i = 0; i < 16; ++i) {
                    if ((a[i] & m[i]) != b[i])
                        return false;
                }
                return true;
            }
        }
    }
    return false;
}
bool InetAddress::isNetwork() const noexcept
{
switch (as.ss.ss_family) {
case AF_INET: {
unsigned int bits = netmaskBits();
if (bits <= 0)
return false;
if (bits >= 32)
return false;
const uint32_t ip = Utils::ntoh((uint32_t)as.sa_in.sin_addr.s_addr);
return ((ip & (0xffffffffU >> bits)) == 0);
}
case AF_INET6: {
unsigned int bits = netmaskBits();
if (bits <= 0)
return false;
if (bits >= 128)
return false;
const uint8_t *const ip = as.sa_in6.sin6_addr.s6_addr;
unsigned int p = bits / 8;
if ((ip[p++] & (0xffU >> (bits % 8))) != 0)
return false;
while (p < 16) {
if (ip[p++])
return false;
}
return true;
}
}
return false;
}
int InetAddress::marshal(uint8_t data[ZT_INETADDRESS_MARSHAL_SIZE_MAX]) const noexcept
{
    // Wire format: [1-byte family code: 0 = nil, 4 = IPv4, 6 = IPv6]
    // [address bytes][2-byte big-endian port]. Returns bytes written.
    switch (as.ss.ss_family) {
        case AF_INET: {
            const unsigned int p = Utils::ntoh((uint16_t)as.sa_in.sin_port);
            data[0] = 4;
            Utils::copy<4>(data + 1, &as.sa_in.sin_addr.s_addr); // already network byte order
            data[5] = (uint8_t)(p >> 8U);
            data[6] = (uint8_t)p;
            return 7;
        }
        case AF_INET6: {
            const unsigned int p = Utils::ntoh((uint16_t)as.sa_in6.sin6_port);
            data[0] = 6;
            Utils::copy<16>(data + 1, as.sa_in6.sin6_addr.s6_addr);
            data[17] = (uint8_t)(p >> 8U);
            data[18] = (uint8_t)p;
            return 19;
        }
        default:
            data[0] = 0;
            return 1;
    }
}
int InetAddress::unmarshal(const uint8_t *restrict data, const int len) noexcept
{
    // Inverse of marshal(). Returns bytes consumed or -1 on truncation or an
    // unrecognized family code. Always zeroes this object first.
    memoryZero(this);
    if (unlikely(len <= 0))
        return -1;
    switch (data[0]) {
        case 0:
            return 1; // nil address
        case 4:
            if (unlikely(len < 7))
                return -1;
            as.sa_in.sin_family = AF_INET;
            // Both port and address are stored big-endian on the wire, which is
            // already the sockaddr in-memory representation.
            as.sa_in.sin_port = Utils::loadMachineEndian<uint16_t>(data + 5);
            as.sa_in.sin_addr.s_addr = Utils::loadMachineEndian<uint32_t>(data + 1);
            return 7;
        case 6:
            if (unlikely(len < 19))
                return -1;
            as.sa_in6.sin6_family = AF_INET6;
            as.sa_in6.sin6_port = Utils::loadMachineEndian<uint16_t>(data + 17);
            Utils::copy<16>(as.sa_in6.sin6_addr.s6_addr, data + 1);
            return 19;
    }
    return -1;
}
InetAddress InetAddress::makeIpv6LinkLocal(const MAC &mac) noexcept
{
    // Build a fe80::/64 link-local address with an EUI-64-style host part
    // derived from the MAC (0xfffe inserted in the middle, bit 1 of the
    // first MAC byte cleared). The port field carries the prefix length.
    InetAddress r;
    uint8_t *const a = r.as.sa_in6.sin6_addr.s6_addr;
    r.as.sa_in6.sin6_family = AF_INET6;
    r.as.sa_in6.sin6_port = ZT_CONST_TO_BE_UINT16(64);
    a[0] = 0xfe;
    a[1] = 0x80;
    a[2] = 0x00;
    a[3] = 0x00;
    a[4] = 0x00;
    a[5] = 0x00;
    a[6] = 0x00;
    a[7] = 0x00;
    a[8] = mac[0] & 0xfdU; // clear the locally-administered bit position
    a[9] = mac[1];
    a[10] = mac[2];
    a[11] = 0xff;
    a[12] = 0xfe;
    a[13] = mac[3];
    a[14] = mac[4];
    a[15] = mac[5];
    return r;
}
InetAddress InetAddress::makeIpv6rfc4193(uint64_t nwid, uint64_t zeroTierAddress) noexcept
{
    // Build the RFC 4193-style ULA: 0xfd, 8-byte network ID, the 0x9993
    // marker, then the 5-byte ZeroTier address. All fields big-endian.
    InetAddress r;
    uint8_t *const a = r.as.sa_in6.sin6_addr.s6_addr;
    r.as.sa_in6.sin6_family = AF_INET6;
    r.as.sa_in6.sin6_port = ZT_CONST_TO_BE_UINT16(88); // /88 includes 0xfd + network ID, discriminating by device ID below that
    a[0] = 0xfd;
    for (unsigned int i = 0; i < 8; ++i)
        a[1 + i] = (uint8_t)(nwid >> (56U - (i * 8U)));
    a[9] = 0x99;
    a[10] = 0x93;
    for (unsigned int i = 0; i < 5; ++i)
        a[11 + i] = (uint8_t)(zeroTierAddress >> (32U - (i * 8U)));
    return r;
}
InetAddress InetAddress::makeIpv66plane(uint64_t nwid, uint64_t zeroTierAddress) noexcept
{
    // Build a "6plane" address: 0xfc, the 64-bit network ID XOR-folded to
    // 32 bits, the 5-byte ZeroTier address, and a ::1 host suffix. The port
    // field carries the /40 prefix length.
    nwid ^= (nwid >> 32U); // fold the 64-bit network ID into 32 bits
    InetAddress r;
    uint8_t *const a = r.as.sa_in6.sin6_addr.s6_addr;
    r.as.sa_in6.sin6_family = AF_INET6;
    r.as.sa_in6.sin6_port = ZT_CONST_TO_BE_UINT16(40);
    a[0] = 0xfc;
    for (unsigned int i = 0; i < 4; ++i)
        a[1 + i] = (uint8_t)(nwid >> (24U - (i * 8U)));
    for (unsigned int i = 0; i < 5; ++i)
        a[5 + i] = (uint8_t)(zeroTierAddress >> (32U - (i * 8U)));
    a[15] = 0x01;
    return r;
}
// Export the platform's AF_INET/AF_INET6 values with C linkage so C API users
// can interpret ss_family without including platform socket headers.
extern "C" {
extern const int ZT_AF_INET = (int)AF_INET;
extern const int ZT_AF_INET6 = (int)AF_INET6;
}
} // namespace ZeroTier

View file

@ -1,567 +0,0 @@
/*
* Copyright (c)2013-2021 ZeroTier, Inc.
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
*/
/****/
#ifndef ZT_INETADDRESS_HPP
#define ZT_INETADDRESS_HPP
#include "Constants.hpp"
#include "Containers.hpp"
#include "MAC.hpp"
#include "TriviallyCopyable.hpp"
#include "Utils.hpp"
namespace ZeroTier {
#define ZT_INETADDRESS_MARSHAL_SIZE_MAX 19
#define ZT_INETADDRESS_STRING_SIZE_MAX 64
/**
* C++ class that overlaps in size with sockaddr_storage and adds convenience methods
*
* This is basically a "mixin" for sockaddr_storage. It adds methods and
* operators, but does not modify the structure. This can be cast to/from
* sockaddr_storage and used interchangeably. DO NOT change this by e.g.
* adding non-static fields, since much code depends on this identity.
*/
struct InetAddress : public TriviallyCopyable {
public:
/**
* Loopback IPv4 address (no port)
*/
static const InetAddress LO4;
/**
* Loopback IPV6 address (no port)
*/
static const InetAddress LO6;
/**
* Null address
*/
static const InetAddress NIL;
/**
* IP address scope
*
* Note that these values are in ascending order of path preference and
* MUST remain that way or Path must be changed to reflect. Also be sure
* to change ZT_INETADDRESS_MAX_SCOPE if the max changes.
*/
typedef ZT_InetAddress_IpScope IpScope;
// Default construction yields the NIL address (family 0, all zero bytes).
ZT_INLINE InetAddress() noexcept { memoryZero(this); }
explicit ZT_INLINE InetAddress(const sockaddr_storage &ss) noexcept { *this = ss; }
explicit ZT_INLINE InetAddress(const sockaddr_storage *const ss) noexcept { *this = ss; }
explicit ZT_INLINE InetAddress(const sockaddr &sa) noexcept { *this = sa; }
explicit ZT_INLINE InetAddress(const sockaddr *const sa) noexcept { *this = sa; }
explicit ZT_INLINE InetAddress(const sockaddr_in &sa) noexcept { *this = sa; }
explicit ZT_INLINE InetAddress(const sockaddr_in *const sa) noexcept { *this = sa; }
explicit ZT_INLINE InetAddress(const sockaddr_in6 &sa) noexcept { *this = sa; }
explicit ZT_INLINE InetAddress(const sockaddr_in6 *const sa) noexcept { *this = sa; }
ZT_INLINE InetAddress(const void *const ipBytes, const unsigned int ipLen, const unsigned int port) noexcept { this->set(ipBytes, ipLen, port); }
ZT_INLINE InetAddress(const uint32_t ipv4, const unsigned int port) noexcept { this->set(&ipv4, 4, port); }
explicit ZT_INLINE InetAddress(const char *const ipSlashPort) noexcept { this->fromString(ipSlashPort); }
// Assignment from sockaddr_storage copies the entire structure verbatim.
ZT_INLINE InetAddress &operator=(const sockaddr_storage &ss) noexcept
{
as.ss = ss;
return *this;
}
// Pointer overloads treat NULL as "clear to NIL".
ZT_INLINE InetAddress &operator=(const sockaddr_storage *ss) noexcept
{
if (ss)
as.ss = *ss;
else
memoryZero(this);
return *this;
}
// Smaller sockaddr types: zero everything first so trailing bytes are
// deterministic (equality and hashing compare the whole structure).
ZT_INLINE InetAddress &operator=(const sockaddr_in &sa) noexcept
{
memoryZero(this);
as.sa_in = sa;
return *this;
}
ZT_INLINE InetAddress &operator=(const sockaddr_in *sa) noexcept
{
memoryZero(this);
if (sa)
as.sa_in = *sa;
return *this;
}
ZT_INLINE InetAddress &operator=(const sockaddr_in6 &sa) noexcept
{
memoryZero(this);
as.sa_in6 = sa;
return *this;
}
ZT_INLINE InetAddress &operator=(const sockaddr_in6 *sa) noexcept
{
memoryZero(this);
if (sa)
as.sa_in6 = *sa;
return *this;
}
// Generic sockaddr assignment dispatches on sa_family; unrecognized
// families leave this as NIL.
ZT_INLINE InetAddress &operator=(const sockaddr &sa) noexcept
{
memoryZero(this);
if (sa.sa_family == AF_INET)
as.sa_in = *reinterpret_cast<const sockaddr_in *>(&sa);
else if (sa.sa_family == AF_INET6)
as.sa_in6 = *reinterpret_cast<const sockaddr_in6 *>(&sa);
return *this;
}
ZT_INLINE InetAddress &operator=(const sockaddr *sa) noexcept
{
memoryZero(this);
if (sa) {
if (sa->sa_family == AF_INET)
as.sa_in = *reinterpret_cast<const sockaddr_in *>(sa);
else if (sa->sa_family == AF_INET6)
as.sa_in6 = *reinterpret_cast<const sockaddr_in6 *>(sa);
}
return *this;
}
// Reset to the NIL address (family 0).
ZT_INLINE void clear() noexcept { memoryZero(this); }
/**
* @return IP scope classification (e.g. loopback, link-local, private, global)
*/
IpScope ipScope() const noexcept;
/**
* Set from a raw IP and port number
*
* @param ipBytes Bytes of IP address in network byte order
* @param ipLen Length of IP address: 4 or 16
* @param port Port number or 0 for none
*/
void set(const void *ipBytes, unsigned int ipLen, unsigned int port) noexcept;
/**
* Set the port component
*
* @param port Port, 0 to 65535
*/
ZT_INLINE void setPort(unsigned int port) noexcept
{
switch (as.ss.ss_family) {
case AF_INET: as.sa_in.sin_port = Utils::hton((uint16_t)port); break;
case AF_INET6: as.sa_in6.sin6_port = Utils::hton((uint16_t)port); break;
}
}
/**
* @return True if this network/netmask route describes a default route (e.g. 0.0.0.0/0)
*/
bool isDefaultRoute() const noexcept;
/**
* @return ASCII IP/port format representation
*/
char *toString(char buf[ZT_INETADDRESS_STRING_SIZE_MAX]) const noexcept;
ZT_INLINE String toString() const
{
char buf[ZT_INETADDRESS_STRING_SIZE_MAX];
toString(buf);
return String(buf);
}
/**
* @return IP portion only, in ASCII string format
*/
char *toIpString(char buf[ZT_INETADDRESS_STRING_SIZE_MAX]) const noexcept;
ZT_INLINE String toIpString() const
{
char buf[ZT_INETADDRESS_STRING_SIZE_MAX];
toIpString(buf);
return String(buf);
}
/**
* @param ipSlashPort IP/port (port is optional, will be 0 if not included)
* @return True if address appeared to be valid
*/
bool fromString(const char *ipSlashPort) noexcept;
/**
* @return Port or 0 if no port component defined
*/
ZT_INLINE unsigned int port() const noexcept
{
switch (as.ss.ss_family) {
case AF_INET: return Utils::ntoh((uint16_t)as.sa_in.sin_port);
case AF_INET6: return Utils::ntoh((uint16_t)as.sa_in6.sin6_port);
default: return 0;
}
}
/**
* Alias for port()
*
* This just aliases port() to make code more readable when netmask bits
* are stuffed there, as they are in Network, EthernetTap, and a few other
* spots.
*
* @return Netmask bits
*/
ZT_INLINE unsigned int netmaskBits() const noexcept { return port(); }
/**
* @return True if netmask bits is valid for the address type
*/
ZT_INLINE bool netmaskBitsValid() const noexcept
{
const unsigned int n = port();
switch (as.ss.ss_family) {
case AF_INET: return (n <= 32);
case AF_INET6: return (n <= 128);
}
return false;
}
/**
* Alias for port()
*
* This just aliases port() because for gateways we use this field to
* store the gateway metric.
*
* @return Gateway metric
*/
ZT_INLINE unsigned int metric() const noexcept { return port(); }
/**
* Construct a full netmask as an InetAddress
*
* @return Netmask such as 255.255.255.0 if this address is /24 (port field will be unchanged)
*/
InetAddress netmask() const noexcept;
/**
* Constructs a broadcast address from a network/netmask address
*
* This is only valid for IPv4 and will return a NULL InetAddress for other
* address families.
*
* @return Broadcast address (only IP portion is meaningful)
*/
InetAddress broadcast() const noexcept;
/**
* Return the network -- a.k.a. the IP ANDed with the netmask
*
* @return Network e.g. 10.0.1.0/24 from 10.0.1.200/24
*/
InetAddress network() const noexcept;
/**
* Test whether this IPv6 prefix matches the prefix of a given IPv6 address
*
* @param addr Address to check
* @return True if this IPv6 prefix matches the prefix of a given IPv6 address
*/
bool isEqualPrefix(const InetAddress &addr) const noexcept;
/**
* Test whether this IP/netmask contains this address
*
* @param addr Address to check
* @return True if this IP/netmask (route) contains this address
*/
bool containsAddress(const InetAddress &addr) const noexcept;
/**
* @return True if this is an IPv4 address
*/
ZT_INLINE bool isV4() const noexcept { return (as.ss.ss_family == AF_INET); }
/**
* @return True if this is an IPv6 address
*/
ZT_INLINE bool isV6() const noexcept { return (as.ss.ss_family == AF_INET6); }
/**
* @return pointer to raw address bytes or NULL if not available
*/
ZT_INLINE const void *rawIpData() const noexcept
{
switch (as.ss.ss_family) {
case AF_INET: return reinterpret_cast<const void *>(&(as.sa_in.sin_addr.s_addr));
case AF_INET6: return reinterpret_cast<const void *>(as.sa_in6.sin6_addr.s6_addr);
default: return nullptr;
}
}
/**
* @return InetAddress containing only the IP portion of this address and a zero port, or NULL if not IPv4 or IPv6
*/
ZT_INLINE InetAddress ipOnly() const noexcept
{
InetAddress r;
switch (as.ss.ss_family) {
case AF_INET:
r.as.sa_in.sin_family = AF_INET;
r.as.sa_in.sin_addr.s_addr = as.sa_in.sin_addr.s_addr;
break;
case AF_INET6:
r.as.sa_in6.sin6_family = AF_INET6;
Utils::copy<16>(r.as.sa_in6.sin6_addr.s6_addr, as.sa_in6.sin6_addr.s6_addr);
break;
}
return r;
}
/**
* Performs an IP-only comparison or, if that is impossible, a memcmp()
*
* @param a InetAddress to compare again
* @return True if only IP portions are equal (false for non-IP or null addresses)
*/
ZT_INLINE bool ipsEqual(const InetAddress &a) const noexcept
{
const uint8_t f = as.ss.ss_family;
if (f == a.as.ss.ss_family) {
if (f == AF_INET)
return as.sa_in.sin_addr.s_addr == a.as.sa_in.sin_addr.s_addr;
if (f == AF_INET6)
return memcmp(as.sa_in6.sin6_addr.s6_addr, a.as.sa_in6.sin6_addr.s6_addr, 16) == 0;
return memcmp(this, &a, sizeof(InetAddress)) == 0;
}
return false;
}
/**
* Performs an IP-only comparison or, if that is impossible, a memcmp()
*
* This version compares only the first 64 bits of IPv6 addresses.
*
* @param a InetAddress to compare again
* @return True if only IP portions are equal (false for non-IP or null addresses)
*/
ZT_INLINE bool ipsEqual2(const InetAddress &a) const noexcept
{
const uint8_t f = as.ss.ss_family;
if (f == a.as.ss.ss_family) {
if (f == AF_INET)
return as.sa_in.sin_addr.s_addr == a.as.sa_in.sin_addr.s_addr;
if (f == AF_INET6)
return memcmp(as.sa_in6.sin6_addr.s6_addr, a.as.sa_in6.sin6_addr.s6_addr, 8) == 0;
return (memcmp(this, &a, sizeof(InetAddress)) == 0);
}
return false;
}
// Hash over IP and port, salted with the process-wide map nonce so hash
// table layouts are not externally predictable. Non-IP addresses fall back
// to hashing the raw bytes.
ZT_INLINE unsigned long hashCode() const noexcept
{
if (as.ss.ss_family == AF_INET) {
return (unsigned long)Utils::hash32(((uint32_t)as.sa_in.sin_addr.s_addr + (uint32_t)as.sa_in.sin_port) ^ (uint32_t)Utils::s_mapNonce);
}
else if (as.ss.ss_family == AF_INET6) {
return (unsigned long)Utils::hash64((Utils::loadMachineEndian<uint64_t>(as.sa_in6.sin6_addr.s6_addr) + Utils::loadMachineEndian<uint64_t>(as.sa_in6.sin6_addr.s6_addr + 8) + (uint64_t)as.sa_in6.sin6_port) ^ Utils::s_mapNonce);
}
return Utils::fnv1a32(this, sizeof(InetAddress));
}
/**
* Check whether this is a network/route rather than an IP assignment
*
* A network is an IP/netmask where everything after the netmask is
* zero e.g. 10.0.0.0/8.
*
* @return True if everything after netmask bits is zero
*/
bool isNetwork() const noexcept;
/**
* @return True if address family is non-zero
*/
explicit ZT_INLINE operator bool() const noexcept { return (as.ss.ss_family != 0); }
static constexpr int marshalSizeMax() noexcept { return ZT_INETADDRESS_MARSHAL_SIZE_MAX; }
int marshal(uint8_t data[ZT_INETADDRESS_MARSHAL_SIZE_MAX]) const noexcept;
int unmarshal(const uint8_t *restrict data, int len) noexcept;
// Equality compares family, port, and IP for IPv4/IPv6; other families
// fall back to a raw memcmp of the whole structure.
ZT_INLINE bool operator==(const InetAddress &a) const noexcept
{
if (as.ss.ss_family == a.as.ss.ss_family) {
if (as.ss.ss_family == AF_INET)
return ((as.sa_in.sin_port == a.as.sa_in.sin_port) && (as.sa_in.sin_addr.s_addr == a.as.sa_in.sin_addr.s_addr));
if (as.ss.ss_family == AF_INET6)
return ((as.sa_in6.sin6_port == a.as.sa_in6.sin6_port) && (memcmp(as.sa_in6.sin6_addr.s6_addr, a.as.sa_in6.sin6_addr.s6_addr, 16) == 0));
return memcmp(this, &a, sizeof(InetAddress)) == 0;
}
return false;
}
// Total ordering: first by family, then by host-order port, then by IP.
// Ports and IPv4 addresses are byte-swapped to host order so the ordering
// is numeric, not byte-lexicographic.
ZT_INLINE bool operator<(const InetAddress &a) const noexcept
{
if (as.ss.ss_family == a.as.ss.ss_family) {
if (as.ss.ss_family == AF_INET) {
const uint16_t p0 = Utils::ntoh((uint16_t)as.sa_in.sin_port);
const uint16_t p1 = Utils::ntoh((uint16_t)a.as.sa_in.sin_port);
if (p0 == p1)
return Utils::ntoh((uint32_t)as.sa_in.sin_addr.s_addr) < Utils::ntoh((uint32_t)a.as.sa_in.sin_addr.s_addr);
return p0 < p1;
}
if (as.ss.ss_family == AF_INET6) {
const uint16_t p0 = Utils::ntoh((uint16_t)as.sa_in6.sin6_port);
const uint16_t p1 = Utils::ntoh((uint16_t)a.as.sa_in6.sin6_port);
if (p0 == p1)
return memcmp(as.sa_in6.sin6_addr.s6_addr, a.as.sa_in6.sin6_addr.s6_addr, 16) < 0;
return p0 < p1;
}
return memcmp(this, &a, sizeof(InetAddress)) < 0;
}
return as.ss.ss_family < a.as.ss.ss_family;
}
ZT_INLINE bool operator!=(const InetAddress &a) const noexcept { return !(*this == a); }
ZT_INLINE bool operator>(const InetAddress &a) const noexcept { return (a < *this); }
ZT_INLINE bool operator<=(const InetAddress &a) const noexcept { return !(a < *this); }
ZT_INLINE bool operator>=(const InetAddress &a) const noexcept { return !(*this < a); }
/**
* Compute an IPv6 link-local address
*
* @param mac MAC address seed
* @return IPv6 link-local address
*/
static InetAddress makeIpv6LinkLocal(const MAC &mac) noexcept;
/**
* Compute private IPv6 unicast address from network ID and ZeroTier address
*
* This generates a private unicast IPv6 address that is mostly compliant
* with the letter of RFC4193 and certainly compliant in spirit.
*
* RFC4193 specifies a format of:
*
* | 7 bits |1| 40 bits | 16 bits | 64 bits |
* | Prefix |L| Global ID | Subnet ID | Interface ID |
*
* The 'L' bit is set to 1, yielding an address beginning with 0xfd. Then
* the network ID is filled into the global ID, subnet ID, and first byte
* of the "interface ID" field. Since the first 40 bits of the network ID
* is the unique ZeroTier address of its controller, this makes a very
* good random global ID. Since network IDs have 24 more bits, we let it
* overflow into the interface ID.
*
* After that we pad with two bytes: 0x99, 0x93, namely the default ZeroTier
* port in hex.
*
* Finally we fill the remaining 40 bits of the interface ID field with
* the 40-bit unique ZeroTier device ID of the network member.
*
* This yields a valid RFC4193 address with a random global ID, a
* meaningful subnet ID, and a unique interface ID, all mappable back onto
* ZeroTier space.
*
* This in turn could allow us, on networks numbered this way, to emulate
* IPv6 NDP and eliminate all multicast. This could be beneficial for
* small devices and huge networks, e.g. IoT applications.
*
* The returned address is given an odd prefix length of /88, since within
* a given network only the last 40 bits (device ID) are variable. This
* is a bit unusual but as far as we know should not cause any problems with
* any non-braindead IPv6 stack.
*
* @param nwid 64-bit network ID
* @param zeroTierAddress 40-bit device address (in least significant 40 bits, highest 24 bits ignored)
* @return IPv6 private unicast address with /88 netmask
*/
static InetAddress makeIpv6rfc4193(uint64_t nwid, uint64_t zeroTierAddress) noexcept;
/**
* Compute a private IPv6 "6plane" unicast address from network ID and ZeroTier address
*/
static InetAddress makeIpv66plane(uint64_t nwid, uint64_t zeroTierAddress) noexcept;
/**
* Union allowing this to be accessed as a sockaddr of any supported type.
*/
union {
sockaddr_storage ss;
sockaddr sa;
sockaddr_in sa_in;
sockaddr_in6 sa_in6;
} as;
};
// Zero-cost reinterpreting casts between the platform sockaddr family (and
// the C API's ZT_InetAddress) and InetAddress, in pointer, const-pointer,
// reference, and const-reference flavors. These rely on InetAddress adding
// no data members beyond its union overlapping sockaddr_storage.
static ZT_INLINE InetAddress *asInetAddress(sockaddr_in *const p) noexcept { return reinterpret_cast<InetAddress *>(p); }
static ZT_INLINE InetAddress *asInetAddress(sockaddr_in6 *const p) noexcept { return reinterpret_cast<InetAddress *>(p); }
static ZT_INLINE InetAddress *asInetAddress(sockaddr *const p) noexcept { return reinterpret_cast<InetAddress *>(p); }
static ZT_INLINE InetAddress *asInetAddress(sockaddr_storage *const p) noexcept { return reinterpret_cast<InetAddress *>(p); }
static ZT_INLINE InetAddress *asInetAddress(ZT_InetAddress *const p) noexcept { return reinterpret_cast<InetAddress *>(p); }
static ZT_INLINE const InetAddress *asInetAddress(const sockaddr_in *const p) noexcept { return reinterpret_cast<const InetAddress *>(p); }
static ZT_INLINE const InetAddress *asInetAddress(const sockaddr_in6 *const p) noexcept { return reinterpret_cast<const InetAddress *>(p); }
static ZT_INLINE const InetAddress *asInetAddress(const sockaddr *const p) noexcept { return reinterpret_cast<const InetAddress *>(p); }
static ZT_INLINE const InetAddress *asInetAddress(const sockaddr_storage *const p) noexcept { return reinterpret_cast<const InetAddress *>(p); }
static ZT_INLINE const InetAddress *asInetAddress(const ZT_InetAddress *const p) noexcept { return reinterpret_cast<const InetAddress *>(p); }
static ZT_INLINE InetAddress &asInetAddress(sockaddr_in &p) noexcept { return *reinterpret_cast<InetAddress *>(&p); }
static ZT_INLINE InetAddress &asInetAddress(sockaddr_in6 &p) noexcept { return *reinterpret_cast<InetAddress *>(&p); }
static ZT_INLINE InetAddress &asInetAddress(sockaddr &p) noexcept { return *reinterpret_cast<InetAddress *>(&p); }
static ZT_INLINE InetAddress &asInetAddress(sockaddr_storage &p) noexcept { return *reinterpret_cast<InetAddress *>(&p); }
static ZT_INLINE InetAddress &asInetAddress(ZT_InetAddress &p) noexcept { return *reinterpret_cast<InetAddress *>(&p); }
static ZT_INLINE const InetAddress &asInetAddress(const sockaddr_in &p) noexcept { return *reinterpret_cast<const InetAddress *>(&p); }
static ZT_INLINE const InetAddress &asInetAddress(const sockaddr_in6 &p) noexcept { return *reinterpret_cast<const InetAddress *>(&p); }
static ZT_INLINE const InetAddress &asInetAddress(const sockaddr &p) noexcept { return *reinterpret_cast<const InetAddress *>(&p); }
static ZT_INLINE const InetAddress &asInetAddress(const sockaddr_storage &p) noexcept { return *reinterpret_cast<const InetAddress *>(&p); }
static ZT_INLINE const InetAddress &asInetAddress(const ZT_InetAddress &p) noexcept { return *reinterpret_cast<const InetAddress *>(&p); }
} // namespace ZeroTier
#endif

View file

@ -1,872 +0,0 @@
/*
* LZ4 - Fast LZ compression algorithm
* Header File
* Copyright (C) 2011-2016, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- LZ4 homepage : http://www.lz4.org
- LZ4 source repository : https://github.com/lz4/lz4
*/
// Some modifications were made for ZeroTier but this code remains under the
// original LZ4 license.
#include "LZ4.hpp"
#include "Utils.hpp"
#include <cstddef>
#include <cstdlib>
#include <cstring>
#ifdef _MSC_VER
#define FORCE_INLINE __forceinline
#include <intrin.h>
#pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
#pragma warning(disable : 4293) /* disable: C4293: too large shift (32-bits) */
#else
#define FORCE_INLINE ZT_INLINE
#endif
namespace ZeroTier {
namespace {
// #define LZ4_VERSION_MAJOR 1 /* for breaking interface changes */
// #define LZ4_VERSION_MINOR 7 /* for new (non-breaking) interface capabilities */
// #define LZ4_VERSION_RELEASE 5 /* for tweaks, bug-fixes, or development */
#define LZ4_MEMORY_USAGE 14
typedef union LZ4_stream_u LZ4_stream_t; /* incomplete type (defined later) */
FORCE_INLINE void LZ4_resetStream(LZ4_stream_t *streamPtr);
#define LZ4_HASHLOG (LZ4_MEMORY_USAGE - 2)
#define LZ4_HASH_SIZE_U32 (1 << LZ4_HASHLOG) /* required as macro for static allocation */
// Compression state: a hash table mapping 4/5-byte sequences to recent input
// positions, plus bookkeeping for an optional external dictionary.
typedef struct {
uint32_t hashTable[LZ4_HASH_SIZE_U32];
uint32_t currentOffset;
uint32_t initCheck;
const uint8_t *dictionary;
uint8_t *bufferStart; /* obsolete, used for slideInputBuffer */
uint32_t dictSize;
} LZ4_stream_t_internal;
// Streaming decompression state: external dictionary plus the already-decoded
// prefix, so matches can reference data from earlier calls.
typedef struct {
const uint8_t *externalDict;
size_t extDictSize;
const uint8_t *prefixEnd;
size_t prefixSize;
} LZ4_streamDecode_t_internal;
#define LZ4_STREAMSIZE_U64 ((1 << (LZ4_MEMORY_USAGE - 3)) + 4)
// The public unions size the state as raw 64-bit words, keeping the internal
// layout opaque while guaranteeing suitable size and alignment.
union LZ4_stream_u {
unsigned long long table[LZ4_STREAMSIZE_U64];
LZ4_stream_t_internal internal_donotuse;
}; /* previously typedef'd to LZ4_stream_t */
#define LZ4_STREAMDECODESIZE_U64 4
union LZ4_streamDecode_u {
unsigned long long table[LZ4_STREAMDECODESIZE_U64];
LZ4_streamDecode_t_internal internal_donotuse;
}; /* previously typedef'd to LZ4_streamDecode_t */
#ifndef HEAPMODE
#define HEAPMODE 0
#endif
// Select how unaligned memory is accessed: 0 = memcpy-based (always safe),
// 2 = direct dereference (fastest, only when the target tolerates it).
#ifdef ZT_NO_UNALIGNED_ACCESS
#define LZ4_FORCE_MEMORY_ACCESS 0
#else
#define LZ4_FORCE_MEMORY_ACCESS 2
#endif
#if defined(_MSC_VER) && defined(_WIN32_WCE)
#define LZ4_FORCE_SW_BITCOUNT
#endif
#define ALLOCATOR(n, s) calloc(n, s)
#define FREEMEM free
// Short fixed-width aliases used throughout the (originally C) LZ4 code.
typedef uint8_t BYTE;
typedef uint16_t U16;
typedef uint32_t U32;
typedef int32_t S32;
typedef uint64_t U64;
typedef uintptr_t uptrval;
typedef uintptr_t reg_t; // natural machine-word type (4 or 8 bytes)
#if __BYTE_ORDER == __LITTLE_ENDIAN
#define LZ4_isLittleEndian() (1)
#else
#define LZ4_isLittleEndian() (0)
#endif
// Three interchangeable implementations of unaligned load/store, chosen by
// LZ4_FORCE_MEMORY_ACCESS above. All read/write in native byte order.
#if defined(LZ4_FORCE_MEMORY_ACCESS) && (LZ4_FORCE_MEMORY_ACCESS == 2)
// Strategy 2: plain dereference of a possibly-unaligned pointer.
FORCE_INLINE U16 LZ4_read16(const void *memPtr) { return *(const U16 *)memPtr; }
FORCE_INLINE U32 LZ4_read32(const void *memPtr) { return *(const U32 *)memPtr; }
FORCE_INLINE reg_t LZ4_read_ARCH(const void *memPtr) { return *(const reg_t *)memPtr; }
FORCE_INLINE void LZ4_write16(void *memPtr, U16 value) { *(U16 *)memPtr = value; }
FORCE_INLINE void LZ4_write32(void *memPtr, U32 value) { *(U32 *)memPtr = value; }
#elif defined(LZ4_FORCE_MEMORY_ACCESS) && (LZ4_FORCE_MEMORY_ACCESS == 1)
// Strategy 1: access through a packed union so the compiler emits safe
// unaligned loads/stores.
typedef union {
U16 u16;
U32 u32;
reg_t uArch;
} __attribute__((packed)) unalign;
FORCE_INLINE U16 LZ4_read16(const void *ptr) { return ((const unalign *)ptr)->u16; }
FORCE_INLINE U32 LZ4_read32(const void *ptr) { return ((const unalign *)ptr)->u32; }
FORCE_INLINE reg_t LZ4_read_ARCH(const void *ptr) { return ((const unalign *)ptr)->uArch; }
FORCE_INLINE void LZ4_write16(void *memPtr, U16 value) { ((unalign *)memPtr)->u16 = value; }
FORCE_INLINE void LZ4_write32(void *memPtr, U32 value) { ((unalign *)memPtr)->u32 = value; }
#else /* safe and portable */
// Strategy 0: byte-wise copy; works on any alignment and any target.
FORCE_INLINE U16 LZ4_read16(const void *memPtr)
{
U16 val;
Utils::copy(&val, memPtr, sizeof(val));
return val;
}
FORCE_INLINE U32 LZ4_read32(const void *memPtr)
{
U32 val;
Utils::copy(&val, memPtr, sizeof(val));
return val;
}
FORCE_INLINE reg_t LZ4_read_ARCH(const void *memPtr)
{
reg_t val;
Utils::copy(&val, memPtr, sizeof(val));
return val;
}
FORCE_INLINE void LZ4_write16(void *memPtr, U16 value) { Utils::copy(memPtr, &value, sizeof(value)); }
FORCE_INLINE void LZ4_write32(void *memPtr, U32 value) { Utils::copy(memPtr, &value, sizeof(value)); }
#endif /* LZ4_FORCE_MEMORY_ACCESS */
// Read a 16-bit little-endian value (the LZ4 wire format for match offsets)
// regardless of host byte order.
FORCE_INLINE U16 LZ4_readLE16(const void *memPtr)
{
if (!LZ4_isLittleEndian()) {
// Big-endian host: assemble the value byte by byte.
const BYTE *const bytes = (const BYTE *)memPtr;
return (U16)((U16)bytes[0] + (bytes[1] << 8));
}
// Little-endian host: a native load is already in the right order.
return LZ4_read16(memPtr);
}
// Store a 16-bit value in little-endian order (LZ4 wire format) regardless
// of host byte order.
FORCE_INLINE void LZ4_writeLE16(void *memPtr, U16 value)
{
if (!LZ4_isLittleEndian()) {
// Big-endian host: write the low byte first, then the high byte.
BYTE *const bytes = (BYTE *)memPtr;
bytes[0] = (BYTE)value;
bytes[1] = (BYTE)(value >> 8);
return;
}
// Little-endian host: a native store already has the right layout.
LZ4_write16(memPtr, value);
}
// Copy exactly 8 bytes (alignment-agnostic).
FORCE_INLINE void LZ4_copy8(void *dst, const void *src) { Utils::copy<8>(dst, src); }
// Copy in 8-byte strides until dstEnd is reached. NOTE: because it always
// copies whole 8-byte chunks, this can write up to 7 bytes PAST dstEnd;
// callers must guarantee that slack exists in the output buffer.
FORCE_INLINE void LZ4_wildCopy(void *dstPtr, const void *srcPtr, void *dstEnd)
{
BYTE *d = (BYTE *)dstPtr;
const BYTE *s = (const BYTE *)srcPtr;
BYTE *const e = (BYTE *)dstEnd;
do {
LZ4_copy8(d, s);
d += 8;
s += 8;
} while (d < e);
}
// LZ4 block-format constants: minimum match length, copy stride, and the
// literal/match length encoding fields of the token byte.
#define MINMATCH 4
#define WILDCOPYLENGTH 8
#define LASTLITERALS 5
#define MFLIMIT (WILDCOPYLENGTH + MINMATCH)
static const int LZ4_minLength = (MFLIMIT + 1);
#define KB *(1 << 10)
// #define MB *(1 <<20)
// #define GB *(1U<<30)
#define MAXD_LOG 16
#define MAX_DISTANCE ((1 << MAXD_LOG) - 1)
#define ML_BITS 4
#define ML_MASK ((1U << ML_BITS) - 1)
#define RUN_BITS (8 - ML_BITS)
#define RUN_MASK ((1U << RUN_BITS) - 1)
// #define LZ4_STATIC_ASSERT(c) { enum { LZ4_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable
// declarations */
// Given the nonzero XOR of two machine words, return how many of their bytes
// (in memory order) are identical: trailing-zero-byte count on little-endian,
// leading-zero-byte count on big-endian. Uses compiler bit-scan intrinsics
// where available, with De Bruijn / shift-based software fallbacks.
FORCE_INLINE unsigned LZ4_NbCommonBytes(reg_t val)
{
if (LZ4_isLittleEndian()) {
if (sizeof(val) == 8) {
#if defined(_MSC_VER) && defined(_WIN64) && !defined(LZ4_FORCE_SW_BITCOUNT)
unsigned long r = 0;
_BitScanForward64(&r, (U64)val);
return (int)(r >> 3);
#elif (defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 3))) && !defined(LZ4_FORCE_SW_BITCOUNT)
return (__builtin_ctzll((U64)val) >> 3);
#else
static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2, 0, 3, 1, 3, 1, 4, 2, 7, 0, 2, 3, 6, 1, 5, 3, 5, 1, 3, 4, 4, 2, 5, 6, 7, 7, 0, 1, 2, 3, 3, 4, 6, 2, 6, 5, 5, 3, 4, 5, 6, 7, 1, 2, 4, 6, 4, 4, 5, 7, 2, 6, 5, 7, 6, 7, 7 };
return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58];
#endif
}
else /* 32 bits */ {
#if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
unsigned long r;
_BitScanForward(&r, (U32)val);
return (int)(r >> 3);
#elif (defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 3))) && !defined(LZ4_FORCE_SW_BITCOUNT)
return (__builtin_ctz((U32)val) >> 3);
#else
static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0, 3, 2, 2, 1, 3, 2, 0, 1, 3, 3, 1, 2, 2, 2, 2, 0, 3, 1, 2, 0, 1, 0, 1, 1 };
return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27];
#endif
}
}
else /* Big Endian CPU */ {
if (sizeof(val) == 8) {
#if defined(_MSC_VER) && defined(_WIN64) && !defined(LZ4_FORCE_SW_BITCOUNT)
unsigned long r = 0;
_BitScanReverse64(&r, val);
return (unsigned)(r >> 3);
#elif (defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 3))) && !defined(LZ4_FORCE_SW_BITCOUNT)
return (__builtin_clzll((U64)val) >> 3);
#else
unsigned r;
if (!(val >> 32)) {
r = 4;
}
else {
r = 0;
val >>= 32;
}
if (!(val >> 16)) {
r += 2;
val >>= 8;
}
else {
val >>= 24;
}
r += (!val);
return r;
#endif
}
else /* 32 bits */ {
#if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
unsigned long r = 0;
_BitScanReverse(&r, (unsigned long)val);
return (unsigned)(r >> 3);
#elif (defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 3))) && !defined(LZ4_FORCE_SW_BITCOUNT)
return (__builtin_clz((U32)val) >> 3);
#else
unsigned r;
if (!(val >> 16)) {
r = 2;
val >>= 8;
}
else {
r = 0;
val >>= 24;
}
r += (!val);
return r;
#endif
}
}
}
#define STEPSIZE sizeof(reg_t)
// Count how many bytes match between pIn and pMatch, scanning forward up to
// pInLimit. Compares a machine word at a time, then narrows to 4/2/1-byte
// tail comparisons. Returns the match length in bytes.
FORCE_INLINE unsigned LZ4_count(const BYTE *pIn, const BYTE *pMatch, const BYTE *pInLimit)
{
const BYTE *const pStart = pIn;
while (likely(pIn < pInLimit - (STEPSIZE - 1))) {
// XOR of equal words is zero; the first nonzero byte marks the mismatch.
reg_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn);
if (!diff) {
pIn += STEPSIZE;
pMatch += STEPSIZE;
continue;
}
pIn += LZ4_NbCommonBytes(diff);
return (unsigned)(pIn - pStart);
}
// Tail: on 64-bit hosts check a remaining 4-byte chunk, then 2, then 1.
if ((STEPSIZE == 8) && (pIn < (pInLimit - 3)) && (LZ4_read32(pMatch) == LZ4_read32(pIn))) {
pIn += 4;
pMatch += 4;
}
if ((pIn < (pInLimit - 1)) && (LZ4_read16(pMatch) == LZ4_read16(pIn))) {
pIn += 2;
pMatch += 2;
}
if ((pIn < pInLimit) && (*pMatch == *pIn))
pIn++;
return (unsigned)(pIn - pStart);
}
static const int LZ4_64Klimit = ((64 KB) + (MFLIMIT - 1));
static const U32 LZ4_skipTrigger = 6; /* Increase this value ==> compression run slower on incompressible data */
// Directives threaded through LZ4_compress_generic as template-like enums so
// the compiler can specialize out dead branches.
typedef enum { notLimited = 0, limitedOutput = 1 } limitedOutput_directive;
typedef enum { byPtr, byU32, byU16 } tableType_t;
typedef enum { noDict = 0, withPrefix64k, usingExtDict } dict_directive;
typedef enum { noDictIssue = 0, dictSmall } dictIssue_directive;
typedef enum { endOnOutputSize = 0, endOnInputSize = 1 } endCondition_directive;
typedef enum { full = 0, partial = 1 } earlyEnd_directive;
// Worst-case compressed size for isize input bytes (macro from LZ4.hpp).
FORCE_INLINE int LZ4_compressBound(int isize) { return LZ4_COMPRESSBOUND(isize); }
// Hash a 4-byte sequence via Knuth multiplicative hashing; byU16 tables use
// one extra bit of hash (twice the slots) since their entries are smaller.
FORCE_INLINE U32 LZ4_hash4(U32 sequence, tableType_t const tableType)
{
if (tableType == byU16)
return ((sequence * 2654435761U) >> ((MINMATCH * 8) - (LZ4_HASHLOG + 1)));
else
return ((sequence * 2654435761U) >> ((MINMATCH * 8) - LZ4_HASHLOG));
}
// Hash 5 bytes of a 64-bit word read from the input: the shift by 24 keeps
// the 40 bits nearest the read position on each endianness, multiplied by a
// large prime and reduced to hashLog bits.
FORCE_INLINE U32 LZ4_hash5(U64 sequence, tableType_t const tableType)
{
static const U64 prime5bytes = 889523592379ULL;
static const U64 prime8bytes = 11400714785074694791ULL;
const U32 hashLog = (tableType == byU16) ? LZ4_HASHLOG + 1 : LZ4_HASHLOG;
if (LZ4_isLittleEndian())
return (U32)(((sequence << 24) * prime5bytes) >> (64 - hashLog));
else
return (U32)(((sequence >> 24) * prime8bytes) >> (64 - hashLog));
}
// Hash the bytes at input position p: 5-byte hash on 64-bit hosts (better
// selectivity), 4-byte hash otherwise or for byU16 tables.
FORCE_INLINE U32 LZ4_hashPosition(const void *const p, tableType_t const tableType)
{
if ((sizeof(reg_t) == 8) && (tableType != byU16))
return LZ4_hash5(LZ4_read_ARCH(p), tableType);
return LZ4_hash4(LZ4_read32(p), tableType);
}
// Record position p in hash slot h. The table stores either raw pointers
// (byPtr) or 32-/16-bit offsets from srcBase, depending on tableType.
FORCE_INLINE void LZ4_putPositionOnHash(const BYTE *p, U32 h, void *tableBase, tableType_t const tableType, const BYTE *srcBase)
{
switch (tableType) {
case byPtr: {
const BYTE **hashTable = (const BYTE **)tableBase;
hashTable[h] = p;
return;
}
case byU32: {
U32 *hashTable = (U32 *)tableBase;
hashTable[h] = (U32)(p - srcBase);
return;
}
case byU16: {
U16 *hashTable = (U16 *)tableBase;
hashTable[h] = (U16)(p - srcBase);
return;
}
}
}
// Convenience wrapper: hash the bytes at p, then record p under that hash.
FORCE_INLINE void LZ4_putPosition(const BYTE *p, void *tableBase, tableType_t tableType, const BYTE *srcBase)
{
U32 const h = LZ4_hashPosition(p, tableType);
LZ4_putPositionOnHash(p, h, tableBase, tableType, srcBase);
}
// Fetch the input position stored in hash slot h, converting stored offsets
// back to pointers relative to srcBase for the byU32/byU16 table types.
FORCE_INLINE const BYTE *LZ4_getPositionOnHash(U32 h, void *tableBase, tableType_t tableType, const BYTE *srcBase)
{
if (tableType == byPtr) {
const BYTE **hashTable = (const BYTE **)tableBase;
return hashTable[h];
}
if (tableType == byU32) {
const U32 *const hashTable = (U32 *)tableBase;
return hashTable[h] + srcBase;
}
{
const U16 *const hashTable = (U16 *)tableBase;
return hashTable[h] + srcBase;
} /* default, to ensure a return */
}
// Convenience wrapper: hash the bytes at p, then look up the previous
// position recorded under that hash (a candidate match).
FORCE_INLINE const BYTE *LZ4_getPosition(const BYTE *p, void *tableBase, tableType_t tableType, const BYTE *srcBase)
{
U32 const h = LZ4_hashPosition(p, tableType);
return LZ4_getPositionOnHash(h, tableBase, tableType, srcBase);
}
/* Core LZ4 block compressor (from upstream LZ4 by Yann Collet), specialized at
 * compile time by its directive parameters: output limiting, hash table type,
 * and dictionary mode. Returns the number of bytes written to 'dest', or 0 on
 * failure (input too large, or output buffer too small when outputLimited). */
FORCE_INLINE int LZ4_compress_generic(LZ4_stream_t_internal *const cctx, const char *const source, char *const dest, const int inputSize, const int maxOutputSize, const limitedOutput_directive outputLimited, const tableType_t tableType, const dict_directive dict, const dictIssue_directive dictIssue, const U32 acceleration)
{
    const BYTE *ip = (const BYTE *)source;
    const BYTE *base;
    const BYTE *lowLimit;
    const BYTE *const lowRefLimit = ip - cctx->dictSize;
    const BYTE *const dictionary = cctx->dictionary;
    const BYTE *const dictEnd = dictionary + cctx->dictSize;
    const ptrdiff_t dictDelta = dictEnd - (const BYTE *)source;
    const BYTE *anchor = (const BYTE *)source;
    const BYTE *const iend = ip + inputSize;
    const BYTE *const mflimit = iend - MFLIMIT;
    const BYTE *const matchlimit = iend - LASTLITERALS;
    BYTE *op = (BYTE *)dest;
    BYTE *const olimit = op + maxOutputSize;
    U32 forwardH;
    /* Init conditions */
    if ((U32)inputSize > (U32)LZ4_MAX_INPUT_SIZE)
        return 0; /* Unsupported inputSize, too large (or negative) */
    /* Select the match-window base and lower bound per dictionary mode. */
    switch (dict) {
        case noDict:
        default:
            base = (const BYTE *)source;
            lowLimit = (const BYTE *)source;
            break;
        case withPrefix64k:
            base = (const BYTE *)source - cctx->currentOffset;
            lowLimit = (const BYTE *)source - cctx->dictSize;
            break;
        case usingExtDict:
            base = (const BYTE *)source - cctx->currentOffset;
            lowLimit = (const BYTE *)source;
            break;
    }
    if ((tableType == byU16) && (inputSize >= LZ4_64Klimit))
        return 0; /* Size too large (not within 64K limit) */
    if (inputSize < LZ4_minLength)
        goto _last_literals; /* Input too small, no compression (all literals) */
    /* First Byte */
    LZ4_putPosition(ip, cctx->hashTable, tableType, base);
    ip++;
    forwardH = LZ4_hashPosition(ip, tableType);
    /* Main Loop */
    for (;;) {
        ptrdiff_t refDelta = 0;
        const BYTE *match;
        BYTE *token;
        /* Find a match: probe the hash table, skipping ahead faster and faster
         * while no match is found (LZ4's skip-trigger heuristic, scaled by the
         * 'acceleration' factor). */
        {
            const BYTE *forwardIp = ip;
            unsigned step = 1;
            unsigned searchMatchNb = acceleration << LZ4_skipTrigger;
            do {
                U32 const h = forwardH;
                ip = forwardIp;
                forwardIp += step;
                step = (searchMatchNb++ >> LZ4_skipTrigger);
                if (unlikely(forwardIp > mflimit))
                    goto _last_literals;
                match = LZ4_getPositionOnHash(h, cctx->hashTable, tableType, base);
                if (dict == usingExtDict) {
                    if (match < (const BYTE *)source) {
                        /* Candidate lies in the external dictionary. */
                        refDelta = dictDelta;
                        lowLimit = dictionary;
                    }
                    else {
                        refDelta = 0;
                        lowLimit = (const BYTE *)source;
                    }
                }
                forwardH = LZ4_hashPosition(forwardIp, tableType);
                LZ4_putPositionOnHash(ip, h, cctx->hashTable, tableType, base);
            } while (((dictIssue == dictSmall) ? (match < lowRefLimit) : 0) || ((tableType == byU16) ? 0 : (match + MAX_DISTANCE < ip)) || (LZ4_read32(match + refDelta) != LZ4_read32(ip)));
        }
        /* Catch up: extend the match backwards over identical preceding bytes. */
        while (((ip > anchor) & (match + refDelta > lowLimit)) && (unlikely(ip[-1] == match[refDelta - 1]))) {
            ip--;
            match--;
        }
        /* Encode Literals */
        {
            unsigned const litLength = (unsigned)(ip - anchor);
            token = op++;
            if ((outputLimited) && /* Check output buffer overflow */
                (unlikely(op + litLength + (2 + 1 + LASTLITERALS) + (litLength / 255) > olimit)))
                return 0;
            if (litLength >= RUN_MASK) {
                /* Literal run too long for the token nibble: spill into 255-byte chunks. */
                int len = (int)litLength - RUN_MASK;
                *token = (RUN_MASK << ML_BITS);
                for (; len >= 255; len -= 255)
                    *op++ = 255;
                *op++ = (BYTE)len;
            }
            else
                *token = (BYTE)(litLength << ML_BITS);
            /* Copy Literals */
            LZ4_wildCopy(op, anchor, op + litLength);
            op += litLength;
        }
    _next_match:
        /* Encode Offset */
        LZ4_writeLE16(op, (U16)(ip - match));
        op += 2;
        /* Encode MatchLength */
        {
            unsigned matchCode;
            if ((dict == usingExtDict) && (lowLimit == dictionary)) {
                /* Match starts in the external dictionary and may continue into
                 * the current block, so count it in two segments. */
                const BYTE *limit;
                match += refDelta;
                limit = ip + (dictEnd - match);
                if (limit > matchlimit)
                    limit = matchlimit;
                matchCode = LZ4_count(ip + MINMATCH, match + MINMATCH, limit);
                ip += MINMATCH + matchCode;
                if (ip == limit) {
                    unsigned const more = LZ4_count(ip, (const BYTE *)source, matchlimit);
                    matchCode += more;
                    ip += more;
                }
            }
            else {
                matchCode = LZ4_count(ip + MINMATCH, match + MINMATCH, matchlimit);
                ip += MINMATCH + matchCode;
            }
            if (outputLimited && /* Check output buffer overflow */
                (unlikely(op + (1 + LASTLITERALS) + (matchCode >> 8) > olimit)))
                return 0;
            if (matchCode >= ML_MASK) {
                /* Match too long for the token nibble: emit 255-byte chunks,
                 * pre-writing 0xFF runs four at a time. */
                *token += ML_MASK;
                matchCode -= ML_MASK;
                LZ4_write32(op, 0xFFFFFFFF);
                while (matchCode >= 4 * 255)
                    op += 4, LZ4_write32(op, 0xFFFFFFFF), matchCode -= 4 * 255;
                op += matchCode / 255;
                *op++ = (BYTE)(matchCode % 255);
            }
            else
                *token += (BYTE)(matchCode);
        }
        anchor = ip;
        /* Test end of chunk */
        if (ip > mflimit)
            break;
        /* Fill table */
        LZ4_putPosition(ip - 2, cctx->hashTable, tableType, base);
        /* Test next position */
        match = LZ4_getPosition(ip, cctx->hashTable, tableType, base);
        if (dict == usingExtDict) {
            if (match < (const BYTE *)source) {
                refDelta = dictDelta;
                lowLimit = dictionary;
            }
            else {
                refDelta = 0;
                lowLimit = (const BYTE *)source;
            }
        }
        LZ4_putPosition(ip, cctx->hashTable, tableType, base);
        if (((dictIssue == dictSmall) ? (match >= lowRefLimit) : 1) && (match + MAX_DISTANCE >= ip) && (LZ4_read32(match + refDelta) == LZ4_read32(ip))) {
            /* Immediate follow-on match: emit a zero-literal token and loop. */
            token = op++;
            *token = 0;
            goto _next_match;
        }
        /* Prepare next loop */
        forwardH = LZ4_hashPosition(++ip, tableType);
    }
_last_literals:
    /* Encode Last Literals */
    {
        size_t const lastRun = (size_t)(iend - anchor);
        if ((outputLimited) && /* Check output buffer overflow */
            ((op - (BYTE *)dest) + lastRun + 1 + ((lastRun + 255 - RUN_MASK) / 255) > (U32)maxOutputSize))
            return 0;
        if (lastRun >= RUN_MASK) {
            size_t accumulator = lastRun - RUN_MASK;
            *op++ = RUN_MASK << ML_BITS;
            for (; accumulator >= 255; accumulator -= 255)
                *op++ = 255;
            *op++ = (BYTE)accumulator;
        }
        else {
            *op++ = (BYTE)(lastRun << ML_BITS);
        }
        Utils::copy(op, anchor, lastRun);
        op += lastRun;
    }
    /* End */
    return (int)(((char *)op) - dest);
}
/* One-shot compression using caller-supplied state. Resets the stream state,
 * then dispatches to the generic compressor with the table type and output
 * limiting chosen from the input size and output-buffer guarantee. */
ZT_INLINE int LZ4_compress_fast_extState(void *state, const char *source, char *dest, int inputSize, int maxOutputSize, int acceleration)
{
    LZ4_stream_t_internal *const internal = &((LZ4_stream_t *)state)->internal_donotuse;
    LZ4_resetStream((LZ4_stream_t *)state);
    // if (acceleration < 1) acceleration = ACCELERATION_DEFAULT;
    const tableType_t wideTable = (sizeof(void *) == 8) ? byU32 : byPtr;
    if (maxOutputSize >= LZ4_compressBound(inputSize)) {
        /* Output buffer is guaranteed large enough: skip per-write bounds checks. */
        if (inputSize < LZ4_64Klimit)
            return LZ4_compress_generic(internal, source, dest, inputSize, 0, notLimited, byU16, noDict, noDictIssue, acceleration);
        return LZ4_compress_generic(internal, source, dest, inputSize, 0, notLimited, wideTable, noDict, noDictIssue, acceleration);
    }
    if (inputSize < LZ4_64Klimit)
        return LZ4_compress_generic(internal, source, dest, inputSize, maxOutputSize, limitedOutput, byU16, noDict, noDictIssue, acceleration);
    return LZ4_compress_generic(internal, source, dest, inputSize, maxOutputSize, limitedOutput, wideTable, noDict, noDictIssue, acceleration);
}
FORCE_INLINE void LZ4_resetStream(LZ4_stream_t *LZ4_stream) { Utils::zero<sizeof(LZ4_stream_t)>(LZ4_stream); }
/* Core LZ4 block decompressor (from upstream LZ4 by Yann Collet), specialized
 * by its directive parameters. In endOnInputSize mode it validates bounds and
 * returns the number of bytes decoded, or a negative value on malformed input
 * (the negated input offset of the error, minus one). */
FORCE_INLINE int LZ4_decompress_generic(
    const char *const source, char *const dest, int inputSize, int outputSize, /* If endOnInput==endOnInputSize, this value is the max size of Output Buffer. */
    int endOnInput, /* endOnOutputSize, endOnInputSize */
    int partialDecoding, /* full, partial */
    int targetOutputSize, /* only used if partialDecoding==partial */
    int dict, /* noDict, withPrefix64k, usingExtDict */
    const BYTE *const lowPrefix, /* == dest when no prefix */
    const BYTE *const dictStart, /* only if dict==usingExtDict */
    const size_t dictSize /* note : = 0 if noDict */
)
{
    /* Local Variables */
    const BYTE *ip = (const BYTE *)source;
    const BYTE *const iend = ip + inputSize;
    BYTE *op = (BYTE *)dest;
    BYTE *const oend = op + outputSize;
    BYTE *cpy;
    BYTE *oexit = op + targetOutputSize;
    const BYTE *const lowLimit = lowPrefix - dictSize;
    const BYTE *const dictEnd = (const BYTE *)dictStart + dictSize;
    /* Offset-adjustment tables for expanding overlapping copies (offset < 8). */
    const unsigned dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 };
    const int dec64table[] = { 0, 0, 0, -1, 0, 1, 2, 3 };
    const int safeDecode = (endOnInput == endOnInputSize);
    const int checkOffset = ((safeDecode) && (dictSize < (int)(64 KB)));
    /* Special cases */
    if ((partialDecoding) && (oexit > oend - MFLIMIT))
        oexit = oend - MFLIMIT; /* targetOutputSize too high => decode everything */
    if ((endOnInput) && (unlikely(outputSize == 0)))
        return ((inputSize == 1) && (*ip == 0)) ? 0 : -1; /* Empty output buffer */
    if ((!endOnInput) && (unlikely(outputSize == 0)))
        return (*ip == 0 ? 1 : -1);
    /* Main Loop : decode sequences */
    while (1) {
        size_t length;
        const BYTE *match;
        size_t offset;
        /* get literal length */
        unsigned const token = *ip++;
        if ((length = (token >> ML_BITS)) == RUN_MASK) {
            /* Extended literal length: accumulate 255-byte continuation bytes. */
            unsigned s;
            do {
                s = *ip++;
                length += s;
            } while (likely(endOnInput ? ip < iend - RUN_MASK : 1) & (s == 255));
            if ((safeDecode) && unlikely((uptrval)(op) + length < (uptrval)(op)))
                goto _output_error; /* overflow detection */
            if ((safeDecode) && unlikely((uptrval)(ip) + length < (uptrval)(ip)))
                goto _output_error; /* overflow detection */
        }
        /* copy literals */
        cpy = op + length;
        if (((endOnInput) && ((cpy > (partialDecoding ? oexit : oend - MFLIMIT)) || (ip + length > iend - (2 + 1 + LASTLITERALS)))) || ((!endOnInput) && (cpy > oend - WILDCOPYLENGTH))) {
            /* Near the end of input/output: use exact, fully-checked copy. */
            if (partialDecoding) {
                if (cpy > oend)
                    goto _output_error; /* Error : write attempt beyond end of output buffer */
                if ((endOnInput) && (ip + length > iend))
                    goto _output_error; /* Error : read attempt beyond end of input buffer */
            }
            else {
                if ((!endOnInput) && (cpy != oend))
                    goto _output_error; /* Error : block decoding must stop exactly there */
                if ((endOnInput) && ((ip + length != iend) || (cpy > oend)))
                    goto _output_error; /* Error : input must be consumed */
            }
            Utils::copy(op, ip, length);
            ip += length;
            op += length;
            break; /* Necessarily EOF, due to parsing restrictions */
        }
        LZ4_wildCopy(op, ip, cpy);
        ip += length;
        op = cpy;
        /* get offset */
        offset = LZ4_readLE16(ip);
        ip += 2;
        match = op - offset;
        if ((checkOffset) && (unlikely(match < lowLimit)))
            goto _output_error; /* Error : offset outside buffers */
        LZ4_write32(op, (U32)offset); /* costs ~1%; silence an msan warning when offset==0 */
        /* get matchlength */
        length = token & ML_MASK;
        if (length == ML_MASK) {
            /* Extended match length: accumulate 255-byte continuation bytes. */
            unsigned s;
            do {
                s = *ip++;
                if ((endOnInput) && (ip > iend - LASTLITERALS))
                    goto _output_error;
                length += s;
            } while (s == 255);
            if ((safeDecode) && unlikely((uptrval)(op) + length < (uptrval)op))
                goto _output_error; /* overflow detection */
        }
        length += MINMATCH;
        /* check external dictionary */
        if ((dict == usingExtDict) && (match < lowPrefix)) {
            if (unlikely(op + length > oend - LASTLITERALS))
                goto _output_error; /* doesn't respect parsing restriction */
            if (length <= (size_t)(lowPrefix - match)) {
                /* match can be copied as a single segment from external dictionary */
                memmove(op, dictEnd - (lowPrefix - match), length);
                op += length;
            }
            else {
                /* match encompass external dictionary and current block */
                size_t const copySize = (size_t)(lowPrefix - match);
                size_t const restSize = length - copySize;
                Utils::copy(op, dictEnd - copySize, copySize);
                op += copySize;
                if (restSize > (size_t)(op - lowPrefix)) { /* overlap copy */
                    BYTE *const endOfMatch = op + restSize;
                    const BYTE *copyFrom = lowPrefix;
                    while (op < endOfMatch)
                        *op++ = *copyFrom++;
                }
                else {
                    Utils::copy(op, lowPrefix, restSize);
                    op += restSize;
                }
            }
            continue;
        }
        /* copy match within block */
        cpy = op + length;
        if (unlikely(offset < 8)) {
            /* Overlapping short-offset copy: expand via adjustment tables. */
            const int dec64 = dec64table[offset];
            op[0] = match[0];
            op[1] = match[1];
            op[2] = match[2];
            op[3] = match[3];
            match += dec32table[offset];
            Utils::copy<4>(op + 4, match);
            match -= dec64;
        }
        else {
            LZ4_copy8(op, match);
            match += 8;
        }
        op += 8;
        if (unlikely(cpy > oend - 12)) {
            /* Too close to the output end for wild (8-byte overshooting) copy. */
            BYTE *const oCopyLimit = oend - (WILDCOPYLENGTH - 1);
            if (cpy > oend - LASTLITERALS)
                goto _output_error; /* Error : last LASTLITERALS bytes must be literals (uncompressed) */
            if (op < oCopyLimit) {
                LZ4_wildCopy(op, match, oCopyLimit);
                match += oCopyLimit - op;
                op = oCopyLimit;
            }
            while (op < cpy)
                *op++ = *match++;
        }
        else {
            LZ4_copy8(op, match);
            if (length > 16)
                LZ4_wildCopy(op + 8, match + 8, cpy);
        }
        op = cpy; /* correction */
    }
    /* end of decoding */
    if (endOnInput)
        return (int)(((char *)op) - dest); /* Nb of output bytes decoded */
    else
        return (int)(((const char *)ip) - source); /* Nb of input bytes read */
    /* Overflow error detected */
_output_error:
    return (int)(-(((const char *)ip) - source)) - 1;
}
} // anonymous namespace
int LZ4_compress_fast(const char *source, char *dest, int inputSize, int maxOutputSize, int acceleration) noexcept
{
#if (HEAPMODE)
void *ctxPtr = ALLOCATOR(1, sizeof(LZ4_stream_t)); /* malloc-calloc always properly aligned */
#else
LZ4_stream_t ctx;
void *const ctxPtr = &ctx;
#endif
int const result = LZ4_compress_fast_extState(ctxPtr, source, dest, inputSize, maxOutputSize, acceleration);
#if (HEAPMODE)
FREEMEM(ctxPtr);
#endif
return result;
}
int LZ4_decompress_safe(const char *source, char *dest, int compressedSize, int maxDecompressedSize) noexcept { return LZ4_decompress_generic(source, dest, compressedSize, maxDecompressedSize, endOnInputSize, full, 0, noDict, (BYTE *)dest, NULL, 0); }
} // namespace ZeroTier

View file

@ -1,29 +0,0 @@
/*
* Copyright (c)2013-2021 ZeroTier, Inc.
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
*/
/****/
#ifndef ZT_LZ4_HPP
#define ZT_LZ4_HPP
#include "Constants.hpp"
namespace ZeroTier {
/* Maximum input size accepted by the compressor (~2 GiB, from upstream LZ4). */
#define LZ4_MAX_INPUT_SIZE 0x7E000000
/* Worst-case compressed size for an input of 'isize' bytes (0 if too large). */
#define LZ4_COMPRESSBOUND(isize) ((unsigned)(isize) > (unsigned)LZ4_MAX_INPUT_SIZE ? 0 : (isize) + ((isize) / 255) + 16)
// Compress inputSize bytes from source into dest; returns bytes written or 0 on failure.
int LZ4_compress_fast(const char *source, char *dest, int inputSize, int maxOutputSize, int acceleration = 1) noexcept;
// Decompress one block with bounds checking; returns bytes decoded or a negative value on malformed input.
int LZ4_decompress_safe(const char *source, char *dest, int compressedSize, int maxDecompressedSize) noexcept;
} // namespace ZeroTier
#endif

View file

@ -1,206 +0,0 @@
/*
* Copyright (c)2013-2021 ZeroTier, Inc.
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
*/
/****/
#include "Locator.hpp"
#include "Identity.hpp"
#include <algorithm>
namespace ZeroTier {
// Shared default (empty) attributes object used when a caller supplies none.
const SharedPtr<const Locator::EndpointAttributes> Locator::EndpointAttributes::DEFAULT(new Locator::EndpointAttributes());
// Construct a locator from its string representation; on parse failure the
// locator is left in a cleared (empty endpoints, revision 0) state.
Locator::Locator(const char *const str) noexcept : __refCount(0)
{
    if (!fromString(str)) {
        m_revision = 0;
        m_signer.zero();
        m_endpoints.clear();
        m_signature.clear();
    }
}
bool Locator::add(const Endpoint &ep, const SharedPtr<const EndpointAttributes> &a)
{
for (Vector<std::pair<Endpoint, SharedPtr<const EndpointAttributes>>>::iterator i(m_endpoints.begin()); i != m_endpoints.end(); ++i) {
if (i->first == ep) {
i->second = ((a) && (a->data[0] != 0)) ? a : EndpointAttributes::DEFAULT;
return true;
}
}
if (m_endpoints.size() < ZT_LOCATOR_MAX_ENDPOINTS) {
m_endpoints.push_back(std::pair<Endpoint, SharedPtr<const EndpointAttributes>>(ep, ((a) && (a->data[0] != 0)) ? a : EndpointAttributes::DEFAULT));
return true;
}
return false;
}
// Sign this locator with 'id' (which must include the private key). Sets the
// revision and signer, canonically sorts endpoints so the same endpoint set
// always yields the same signed bytes, then signs the signature-excluded
// marshaled form. Returns false if signing fails.
bool Locator::sign(const int64_t rev, const Identity &id) noexcept
{
    m_revision = rev;
    m_signer = id.address();
    m_sortEndpoints();
    uint8_t signdata[ZT_LOCATOR_MARSHAL_SIZE_MAX];
    const unsigned int signlen = marshal(signdata, true);
    const unsigned int siglen = id.sign(signdata, signlen, m_signature.data(), m_signature.capacity());
    if (siglen == 0)
        return false;
    m_signature.unsafeSetSize(siglen);
    return true;
}
// Verify this locator's signature against 'id'. Only attempts verification if
// the locator is signed (revision > 0) and 'id' matches the recorded signer;
// re-marshals without the signature to reproduce the signed bytes.
bool Locator::verify(const Identity &id) const noexcept
{
    try {
        if ((m_revision > 0) && (m_signer == id.address())) {
            uint8_t signdata[ZT_LOCATOR_MARSHAL_SIZE_MAX];
            const unsigned int signlen = marshal(signdata, true);
            return id.verify(signdata, signlen, m_signature.data(), m_signature.size());
        }
    }
    catch (...) {
    } // fail verify on any unexpected exception
    return false;
}
// Render as "<signer address hex>@<base32 of marshaled locator>" into 's'.
char *Locator::toString(char s[ZT_LOCATOR_STRING_SIZE_MAX]) const noexcept
{
    // Buffer must hold the base32 expansion (8 chars per 5 bytes) plus the
    // address prefix and '@' separator.
    static_assert(ZT_LOCATOR_STRING_SIZE_MAX > ((((ZT_LOCATOR_MARSHAL_SIZE_MAX / 5) + 1) * 8) + ZT_ADDRESS_LENGTH_HEX + 1), "overflow");
    uint8_t bin[ZT_LOCATOR_MARSHAL_SIZE_MAX];
    m_signer.toString(s);
    s[ZT_ADDRESS_LENGTH_HEX] = '@';
    Utils::b32e(bin, marshal(bin, false), s + (ZT_ADDRESS_LENGTH_HEX + 1), ZT_LOCATOR_STRING_SIZE_MAX - (ZT_ADDRESS_LENGTH_HEX + 1));
    return s;
}
// Parse the "<addr>@<base32>" form produced by toString(). Only the base32
// payload after the address prefix is decoded and unmarshaled; the signer
// comes from the binary data itself.
// NOTE(review): the "<addr>@" prefix is not itself validated here (no check
// for the '@' or that the prefix matches the embedded signer) — presumably
// intentional since unmarshal() authoritatively sets m_signer; confirm.
bool Locator::fromString(const char *s) noexcept
{
    if (!s)
        return false;
    if (strlen(s) < (ZT_ADDRESS_LENGTH_HEX + 1))
        return false;
    uint8_t bin[ZT_LOCATOR_MARSHAL_SIZE_MAX];
    const int bl = Utils::b32d(s + (ZT_ADDRESS_LENGTH_HEX + 1), bin, ZT_LOCATOR_MARSHAL_SIZE_MAX);
    if ((bl <= 0) || (bl > ZT_LOCATOR_MARSHAL_SIZE_MAX))
        return false;
    return unmarshal(bin, bl) > 0;
}
// Serialize: revision (8, big-endian), signer address (5), endpoint count (2),
// then each endpoint followed by its attributes blob (first byte is its size,
// a single 0 byte when empty), a meta-data length field (currently always 0),
// and — unless excluded for signing — the signature length and bytes.
// Returns bytes written or -1 on error.
int Locator::marshal(uint8_t data[ZT_LOCATOR_MARSHAL_SIZE_MAX], const bool excludeSignature) const noexcept
{
    Utils::storeBigEndian<uint64_t>(data, (uint64_t)m_revision);
    m_signer.copyTo(data + 8);
    int p = 8 + ZT_ADDRESS_LENGTH;
    Utils::storeBigEndian<uint16_t>(data + p, (uint16_t)m_endpoints.size());
    p += 2;
    for (Vector<std::pair<Endpoint, SharedPtr<const EndpointAttributes>>>::const_iterator e(m_endpoints.begin()); e != m_endpoints.end(); ++e) {
        int l = e->first.marshal(data + p);
        if (l <= 0)
            return -1;
        p += l;
        l = (int)e->second->data[0];
        if (l > 0) {
            Utils::copy(data + p, e->second->data, (unsigned int)l);
            p += l;
        }
        else {
            data[p++] = 0;
        }
    }
    Utils::storeMachineEndian<uint16_t>(data + p, 0); // length of meta-data, currently always 0
    p += 2;
    if (!excludeSignature) {
        Utils::storeBigEndian<uint16_t>(data + p, (uint16_t)m_signature.size());
        p += 2;
        Utils::copy(data + p, m_signature.data(), m_signature.size());
        p += (int)m_signature.size();
    }
    return p;
}
// Deserialize the wire format produced by marshal(). Returns the number of
// bytes consumed or -1 on malformed/truncated input.
//
// FIX: the bounds checks before reading the endpoint count and each
// attributes-size byte were written as "unlikely(p + 2) > len" and
// "unlikely(p + 1) > len". Since unlikely(x) expands to
// __builtin_expect(!!(x), 0) the parenthesized expression collapses to 0/1,
// making the comparison "1 > len" — always false for any valid length — so
// those checks could never fire and truncated input could be read out of
// bounds. The parentheses now enclose the whole comparison, matching the
// correctly-written checks later in this function.
int Locator::unmarshal(const uint8_t *data, const int len) noexcept
{
    if (unlikely(len < (8 + ZT_ADDRESS_LENGTH)))
        return -1;
    m_revision = (int64_t)Utils::loadBigEndian<uint64_t>(data);
    m_signer.setTo(data + 8);
    int p = 8 + ZT_ADDRESS_LENGTH;
    if (unlikely((p + 2) > len))
        return -1;
    unsigned int endpointCount = Utils::loadBigEndian<uint16_t>(data + p);
    p += 2;
    if (unlikely(endpointCount > ZT_LOCATOR_MAX_ENDPOINTS))
        return -1;
    m_endpoints.resize(endpointCount);
    m_endpoints.shrink_to_fit();
    for (unsigned int i = 0; i < endpointCount; ++i) {
        int l = m_endpoints[i].first.unmarshal(data + p, len - p);
        if (l <= 0)
            return -1;
        p += l;
        if (unlikely((p + 1) > len))
            return -1;
        // Attributes blob: first byte is its total size; 0 means "default".
        l = (int)data[p];
        if (l <= 0) {
            m_endpoints[i].second = EndpointAttributes::DEFAULT;
            ++p;
        }
        else {
            m_endpoints[i].second.set(new EndpointAttributes());
            Utils::copy(const_cast<uint8_t *>(m_endpoints[i].second->data), data + p, (unsigned int)l);
            p += l;
        }
    }
    if (unlikely((p + 2) > len))
        return -1;
    // Skip the (currently always empty) meta-data section.
    p += 2 + (int)Utils::loadBigEndian<uint16_t>(data + p);
    if (unlikely((p + 2) > len))
        return -1;
    const unsigned int siglen = Utils::loadBigEndian<uint16_t>(data + p);
    p += 2;
    if (unlikely((siglen > ZT_SIGNATURE_BUFFER_SIZE) || ((p + (int)siglen) > len)))
        return -1;
    m_signature.unsafeSetSize(siglen);
    Utils::copy(m_signature.data(), data + p, siglen);
    p += (int)siglen;
    if (unlikely(p > len))
        return -1;
    m_sortEndpoints();
    return p;
}
// Comparator ordering endpoint/attribute pairs by endpoint only.
struct p_SortByEndpoint {
    // There can't be more than one of the same endpoint, so only need to sort
    // by endpoint.
    ZT_INLINE bool operator()(const std::pair<Endpoint, SharedPtr<const Locator::EndpointAttributes>> &a, const std::pair<Endpoint, SharedPtr<const Locator::EndpointAttributes>> &b) const noexcept { return a.first < b.first; }
};
// Canonically order endpoints so marshaled/signed bytes are deterministic.
void Locator::m_sortEndpoints() noexcept { std::sort(m_endpoints.begin(), m_endpoints.end(), p_SortByEndpoint()); }
} // namespace ZeroTier

View file

@ -1,223 +0,0 @@
/*
* Copyright (c)2013-2021 ZeroTier, Inc.
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
*/
/****/
#ifndef ZT_LOCATOR_HPP
#define ZT_LOCATOR_HPP
#include "Constants.hpp"
#include "Containers.hpp"
#include "Dictionary.hpp"
#include "Endpoint.hpp"
#include "FCV.hpp"
#include "Identity.hpp"
#include "SharedPtr.hpp"
#include "TriviallyCopyable.hpp"
/**
* Maximum size of endpoint attributes dictionary plus one byte for size.
*
* This cannot be (easily) changed.
*/
#define ZT_LOCATOR_MAX_ENDPOINT_ATTRIBUTES_SIZE 256
/**
* Maximum number of endpoints, which can be increased.
*/
#define ZT_LOCATOR_MAX_ENDPOINTS 16
#define ZT_LOCATOR_MARSHAL_SIZE_MAX (8 + ZT_ADDRESS_LENGTH + 2 + (ZT_LOCATOR_MAX_ENDPOINTS * (ZT_ENDPOINT_MARSHAL_SIZE_MAX + ZT_LOCATOR_MAX_ENDPOINT_ATTRIBUTES_SIZE)) + 2 + 2 + ZT_SIGNATURE_BUFFER_SIZE)
/**
* Maximum size of a string format Locator (this is way larger than needed)
*/
#define ZT_LOCATOR_STRING_SIZE_MAX 16384
namespace ZeroTier {
/**
* Signed information about a node's location on the network
*
* A locator contains long-lived endpoints for a node such as IP/port pairs,
* URLs, or other nodes, and is signed by the node it describes.
*/
class Locator {
friend class SharedPtr<Locator>;
friend class SharedPtr<const Locator>;
public:
/**
* Attributes of an endpoint in this locator
*
* This is specified for future use, but there are currently no attributes
* defined. A Dictionary is used for serialization for extensibility.
*/
struct EndpointAttributes {
friend class SharedPtr<Locator::EndpointAttributes>;
friend class SharedPtr<const Locator::EndpointAttributes>;
/**
* Default endpoint attributes
*/
static const SharedPtr<const Locator::EndpointAttributes> DEFAULT;
/**
* Raw attributes data in the form of a dictionary prefixed by its size.
*
* The maximum size of attributes is 255, which is more than enough for
* tiny things like bandwidth and priority.
*/
uint8_t data[ZT_LOCATOR_MAX_ENDPOINT_ATTRIBUTES_SIZE];
ZT_INLINE EndpointAttributes() noexcept { Utils::zero<ZT_LOCATOR_MAX_ENDPOINT_ATTRIBUTES_SIZE>(data); }
ZT_INLINE bool operator==(const EndpointAttributes &a) const noexcept { return ((data[0] == a.data[0]) && (memcmp(data, a.data, data[0]) == 0)); }
ZT_INLINE bool operator<(const EndpointAttributes &a) const noexcept { return ((data[0] < a.data[0]) || ((data[0] == a.data[0]) && (memcmp(data, a.data, data[0]) < 0))); }
ZT_INLINE bool operator!=(const EndpointAttributes &a) const noexcept { return !(*this == a); }
ZT_INLINE bool operator>(const EndpointAttributes &a) const noexcept { return (a < *this); }
ZT_INLINE bool operator<=(const EndpointAttributes &a) const noexcept { return !(a < *this); }
ZT_INLINE bool operator>=(const EndpointAttributes &a) const noexcept { return !(*this < a); }
private:
std::atomic<int> __refCount;
};
ZT_INLINE Locator() noexcept : m_revision(0) {}
ZT_INLINE Locator(const Locator &l) noexcept : m_revision(l.m_revision), m_signer(l.m_signer), m_endpoints(l.m_endpoints), m_signature(l.m_signature), __refCount(0) {}
explicit Locator(const char *const str) noexcept;
static ZT_INLINE Locator *from(ZT_Locator *const loc) noexcept { return reinterpret_cast<Locator *>(loc); }
static ZT_INLINE const Locator *from(const ZT_Locator *const loc) noexcept { return reinterpret_cast<const Locator *>(loc); }
/**
* @return Timestamp (a.k.a. revision number) set by Location signer
*/
ZT_INLINE int64_t revision() const noexcept { return m_revision; }
/**
* @return ZeroTier address of signer
*/
ZT_INLINE Address signer() const noexcept { return m_signer; }
/**
* @return Endpoints specified in locator
*/
ZT_INLINE const Vector<std::pair<Endpoint, SharedPtr<const EndpointAttributes>>> &endpoints() const noexcept { return m_endpoints; }
/**
* @return Signature data
*/
ZT_INLINE const FCV<uint8_t, ZT_SIGNATURE_BUFFER_SIZE> &signature() const noexcept { return m_signature; }
/**
* Add an endpoint to this locator
*
* This doesn't check for the presence of the endpoint, so take
* care not to add duplicates.
*
* @param ep Endpoint to add
* @param a Endpoint attributes or NULL to use default
* @return True if endpoint was added (or already present), false if locator is full
*/
bool add(const Endpoint &ep, const SharedPtr<const EndpointAttributes> &a);
/**
* Sign this locator
*
* This sets timestamp, sorts endpoints so that the same set of endpoints
* will always produce the same locator, and signs.
*
* @param id Identity that includes private key
* @return True if signature successful
*/
bool sign(int64_t rev, const Identity &id) noexcept;
/**
* Verify this Locator's validity and signature
*
* @param id Identity corresponding to hash
* @return True if valid and signature checks out
*/
bool verify(const Identity &id) const noexcept;
/**
* Convert this locator to a string
*
* @param s String buffer
* @return Pointer to buffer
*/
char *toString(char s[ZT_LOCATOR_STRING_SIZE_MAX]) const noexcept;
ZT_INLINE String toString() const
{
char tmp[ZT_LOCATOR_STRING_SIZE_MAX];
return String(toString(tmp));
}
/**
* Decode a string format locator
*
* @param s Locator from toString()
* @return True if format was valid
*/
bool fromString(const char *s) noexcept;
explicit ZT_INLINE operator bool() const noexcept { return m_revision > 0; }
static constexpr int marshalSizeMax() noexcept { return ZT_LOCATOR_MARSHAL_SIZE_MAX; }
int marshal(uint8_t data[ZT_LOCATOR_MARSHAL_SIZE_MAX], bool excludeSignature = false) const noexcept;
int unmarshal(const uint8_t *data, int len) noexcept;
ZT_INLINE bool operator==(const Locator &l) const noexcept
{
const unsigned long es = (unsigned long)m_endpoints.size();
if ((m_revision == l.m_revision) && (m_signer == l.m_signer) && (es == (unsigned long)l.m_endpoints.size()) && (m_signature == l.m_signature)) {
for (unsigned long i = 0; i < es; ++i) {
if (m_endpoints[i].first != l.m_endpoints[i].first)
return false;
if (!m_endpoints[i].second) {
if (l.m_endpoints[i].second)
return false;
}
else {
if ((!l.m_endpoints[i].second) || (*(m_endpoints[i].second) != *(l.m_endpoints[i].second)))
return false;
}
}
return true;
}
return false;
}
ZT_INLINE bool operator!=(const Locator &l) const noexcept { return !(*this == l); }
private:
void m_sortEndpoints() noexcept;
int64_t m_revision;
Address m_signer;
Vector<std::pair<Endpoint, SharedPtr<const EndpointAttributes>>> m_endpoints;
FCV<uint8_t, ZT_SIGNATURE_BUFFER_SIZE> m_signature;
std::atomic<int> __refCount;
};
} // namespace ZeroTier
#endif

View file

@ -1,244 +0,0 @@
/*
* Copyright (c)2013-2021 ZeroTier, Inc.
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
*/
/****/
#ifndef ZT_MAC_HPP
#define ZT_MAC_HPP
#include "Address.hpp"
#include "Constants.hpp"
#include "Containers.hpp"
#include "TriviallyCopyable.hpp"
#include "Utils.hpp"
namespace ZeroTier {
/**
 * 48-bit Ethernet MAC address
 */
class MAC : public TriviallyCopyable {
  public:
    ZT_INLINE MAC() noexcept : m_mac(0ULL) {}
    ZT_INLINE
    MAC(const uint8_t a, const uint8_t b, const uint8_t c, const uint8_t d, const uint8_t e, const uint8_t f) noexcept : m_mac((((uint64_t)a) << 40U) | (((uint64_t)b) << 32U) | (((uint64_t)c) << 24U) | (((uint64_t)d) << 16U) | (((uint64_t)e) << 8U) | ((uint64_t)f)) {}
    explicit ZT_INLINE MAC(const uint64_t m) noexcept : m_mac(m) {}
    explicit ZT_INLINE MAC(const uint8_t b[6]) noexcept { setTo(b); }
    ZT_INLINE MAC(const Address &ztaddr, const uint64_t nwid) noexcept { fromAddress(ztaddr, nwid); }

    /**
     * @return MAC in 64-bit integer
     */
    ZT_INLINE uint64_t toInt() const noexcept { return m_mac; }

    /**
     * Set MAC to zero
     */
    ZT_INLINE void zero() noexcept { m_mac = 0ULL; }

    /**
     * @param b Raw MAC in big-endian byte order (must be 6 bytes)
     */
    ZT_INLINE void setTo(const uint8_t b[6]) noexcept { m_mac = ((uint64_t)b[0] << 40U) | ((uint64_t)b[1] << 32U) | ((uint64_t)b[2] << 24U) | ((uint64_t)b[3] << 16U) | ((uint64_t)b[4] << 8U) | (uint64_t)b[5]; }

    /**
     * @param b Destination buffer for MAC in big-endian byte order (must hold 6 bytes)
     */
    ZT_INLINE void copyTo(uint8_t b[6]) const noexcept
    {
        b[0] = (uint8_t)(m_mac >> 40U);
        b[1] = (uint8_t)(m_mac >> 32U);
        b[2] = (uint8_t)(m_mac >> 24U);
        b[3] = (uint8_t)(m_mac >> 16U);
        b[4] = (uint8_t)(m_mac >> 8U);
        b[5] = (uint8_t)m_mac;
    }

    /**
     * @return True if this is the broadcast MAC (ff:ff:ff:ff:ff:ff)
     */
    // FIX: previously returned 'm_mac' (i.e. true for ANY nonzero MAC) while
    // being documented as "all 0xff"; now compares against the actual
    // broadcast address.
    ZT_INLINE bool isBroadcast() const noexcept { return (m_mac == 0xffffffffffffULL); }

    /**
     * @return True if this is a multicast MAC (group bit of first octet set)
     */
    ZT_INLINE bool isMulticast() const noexcept { return ((m_mac & 0x010000000000ULL) != 0ULL); }

    /**
     * Set this MAC to a MAC derived from an address and a network ID
     *
     * @param ztaddr ZeroTier address
     * @param nwid 64-bit network ID
     */
    ZT_INLINE void fromAddress(const Address &ztaddr, uint64_t nwid) noexcept
    {
        uint64_t m = ((uint64_t)firstOctetForNetwork(nwid)) << 40U;
        m |= ztaddr.toInt(); // a is 40 bits
        m ^= ((nwid >> 8U) & 0xffU) << 32U;
        m ^= ((nwid >> 16U) & 0xffU) << 24U;
        m ^= ((nwid >> 24U) & 0xffU) << 16U;
        m ^= ((nwid >> 32U) & 0xffU) << 8U;
        m ^= (nwid >> 40U) & 0xffU;
        m_mac = m;
    }

    /**
     * Get the ZeroTier address for this MAC on this network (assuming no bridging of course, basic unicast)
     *
     * This just XORs the next-least-significant 5 bytes of the network ID again to unmask.
     *
     * @param nwid Network ID
     */
    ZT_INLINE Address toAddress(uint64_t nwid) const noexcept
    {
        uint64_t a = m_mac & 0xffffffffffULL; // least significant 40 bits of MAC are formed from address
        a ^= ((nwid >> 8U) & 0xffU) << 32U;   // ... XORed with bits 8-48 of the nwid in little-endian byte order, so unmask it
        a ^= ((nwid >> 16U) & 0xffU) << 24U;
        a ^= ((nwid >> 24U) & 0xffU) << 16U;
        a ^= ((nwid >> 32U) & 0xffU) << 8U;
        a ^= (nwid >> 40U) & 0xffU;
        return Address(a);
    }

    /**
     * @param nwid Network ID
     * @return First octet of MAC for this network
     */
    static ZT_INLINE unsigned char firstOctetForNetwork(uint64_t nwid) noexcept
    {
        const uint8_t a = ((uint8_t)(nwid & 0xfeU) | 0x02U); // locally administered, not multicast, from LSB of network ID
        return ((a == 0x52) ? 0x32 : a);                     // blacklist 0x52 since it's used by KVM, libvirt, and other popular
                                                             // virtualization engines... seems de-facto standard on Linux
    }

    /**
     * @param i Value from 0 to 5 (inclusive)
     * @return Byte at said position (address interpreted in big-endian order)
     */
    ZT_INLINE uint8_t operator[](unsigned int i) const noexcept { return (uint8_t)(m_mac >> (unsigned int)(40 - (i * 8))); }

    /**
     * @return 6, which is the number of bytes in a MAC, for container compliance
     */
    ZT_INLINE unsigned int size() const noexcept { return 6; }

    ZT_INLINE unsigned long hashCode() const noexcept { return (unsigned long)Utils::hash64(m_mac); }

    ZT_INLINE operator bool() const noexcept { return (m_mac != 0ULL); }
    ZT_INLINE operator uint64_t() const noexcept { return m_mac; }

    /**
     * Convert this MAC to a standard format colon-separated hex string
     *
     * @param buf Buffer to store string (must hold at least 18 bytes)
     * @return Pointer to buf
     */
    ZT_INLINE char *toString(char buf[18]) const noexcept
    {
        buf[0] = Utils::HEXCHARS[(m_mac >> 44U) & 0xfU];
        buf[1] = Utils::HEXCHARS[(m_mac >> 40U) & 0xfU];
        buf[2] = ':';
        buf[3] = Utils::HEXCHARS[(m_mac >> 36U) & 0xfU];
        buf[4] = Utils::HEXCHARS[(m_mac >> 32U) & 0xfU];
        buf[5] = ':';
        buf[6] = Utils::HEXCHARS[(m_mac >> 28U) & 0xfU];
        buf[7] = Utils::HEXCHARS[(m_mac >> 24U) & 0xfU];
        buf[8] = ':';
        buf[9] = Utils::HEXCHARS[(m_mac >> 20U) & 0xfU];
        buf[10] = Utils::HEXCHARS[(m_mac >> 16U) & 0xfU];
        buf[11] = ':';
        buf[12] = Utils::HEXCHARS[(m_mac >> 12U) & 0xfU];
        buf[13] = Utils::HEXCHARS[(m_mac >> 8U) & 0xfU];
        buf[14] = ':';
        buf[15] = Utils::HEXCHARS[(m_mac >> 4U) & 0xfU];
        buf[16] = Utils::HEXCHARS[m_mac & 0xfU];
        buf[17] = (char)0;
        return buf;
    }
    ZT_INLINE String toString() const
    {
        char tmp[18];
        return String(toString(tmp));
    }

    /**
     * Parse a MAC address in hex format with or without : separators and ignoring non-hex characters.
     *
     * @param s String to parse
     */
    ZT_INLINE void fromString(const char *s) noexcept
    {
        m_mac = 0;
        if (s) {
            while (*s) {
                uint64_t c;
                const char hc = *s++;
                if ((hc >= 48) && (hc <= 57))
                    c = (uint64_t)hc - 48;
                else if ((hc >= 97) && (hc <= 102))
                    c = (uint64_t)hc - 87;
                else if ((hc >= 65) && (hc <= 70))
                    c = (uint64_t)hc - 55;
                else
                    continue;
                m_mac = (m_mac << 4U) | c;
            }
            m_mac &= 0xffffffffffffULL; // keep only the low 48 bits
        }
    }

    ZT_INLINE MAC &operator=(const uint64_t m) noexcept
    {
        m_mac = m;
        return *this;
    }

    ZT_INLINE bool operator==(const MAC &m) const noexcept { return (m_mac == m.m_mac); }
    ZT_INLINE bool operator!=(const MAC &m) const noexcept { return (m_mac != m.m_mac); }
    ZT_INLINE bool operator<(const MAC &m) const noexcept { return (m_mac < m.m_mac); }
    ZT_INLINE bool operator<=(const MAC &m) const noexcept { return (m_mac <= m.m_mac); }
    ZT_INLINE bool operator>(const MAC &m) const noexcept { return (m_mac > m.m_mac); }
    ZT_INLINE bool operator>=(const MAC &m) const noexcept { return (m_mac >= m.m_mac); }
    ZT_INLINE bool operator==(const uint64_t m) const noexcept { return (m_mac == m); }
    ZT_INLINE bool operator!=(const uint64_t m) const noexcept { return (m_mac != m); }
    ZT_INLINE bool operator<(const uint64_t m) const noexcept { return (m_mac < m); }
    ZT_INLINE bool operator<=(const uint64_t m) const noexcept { return (m_mac <= m); }
    ZT_INLINE bool operator>(const uint64_t m) const noexcept { return (m_mac > m); }
    ZT_INLINE bool operator>=(const uint64_t m) const noexcept { return (m_mac >= m); }

  private:
    uint64_t m_mac;
};

static_assert(sizeof(MAC) == sizeof(uint64_t), "MAC contains unnecessary padding");
} // namespace ZeroTier
#endif

View file

@ -1,137 +0,0 @@
/*
* Copyright (c)2013-2021 ZeroTier, Inc.
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
*/
/****/
#include "MIMC52.hpp"
#include "AES.hpp"
#include "Constants.hpp"
namespace {
// Largest 1024 primes of form 6k + 5 and less than 2^52. Only the least significant 32
// bits need to be here, as the most significant bits are all 1. Each full 52-bit prime
// is reconstructed as (0x000fffff00000000 | entry); see delay()/verify() below.
// The 6k + 5 form guarantees that cubing is a bijection mod p, with the inverse
// (cube root) exponent e = (2p - 1) / 3.
const uint32_t ZT_MIMC52_PRIMES[1024] = {
    4294895267, 4294895477, 4294895513, 4294895519, 4294895543, 4294895567, 4294895657, 4294895711, 4294895777, 4294895861, 4294895909, 4294895921, 4294895969, 4294896011, 4294896149, 4294896227, 4294896401, 4294896473, 4294896527, 4294896563, 4294896653, 4294896731, 4294896863, 4294896899, 4294896983, 4294897037, 4294897103, 4294897331, 4294897349,
    4294897451, 4294897571, 4294897661, 4294897703, 4294897757, 4294897793, 4294897811, 4294897817, 4294897829, 4294897877, 4294897919, 4294897991, 4294898027, 4294898129, 4294898153, 4294898231, 4294898273, 4294898279, 4294898291, 4294898363, 4294898369, 4294898417, 4294898423, 4294898453, 4294898489, 4294898573, 4294898579, 4294898639, 4294898693,
    4294898747, 4294898759, 4294898867, 4294898879, 4294898909, 4294898921, 4294898933, 4294899011, 4294899041, 4294899047, 4294899203, 4294899221, 4294899227, 4294899287, 4294899341, 4294899431, 4294899509, 4294899533, 4294899539, 4294899551, 4294899629, 4294899791, 4294899809, 4294899971, 4294900001, 4294900007, 4294900013, 4294900307, 4294900331,
    4294900427, 4294900469, 4294900481, 4294900541, 4294900583, 4294900781, 4294900853, 4294900931, 4294900991, 4294901033, 4294901087, 4294901159, 4294901267, 4294901393, 4294901411, 4294901489, 4294901657, 4294902011, 4294902071, 4294902101, 4294902107, 4294902353, 4294902377, 4294902599, 4294902647, 4294902743, 4294902869, 4294902977, 4294903067,
    4294903103, 4294903259, 4294903289, 4294903397, 4294903421, 4294903493, 4294903577, 4294903631, 4294903637, 4294903733, 4294903799, 4294903823, 4294904003, 4294904033, 4294904081, 4294904129, 4294904279, 4294904297, 4294904303, 4294904333, 4294904351, 4294904381, 4294904453, 4294904519, 4294904561, 4294904639, 4294904657, 4294904747, 4294904807,
    4294904843, 4294905089, 4294905149, 4294905293, 4294905299, 4294905311, 4294905443, 4294905479, 4294905539, 4294905623, 4294905641, 4294905671, 4294905707, 4294905887, 4294905977, 4294906091, 4294906103, 4294906139, 4294906157, 4294906223, 4294906259, 4294906487, 4294906493, 4294906523, 4294906547, 4294906553, 4294906571, 4294906577, 4294906589,
    4294906703, 4294906733, 4294906763, 4294906841, 4294906859, 4294906937, 4294907057, 4294907063, 4294907141, 4294907231, 4294907249, 4294907261, 4294907267, 4294907387, 4294907417, 4294907567, 4294907603, 4294907699, 4294907789, 4294907849, 4294907873, 4294907879, 4294908023, 4294908071, 4294908119, 4294908209, 4294908227, 4294908329, 4294908491,
    4294908503, 4294908569, 4294908653, 4294908713, 4294908719, 4294908791, 4294908839, 4294908869, 4294908989, 4294909031, 4294909067, 4294909109, 4294909253, 4294909529, 4294909589, 4294909643, 4294909739, 4294909799, 4294909811, 4294909853, 4294910003, 4294910039, 4294910189, 4294910201, 4294910219, 4294910273, 4294910333, 4294910369, 4294910393,
    4294910471, 4294910549, 4294910651, 4294910669, 4294910681, 4294910711, 4294910753, 4294910801, 4294910981, 4294911053, 4294911143, 4294911227, 4294911239, 4294911359, 4294911383, 4294911407, 4294911521, 4294911551, 4294911611, 4294911641, 4294911689, 4294911719, 4294911869, 4294912109, 4294912133, 4294912151, 4294912187, 4294912223, 4294912331,
    4294912439, 4294912607, 4294912703, 4294912859, 4294912871, 4294912907, 4294912961, 4294913003, 4294913111, 4294913309, 4294913333, 4294913357, 4294913399, 4294913411, 4294913459, 4294913501, 4294913531, 4294913591, 4294913609, 4294913663, 4294913783, 4294913819, 4294913903, 4294914137, 4294914413, 4294914473, 4294914497, 4294914527, 4294914551,
    4294914593, 4294914611, 4294914659, 4294914671, 4294914743, 4294914863, 4294914917, 4294915061, 4294915103, 4294915139, 4294915217, 4294915223, 4294915253, 4294915283, 4294915373, 4294915433, 4294915607, 4294916069, 4294916213, 4294916267, 4294916303, 4294916393, 4294916441, 4294916477, 4294916507, 4294916573, 4294916633, 4294916687, 4294916783,
    4294916837, 4294916897, 4294916921, 4294917029, 4294917047, 4294917101, 4294917203, 4294917287, 4294917299, 4294917389, 4294917437, 4294917527, 4294917557, 4294917611, 4294917617, 4294917689, 4294917821, 4294917857, 4294917917, 4294917941, 4294918169, 4294918187, 4294918307, 4294918409, 4294918433, 4294918481, 4294918703, 4294918709, 4294918733,
    4294918799, 4294918871, 4294919009, 4294919249, 4294919279, 4294919291, 4294919363, 4294919381, 4294919441, 4294919447, 4294919549, 4294919579, 4294919633, 4294919657, 4294919669, 4294919693, 4294919711, 4294920029, 4294920059, 4294920089, 4294920197, 4294920239, 4294920257, 4294920263, 4294920269, 4294920341, 4294920353, 4294920407, 4294920503,
    4294920599, 4294920647, 4294920743, 4294920803, 4294920809, 4294920881, 4294920899, 4294920983, 4294921043, 4294921139, 4294921151, 4294921181, 4294921229, 4294921289, 4294921331, 4294921343, 4294921391, 4294921469, 4294921709, 4294921721, 4294921823, 4294921847, 4294921889, 4294922057, 4294922171, 4294922201, 4294922237, 4294922309, 4294922399,
    4294922447, 4294922507, 4294922513, 4294922549, 4294922609, 4294922663, 4294922861, 4294922933, 4294923101, 4294923191, 4294923209, 4294923221, 4294923251, 4294923263, 4294923359, 4294923371, 4294923377, 4294923461, 4294923521, 4294923953, 4294924001, 4294924091, 4294924121, 4294924319, 4294924397, 4294924571, 4294924583, 4294924751, 4294924817,
    4294924823, 4294924847, 4294924877, 4294925003, 4294925027, 4294925117, 4294925237, 4294925243, 4294925297, 4294925369, 4294925627, 4294925639, 4294925729, 4294925747, 4294925873, 4294925891, 4294925933, 4294926047, 4294926059, 4294926209, 4294926221, 4294926233, 4294926257, 4294926329, 4294926371, 4294926401, 4294926413, 4294926437, 4294926563,
    4294926569, 4294926917, 4294926923, 4294926947, 4294926971, 4294927067, 4294927073, 4294927151, 4294927349, 4294927367, 4294927403, 4294927481, 4294927523, 4294927553, 4294927589, 4294927649, 4294927673, 4294927727, 4294927739, 4294927763, 4294927889, 4294928183, 4294928207, 4294928249, 4294928327, 4294928351, 4294928399, 4294928483, 4294928489,
    4294928543, 4294928597, 4294928951, 4294928963, 4294928981, 4294929017, 4294929059, 4294929161, 4294929197, 4294929233, 4294929269, 4294929311, 4294929323, 4294929341, 4294929383, 4294929401, 4294929497, 4294929509, 4294929581, 4294929707, 4294929743, 4294930043, 4294930121, 4294930193, 4294930223, 4294930349, 4294930403, 4294930571, 4294930613,
    4294930721, 4294930751, 4294930877, 4294930931, 4294930961, 4294930967, 4294930973, 4294931021, 4294931051, 4294931057, 4294931063, 4294931219, 4294931273, 4294931339, 4294931423, 4294931441, 4294931453, 4294931567, 4294931639, 4294931717, 4294931897, 4294931969, 4294932023, 4294932053, 4294932239, 4294932299, 4294932443, 4294932671, 4294932677,
    4294932731, 4294932743, 4294932767, 4294932773, 4294932779, 4294932881, 4294932899, 4294932929, 4294933067, 4294933277, 4294933307, 4294933343, 4294933451, 4294933523, 4294933763, 4294933793, 4294933829, 4294933847, 4294933871, 4294933997, 4294934033, 4294934111, 4294934207, 4294934243, 4294934267, 4294934279, 4294934291, 4294934327, 4294934363,
    4294934423, 4294934489, 4294934561, 4294934867, 4294934921, 4294934969, 4294935137, 4294935239, 4294935299, 4294935431, 4294935539, 4294935629, 4294935701, 4294935791, 4294935797, 4294935803, 4294935959, 4294936001, 4294936007, 4294936037, 4294936079, 4294936127, 4294936163, 4294936247, 4294936307, 4294936331, 4294936409, 4294936451, 4294936601,
    4294936607, 4294936619, 4294936667, 4294936709, 4294936733, 4294936751, 4294936763, 4294936829, 4294936937, 4294936997, 4294937027, 4294937051, 4294937093, 4294937177, 4294937213, 4294937291, 4294937381, 4294937417, 4294937429, 4294937681, 4294937693, 4294937753, 4294937771, 4294937813, 4294937837, 4294937891, 4294937969, 4294938071, 4294938101,
    4294938323, 4294938371, 4294938401, 4294938467, 4294938473, 4294938521, 4294938599, 4294938731, 4294938779, 4294938833, 4294938899, 4294938977, 4294938983, 4294939067, 4294939127, 4294939223, 4294939277, 4294939331, 4294939337, 4294939391, 4294939457, 4294939559, 4294939673, 4294939691, 4294939901, 4294939991, 4294940087, 4294940093, 4294940189,
    4294940213, 4294940417, 4294940657, 4294940699, 4294940753, 4294940801, 4294940873, 4294940951, 4294941047, 4294941143, 4294941161, 4294941227, 4294941281, 4294941377, 4294941509, 4294941551, 4294941701, 4294941731, 4294941767, 4294941911, 4294941923, 4294942043, 4294942139, 4294942313, 4294942343, 4294942373, 4294942427, 4294942529, 4294942601,
    4294942649, 4294942673, 4294942679, 4294942733, 4294942769, 4294942811, 4294942961, 4294943129, 4294943141, 4294943219, 4294943369, 4294943423, 4294943471, 4294943651, 4294943687, 4294943717, 4294943729, 4294943747, 4294943759, 4294943813, 4294943819, 4294943891, 4294944077, 4294944191, 4294944233, 4294944239, 4294944353, 4294944389, 4294944581,
    4294944623, 4294944629, 4294944659, 4294944821, 4294945031, 4294945157, 4294945211, 4294945229, 4294945301, 4294945337, 4294945343, 4294945511, 4294945547, 4294945667, 4294945709, 4294945757, 4294945841, 4294945991, 4294946033, 4294946099, 4294946153, 4294946477, 4294946687, 4294946747, 4294946957, 4294946993, 4294947023, 4294947131, 4294947167,
    4294947287, 4294947311, 4294947413, 4294947581, 4294947599, 4294947671, 4294947851, 4294947959, 4294948067, 4294948073, 4294948193, 4294948259, 4294948421, 4294948451, 4294948613, 4294948673, 4294948883, 4294949027, 4294949057, 4294949069, 4294949519, 4294949531, 4294949603, 4294949609, 4294949627, 4294949693, 4294949729, 4294949741, 4294949807,
    4294949921, 4294949939, 4294949981, 4294949993, 4294950083, 4294950173, 4294950197, 4294950251, 4294950287, 4294950317, 4294950323, 4294950329, 4294950581, 4294950593, 4294950617, 4294950629, 4294950713, 4294950929, 4294951151, 4294951163, 4294951169, 4294951379, 4294951583, 4294951613, 4294951853, 4294951907, 4294951913, 4294951937, 4294951961,
    4294952063, 4294952183, 4294952393, 4294952543, 4294952549, 4294952597, 4294952627, 4294952687, 4294952723, 4294952729, 4294952789, 4294952819, 4294952873, 4294952891, 4294952903, 4294952969, 4294952999, 4294953023, 4294953107, 4294953173, 4294953281, 4294953341, 4294953431, 4294953599, 4294953689, 4294953719, 4294953827, 4294953887, 4294953977,
    4294954073, 4294954079, 4294954157, 4294954217, 4294954283, 4294954607, 4294954667, 4294954859, 4294954901, 4294954973, 4294955081, 4294955237, 4294955273, 4294955327, 4294955441, 4294955507, 4294955591, 4294955789, 4294955831, 4294955837, 4294955927, 4294955963, 4294955969, 4294955987, 4294956041, 4294956047, 4294956197, 4294956323, 4294956359,
    4294956551, 4294956593, 4294956623, 4294956629, 4294956641, 4294956719, 4294956761, 4294956767, 4294956797, 4294956821, 4294956833, 4294957037, 4294957079, 4294957103, 4294957181, 4294957349, 4294957379, 4294957433, 4294957463, 4294957511, 4294957577, 4294957727, 4294957859, 4294957877, 4294958039, 4294958153, 4294958309, 4294958417, 4294958441,
    4294958693, 4294958717, 4294958753, 4294958903, 4294958909, 4294959017, 4294959071, 4294959107, 4294959161, 4294959257, 4294959299, 4294959329, 4294959431, 4294959593, 4294959599, 4294959659, 4294959893, 4294959917, 4294959983, 4294960001, 4294960031, 4294960061, 4294960079, 4294960097, 4294960271, 4294960283, 4294960349, 4294960367, 4294960421,
    4294960529, 4294960541, 4294960583, 4294960613, 4294960673, 4294960691, 4294960697, 4294960787, 4294960919, 4294961003, 4294961039, 4294961153, 4294961159, 4294961171, 4294961321, 4294961411, 4294961471, 4294961507, 4294961537, 4294961669, 4294961717, 4294961741, 4294961873, 4294962059, 4294962137, 4294962167, 4294962263, 4294962281, 4294962311,
    4294962341, 4294962413, 4294962521, 4294962563, 4294962761, 4294962893, 4294963103, 4294963163, 4294963223, 4294963313, 4294963349, 4294963427, 4294963547, 4294963559, 4294963721, 4294963799, 4294963817, 4294963901, 4294963919, 4294964021, 4294964279, 4294964297, 4294964363, 4294964387, 4294964411, 4294964567, 4294964603, 4294964687, 4294964777,
    4294965041, 4294965071, 4294965119, 4294965221, 4294965251, 4294965287, 4294965413, 4294965569, 4294965647, 4294965671, 4294965689, 4294965779, 4294965839, 4294965893, 4294966091, 4294966109, 4294966127, 4294966157, 4294966187, 4294966199, 4294966211, 4294966403, 4294966457, 4294966499, 4294966541, 4294966637, 4294966661, 4294966739, 4294966823,
    4294966883, 4294966901, 4294966961, 4294967027, 4294967087, 4294967099, 4294967123, 4294967153, 4294967249
};
// Modular multiplication of 52-bit integers using the double precision FPU
// trick described here: https://stackoverflow.com/a/50479693
// The FPU division yields a quotient estimate accurate enough for 52-bit
// operands; subtracting (quotient - 1) * m leaves a small positive residue
// and the trailing % m corrects the deliberate "- 1" safety margin.
#define mulmod52(a, b, m) (((a * b) - (((uint64_t)(((double)a * (double)b) / (double)m) - 1ULL) * m)) % m)

// Compute a^e % m by right-to-left binary exponentiation.
// (Fix: the previous comment referenced a nonexistent "mf" parameter, and the
// low-bit test was written as the obscure "(e << 63U)"; "(e & 1ULL)" is the
// idiomatic, equivalent form.)
ZT_INLINE uint64_t modpow52(uint64_t a, uint64_t e, const uint64_t m) noexcept
{
    uint64_t res = 1ULL;
    for (;;) {
        if ((e & 1ULL) != 0) { // low bit set: fold this power of a into the result
            res = mulmod52(res, a, m);
        }
        if (likely((e >>= 1U) != 0)) {
            a = mulmod52(a, a, m); // square for the next bit
        }
        else {
            break; // no bits left; avoids one final unnecessary squaring
        }
    }
    return res;
}
// Fixed, publicly-known AES key. This is intentional: AES is used here only as
// a strong mixing function, not for secrecy.
static const ZeroTier::AES s_mimc52AES("abcdefghijklmnopqrstuvwxyz012345");

// This fills k[] with pseudorandom bytes from the challenge.
// This doesn't have to be non-reversible or secure, just strongly random.
// Usage of the output (see delay()/verify() below): k[0..31] are round keys,
// k[32] selects the prime, and k[33] seeds the starting value.
ZT_INLINE void fillK(uint64_t k[34], const uint8_t challenge[32])
{
    // Encrypt the two 16-byte halves of the challenge to produce k[0..3].
    s_mimc52AES.encrypt(challenge, k);
    s_mimc52AES.encrypt(challenge + 16, k + 2);
    // Mix the second block with the first so it depends on the whole challenge.
    k[2] ^= k[0];
    k[3] ^= k[1];
    // Extend the stream: each 16-byte block is the encryption of the previous one.
    for (unsigned int i = 2, j = 4; j < 34; i += 2, j += 2)
        s_mimc52AES.encrypt(k + i, k + j);
#if __BYTE_ORDER == __BIG_ENDIAN
    // Byte-swap on big-endian hosts, presumably so k[] (and thus proofs) are
    // identical across platforms -- TODO confirm against little-endian output.
    for (unsigned int i = 0; i < 34; ++i)
        k[i] = Utils::swapBytes(k[i]);
#endif
}
} // anonymous namespace
namespace ZeroTier {
namespace MIMC52 {
/**
 * Run the delay function: the slow (cube root) direction of MIMC52.
 *
 * @param challenge 256-bit challenge seeding keys, prime, and start value
 * @param rounds Number of rounds to execute
 * @return Proof of execution (only the least significant 52 bits are meaningful)
 */
uint64_t delay(const uint8_t challenge[32], const unsigned long rounds)
{
    // Derive round keys and parameters from the challenge.
    uint64_t k[34];
    fillK(k, challenge);

    // p is a 52-bit prime (high 20 bits all 1); e is the cube root exponent mod p.
    const uint64_t p = 0x000fffff00000000ULL | (uint64_t)ZT_MIMC52_PRIMES[((unsigned long)k[32]) & 1023];
    const uint64_t e = ((p * 2ULL) - 1ULL) / 3ULL;

    // Walk the key schedule in reverse: subtract a round key, then take a
    // modular cube root (the expensive inverse of cubing).
    uint64_t x = k[33] % p;
    unsigned long kn = rounds;
    while (kn != 0) {
        --kn;
        x = (x - k[kn & 31]) & 0xfffffffffffffULL;
        x = modpow52(x, e, p);
    }
    return x;
}
/**
 * Verify a proof produced by delay() -- the fast (cubing) direction.
 *
 * @param challenge 256-bit challenge (must match the one given to delay())
 * @param rounds Number of rounds (must match)
 * @param proof Claimed proof of execution
 * @return True if the proof is valid
 */
bool verify(const uint8_t challenge[32], const unsigned long rounds, uint64_t proof)
{
    // Re-derive the same round keys and prime that delay() used.
    uint64_t k[34];
    fillK(k, challenge);
    const uint64_t p = 0x000fffff00000000ULL | (uint64_t)ZT_MIMC52_PRIMES[((unsigned long)k[32]) & 1023];

    // Run forward: cube, then add round keys in forward order.
    for (unsigned long i = 0; i < rounds; ++i) {
        proof = mulmod52(mulmod52(proof, proof, p), proof, p); // proof = proof^3 mod p
        proof = (proof + k[i & 31]) & 0xfffffffffffffULL;
    }

    // A valid proof maps back to the challenge-derived starting value.
    return ((proof % p) == (k[33] % p));
}
} // namespace MIMC52
} // namespace ZeroTier

View file

@ -1,62 +0,0 @@
/*
* Copyright (c)2013-2021 ZeroTier, Inc.
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
*/
/****/
#ifndef ZT_MIMC52_HPP
#define ZT_MIMC52_HPP
#include "Constants.hpp"
/*
* This is a verifiable delay function (or serial proof of work) based on
* the MIMC prime field hash construction. MIMC is very fast to execute in
* one direction and inherently a lot slower in another, allowing expensive
* work to be quickly verified. The 52-bit algorithm implemented here can
* run very fast on a variety of systems by taking advantage of the
* interesting double precision FPU modular multiplication hack described
* here:
*
* https://stackoverflow.com/a/50479693
*
* 52 bits is not sufficient for high strength cryptographic applications
* like block chains, but is good enough to add a deterministic delay to
* identity generation. That's its only purpose here. This is used as the
* delay function for "type 1" identities to replace the ad-hoc memory hard
* hash used in "type 0." This is both simpler and faster to verify.
*/
namespace ZeroTier {
namespace MIMC52 {
/**
 * Compute proof of execution for the delay function
 *
 * This is the slow direction of the hash: runtime is proportional to
 * 'rounds', and the work cannot be parallelized (each round depends on the
 * previous one).
 *
 * @param challenge 256-bit challenge input to randomize algorithm, making it a unique function
 * @param rounds Number of rounds
 * @return Proof of execution of delay function (only least significant 52 bits are meaningful)
 */
uint64_t delay(const uint8_t challenge[32], unsigned long rounds);
/**
 * Verify a proof of execution
 *
 * Verification runs the fast direction and is far cheaper than delay();
 * challenge and rounds must match those used to produce the proof.
 *
 * @param challenge 256-bit challenge
 * @param rounds Number of rounds
 * @param proof Proof from delay()
 * @return True if proof is valid
 */
bool verify(const uint8_t challenge[32], unsigned long rounds, uint64_t proof);
} // namespace MIMC52
} // namespace ZeroTier
#endif

View file

@ -1,212 +0,0 @@
/*
* Copyright (c)2013-2021 ZeroTier, Inc.
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
*/
/****/
#include "Member.hpp"
#include "Context.hpp"
#include "Peer.hpp"
#include "Topology.hpp"
#include <algorithm>
namespace ZeroTier {
// Construct an empty membership record: no COM revocation threshold, never
// pushed credentials, and no memo-ized COM agreement timestamps.
Member::Member() : m_comRevocationThreshold(0), m_lastPushedCredentials(0), m_comAgreementLocalTimestamp(0), m_comAgreementRemoteTimestamp(0) {}
// Push our COM, capabilities, tags, and certificates of ownership to a peer.
// NOTE: the wire-send implementation below is currently disabled (#if 0);
// today this method only records the time of the (attempted) push.
void Member::pushCredentials(const Context &ctx, const CallContext &cc, const SharedPtr<Peer> &to, const NetworkConfig &nconf)
{
    if (!nconf.com) // sanity check
        return;

// Disabled V1-era packet assembly kept for reference during the V2 rewrite.
#if 0
	SharedPtr<Buf> outp(new Buf());
	Protocol::Header &ph = outp->as<Protocol::Header>();

	unsigned int capPtr = 0,tagPtr = 0,cooPtr = 0;
	bool sendCom = true;
	bool complete = false;
	while (!complete) {
		ph.packetId = Protocol::getPacketId();
		to->address().copyTo(ph.destination);
		RR->identity.address().copyTo(ph.source);
		ph.flags = 0;
		ph.verb = Protocol::VERB_NETWORK_CREDENTIALS;

		int outl = sizeof(Protocol::Header);

		if (sendCom) {
			sendCom = false;
			outp->wO(outl,nconf.com);
		}
		outp->wI8(outl,0);

		if ((outl + ZT_CAPABILITY_MARSHAL_SIZE_MAX + 2) < ZT_PROTO_MAX_PACKET_LENGTH) {
			void *const capCountAt = outp->unsafeData + outl;
			outl += 2;
			unsigned int capCount = 0;
			while (capPtr < nconf.capabilityCount) {
				outp->wO(outl,nconf.capabilities[capPtr++]);
				++capCount;
				if ((outl + ZT_CAPABILITY_MARSHAL_SIZE_MAX) >= ZT_PROTO_MAX_PACKET_LENGTH)
					break;
			}
			Utils::storeBigEndian(capCountAt,(uint16_t)capCount);

			if ((outl + ZT_TAG_MARSHAL_SIZE_MAX + 4) < ZT_PROTO_MAX_PACKET_LENGTH) {
				void *const tagCountAt = outp->unsafeData + outl;
				outl += 2;
				unsigned int tagCount = 0;
				while (tagPtr < nconf.tagCount) {
					outp->wO(outl,nconf.tags[tagPtr++]);
					++tagCount;
					if ((outl + ZT_TAG_MARSHAL_SIZE_MAX) >= ZT_PROTO_MAX_PACKET_LENGTH)
						break;
				}
				Utils::storeBigEndian(tagCountAt,(uint16_t)tagCount);

				outp->wI16(outl,0); // no revocations sent here as these propagate differently

				if ((outl + ZT_CERTIFICATEOFOWNERSHIP_MARSHAL_SIZE_MAX + 2) < ZT_PROTO_MAX_PACKET_LENGTH) {
					void *const cooCountAt = outp->unsafeData + outl;
					outl += 2;
					unsigned int cooCount = 0;
					while (cooPtr < nconf.certificateOfOwnershipCount) {
						outp->wO(outl,nconf.certificatesOfOwnership[cooPtr++]);
						++cooCount;
						if ((outl + ZT_CERTIFICATEOFOWNERSHIP_MARSHAL_SIZE_MAX) >= ZT_PROTO_MAX_PACKET_LENGTH)
							break;
					}
					Utils::storeBigEndian(cooCountAt,(uint16_t)cooCount);

					complete = true;
				} else {
					outp->wI16(outl,0);
				}
			} else {
				outp->wI32(outl,0);
				outp->wI16(outl,0); // three zero 16-bit integers
			}
		} else {
			outp->wI64(outl,0); // four zero 16-bit integers
		}

		if (outl > (int)sizeof(Protocol::Header)) {
			outl = Protocol::compress(outp,outl);
			// TODO
		}
	}
#endif

    // Record when we last pushed (or would have pushed) credentials.
    m_lastPushedCredentials = cc.ticks;
}
// Drop remote tags, capabilities, and certificates of ownership whose
// timestamps are no longer valid relative to the given network config.
void Member::clean(const NetworkConfig &nconf)
{
    m_cleanCredImpl<TagCredential>(nconf, m_remoteTags);
    m_cleanCredImpl<CapabilityCredential>(nconf, m_remoteCaps);
    m_cleanCredImpl<OwnershipCredential>(nconf, m_remoteCoos);
}
// Add or update this member's certificate of network membership (COM).
// Rejections are reported to the trace facility with a unique hex code
// identifying the rejection site.
Member::AddCredentialResult Member::addCredential(const Context &ctx, const CallContext &cc, const Identity &sourcePeerIdentity, const NetworkConfig &nconf, const MembershipCredential &com)
{
    // Reject COMs at or below this member's revocation threshold.
    const int64_t newts = com.timestamp();
    if (newts <= m_comRevocationThreshold) {
        ctx.t->credentialRejected(cc, 0xd9992121, com.networkId(), sourcePeerIdentity, com.id(), com.timestamp(), ZT_CREDENTIAL_TYPE_COM, ZT_TRACE_CREDENTIAL_REJECTION_REASON_REVOKED);
        return ADD_REJECTED;
    }
    // Reject COMs older than the one we already hold.
    const int64_t oldts = m_com.timestamp();
    if (newts < oldts) {
        ctx.t->credentialRejected(cc, 0xd9928192, com.networkId(), sourcePeerIdentity, com.id(), com.timestamp(), ZT_CREDENTIAL_TYPE_COM, ZT_TRACE_CREDENTIAL_REJECTION_REASON_OLDER_THAN_LATEST);
        return ADD_REJECTED;
    }
    // Identical COM already on file: nothing to do.
    if ((newts == oldts) && (m_com == com))
        return ADD_ACCEPTED_REDUNDANT;
    // Verify the signature before accepting; defer if we lack the signer's identity.
    switch (com.verify(ctx, cc)) {
        default: ctx.t->credentialRejected(cc, 0x0f198241, com.networkId(), sourcePeerIdentity, com.id(), com.timestamp(), ZT_CREDENTIAL_TYPE_COM, ZT_TRACE_CREDENTIAL_REJECTION_REASON_INVALID); return Member::ADD_REJECTED;
        case Credential::VERIFY_OK: m_com = com; return ADD_ACCEPTED_NEW;
        case Credential::VERIFY_BAD_SIGNATURE: ctx.t->credentialRejected(cc, 0xbaf0aaaa, com.networkId(), sourcePeerIdentity, com.id(), com.timestamp(), ZT_CREDENTIAL_TYPE_COM, ZT_TRACE_CREDENTIAL_REJECTION_REASON_SIGNATURE_VERIFICATION_FAILED); return ADD_REJECTED;
        case Credential::VERIFY_NEED_IDENTITY: return ADD_DEFERRED_FOR_WHOIS;
    }
}
// 3/5 of the credential types have identical addCredential() code: tags,
// capabilities, and certificates of ownership. Flow: reject if older than the
// one we hold, accept-redundant if identical, reject if revoked, then verify
// the signature and store.
// (Fix: magic "case 0"/"case 1" replaced with the named Credential constants
// used by the COM overload above.)
template <typename C> static ZT_INLINE Member::AddCredentialResult _addCredImpl(Map<uint32_t, C> &remoteCreds, const Map<uint64_t, int64_t> &revocations, const Context &ctx, const CallContext &cc, const Identity &sourcePeerIdentity, const NetworkConfig &nconf, const C &cred)
{
    // Do we already hold a credential with this ID?
    typename Map<uint32_t, C>::const_iterator rc(remoteCreds.find(cred.id()));
    if (rc != remoteCreds.end()) {
        if (rc->second.revision() > cred.revision()) {
            ctx.t->credentialRejected(cc, 0x40000001, nconf.networkId, sourcePeerIdentity, cred.id(), cred.revision(), C::credentialType(), ZT_TRACE_CREDENTIAL_REJECTION_REASON_OLDER_THAN_LATEST);
            return Member::ADD_REJECTED;
        }
        if (rc->second == cred)
            return Member::ADD_ACCEPTED_REDUNDANT;
    }

    // Reject if this (type, ID) pair has been revoked at or above this revision.
    typename Map<uint64_t, int64_t>::const_iterator rt(revocations.find(Member::credentialKey(C::credentialType(), cred.id())));
    if ((rt != revocations.end()) && (rt->second >= cred.revision())) {
        ctx.t->credentialRejected(cc, 0x24248124, nconf.networkId, sourcePeerIdentity, cred.id(), cred.revision(), C::credentialType(), ZT_TRACE_CREDENTIAL_REJECTION_REASON_REVOKED);
        return Member::ADD_REJECTED;
    }

    // Verify the signature; defer if we lack the signer's identity.
    switch (cred.verify(ctx, cc)) {
        default: ctx.t->credentialRejected(cc, 0x01feba012, nconf.networkId, sourcePeerIdentity, cred.id(), cred.revision(), C::credentialType(), ZT_TRACE_CREDENTIAL_REJECTION_REASON_INVALID); return Member::ADD_REJECTED;
        case Credential::VERIFY_OK:
            if (rc == remoteCreds.end())
                remoteCreds[cred.id()] = cred;
            return Member::ADD_ACCEPTED_NEW;
        case Credential::VERIFY_NEED_IDENTITY: return Member::ADD_DEFERRED_FOR_WHOIS;
    }
}
// Tag, capability, and ownership credentials share the common implementation
// above; these overloads just select the right credential map.
Member::AddCredentialResult Member::addCredential(const Context &ctx, const CallContext &cc, const Identity &sourcePeerIdentity, const NetworkConfig &nconf, const TagCredential &tag) { return _addCredImpl<TagCredential>(m_remoteTags, m_revocations, ctx, cc, sourcePeerIdentity, nconf, tag); }
Member::AddCredentialResult Member::addCredential(const Context &ctx, const CallContext &cc, const Identity &sourcePeerIdentity, const NetworkConfig &nconf, const CapabilityCredential &cap) { return _addCredImpl<CapabilityCredential>(m_remoteCaps, m_revocations, ctx, cc, sourcePeerIdentity, nconf, cap); }
Member::AddCredentialResult Member::addCredential(const Context &ctx, const CallContext &cc, const Identity &sourcePeerIdentity, const NetworkConfig &nconf, const OwnershipCredential &coo) { return _addCredImpl<OwnershipCredential>(m_remoteCoos, m_revocations, ctx, cc, sourcePeerIdentity, nconf, coo); }
// Process a revocation credential, raising the stored revocation threshold
// for the credential (or COM) it targets.
//
// BUG FIX: the capability/tag/COO branch previously also executed
// "m_comRevocationThreshold = rev.threshold();" -- a copy-paste from the COM
// branch. That would have treated a revocation of ANY tag/capability/COO as a
// COM revocation too, causing subsequent COMs with timestamps at or below the
// threshold to be rejected in addCredential(COM). Removed here; only COM
// revocations may move the COM threshold.
// (Also: magic "case 0"/"case 1" replaced with named Credential constants for
// consistency with the COM overload.)
Member::AddCredentialResult Member::addCredential(const Context &ctx, const CallContext &cc, const Identity &sourcePeerIdentity, const NetworkConfig &nconf, const RevocationCredential &rev)
{
    int64_t *rt;
    switch (rev.verify(ctx, cc)) {
        default: ctx.t->credentialRejected(cc, 0x938ff009, nconf.networkId, sourcePeerIdentity, rev.id(), 0, ZT_CREDENTIAL_TYPE_REVOCATION, ZT_TRACE_CREDENTIAL_REJECTION_REASON_INVALID); return ADD_REJECTED;
        case Credential::VERIFY_OK: {
            const ZT_CredentialType ct = rev.typeBeingRevoked();
            switch (ct) {
                case ZT_CREDENTIAL_TYPE_COM:
                    // COM revocations raise the single COM threshold.
                    if (rev.threshold() > m_comRevocationThreshold) {
                        m_comRevocationThreshold = rev.threshold();
                        return ADD_ACCEPTED_NEW;
                    }
                    return ADD_ACCEPTED_REDUNDANT;
                case ZT_CREDENTIAL_TYPE_CAPABILITY:
                case ZT_CREDENTIAL_TYPE_TAG:
                case ZT_CREDENTIAL_TYPE_COO:
                    // Other credential types are revoked individually by (type, ID).
                    rt = &(m_revocations[credentialKey(ct, rev.credentialId())]);
                    if (*rt < rev.threshold()) {
                        *rt = rev.threshold();
                        return ADD_ACCEPTED_NEW;
                    }
                    return ADD_ACCEPTED_REDUNDANT;
                default: ctx.t->credentialRejected(cc, 0x0bbbb1a4, nconf.networkId, sourcePeerIdentity, rev.id(), 0, ZT_CREDENTIAL_TYPE_REVOCATION, ZT_TRACE_CREDENTIAL_REJECTION_REASON_INVALID); return ADD_REJECTED;
            }
        }
        case Credential::VERIFY_NEED_IDENTITY: return ADD_DEFERRED_FOR_WHOIS;
    }
}
// An IPv6 address is "unspoofable" when NDP emulation is on and the address is
// this member's 6PLANE or RFC4193 address for the network: both embed the
// peer's ZeroTier address, so no certificate of ownership is needed.
bool Member::m_isUnspoofableAddress(const NetworkConfig &nconf, const InetAddress &ip) const noexcept { return (ip.isV6() && nconf.ndpEmulation() && ((ip == InetAddress::makeIpv66plane(nconf.networkId, m_com.issuedTo().address)) || (ip == InetAddress::makeIpv6rfc4193(nconf.networkId, m_com.issuedTo().address)))); }
} // namespace ZeroTier

View file

@ -1,219 +0,0 @@
/*
* Copyright (c)2013-2021 ZeroTier, Inc.
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
*/
/****/
#ifndef ZT_MEMBERSHIP_HPP
#define ZT_MEMBERSHIP_HPP
#include "CapabilityCredential.hpp"
#include "Constants.hpp"
#include "Containers.hpp"
#include "Credential.hpp"
#include "MembershipCredential.hpp"
#include "NetworkConfig.hpp"
#include "RevocationCredential.hpp"
#include "TagCredential.hpp"
namespace ZeroTier {
class Context;
class Network;
/**
* A container for certificates of membership and other network credentials
*
* This is essentially a relational join between Peer and Network.
*
* This class is not thread safe. It must be locked externally.
*/
class Member {
public:
enum AddCredentialResult { ADD_REJECTED, ADD_ACCEPTED_NEW, ADD_ACCEPTED_REDUNDANT, ADD_DEFERRED_FOR_WHOIS };
Member();
/**
* Send COM and other credentials to this peer
*
* @param to Peer identity
* @param nconf My network config
*/
void pushCredentials(const Context &ctx, const CallContext &cc, const SharedPtr<Peer> &to, const NetworkConfig &nconf);
/**
* @return Time we last pushed credentials to this member
*/
ZT_INLINE int64_t lastPushedCredentials() const noexcept { return m_lastPushedCredentials; }
/**
* Get a remote member's tag (if we have it)
*
* @param nconf Network configuration
* @param id Tag ID
* @return Pointer to tag or NULL if not found
*/
ZT_INLINE const TagCredential *getTag(const NetworkConfig &nconf, const uint32_t id) const noexcept
{
Map<uint32_t, TagCredential>::const_iterator t(m_remoteTags.find(id));
return (((t != m_remoteTags.end()) && (m_isCredentialTimestampValid(nconf, t->second))) ? &(t->second) : (TagCredential *)0);
}
/**
* Clean internal databases of stale entries
*
* @param nconf Current network configuration
*/
void clean(const NetworkConfig &nconf);
/**
* Generates a key for internal use in indexing credentials by type and credential ID
*/
static ZT_INLINE uint64_t credentialKey(const ZT_CredentialType &t, const uint32_t i) noexcept { return (((uint64_t)t << 32U) | (uint64_t)i); }
/**
* Check whether the peer represented by this Membership owns a given address
*
* @tparam Type of resource: InetAddress or MAC
* @param nconf Our network config
* @param r Resource to check
* @return True if this peer has a certificate of ownership for the given resource
*/
template <typename T> ZT_INLINE bool peerOwnsAddress(const NetworkConfig &nconf, const T &r) const noexcept
{
if (m_isUnspoofableAddress(nconf, r))
return true;
for (Map<uint32_t, OwnershipCredential>::const_iterator i(m_remoteCoos.begin()); i != m_remoteCoos.end(); ++i) {
if (m_isCredentialTimestampValid(nconf, i->second) && (i->second.owns(r)))
return true;
}
return false;
}
/**
* Check if our local COM agrees with theirs, with possible memo-ization.
*
* @param localCom
*/
ZT_INLINE bool certificateOfMembershipAgress(const MembershipCredential &localCom, const Identity &remoteIdentity)
{
if ((m_comAgreementLocalTimestamp == localCom.timestamp()) && (m_comAgreementRemoteTimestamp == m_com.timestamp()))
return true;
if (m_com.agreesWith(localCom)) {
// SECURITY: newer network controllers embed the full fingerprint into the COM. If we are
// joined to a network managed by one of these, our COM will contain one. If it's present
// we compare vs the other and require them to match. If our COM does not contain a full
// identity fingerprint we compare by address only, which is a legacy mode supported for
// old network controllers. Note that this is safe because the controller issues us our COM
// and in so doing indicates if it's new or old. However this will go away after a while
// once we can be pretty sure there are no ancient controllers around.
if (localCom.issuedTo().haveHash()) {
if (localCom.issuedTo() != m_com.issuedTo())
return false;
}
else {
// LEGACY: support networks run by old controllers.
if (localCom.issuedTo().address != m_com.issuedTo().address)
return false;
}
// Remember that these two COMs agreed. If any are updated this is invalidated and a full
// agreement check will be done again.
m_comAgreementLocalTimestamp = localCom.timestamp();
m_comAgreementRemoteTimestamp = m_com.timestamp();
return true;
}
return false;
}
AddCredentialResult addCredential(const Context &ctx, const CallContext &cc, const Identity &sourcePeerIdentity, const NetworkConfig &nconf, const MembershipCredential &com);
AddCredentialResult addCredential(const Context &ctx, const CallContext &cc, const Identity &sourcePeerIdentity, const NetworkConfig &nconf, const TagCredential &tag);
AddCredentialResult addCredential(const Context &ctx, const CallContext &cc, const Identity &sourcePeerIdentity, const NetworkConfig &nconf, const CapabilityCredential &cap);
AddCredentialResult addCredential(const Context &ctx, const CallContext &cc, const Identity &sourcePeerIdentity, const NetworkConfig &nconf, const OwnershipCredential &coo);
AddCredentialResult addCredential(const Context &ctx, const CallContext &cc, const Identity &sourcePeerIdentity, const NetworkConfig &nconf, const RevocationCredential &rev);
private:
// This returns true if a resource is an IPv6 NDP-emulated address. These embed the ZT
// address of the peer and therefore cannot be spoofed, causing peerOwnsAddress() to
// always return true for them. A certificate is not required for these.
ZT_INLINE bool m_isUnspoofableAddress(const NetworkConfig &nconf, const MAC &m) const noexcept { return false; }
bool m_isUnspoofableAddress(const NetworkConfig &nconf, const InetAddress &ip) const noexcept;
// This compares the remote credential's timestamp to the timestamp in our network config
// plus or minus the permitted maximum timestamp delta.
template <typename C> ZT_INLINE bool m_isCredentialTimestampValid(const NetworkConfig &nconf, const C &remoteCredential) const noexcept
{
const int64_t ts = remoteCredential.revision();
if (((ts >= nconf.timestamp) ? (ts - nconf.timestamp) : (nconf.timestamp - ts)) <= nconf.credentialTimeMaxDelta) {
Map<uint64_t, int64_t>::const_iterator threshold(m_revocations.find(credentialKey(C::credentialType(), remoteCredential.id())));
return ((threshold == m_revocations.end()) || (ts > threshold->second));
}
return false;
}
// Erase every remote credential of type C whose timestamp is no longer valid
// (expired relative to the network config, or revoked).
template <typename C> ZT_INLINE void m_cleanCredImpl(const NetworkConfig &nconf, Map<uint32_t, C> &remoteCreds)
{
    typename Map<uint32_t, C>::iterator cred(remoteCreds.begin());
    while (cred != remoteCreds.end()) {
        if (m_isCredentialTimestampValid(nconf, cred->second)) {
            ++cred;
        }
        else {
            // Post-increment hands erase() the current node while cred moves on.
            remoteCreds.erase(cred++);
        }
    }
}
// Revocation threshold for COM or 0 if none
int64_t m_comRevocationThreshold;
// Time we last pushed credentials
int64_t m_lastPushedCredentials;
// COM timestamps at which we last agreed-- used to memo-ize agreement and avoid having to recompute constantly.
int64_t m_comAgreementLocalTimestamp, m_comAgreementRemoteTimestamp;
// Remote member's latest network COM
MembershipCredential m_com;
// Revocations by credentialKey()
Map<uint64_t, int64_t> m_revocations;
// Remote credentials that we have received from this member (and that are valid)
Map<uint32_t, TagCredential> m_remoteTags;
Map<uint32_t, CapabilityCredential> m_remoteCaps;
Map<uint32_t, OwnershipCredential> m_remoteCoos;
public:
// Iterates over a member's remote capability credentials, yielding only those
// whose timestamps are still valid under the given network config. next()
// returns nullptr once the underlying map is exhausted.
class CapabilityIterator {
public:
    ZT_INLINE CapabilityIterator(Member &m, const NetworkConfig &nconf) noexcept : m_hti(m.m_remoteCaps.begin()), m_parent(m), m_nconf(nconf) {}

    ZT_INLINE CapabilityCredential *next() noexcept
    {
        for (;;) {
            if (m_hti == m_parent.m_remoteCaps.end())
                return nullptr;
            // Snapshot the current entry, then advance so a subsequent call
            // resumes past it regardless of validity.
            Map<uint32_t, CapabilityCredential>::iterator current(m_hti);
            ++m_hti;
            if (m_parent.m_isCredentialTimestampValid(m_nconf, current->second))
                return &(current->second);
        }
    }

private:
    Map<uint32_t, CapabilityCredential>::iterator m_hti;
    Member &m_parent;
    const NetworkConfig &m_nconf;
};
};
} // namespace ZeroTier
#endif

View file

@ -1,285 +0,0 @@
/*
* Copyright (c)2013-2021 ZeroTier, Inc.
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
*/
/****/
#include "MembershipCredential.hpp"
namespace ZeroTier {
// Construct a COM from the three always-present fields (timestamp qualifier,
// network ID, issued-to identity fingerprint). The credential is unsigned
// until sign() is called; m_signatureLength == 0 marks the unsigned state.
// The NOLINT below presumably covers intentionally uninitialized members
// such as the signature buffer -- TODO confirm.
MembershipCredential::MembershipCredential(
const int64_t timestamp, const int64_t timestampMaxDelta, const uint64_t nwid,
const Identity &issuedTo) noexcept
: // NOLINT(cppcoreguidelines-pro-type-member-init,hicpp-member-init)
m_timestamp(timestamp)
, m_timestampMaxDelta(timestampMaxDelta)
, m_networkId(nwid)
, m_issuedTo(issuedTo.fingerprint())
, m_signatureLength(0)
{
}
// Determine whether two COMs "agree": their timestamps must be within the
// smaller of the two max deltas, every constrained additional qualifier in
// each must match the other to within its delta, the network IDs must be
// equal and nonzero, and they must be issued to different identities.
bool MembershipCredential::agreesWith(const MembershipCredential &other) const noexcept
{
    // NOTE: we always do explicit absolute value with an if() since llabs() can have overflow
    // conditions that could introduce a vulnerability.
    if (other.m_timestamp > m_timestamp) {
        if ((other.m_timestamp - m_timestamp) > std::min(m_timestampMaxDelta, other.m_timestampMaxDelta))
            return false;
    }
    else {
        if ((m_timestamp - other.m_timestamp) > std::min(m_timestampMaxDelta, other.m_timestampMaxDelta))
            return false;
    }

    // us <> them: every constrained (non-informational) qualifier we carry must
    // be present in 'other' and agree to within our delta.
    for (FCV<p_Qualifier, ZT_MEMBERSHIP_CREDENTIAL_MAX_ADDITIONAL_QUALIFIERS>::const_iterator i(m_additionalQualifiers.begin()); i != m_additionalQualifiers.end(); ++i) {
        if (i->delta != 0xffffffffffffffffULL) {
            const uint64_t *v2 = nullptr;
            // BUG FIX: this inner loop previously incremented the OUTER iterator
            // ("++i"), which could spin forever or overrun the outer container
            // whenever additional qualifiers were present.
            for (FCV<p_Qualifier, ZT_MEMBERSHIP_CREDENTIAL_MAX_ADDITIONAL_QUALIFIERS>::const_iterator j(other.m_additionalQualifiers.begin()); j != other.m_additionalQualifiers.end(); ++j) {
                if (j->id == i->id) {
                    v2 = &(j->value);
                    break;
                }
            }
            if (!v2)
                return false;
            if (*v2 > i->value) {
                if ((*v2 - i->value) > i->delta)
                    return false;
            }
            else {
                if ((i->value - *v2) > i->delta)
                    return false;
            }
        }
    }

    // them <> us (we need a second pass in case they have qualifiers we don't or vice versa)
    for (FCV<p_Qualifier, ZT_MEMBERSHIP_CREDENTIAL_MAX_ADDITIONAL_QUALIFIERS>::const_iterator i(other.m_additionalQualifiers.begin()); i != other.m_additionalQualifiers.end(); ++i) {
        if (i->delta != 0xffffffffffffffffULL) {
            const uint64_t *v2 = nullptr;
            // BUG FIX: was "++i" here as well.
            for (FCV<p_Qualifier, ZT_MEMBERSHIP_CREDENTIAL_MAX_ADDITIONAL_QUALIFIERS>::const_iterator j(m_additionalQualifiers.begin()); j != m_additionalQualifiers.end(); ++j) {
                if (j->id == i->id) {
                    v2 = &(j->value);
                    break;
                }
            }
            if (!v2)
                return false;
            if (*v2 > i->value) {
                if ((*v2 - i->value) > i->delta)
                    return false;
            }
            else {
                if ((i->value - *v2) > i->delta)
                    return false;
            }
        }
    }

    // SECURITY: check for issued-to inequality is a sanity check. This should be impossible since elsewhere
    // in the code COMs are checked to ensure that they do in fact belong to their issued-to identities.
    return (other.m_networkId == m_networkId) && (m_networkId != 0) && (other.m_issuedTo.address != m_issuedTo.address);
}
bool MembershipCredential::sign(const Identity &with) noexcept
{
m_signedBy = with.address();
uint64_t buf[ZT_MEMBERSHIP_CREDENTIAL_MARSHAL_SIZE_MAX / 8];
const unsigned int bufSize = m_fillSigningBuf(buf);
m_signatureLength = with.sign(buf, bufSize, m_signature, sizeof(m_signature));
return m_signatureLength > 0;
}
// Serialize this COM into the V1 or V2 wire format (see MembershipCredential.hpp).
// The caller must supply at least ZT_MEMBERSHIP_CREDENTIAL_MARSHAL_SIZE_MAX bytes.
// Returns the number of bytes written.
int MembershipCredential::marshal(uint8_t data[ZT_MEMBERSHIP_CREDENTIAL_MARSHAL_SIZE_MAX], const bool v2) const noexcept
{
data[0] = v2 ? 2 : 1; // wire format version byte
// All formats start with the standard three qualifiers: timestamp with delta, network ID as a strict
// equality compare, and the address of the issued-to node as an informational tuple.
int p = 3; // cursor starts past version byte + 16-bit tuple count (count is filled in below)
// Tuple 0: timestamp / max delta.
Utils::storeBigEndian<uint64_t>(data + p, 0);
Utils::storeBigEndian<uint64_t>(data + p + 8, (uint64_t)m_timestamp);
Utils::storeBigEndian<uint64_t>(data + p + 16, (uint64_t)m_timestampMaxDelta);
// Tuple 1: network ID, delta 0 (strict equality).
Utils::storeBigEndian<uint64_t>(data + p + 24, 1);
Utils::storeBigEndian<uint64_t>(data + p + 32, m_networkId);
Utils::storeBigEndian<uint64_t>(data + p + 40, 0);
// Tuple 2: issued-to address, delta uint64_max (informational only).
Utils::storeBigEndian<uint64_t>(data + p + 48, 2);
Utils::storeBigEndian<uint64_t>(data + p + 56, m_issuedTo.address);
Utils::storeMachineEndian<uint64_t>(data + p + 64, 0xffffffffffffffffULL);
p += 72;
if (v2) {
// V2 marshal format will have three tuples followed by the fingerprint hash.
Utils::storeBigEndian<uint16_t>(data + 1, 3);
Utils::copy<ZT_FINGERPRINT_HASH_SIZE>(data + p, m_issuedTo.hash);
p += 48;
}
else {
// V1 marshal format must shove everything into tuples, resulting in nine.
// The 48-byte hash is split across tuples 3..8 as raw machine-endian
// "values" with informational deltas.
Utils::storeBigEndian<uint16_t>(data + 1, 9);
for (int k = 0; k < 6; ++k) {
Utils::storeBigEndian<uint64_t>(data + p, (uint64_t)k + 3);
Utils::storeMachineEndian<uint64_t>(data + p + 8, Utils::loadMachineEndian<uint64_t>(m_issuedTo.hash + (k * 8)));
Utils::storeMachineEndian<uint64_t>(data + p + 16, 0xffffffffffffffffULL);
p += 24;
}
}
// 5-byte address of the signing controller follows the tuples (and, in V2, the hash).
m_signedBy.copyTo(data + p);
p += 5;
if (v2) {
// V2 marshal format prefixes signatures with a 16-bit length to support future signature types.
Utils::storeBigEndian<uint16_t>(data + p, (uint16_t)m_signatureLength);
p += 2;
Utils::copy(data + p, m_signature, m_signatureLength);
p += (int)m_signatureLength;
}
else {
// V1 only supports 96-byte signature fields.
Utils::copy<96>(data + p, m_signature);
p += 96;
}
return p;
}
int MembershipCredential::unmarshal(const uint8_t *data, int len) noexcept
{
if (len < (1 + 2 + 72))
return -1;
TriviallyCopyable::memoryZero(this);
const unsigned int numq = Utils::loadBigEndian<uint16_t>(data + 1);
if ((numq < 3) || (numq > (ZT_MEMBERSHIP_CREDENTIAL_MAX_ADDITIONAL_QUALIFIERS + 3)))
return -1;
int p = 3;
for (unsigned int q = 0; q < numq; ++q) {
if ((p + 24) > len)
return -1;
const uint64_t id = Utils::loadBigEndian<uint64_t>(data + p);
p += 8; // NOLINT(hicpp-use-auto,modernize-use-auto)
const uint64_t value = Utils::loadBigEndian<uint64_t>(data + p);
p += 8; // NOLINT(hicpp-use-auto,modernize-use-auto)
const uint64_t delta = Utils::loadBigEndian<uint64_t>(data + p);
p += 8; // NOLINT(hicpp-use-auto,modernize-use-auto)
switch (id) {
case 0:
m_timestamp = (int64_t)value;
m_timestampMaxDelta = (int64_t)delta;
break;
case 1: m_networkId = value; break;
case 2:
m_issuedTo.address = value;
break;
// V1 nodes will pack the hash into qualifier tuples.
case 3: Utils::storeBigEndian<uint64_t>(m_issuedTo.hash, value); break;
case 4: Utils::storeBigEndian<uint64_t>(m_issuedTo.hash + 8, value); break;
case 5: Utils::storeBigEndian<uint64_t>(m_issuedTo.hash + 16, value); break;
case 6: Utils::storeBigEndian<uint64_t>(m_issuedTo.hash + 24, value); break;
case 7: Utils::storeBigEndian<uint64_t>(m_issuedTo.hash + 32, value); break;
case 8: Utils::storeBigEndian<uint64_t>(m_issuedTo.hash + 40, value); break;
default:
if (m_additionalQualifiers.size() >= ZT_MEMBERSHIP_CREDENTIAL_MAX_ADDITIONAL_QUALIFIERS)
return -1;
m_additionalQualifiers.push_back(p_Qualifier(id, value, delta));
break;
}
}
std::sort(m_additionalQualifiers.begin(), m_additionalQualifiers.end());
if (data[0] == 1) {
if ((p + 96) > len)
return -1;
m_signatureLength = 96;
Utils::copy<96>(m_signature, data + p);
return p + 96;
}
else if (data[0] == 2) {
if ((p + 48) > len)
return -1;
Utils::copy<48>(m_issuedTo.hash, data + p);
p += 48;
if ((p + 2) > len)
return -1;
m_signatureLength = Utils::loadBigEndian<uint16_t>(data + p);
if ((m_signatureLength > (unsigned int)sizeof(m_signature)) || ((p + (int)m_signatureLength) > len))
return -1;
Utils::copy(m_signature, data + p, m_signatureLength);
return p + (int)m_signatureLength;
}
return -1;
}
unsigned int MembershipCredential::m_fillSigningBuf(uint64_t *buf) const noexcept
{
const uint64_t informational = 0xffffffffffffffffULL;
/*
* Signing always embeds all data to be signed in qualifier tuple format for
* backward compatibility with V1 nodes, since otherwise we'd need a signature
* for v1 nodes to verify and another for v2 nodes to verify.
*/
// The standard three tuples that must begin every COM.
buf[0] = 0;
buf[1] = Utils::hton((uint64_t)m_timestamp);
buf[2] = Utils::hton((uint64_t)m_timestampMaxDelta);
buf[3] = ZT_CONST_TO_BE_UINT64(1);
buf[4] = Utils::hton(m_networkId);
buf[5] = 0;
buf[6] = ZT_CONST_TO_BE_UINT64(2);
buf[7] = Utils::hton(m_issuedTo.address);
buf[8] = informational;
unsigned int p = 9;
// The full identity fingerprint of the peer to whom the COM was issued,
// embeded as a series of informational tuples.
if (m_issuedTo.haveHash()) {
buf[p++] = ZT_CONST_TO_BE_UINT64(3);
buf[p++] = Utils::loadMachineEndian<uint64_t>(m_issuedTo.hash);
buf[p++] = informational;
buf[p++] = ZT_CONST_TO_BE_UINT64(4);
buf[p++] = Utils::loadMachineEndian<uint64_t>(m_issuedTo.hash + 8);
buf[p++] = informational;
buf[p++] = ZT_CONST_TO_BE_UINT64(5);
buf[p++] = Utils::loadMachineEndian<uint64_t>(m_issuedTo.hash + 16);
buf[p++] = informational;
buf[p++] = ZT_CONST_TO_BE_UINT64(6);
buf[p++] = Utils::loadMachineEndian<uint64_t>(m_issuedTo.hash + 24);
buf[p++] = informational;
buf[p++] = ZT_CONST_TO_BE_UINT64(7);
buf[p++] = Utils::loadMachineEndian<uint64_t>(m_issuedTo.hash + 32);
buf[p++] = informational;
buf[p++] = ZT_CONST_TO_BE_UINT64(8);
buf[p++] = Utils::loadMachineEndian<uint64_t>(m_issuedTo.hash + 40);
buf[p++] = informational;
}
for (FCV<p_Qualifier, ZT_MEMBERSHIP_CREDENTIAL_MAX_ADDITIONAL_QUALIFIERS>::const_iterator i(m_additionalQualifiers.begin()); i != m_additionalQualifiers.end(); ++i) { // NOLINT(modernize-loop-convert)
buf[p++] = Utils::hton(i->id);
buf[p++] = Utils::hton(i->value);
buf[p++] = Utils::hton(i->delta);
}
return p * 8;
}
} // namespace ZeroTier

View file

@ -1,214 +0,0 @@
/*
* Copyright (c)2013-2021 ZeroTier, Inc.
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
*/
/****/
#ifndef ZT_CERTIFICATEOFMEMBERSHIP_HPP
#define ZT_CERTIFICATEOFMEMBERSHIP_HPP
#include "Address.hpp"
#include "C25519.hpp"
#include "Constants.hpp"
#include "Credential.hpp"
#include "FCV.hpp"
#include "Identity.hpp"
#include "Utils.hpp"
#include <algorithm>
#include <stdexcept>
#include <string>
// Maximum number of additional tuples beyond the standard always-present three.
#define ZT_MEMBERSHIP_CREDENTIAL_MAX_ADDITIONAL_QUALIFIERS 8
// version + qualifier count + three required qualifiers + additional qualifiers +
#define ZT_MEMBERSHIP_CREDENTIAL_MARSHAL_SIZE_MAX (1 + 2 + (3 * 3 * 8) + (ZT_MEMBERSHIP_CREDENTIAL_MAX_ADDITIONAL_QUALIFIERS * 3 * 8) + 144 + 5 + 2 + 96)
namespace ZeroTier {
class Context;
/**
* Certificate of network membership
*
* This is the fundamental permission object issued by network controllers to members of networks
* to admit them into networks.
*
* A certificate of membership (COM) consists of a series of tuples called qualifiers as well
* as the full identity fingerprint of the node being admitted, the address of the controller
* (for sanity checking), and a signature.
*
* A qualifier is a tuple of three 64-bit unsigned integers: an id, a value, and a delta.
*
 * Certificates are checked between peers by determining if they agree. If the absolute value
 * of the difference between any two qualifier values exceeds its delta, the certificates do
 * not agree. A delta of 1 for example means that the values of two peers may differ by no more
* than one. A delta of 0 indicates values that must be the same. A delta of uint64_max is for
* informational tuples that are not included in certificate checking, as this means they may
* differ by any amount.
*
* All COMs contain three initial tuples: timestamp, network ID, and the address of the
* issued-to node. The latter is informational. The network ID must equal exactly, though in
* theory a controller could allow a delta there to e.g. allow cross-communication between all
* of its networks. (This has never been done in practice.) The most important field is the
* timestamp, whose delta defines a moving window within which certificates must be timestamped
* by the network controller to agree. A certificate that is too old will fall out of this
* window vs its peers and will no longer be considered valid.
*
* (Revocations are a method to rapidly revoke access that works alongside this slower but
* more definitive method.)
*
* Certificate of membership wire format:
*
* This wire format comes in two versions: version 1 for ZeroTier 1.x, which will
* eventually go away once 1.x is out of support, and version 2 for ZeroTier 2.x and later.
*
* Version 2:
*
* <[1] wire format type byte: 1 or 2>
* <[2] 16-bit number of qualifier tuples>
* <[...] qualifier tuples>
* <[48] fingerprint hash of identity of peer to whom COM was issued>
* <[5] address of network controller>
* <[2] 16-bit size of signature>
* <[...] signature>
*
* Version 1 is identical except the fingerprint hash is omitted and is instead loaded
* into a series of six informational tuples. The signature size is also omitted and a
* 96-byte signature field is assumed.
*
* Qualifier tuples must appear in numeric order of ID, and the first three tuples
* must have IDs 0, 1, and 2 being the timestamp, network ID, and issued-to address
* respectively. In version 1 COMs the IDs 3-8 are used to pack in the full identity
* fingerprint, so these are reserved as well. Optional additional tuples (not currently
* used) must use ID 65536 or higher.
*
* Signatures are computed over tuples only for backward compatibility with v1, and we
* don't plan to change this. Tuples are emitted into a buffer in ascending numeric
* order with the fingerprint hash being packed into tuple IDs 3-8 and this buffer is
* then signed.
*/
class MembershipCredential : public Credential {
friend class Credential;
public:
// Credential type tag used by the generic credential machinery.
static constexpr ZT_CredentialType credentialType() noexcept { return ZT_CREDENTIAL_TYPE_COM; }
/**
* Create an empty certificate of membership
*/
ZT_INLINE MembershipCredential() noexcept { memoryZero(this); }
/**
* Create from required fields common to all networks
*
* @param timestamp Timestamp of certificate
* @param timestampMaxDelta Maximum variation between timestamps on this net
* @param nwid Network ID
* @param issuedTo Certificate recipient
*/
MembershipCredential(int64_t timestamp, int64_t timestampMaxDelta, uint64_t nwid, const Identity &issuedTo) noexcept;
/**
* @return True if there's something here (i.e. the network ID is nonzero)
*/
ZT_INLINE operator bool() const noexcept { return (m_networkId != 0); }
/**
* @return Credential ID, always 0 for COMs
*/
ZT_INLINE uint32_t id() const noexcept { return 0; }
/**
* @return Timestamp for this cert and maximum delta for timestamp
*/
ZT_INLINE int64_t timestamp() const noexcept { return m_timestamp; }
// revision() aliases timestamp() so COMs can be handled through the generic
// credential interface (e.g. timestamp-validity checks in Member).
ZT_INLINE int64_t revision() const noexcept { return m_timestamp; }
/**
* @return Maximum allowed difference between timestamps
*/
ZT_INLINE int64_t timestampMaxDelta() const noexcept { return m_timestampMaxDelta; }
/**
* @return Fingerprint of identity to which this cert was issued
*/
ZT_INLINE const Fingerprint &issuedTo() const noexcept { return m_issuedTo; }
/**
* @return Network ID for which this cert was issued
*/
ZT_INLINE uint64_t networkId() const noexcept { return m_networkId; }
/**
* Compare two certificates for parameter agreement
*
* This compares this certificate with the other and returns true if all
* parameters in this cert are present in the other and if they agree to
* within this cert's max delta value for each given parameter.
*
* Tuples present in other but not in this cert are ignored, but any
* tuples present in this cert but not in other result in 'false'.
*
* @param other Cert to compare with
* @return True if certs agree and 'other' may be communicated with
*/
bool agreesWith(const MembershipCredential &other) const noexcept;
/**
* Sign this certificate
*
* @param with Identity to sign with, must include private key
* @return True if signature was successful
*/
bool sign(const Identity &with) noexcept;
/**
* Verify this COM and its signature
*
* @param RR Runtime environment for looking up peers
* @param tPtr Thread pointer to be handed through to any callbacks called as a result of this call
*/
ZT_INLINE Credential::VerifyResult verify(const Context &ctx, const CallContext &cc) const { return s_verify(ctx, cc, *this); }
// Upper bound on the serialized size of any COM (either wire format version).
static constexpr int marshalSizeMax() noexcept { return ZT_MEMBERSHIP_CREDENTIAL_MARSHAL_SIZE_MAX; }
// Serialize to the wire format described above; v2 selects the version 2 layout.
// Returns bytes written.
int marshal(uint8_t data[ZT_MEMBERSHIP_CREDENTIAL_MARSHAL_SIZE_MAX], bool v2 = false) const noexcept;
// Deserialize, auto-detecting the wire format version from the first byte.
// Returns bytes consumed or -1 on error.
int unmarshal(const uint8_t *data, int len) noexcept;
private:
// Fill buf with the canonical tuple representation used for signing; returns byte length.
unsigned int m_fillSigningBuf(uint64_t *buf) const noexcept;
// One qualifier tuple: (id, value, delta). Ordered by ID for sorting.
struct p_Qualifier {
ZT_INLINE p_Qualifier() noexcept : id(0), value(0), delta(0) {}
ZT_INLINE p_Qualifier(const uint64_t id_, const uint64_t value_, const uint64_t delta_) noexcept : id(id_), value(value_), delta(delta_) {}
uint64_t id;
uint64_t value;
uint64_t delta;
ZT_INLINE bool operator<(const p_Qualifier &q) const noexcept { return (id < q.id); } // sort order
};
// Optional qualifiers beyond the standard three, kept sorted by ID.
FCV<p_Qualifier, ZT_MEMBERSHIP_CREDENTIAL_MAX_ADDITIONAL_QUALIFIERS> m_additionalQualifiers;
int64_t m_timestamp;          // qualifier 0 value
int64_t m_timestampMaxDelta;  // qualifier 0 delta
uint64_t m_networkId;         // qualifier 1 value (strict equality)
Fingerprint m_issuedTo;       // qualifier 2 address plus fingerprint hash
Address m_signedBy;           // controller that signed this COM
unsigned int m_signatureLength; // 0 while unsigned
uint8_t m_signature[ZT_SIGNATURE_BUFFER_SIZE];
};
} // namespace ZeroTier
#endif

View file

@ -1,90 +0,0 @@
/*
* Copyright (c)2013-2021 ZeroTier, Inc.
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
*/
/****/
#ifndef ZT_METER_HPP
#define ZT_METER_HPP
#include "Constants.hpp"
#include "Mutex.hpp"
#include <algorithm>
namespace ZeroTier {
/**
* Transfer rate and total transferred amount meter
*
* This class is lock-free and thread-safe.
*
* This maintains a set of buckets numbered according to the current time
* modulo TUNIT. Each bucket is incremented within that time window. When
* the time moves on to a new bucket, its old contents are added to a
* total accumulator and a new counter for that bucket starts.
*
* @tparam TUNIT Unit of time in milliseconds (default: 1000 for one second)
* @tparam LSIZE Log size in units of time (default: 10 for 10s worth of data)
*/
template <int64_t TUNIT = 1000, unsigned long LSIZE = 10> class Meter {
public:
    /**
     * Create a new meter with all counters zeroed.
     *
     * BUG FIX: the previous default constructor left the std::atomic members
     * uninitialized (std::atomic's default constructor does not zero the
     * value prior to C++20), so log()/rate() could read indeterminate data.
     */
    ZT_INLINE Meter() noexcept : m_totalExclCounts(0), m_bucket(0)
    {
        for (unsigned long i = 0; i < LSIZE; ++i)
            m_counts[i].store(0, std::memory_order_relaxed);
    }

    /**
     * Add a measurement
     *
     * @param ts Timestamp for measurement
     * @param count Count of items (usually bytes)
     */
    ZT_INLINE void log(const int64_t ts, const uint64_t count) noexcept
    {
        // We log by choosing a log bucket based on the current time in units modulo
        // the log size and then if it's a new bucket setting it or otherwise adding
        // to it. When we enter a new bucket, its stale contents (from one full
        // rotation of the ring ago) are rolled into the lifetime accumulator and
        // the bucket restarts at 'count'.
        const unsigned long bucket = ((unsigned long)(ts / TUNIT)) % LSIZE;
        if (unlikely(m_bucket.exchange(bucket, std::memory_order_relaxed) != bucket)) {
            m_totalExclCounts.fetch_add(m_counts[bucket].exchange(count, std::memory_order_relaxed), std::memory_order_relaxed);
        }
        else {
            m_counts[bucket].fetch_add(count, std::memory_order_relaxed);
        }
    }

    /**
     * Get rate per TUNIT time
     *
     * (Doc fix: this takes no time parameter; the rate is computed over the
     * LSIZE-bucket window currently held in the ring.)
     *
     * @param rate Result parameter: rate in count/TUNIT averaged over the window
     * @param total Total count for life of object
     */
    ZT_INLINE void rate(double &rate, uint64_t &total) const noexcept
    {
        total = 0;
        for (unsigned long i = 0; i < LSIZE; ++i)
            total += m_counts[i].load(std::memory_order_relaxed);
        rate = (double)total / (double)LSIZE;
        // Lifetime total = window sum plus everything already rolled out of the ring.
        total += m_totalExclCounts.load(std::memory_order_relaxed);
    }

private:
    std::atomic<uint64_t> m_counts[LSIZE];      // per-bucket counts for the current window
    std::atomic<uint64_t> m_totalExclCounts;    // counts rotated out of the ring
    std::atomic<unsigned long> m_bucket;        // bucket most recently written by log()
};
} // namespace ZeroTier
#endif

View file

@ -1,111 +0,0 @@
/*
* Copyright (c)2013-2021 ZeroTier, Inc.
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
*/
/****/
#ifndef ZT_MULTICASTGROUP_HPP
#define ZT_MULTICASTGROUP_HPP
#include "Constants.hpp"
#include "InetAddress.hpp"
#include "MAC.hpp"
#include "TriviallyCopyable.hpp"
#include "Utils.hpp"
namespace ZeroTier {
/**
* A multicast group composed of a multicast MAC and a 32-bit ADI field
*
* ADI stands for additional distinguishing information. ADI is primarily for
* adding additional information to broadcast (ff:ff:ff:ff:ff:ff) memberships,
* since straight-up broadcast won't scale. Right now it's zero except for
* IPv4 ARP, where it holds the IPv4 address itself to make ARP into a
* selective multicast query that can scale.
*
* In the future we might add some kind of plugin architecture that can add
* ADI for things like mDNS (multicast DNS) to improve the selectivity of
* those protocols.
*
* MulticastGroup behaves as an immutable value object.
*/
class MulticastGroup : public TriviallyCopyable {
public:
    ZT_INLINE MulticastGroup() noexcept : m_mac(), m_adi(0) {}
    ZT_INLINE MulticastGroup(const MAC &m, uint32_t a) noexcept : m_mac(m), m_adi(a) {}

    /**
     * Derive the multicast group used for address resolution (ARP/NDP) for an IP
     *
     * @param ip IP address (port field is ignored)
     * @return Multicast group for ARP/NDP (empty group for non-IP addresses)
     */
    static ZT_INLINE MulticastGroup deriveMulticastGroupForAddressResolution(const InetAddress &ip) noexcept
    {
        if (ip.isV4()) {
            // IPv4 wants broadcast MACs, so we shove the V4 address itself into
            // the Multicast Group ADI field. Making V4 ARP work is basically why
            // ADI was added, as well as handling other things that want mindless
            // Ethernet broadcast to all.
            const uint32_t v4 = Utils::ntoh(*((const uint32_t *)ip.rawIpData()));
            return MulticastGroup(MAC(0xffffffffffffULL), v4);
        }
        if (ip.isV6()) {
            // IPv6 is better designed in this respect. We can compute the IPv6
            // multicast address directly from the IP address, and it gives us
            // 24 bits of uniqueness. Collisions aren't likely to be common enough
            // to care about.
            const uint8_t *const raw = reinterpret_cast<const uint8_t *>(ip.rawIpData());
            return MulticastGroup(MAC(0x33, 0x33, 0xff, raw[13], raw[14], raw[15]), 0);
        }
        return MulticastGroup(); // neither V4 nor V6: empty group
    }

    /**
     * @return Ethernet MAC portion of multicast group
     */
    ZT_INLINE const MAC &mac() const noexcept { return m_mac; }

    /**
     * @return Additional distinguishing information, which is normally zero except for IPv4 ARP where it's the IPv4
     * address
     */
    ZT_INLINE uint32_t adi() const { return m_adi; }

    // Ordering is lexicographic: by MAC first, then by ADI.
    ZT_INLINE bool operator==(const MulticastGroup &g) const noexcept { return ((m_mac == g.m_mac) && (m_adi == g.m_adi)); }
    ZT_INLINE bool operator!=(const MulticastGroup &g) const noexcept { return !(*this == g); }
    ZT_INLINE bool operator<(const MulticastGroup &g) const noexcept { return ((m_mac < g.m_mac) || ((m_mac == g.m_mac) && (m_adi < g.m_adi))); }
    ZT_INLINE bool operator>(const MulticastGroup &g) const noexcept { return (g < *this); }
    ZT_INLINE bool operator<=(const MulticastGroup &g) const noexcept { return !(g < *this); }
    ZT_INLINE bool operator>=(const MulticastGroup &g) const noexcept { return !(*this < g); }

    ZT_INLINE unsigned long hashCode() const noexcept { return (m_mac.hashCode() + (unsigned long)m_adi); }

private:
    MAC m_mac;
    uint32_t m_adi;
};
} // namespace ZeroTier
#endif

View file

@ -1,175 +0,0 @@
/*
* Copyright (c)2013-2021 ZeroTier, Inc.
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
*/
/****/
#ifndef ZT_MUTEX_HPP
#define ZT_MUTEX_HPP
#include "Constants.hpp"
// If C++17 is available use std::mutex and std::shared_mutex as
// these will probably use whatever is fastest on a given platform.
// Older compilers require pthreads to be available. The compiler
// now used on Windows is new enough to use C++17 stuff, so no more
// need for Windows-specific implementations here.
#if __cplusplus >= 201703L
#include <mutex>
#include <shared_mutex>
#else
#define ZT_USE_PTHREADS
#ifndef __WINDOWS__
#include <pthread.h>
#endif
#endif
namespace ZeroTier {
/**
* A simple mutual exclusion lock.
*/
/**
 * A simple mutual exclusion lock.
 *
 * lock()/unlock() are const so a mutex member can be taken through const
 * references; constness is cast away internally.
 */
class Mutex {
public:
#ifdef ZT_USE_PTHREADS
    ZT_INLINE Mutex() noexcept { pthread_mutex_init(&_mh, nullptr); }
    ZT_INLINE ~Mutex() noexcept { pthread_mutex_destroy(&_mh); }
    ZT_INLINE void lock() const noexcept { pthread_mutex_lock(&((const_cast<Mutex *>(this))->_mh)); }
    ZT_INLINE void unlock() const noexcept { pthread_mutex_unlock(&((const_cast<Mutex *>(this))->_mh)); }
#else
    ZT_INLINE Mutex() noexcept : _m() {}
    ZT_INLINE void lock() const noexcept { const_cast<Mutex *>(this)->_m.lock(); }
    ZT_INLINE void unlock() const noexcept { const_cast<Mutex *>(this)->_m.unlock(); }
#endif

    // Mutexes are not copyable. Expressed with = delete (clear compile-time
    // diagnostics) instead of the old private do-nothing definitions, which
    // would have silently done nothing if ever invoked from within the class.
    Mutex(const Mutex &) = delete;
    Mutex &operator=(const Mutex &) = delete;

    /**
     * RAII scope lock: acquires the mutex on construction, releases on destruction.
     */
    class Lock {
    public:
        explicit ZT_INLINE Lock(Mutex &m) noexcept : _m(&m) { m.lock(); }
        explicit ZT_INLINE Lock(const Mutex &m) noexcept : _m(const_cast<Mutex *>(&m)) { _m->lock(); }
        ZT_INLINE ~Lock() { _m->unlock(); }

    private:
        Mutex *const _m;
    };

private:
#ifdef ZT_USE_PTHREADS
    pthread_mutex_t _mh;
#else
    std::mutex _m;
#endif
};
/**
* A lock allowing multiple threads to read but making all wait on any writing thread.
*/
/**
 * A lock allowing multiple threads to read but making all wait on any writing thread.
 *
 * As with Mutex, all lock/unlock operations are const so members can be taken
 * through const references; constness is cast away internally.
 */
class RWMutex {
public:
#ifdef ZT_USE_PTHREADS
ZT_INLINE RWMutex() noexcept { pthread_rwlock_init(&_mh, nullptr); }
ZT_INLINE ~RWMutex() noexcept { pthread_rwlock_destroy(&_mh); }
ZT_INLINE void lock() const noexcept { pthread_rwlock_wrlock(&((const_cast<RWMutex *>(this))->_mh)); }
ZT_INLINE void rlock() const noexcept { pthread_rwlock_rdlock(&((const_cast<RWMutex *>(this))->_mh)); }
ZT_INLINE void unlock() const noexcept { pthread_rwlock_unlock(&((const_cast<RWMutex *>(this))->_mh)); }
ZT_INLINE void runlock() const noexcept { pthread_rwlock_unlock(&((const_cast<RWMutex *>(this))->_mh)); }
#else
ZT_INLINE RWMutex() noexcept : _m() {}
ZT_INLINE void lock() const noexcept { const_cast<RWMutex *>(this)->_m.lock(); }
ZT_INLINE void rlock() const noexcept { const_cast<RWMutex *>(this)->_m.lock_shared(); }
ZT_INLINE void unlock() const noexcept { const_cast<RWMutex *>(this)->_m.unlock(); }
ZT_INLINE void runlock() const noexcept { const_cast<RWMutex *>(this)->_m.unlock_shared(); }
#endif
/**
 * RAII locker that acquires only the read lock (shared read)
 */
class RLock {
public:
explicit ZT_INLINE RLock(RWMutex &m) noexcept : _m(&m) { m.rlock(); }
explicit ZT_INLINE RLock(const RWMutex &m) noexcept : _m(const_cast<RWMutex *>(&m)) { _m->rlock(); }
ZT_INLINE ~RLock() { _m->runlock(); }
private:
RWMutex *const _m;
};
/**
 * RAII locker that acquires the write lock (exclusive write, no readers)
 */
class Lock {
public:
explicit ZT_INLINE Lock(RWMutex &m) noexcept : _m(&m) { m.lock(); }
explicit ZT_INLINE Lock(const RWMutex &m) noexcept : _m(const_cast<RWMutex *>(&m)) { _m->lock(); }
ZT_INLINE ~Lock() { _m->unlock(); }
private:
RWMutex *const _m;
};
/**
 * RAII locker that acquires the read lock first and can switch to writing.
 *
 * Use writing() to acquire the write lock if not already acquired. Use reading() to
 * let go of the write lock and go back to only holding the read lock. Note that on
 * most platforms there's a brief moment where the lock is unlocked during the
 * transition, meaning protected variable states can change. Code must not assume
 * that the lock is held constantly if writing() is used to change mode.
 */
class RMaybeWLock {
public:
explicit ZT_INLINE RMaybeWLock(RWMutex &m) noexcept : _m(&m), _w(false) { m.rlock(); }
explicit ZT_INLINE RMaybeWLock(const RWMutex &m) noexcept : _m(const_cast<RWMutex *>(&m)), _w(false) { _m->rlock(); }
// Upgrade to the write lock (no-op if already writing). NOT atomic: the read
// lock is released before the write lock is taken.
ZT_INLINE void writing() noexcept
{
if (!_w) {
_w = true;
_m->runlock();
_m->lock();
}
}
// Downgrade back to the read lock (no-op if not writing). Also not atomic.
ZT_INLINE void reading() noexcept
{
if (_w) {
_w = false;
_m->unlock();
_m->rlock();
}
}
ZT_INLINE bool isWriting() const noexcept { return _w; }
// Releases whichever lock mode is currently held.
ZT_INLINE ~RMaybeWLock()
{
if (_w)
_m->unlock();
else
_m->runlock();
}
private:
RWMutex *const _m;
bool _w; // true while the write lock is held instead of the read lock
};
private:
// Non-copyable (old-style: private, defined do-nothing copy operations).
ZT_INLINE RWMutex(const RWMutex &) noexcept {}
ZT_INLINE const RWMutex &operator=(const RWMutex &) noexcept { return *this; }
#ifdef ZT_USE_PTHREADS
pthread_rwlock_t _mh;
#else
std::shared_mutex _m;
#endif
};
} // namespace ZeroTier
#endif

File diff suppressed because it is too large Load diff

View file

@ -1,334 +0,0 @@
/*
* Copyright (c)2013-2021 ZeroTier, Inc.
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
*/
/****/
#ifndef ZT_NETWORK_HPP
#define ZT_NETWORK_HPP
#include "Address.hpp"
#include "Buf.hpp"
#include "CallContext.hpp"
#include "Constants.hpp"
#include "Containers.hpp"
#include "Dictionary.hpp"
#include "MAC.hpp"
#include "Member.hpp"
#include "MembershipCredential.hpp"
#include "MulticastGroup.hpp"
#include "Mutex.hpp"
#include "NetworkConfig.hpp"
#include "SharedPtr.hpp"
#define ZT_NETWORK_MAX_INCOMING_UPDATES 3
namespace ZeroTier {
class Context;
class Peer;
/**
* A virtual LAN
*/
class Network {
    // SharedPtr manages lifetime via the private __refCount member below.
    friend class SharedPtr<Network>;
public:
    /**
     * Broadcast multicast group: ff:ff:ff:ff:ff:ff / 0
     */
    static const MulticastGroup BROADCAST;
    /**
     * Compute primary controller device ID from network ID
     */
    static ZT_INLINE Address controllerFor(uint64_t nwid) noexcept { return Address(nwid >> 24U); }
    /**
     * Construct a new network
     *
     * Note that init() should be called immediately after the network is
     * constructed to actually configure the port.
     *
     * @param nwid Network ID
     * @param controllerFingerprint Initial controller fingerprint if non-NULL
     * @param uptr Arbitrary pointer used by externally-facing API (for user use)
     * @param nconf Network config, if known
     */
    Network(const Context &ctx, const CallContext &cc, uint64_t nwid, const Fingerprint &controllerFingerprint, void *uptr, const NetworkConfig *nconf);
    ~Network();
    /** @return 64-bit network ID */
    ZT_INLINE uint64_t id() const noexcept { return m_id; }
    /** @return Controller address (derived from the top 40 bits of the network ID) */
    ZT_INLINE Address controller() const noexcept { return Address(m_id >> 24U); }
    /** @return True if this network's config allows multicast (limit > 0) */
    ZT_INLINE bool multicastEnabled() const noexcept { return (m_config.multicastLimit > 0); }
    /** @return True if a valid network config has been received/loaded */
    ZT_INLINE bool hasConfig() const noexcept { return (m_config); }
    /** @return Time of last configuration update (node ticks) */
    ZT_INLINE uint64_t lastConfigUpdate() const noexcept { return m_lastConfigUpdate; }
    /** @return Current status (OK, access denied, not found, etc.) for the external API */
    ZT_INLINE ZT_VirtualNetworkStatus status() const noexcept { return m_status(); }
    /** @return Current network configuration (may be a nil config if none received yet) */
    ZT_INLINE const NetworkConfig &config() const noexcept { return m_config; }
    /** @return This device's Ethernet MAC address on this network */
    ZT_INLINE const MAC &mac() const noexcept { return m_mac; }
    /**
     * Apply filters to an outgoing packet
     *
     * This applies filters from our network config and, if that doesn't match,
     * our capabilities in ascending order of capability ID. Additional actions
     * such as TEE may be taken, and credentials may be pushed, so this is not
     * side-effect-free. It's basically step one in sending something over VL2.
     *
     * @param noTee If true, do not TEE anything anywhere (for two-pass filtering as done with multicast and bridging)
     * @param ztSource Source ZeroTier address
     * @param ztDest Destination ZeroTier address
     * @param macSource Ethernet layer source address
     * @param macDest Ethernet layer destination address
     * @param frameData Ethernet frame data
     * @param frameLen Ethernet frame payload length
     * @param etherType 16-bit ethernet type ID
     * @param vlanId 16-bit VLAN ID
     * @return True if packet should be sent, false if dropped or redirected
     */
    bool filterOutgoingPacket(const CallContext &cc, bool noTee, const Address &ztSource, const Address &ztDest, const MAC &macSource, const MAC &macDest, const uint8_t *frameData, unsigned int frameLen, unsigned int etherType, unsigned int vlanId, uint8_t &qosBucket);
    /**
     * Apply filters to an incoming packet
     *
     * This applies filters from our network config and, if that doesn't match,
     * the peer's capabilities in ascending order of capability ID. If there is
     * a match certain actions may be taken such as sending a copy of the packet
     * to a TEE or REDIRECT target.
     *
     * @param sourcePeer Source Peer
     * @param ztDest Destination ZeroTier address
     * @param macSource Ethernet layer source address
     * @param macDest Ethernet layer destination address
     * @param frameData Ethernet frame data
     * @param frameLen Ethernet frame payload length
     * @param etherType 16-bit ethernet type ID
     * @param vlanId 16-bit VLAN ID
     * @return 0 == drop, 1 == accept, 2 == accept even if bridged
     */
    int filterIncomingPacket(const CallContext &cc, const SharedPtr<Peer> &sourcePeer, const Address &ztDest, const MAC &macSource, const MAC &macDest, const uint8_t *frameData, unsigned int frameLen, unsigned int etherType, unsigned int vlanId);
    /**
     * Subscribe to a multicast group
     *
     * @param mg New multicast group
     */
    void multicastSubscribe(const CallContext &cc, const MulticastGroup &mg);
    /**
     * Unsubscribe from a multicast group
     *
     * @param mg Multicast group
     */
    void multicastUnsubscribe(const MulticastGroup &mg);
    /**
     * Parse, verify, and handle an inbound network config chunk
     *
     * This is called from IncomingPacket to handle incoming network config
     * chunks via OK(NETWORK_CONFIG_REQUEST) or NETWORK_CONFIG. It's a common
     * bit of packet parsing code that also verifies chunks and replicates
     * them (via rumor mill flooding) if their fast propagate flag is set.
     *
     * @param packetId Packet ID or 0 if none (e.g. via cluster path)
     * @param source Peer that actually sent this chunk (probably controller)
     * @param chunk Buffer containing chunk
     * @param ptr Index of chunk and related fields in packet (starting with network ID)
     * @param size Size of data in chunk buffer (total, not relative to ptr)
     * @return Update ID if update was fully assembled and accepted or 0 otherwise
     */
    uint64_t handleConfigChunk(const CallContext &cc, uint64_t packetId, const SharedPtr<Peer> &source, const Buf &chunk, int ptr, int size);
    /**
     * Set network configuration
     *
     * This is normally called internally when a configuration is received
     * and fully assembled, but it can also be called on Node startup when
     * cached configurations are re-read from the data store.
     *
     * @param nconf Network configuration
     * @param saveToDisk Save to disk? Used during loading, should usually be true otherwise.
     * @return 0 == bad, 1 == accepted but duplicate/unchanged, 2 == accepted and new
     */
    int setConfiguration(const CallContext &cc, const NetworkConfig &nconf, bool saveToDisk);
    /**
     * Set netconf failure to 'access denied' -- called in IncomingPacket when controller reports this
     */
    ZT_INLINE void setAccessDenied() noexcept { _netconfFailure = NETCONF_FAILURE_ACCESS_DENIED; }
    /**
     * Set netconf failure to 'not found' -- called by IncomingPacket when controller reports this
     */
    ZT_INLINE void setNotFound() noexcept { _netconfFailure = NETCONF_FAILURE_NOT_FOUND; }
    /**
     * Determine whether this peer is permitted to communicate on this network
     *
     * @param tPtr Thread pointer to be handed through to any callbacks called as a result of this call
     * @param peer Peer to check
     */
    bool gate(void *tPtr, const SharedPtr<Peer> &peer) noexcept;
    /**
     * Do periodic cleanup and housekeeping tasks
     */
    void doPeriodicTasks(const CallContext &cc);
    /**
     * Find the node on this network that has this MAC behind it (if any)
     *
     * @param mac MAC address
     * @return ZeroTier address of bridge to this MAC
     */
    ZT_INLINE Address findBridgeTo(const MAC &mac) const
    {
        Mutex::Lock _l(m_remoteBridgeRoutes_l);
        Map<MAC, Address>::const_iterator br(m_remoteBridgeRoutes.find(mac));
        // Returns a nil Address if no bridge route is known for this MAC.
        return ((br == m_remoteBridgeRoutes.end()) ? Address() : br->second);
    }
    /**
     * Set a bridge route
     *
     * @param mac MAC address of destination
     * @param addr Bridge this MAC is reachable behind
     */
    void learnBridgeRoute(const MAC &mac, const Address &addr);
    /**
     * Learn a multicast group that is bridged to our tap device
     *
     * @param tPtr Thread pointer to be handed through to any callbacks called as a result of this call
     * @param mg Multicast group
     * @param now Current time
     */
    ZT_INLINE void learnBridgedMulticastGroup(const MulticastGroup &mg, int64_t now)
    {
        Mutex::Lock l(m_myMulticastGroups_l);
        m_multicastGroupsBehindMe[mg] = now;
    }
    /**
     * Validate a credential and learn it if it passes certificate and other checks
     */
    Member::AddCredentialResult addCredential(const CallContext &cc, const Identity &sourcePeerIdentity, const MembershipCredential &com);
    /**
     * Validate a credential and learn it if it passes certificate and other checks
     */
    Member::AddCredentialResult addCredential(const CallContext &cc, const Identity &sourcePeerIdentity, const CapabilityCredential &cap);
    /**
     * Validate a credential and learn it if it passes certificate and other checks
     */
    Member::AddCredentialResult addCredential(const CallContext &cc, const Identity &sourcePeerIdentity, const TagCredential &tag);
    /**
     * Validate a credential and learn it if it passes certificate and other checks
     */
    Member::AddCredentialResult addCredential(const CallContext &cc, const Identity &sourcePeerIdentity, const RevocationCredential &rev);
    /**
     * Validate a credential and learn it if it passes certificate and other checks
     */
    Member::AddCredentialResult addCredential(const CallContext &cc, const Identity &sourcePeerIdentity, const OwnershipCredential &coo);
    /**
     * Push credentials to a peer if timeouts indicate that we should do so
     *
     * @param to Destination peer
     */
    void pushCredentials(const CallContext &cc, const SharedPtr<Peer> &to);
    /**
     * Destroy this network
     *
     * This sets the network to completely remove itself on delete. This also prevents the
     * call of the normal port shutdown event on delete.
     */
    void destroy();
    /**
     * Get this network's config for export via the ZT core API
     *
     * @param ec Buffer to fill with externally-visible network configuration
     */
    void externalConfig(ZT_VirtualNetworkConfig *ec) const;
    /**
     * Iterate through memberships
     *
     * The callback may return false to stop iteration early. The membership
     * lock is held for the duration of the iteration.
     *
     * @param f Function of (const Address,const Membership)
     */
    template <typename F> ZT_INLINE void eachMember(F f)
    {
        Mutex::Lock ml(m_memberships_l);
        for (Map<Address, Member>::iterator i(m_memberships.begin()); i != m_memberships.end(); ++i) { // NOLINT(modernize-loop-convert,hicpp-use-auto,modernize-use-auto)
            if (!f(i->first, i->second))
                break;
        }
    }
    /**
     * @return Externally usable pointer-to-pointer exported via the core API
     */
    ZT_INLINE void **userPtr() noexcept { return &m_uPtr; }
private:
    // Internal helpers; definitions in Network.cpp.
    void m_requestConfiguration(const CallContext &cc);
    ZT_VirtualNetworkStatus m_status() const;
    void m_externalConfig(ZT_VirtualNetworkConfig *ec) const; // assumes _lock is locked
    void m_announceMulticastGroups(void *tPtr, bool force);
    void m_announceMulticastGroupsTo(void *tPtr, const Address &peer, const Vector<MulticastGroup> &allMulticastGroups);
    Vector<MulticastGroup> m_allMulticastGroups() const;
    const Context &m_ctx;             // shared node context (not owned)
    void *m_uPtr;                     // external user pointer exposed via userPtr()
    const uint64_t m_id;              // 64-bit network ID (immutable)
    Fingerprint m_controllerFingerprint;
    MAC m_mac; // local MAC address
    bool m_portInitialized;           // presumably set once the local port/tap is up -- see Network.cpp
    std::atomic<bool> m_destroyed;    // set by destroy(); checked on delete
    Vector<MulticastGroup> m_myMulticastGroups; // multicast groups that we belong to (according to tap)
    Map<MulticastGroup, int64_t> m_multicastGroupsBehindMe; // multicast groups that seem to be behind us and when we
    // last saw them (if we are a bridge)
    Map<MAC, Address> m_remoteBridgeRoutes; // remote addresses where given MACs are reachable (for tracking devices
    // behind remote bridges)
    NetworkConfig m_config;
    std::atomic<int64_t> m_lastConfigUpdate;
    // Last reported netconf failure from the controller (see setAccessDenied()/setNotFound()).
    volatile enum { NETCONF_FAILURE_NONE, NETCONF_FAILURE_ACCESS_DENIED, NETCONF_FAILURE_NOT_FOUND, NETCONF_FAILURE_INIT_FAILED } _netconfFailure;
    Map<Address, Member> m_memberships;
    // Fine-grained locks for the corresponding state above.
    Mutex m_myMulticastGroups_l;
    Mutex m_remoteBridgeRoutes_l;
    Mutex m_config_l;
    Mutex m_memberships_l;
    std::atomic<int> __refCount; // reference count used by SharedPtr<Network>
};
} // namespace ZeroTier
#endif

View file

@ -1,289 +0,0 @@
/*
* Copyright (c)2013-2021 ZeroTier, Inc.
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
*/
/****/
#include "NetworkConfig.hpp"
#include "Buf.hpp"
#include "ScopedPtr.hpp"
#include <algorithm>
#include <cstdint>
namespace ZeroTier {
bool NetworkConfig::toDictionary(Dictionary &d) const
{
    // Scratch buffer used for marshaling individual objects before they
    // are appended to dictionary blob values.
    uint8_t scratch[ZT_BUF_MEM_SIZE];
    try {
        d.clear();

        // Scalar / string fields.
        d.add(ZT_NETWORKCONFIG_DICT_KEY_NETWORK_ID, this->networkId);
        d.add(ZT_NETWORKCONFIG_DICT_KEY_TIMESTAMP, this->timestamp);
        d.add(ZT_NETWORKCONFIG_DICT_KEY_CREDENTIAL_TIME_MAX_DELTA, this->credentialTimeMaxDelta);
        d.add(ZT_NETWORKCONFIG_DICT_KEY_REVISION, this->revision);
        d.add(ZT_NETWORKCONFIG_DICT_KEY_ISSUED_TO, this->issuedTo.toString((char *)scratch));
        d.add(ZT_NETWORKCONFIG_DICT_KEY_ISSUED_TO_IDENTITY_HASH, this->issuedToFingerprintHash, ZT_FINGERPRINT_HASH_SIZE);
        d.add(ZT_NETWORKCONFIG_DICT_KEY_FLAGS, this->flags);
        d.add(ZT_NETWORKCONFIG_DICT_KEY_MULTICAST_LIMIT, (uint64_t)this->multicastLimit);
        d.add(ZT_NETWORKCONFIG_DICT_KEY_TYPE, (uint64_t)this->type);
        d.add(ZT_NETWORKCONFIG_DICT_KEY_NAME, this->name);
        d.add(ZT_NETWORKCONFIG_DICT_KEY_MTU, (uint64_t)this->mtu);

        // Certificate of membership (only present on private networks).
        if (this->com)
            d.add(ZT_NETWORKCONFIG_DICT_KEY_COM, scratch, this->com.marshal(scratch));

        // Capabilities: concatenated marshaled credentials in one blob.
        Vector<uint8_t> *field = &(d[ZT_NETWORKCONFIG_DICT_KEY_CAPABILITIES]);
        for (unsigned int k = 0; k < this->capabilityCount; ++k) {
            const int len = this->capabilities[k].marshal(scratch);
            if (len < 0)
                return false;
            field->insert(field->end(), scratch, scratch + len);
        }

        // Tags: same concatenated-blob layout.
        field = &(d[ZT_NETWORKCONFIG_DICT_KEY_TAGS]);
        for (unsigned int k = 0; k < this->tagCount; ++k) {
            const int len = this->tags[k].marshal(scratch);
            if (len < 0)
                return false;
            field->insert(field->end(), scratch, scratch + len);
        }

        // Certificates of ownership.
        field = &(d[ZT_NETWORKCONFIG_DICT_KEY_CERTIFICATES_OF_OWNERSHIP]);
        for (unsigned int k = 0; k < this->certificateOfOwnershipCount; ++k) {
            const int len = this->certificatesOfOwnership[k].marshal(scratch);
            if (len < 0)
                return false;
            field->insert(field->end(), scratch, scratch + len);
        }

        // Specialists: packed array of big-endian 64-bit values.
        field = &(d[ZT_NETWORKCONFIG_DICT_KEY_SPECIALISTS]);
        for (unsigned int k = 0; k < this->specialistCount; ++k) {
            Utils::storeBigEndian<uint64_t>(scratch, this->specialists[k]);
            field->insert(field->end(), scratch, scratch + 8);
        }

        // Routes: each entry is a marshaled target followed by a marshaled via.
        field = &(d[ZT_NETWORKCONFIG_DICT_KEY_ROUTES]);
        for (unsigned int k = 0; k < this->routeCount; ++k) {
            int len = asInetAddress(this->routes[k].target).marshal(scratch);
            if (len < 0)
                return false;
            field->insert(field->end(), scratch, scratch + len);
            len = asInetAddress(this->routes[k].via).marshal(scratch);
            if (len < 0)
                return false;
            field->insert(field->end(), scratch, scratch + len);
        }

        // Managed static IP assignments.
        field = &(d[ZT_NETWORKCONFIG_DICT_KEY_STATIC_IPS]);
        for (unsigned int k = 0; k < this->staticIpCount; ++k) {
            const int len = this->staticIps[k].marshal(scratch);
            if (len < 0)
                return false;
            field->insert(field->end(), scratch, scratch + len);
        }

        // Rules: marshaled in one shot; the blob is pre-sized to the worst
        // case and trimmed to the actual marshaled length.
        field = &(d[ZT_NETWORKCONFIG_DICT_KEY_RULES]);
        if (this->ruleCount) {
            field->resize(ruleCount * ZT_VIRTUALNETWORKRULE_MARSHAL_SIZE_MAX);
            const int len = CapabilityCredential::marshalVirtualNetworkRules(field->data(), rules, ruleCount);
            if (len > 0)
                field->resize(len);
        }

        return true;
    }
    catch (...) {
        // Any exception (e.g. allocation failure) is reported as a failed encode.
    }
    return false;
}
/**
 * Read this network config from a dictionary.
 *
 * Resets this object to a nil config, then parses all scalar fields and
 * credential/route/IP blobs. Returns false (leaving this object in a
 * partially-parsed but internally consistent state) on any malformed or
 * missing mandatory field.
 *
 * @param d Dictionary to parse
 * @return True if dictionary was valid and this config was initialized
 */
bool NetworkConfig::fromDictionary(const Dictionary &d)
{
    static const NetworkConfig NIL_NC;
    try {
        // Start from an all-zero config so absent optional fields stay empty.
        *this = NIL_NC;

        this->networkId = d.getUI(ZT_NETWORKCONFIG_DICT_KEY_NETWORK_ID, 0);
        if (!this->networkId)
            return false;
        this->timestamp = d.getUI(ZT_NETWORKCONFIG_DICT_KEY_TIMESTAMP, 0);
        if (this->timestamp <= 0)
            return false;
        this->credentialTimeMaxDelta = d.getUI(ZT_NETWORKCONFIG_DICT_KEY_CREDENTIAL_TIME_MAX_DELTA, 0);
        this->revision = d.getUI(ZT_NETWORKCONFIG_DICT_KEY_REVISION, 0);
        this->issuedTo = d.getUI(ZT_NETWORKCONFIG_DICT_KEY_ISSUED_TO, 0);
        const Vector<uint8_t> *blob = &(d[ZT_NETWORKCONFIG_DICT_KEY_ISSUED_TO_IDENTITY_HASH]);
        if (blob->size() == ZT_FINGERPRINT_HASH_SIZE) {
            Utils::copy<ZT_FINGERPRINT_HASH_SIZE>(this->issuedToFingerprintHash, blob->data());
        }
        else {
            // Old controllers don't send the hash; all-zero means "undefined."
            Utils::zero<ZT_FINGERPRINT_HASH_SIZE>(this->issuedToFingerprintHash);
        }
        if (!this->issuedTo)
            return false;
        this->multicastLimit = (unsigned int)d.getUI(ZT_NETWORKCONFIG_DICT_KEY_MULTICAST_LIMIT, 0);
        d.getS(ZT_NETWORKCONFIG_DICT_KEY_NAME, this->name, sizeof(this->name));
        this->mtu = (unsigned int)d.getUI(ZT_NETWORKCONFIG_DICT_KEY_MTU, ZT_DEFAULT_MTU);
        if (this->mtu < 1280)
            this->mtu = 1280; // minimum MTU allowed by IPv6 standard and others
        else if (this->mtu > ZT_MAX_MTU)
            this->mtu = ZT_MAX_MTU;
        if (d.getUI(ZT_NETWORKCONFIG_DICT_KEY_VERSION, 0) < 6) {
            // Pre-version-6 dictionaries are no longer supported.
            return false;
        }
        else {
            this->flags = d.getUI(ZT_NETWORKCONFIG_DICT_KEY_FLAGS, 0);
            this->type = (ZT_VirtualNetworkType)d.getUI(ZT_NETWORKCONFIG_DICT_KEY_TYPE, (uint64_t)ZT_NETWORK_TYPE_PRIVATE);
            blob = &(d[ZT_NETWORKCONFIG_DICT_KEY_COM]);
            if (!blob->empty()) {
                // FIX: the original read "unmarshal(blob->data(), (int)(blob->size()) < 0)",
                // passing the *boolean* "(int)size < 0" (i.e. 0) as the length and using
                // the unmarshal return value directly as the condition. Compare the
                // return value against 0 with the real length instead.
                if (this->com.unmarshal(blob->data(), (int)blob->size()) < 0)
                    return false;
            }
            blob = &(d[ZT_NETWORKCONFIG_DICT_KEY_CAPABILITIES]);
            if (!blob->empty()) {
                try {
                    unsigned int p = 0;
                    while (p < blob->size()) {
                        CapabilityCredential cap;
                        int l = cap.unmarshal(blob->data() + p, (int)(blob->size() - p));
                        if (l < 0)
                            return false;
                        p += l;
                        // Silently drop credentials beyond the fixed-size array.
                        if (this->capabilityCount < ZT_MAX_NETWORK_CAPABILITIES)
                            this->capabilities[this->capabilityCount++] = cap;
                    }
                }
                catch (...) {
                }
                // Callers rely on capabilities being in ascending ID order.
                std::sort(&(this->capabilities[0]), &(this->capabilities[this->capabilityCount]));
            }
            blob = &(d[ZT_NETWORKCONFIG_DICT_KEY_TAGS]);
            if (!blob->empty()) {
                try {
                    unsigned int p = 0;
                    while (p < blob->size()) {
                        TagCredential tag;
                        int l = tag.unmarshal(blob->data() + p, (int)(blob->size() - p));
                        if (l < 0)
                            return false;
                        p += l;
                        if (this->tagCount < ZT_MAX_NETWORK_TAGS)
                            this->tags[this->tagCount++] = tag;
                    }
                }
                catch (...) {
                }
                std::sort(&(this->tags[0]), &(this->tags[this->tagCount]));
            }
            blob = &(d[ZT_NETWORKCONFIG_DICT_KEY_CERTIFICATES_OF_OWNERSHIP]);
            if (!blob->empty()) {
                try {
                    unsigned int p = 0;
                    while (p < blob->size()) {
                        OwnershipCredential coo;
                        int l = coo.unmarshal(blob->data() + p, (int)(blob->size() - p));
                        if (l < 0)
                            return false;
                        p += l;
                        if (this->certificateOfOwnershipCount < ZT_MAX_CERTIFICATES_OF_OWNERSHIP)
                            this->certificatesOfOwnership[certificateOfOwnershipCount++] = coo;
                    }
                }
                catch (...) {
                }
                std::sort(&(this->certificatesOfOwnership[0]), &(this->certificatesOfOwnership[this->certificateOfOwnershipCount]));
            }
            blob = &(d[ZT_NETWORKCONFIG_DICT_KEY_SPECIALISTS]);
            if (!blob->empty()) {
                unsigned int p = 0;
                while (((p + 8) <= blob->size()) && (specialistCount < ZT_MAX_NETWORK_SPECIALISTS)) {
                    this->specialists[this->specialistCount++] = Utils::loadBigEndian<uint64_t>(blob->data() + p);
                    p += 8;
                }
            }
            blob = &(d[ZT_NETWORKCONFIG_DICT_KEY_ROUTES]);
            if (!blob->empty()) {
                unsigned int p = 0;
                while ((p < blob->size()) && (routeCount < ZT_MAX_NETWORK_ROUTES)) {
                    // FIX: read from the current offset (blob->data() + p); the original
                    // passed blob->data() and therefore re-parsed the first entry on
                    // every iteration (the static IP loop below shows the correct form).
                    int l = asInetAddress(this->routes[this->routeCount].target).unmarshal(blob->data() + p, (int)(blob->size() - p));
                    if (l < 0)
                        return false;
                    p += l;
                    if (p >= blob->size())
                        return false;
                    l = asInetAddress(this->routes[this->routeCount].via).unmarshal(blob->data() + p, (int)(blob->size() - p));
                    if (l < 0)
                        return false;
                    p += l;
                    // Each route ends with two big-endian 16-bit fields: flags, metric.
                    if ((p + 4) > blob->size())
                        return false;
                    this->routes[this->routeCount].flags = Utils::loadBigEndian<uint16_t>(blob->data() + p);
                    p += 2;
                    this->routes[this->routeCount].metric = Utils::loadBigEndian<uint16_t>(blob->data() + p);
                    p += 2;
                    ++this->routeCount;
                }
            }
            blob = &(d[ZT_NETWORKCONFIG_DICT_KEY_STATIC_IPS]);
            if (!blob->empty()) {
                unsigned int p = 0;
                while ((p < blob->size()) && (staticIpCount < ZT_MAX_ZT_ASSIGNED_ADDRESSES)) {
                    int l = this->staticIps[this->staticIpCount].unmarshal(blob->data() + p, (int)(blob->size() - p));
                    if (l < 0)
                        return false;
                    p += l;
                    ++this->staticIpCount;
                }
            }
            blob = &(d[ZT_NETWORKCONFIG_DICT_KEY_RULES]);
            if (!blob->empty()) {
                this->ruleCount = 0;
                if (CapabilityCredential::unmarshalVirtualNetworkRules(blob->data(), (int)blob->size(), this->rules, this->ruleCount, ZT_MAX_NETWORK_RULES) < 0)
                    return false;
            }
        }
        return true;
    }
    catch (...) {
        // Malformed dictionary data is reported as a failed parse.
    }
    return false;
}
bool NetworkConfig::addSpecialist(const Address &a, const uint64_t f) noexcept
{
const uint64_t aint = a.toInt();
for (unsigned int i = 0; i < specialistCount; ++i) {
if ((specialists[i] & 0xffffffffffULL) == aint) {
specialists[i] |= f;
return true;
}
}
if (specialistCount < ZT_MAX_NETWORK_SPECIALISTS) {
specialists[specialistCount++] = f | aint;
return true;
}
return false;
}
} // namespace ZeroTier

View file

@ -1,373 +0,0 @@
/*
* Copyright (c)2013-2021 ZeroTier, Inc.
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
*/
/****/
#ifndef ZT_NETWORKCONFIG_HPP
#define ZT_NETWORKCONFIG_HPP
#include "Address.hpp"
#include "CapabilityCredential.hpp"
#include "Constants.hpp"
#include "Containers.hpp"
#include "Dictionary.hpp"
#include "Identity.hpp"
#include "InetAddress.hpp"
#include "MembershipCredential.hpp"
#include "MulticastGroup.hpp"
#include "OwnershipCredential.hpp"
#include "TagCredential.hpp"
#include "Trace.hpp"
#include "TriviallyCopyable.hpp"
#include "Utils.hpp"
#include <algorithm>
#include <stdexcept>
namespace ZeroTier {
/**
* Default maximum time delta for COMs, tags, and capabilities
*
* The current value is two hours, providing ample time for a controller to
* experience fail-over, etc.
*/
#define ZT_NETWORKCONFIG_DEFAULT_CREDENTIAL_TIME_MAX_MAX_DELTA 7200000ULL
/**
* Default minimum credential TTL and maxDelta for COM timestamps
*
* This is just slightly over three minutes and provides three retries for
* all currently online members to refresh.
*/
#define ZT_NETWORKCONFIG_DEFAULT_CREDENTIAL_TIME_MIN_MAX_DELTA 185000ULL
/**
* Flag: enable broadcast
*/
#define ZT_NETWORKCONFIG_FLAG_ENABLE_BROADCAST 0x0000000000000002ULL
/**
* Flag: enable IPv6 NDP emulation for certain V6 address patterns
*/
#define ZT_NETWORKCONFIG_FLAG_ENABLE_IPV6_NDP_EMULATION 0x0000000000000004ULL
/**
* Flag: result of unrecognized MATCH entries in a rules table: match if set, no-match if clear
*/
#define ZT_NETWORKCONFIG_FLAG_RULES_RESULT_OF_UNSUPPORTED_MATCH 0x0000000000000008ULL
/**
* Device can bridge to other Ethernet networks and gets unknown recipient multicasts
*/
#define ZT_NETWORKCONFIG_SPECIALIST_TYPE_ACTIVE_BRIDGE 0x0000020000000000ULL
// Fields for meta-data sent with network config requests
// Protocol version (see Packet.hpp)
#define ZT_NETWORKCONFIG_REQUEST_METADATA_KEY_PROTOCOL_VERSION "pv"
// Software vendor
#define ZT_NETWORKCONFIG_REQUEST_METADATA_KEY_NODE_VENDOR "vend"
// Software major version
#define ZT_NETWORKCONFIG_REQUEST_METADATA_KEY_NODE_MAJOR_VERSION "majv"
// Software minor version
#define ZT_NETWORKCONFIG_REQUEST_METADATA_KEY_NODE_MINOR_VERSION "minv"
// Software revision
#define ZT_NETWORKCONFIG_REQUEST_METADATA_KEY_NODE_REVISION "revv"
// Rules engine revision
#define ZT_NETWORKCONFIG_REQUEST_METADATA_KEY_RULES_ENGINE_REV "revr"
// Maximum number of rules per network this node can accept
#define ZT_NETWORKCONFIG_REQUEST_METADATA_KEY_MAX_NETWORK_RULES "mr"
// Maximum number of capabilities this node can accept
#define ZT_NETWORKCONFIG_REQUEST_METADATA_KEY_MAX_NETWORK_CAPABILITIES "mc"
// Maximum number of rules per capability this node can accept
#define ZT_NETWORKCONFIG_REQUEST_METADATA_KEY_MAX_CAPABILITY_RULES "mcr"
// Maximum number of tags this node can accept
#define ZT_NETWORKCONFIG_REQUEST_METADATA_KEY_MAX_NETWORK_TAGS "mt"
// Network join authorization token (if any)
#define ZT_NETWORKCONFIG_REQUEST_METADATA_KEY_AUTH "a"
// Network configuration meta-data flags
#define ZT_NETWORKCONFIG_REQUEST_METADATA_KEY_FLAGS "f"
// These dictionary keys are short so they don't take up much room.
// By convention we use upper case for binary blobs, but it doesn't really matter.
// network config version
#define ZT_NETWORKCONFIG_DICT_KEY_VERSION "v"
// network ID
#define ZT_NETWORKCONFIG_DICT_KEY_NETWORK_ID "nwid"
// integer(hex)
#define ZT_NETWORKCONFIG_DICT_KEY_TIMESTAMP "ts"
// integer(hex)
#define ZT_NETWORKCONFIG_DICT_KEY_REVISION "r"
// address of member
#define ZT_NETWORKCONFIG_DICT_KEY_ISSUED_TO "id"
// full identity hash of member
#define ZT_NETWORKCONFIG_DICT_KEY_ISSUED_TO_IDENTITY_HASH "IDH"
// flags(hex)
#define ZT_NETWORKCONFIG_DICT_KEY_FLAGS "f"
// integer(hex)
#define ZT_NETWORKCONFIG_DICT_KEY_MULTICAST_LIMIT "ml"
// network type (hex)
#define ZT_NETWORKCONFIG_DICT_KEY_TYPE "t"
// text
#define ZT_NETWORKCONFIG_DICT_KEY_NAME "n"
// network MTU
#define ZT_NETWORKCONFIG_DICT_KEY_MTU "mtu"
// credential time max delta in ms
#define ZT_NETWORKCONFIG_DICT_KEY_CREDENTIAL_TIME_MAX_DELTA "ctmd"
// binary serialized certificate of membership
#define ZT_NETWORKCONFIG_DICT_KEY_COM "C"
// specialists (binary array of uint64_t)
#define ZT_NETWORKCONFIG_DICT_KEY_SPECIALISTS "S"
// routes (binary blob)
#define ZT_NETWORKCONFIG_DICT_KEY_ROUTES "RT"
// static IPs (binary blob)
#define ZT_NETWORKCONFIG_DICT_KEY_STATIC_IPS "I"
// rules (binary blob)
#define ZT_NETWORKCONFIG_DICT_KEY_RULES "R"
// capabilities (binary blobs)
#define ZT_NETWORKCONFIG_DICT_KEY_CAPABILITIES "CAP"
// tags (binary blobs)
#define ZT_NETWORKCONFIG_DICT_KEY_TAGS "TAG"
// tags (binary blobs)
#define ZT_NETWORKCONFIG_DICT_KEY_CERTIFICATES_OF_OWNERSHIP "COO"
/**
* Network configuration received from network controller nodes
*/
struct NetworkConfig : TriviallyCopyable {
    // Default-construct as an all-zero (nil) config; operator bool() below is false.
    ZT_INLINE NetworkConfig() noexcept { memoryZero(this); } // NOLINT(cppcoreguidelines-pro-type-member-init,hicpp-member-init)
    /**
     * Write this network config to a dictionary for transport
     *
     * @param d Dictionary
     * @return True if dictionary was successfully created, false if e.g. overflow
     */
    bool toDictionary(Dictionary &d) const;
    /**
     * Read this network config from a dictionary
     *
     * @param d Dictionary (non-const since it might be modified during parse, should not be used after call)
     * @return True if dictionary was valid and network config successfully initialized
     */
    bool fromDictionary(const Dictionary &d);
    /**
     * @return True if broadcast (ff:ff:ff:ff:ff:ff) address should work on this network
     */
    ZT_INLINE bool enableBroadcast() const noexcept { return ((this->flags & ZT_NETWORKCONFIG_FLAG_ENABLE_BROADCAST) != 0); }
    /**
     * @return True if IPv6 NDP emulation should be allowed for certain "magic" IPv6 address patterns
     */
    ZT_INLINE bool ndpEmulation() const noexcept { return ((this->flags & ZT_NETWORKCONFIG_FLAG_ENABLE_IPV6_NDP_EMULATION) != 0); }
    /**
     * @return Network type is public (no access control)
     */
    ZT_INLINE bool isPublic() const noexcept { return (this->type == ZT_NETWORK_TYPE_PUBLIC); }
    /**
     * @return Network type is private (certificate access control)
     */
    ZT_INLINE bool isPrivate() const noexcept { return (this->type == ZT_NETWORK_TYPE_PRIVATE); }
    /**
     * @param fromPeer Peer attempting to bridge other Ethernet peers onto network
     * @return True if this network allows bridging
     */
    ZT_INLINE bool permitsBridging(const Address &fromPeer) const noexcept
    {
        // A peer may bridge if it appears in the specialist list with the
        // ACTIVE_BRIDGE role flag set.
        for (unsigned int i = 0; i < specialistCount; ++i) {
            if ((fromPeer.toInt() == (specialists[i] & ZT_ADDRESS_MASK)) && ((specialists[i] & ZT_NETWORKCONFIG_SPECIALIST_TYPE_ACTIVE_BRIDGE) != 0))
                return true;
        }
        return false;
    }
    // True if this is a real (non-nil) config; networkId == 0 means uninitialized.
    ZT_INLINE operator bool() const noexcept { return (networkId != 0); } // NOLINT(google-explicit-constructor,hicpp-explicit-conversions)
    // NOTE(review): raw memcmp compares padding bytes too; this presumably relies on
    // memoryZero() in the constructor plus trivially-copyable assignment -- verify.
    ZT_INLINE bool operator==(const NetworkConfig &nc) const noexcept { return (memcmp(this, &nc, sizeof(NetworkConfig)) == 0); }
    ZT_INLINE bool operator!=(const NetworkConfig &nc) const noexcept { return (!(*this == nc)); }
    /**
     * Add a specialist or mask flags if already present
     *
     * This masks the existing flags if the specialist is already here or adds
     * it otherwise.
     *
     * @param a Address of specialist
     * @param f Flags (OR of specialist role/type flags)
     * @return True if successfully masked or added
     */
    bool addSpecialist(const Address &a, uint64_t f) noexcept;
    /**
     * Look up a capability by ID (linear scan).
     *
     * @param id Capability ID
     * @return Pointer to capability or nullptr if not found
     */
    ZT_INLINE const CapabilityCredential *capability(const uint32_t id) const
    {
        for (unsigned int i = 0; i < capabilityCount; ++i) {
            if (capabilities[i].id() == id)
                return &(capabilities[i]);
        }
        return nullptr;
    }
    /**
     * Look up a tag by ID (linear scan).
     *
     * @param id Tag ID
     * @return Pointer to tag or nullptr if not found
     */
    ZT_INLINE const TagCredential *tag(const uint32_t id) const
    {
        for (unsigned int i = 0; i < tagCount; ++i) {
            if (tags[i].id() == id)
                return &(tags[i]);
        }
        return nullptr;
    }
    /**
     * Network ID that this configuration applies to
     */
    uint64_t networkId;
    /**
     * Controller-side time of config generation/issue
     */
    int64_t timestamp;
    /**
     * Max difference between timestamp and tag/capability timestamp
     */
    int64_t credentialTimeMaxDelta;
    /**
     * Controller-side revision counter for this configuration
     */
    uint64_t revision;
    /**
     * Address of device to which this config is issued
     */
    Address issuedTo;
    /**
     * Hash of identity public key(s) of node to whom this is issued
     *
     * If this field is all zero it is treated as undefined since old controllers
     * do not set it.
     */
    uint8_t issuedToFingerprintHash[ZT_FINGERPRINT_HASH_SIZE];
    /**
     * Flags (64-bit)
     */
    uint64_t flags;
    /**
     * Network MTU
     */
    unsigned int mtu;
    /**
     * Maximum number of recipients per multicast (not including active bridges)
     */
    unsigned int multicastLimit;
    /**
     * Number of specialists
     */
    unsigned int specialistCount;
    /**
     * Number of routes
     */
    unsigned int routeCount;
    /**
     * Number of ZT-managed static IP assignments
     */
    unsigned int staticIpCount;
    /**
     * Number of rule table entries
     */
    unsigned int ruleCount;
    /**
     * Number of capabilities
     */
    unsigned int capabilityCount;
    /**
     * Number of tags
     */
    unsigned int tagCount;
    /**
     * Number of certificates of ownership
     */
    unsigned int certificateOfOwnershipCount;
    /**
     * Specialist devices
     *
     * For each entry the least significant 40 bits are the device's ZeroTier
     * address and the most significant 24 bits are flags indicating its role.
     */
    uint64_t specialists[ZT_MAX_NETWORK_SPECIALISTS];
    /**
     * Statically defined "pushed" routes (including default gateways)
     */
    ZT_VirtualNetworkRoute routes[ZT_MAX_NETWORK_ROUTES];
    /**
     * Static IP assignments
     */
    InetAddress staticIps[ZT_MAX_ZT_ASSIGNED_ADDRESSES];
    /**
     * Base network rules
     */
    ZT_VirtualNetworkRule rules[ZT_MAX_NETWORK_RULES];
    /**
     * Capabilities for this node on this network, in ascending order of capability ID
     */
    CapabilityCredential capabilities[ZT_MAX_NETWORK_CAPABILITIES];
    /**
     * Tags for this node on this network, in ascending order of tag ID
     */
    TagCredential tags[ZT_MAX_NETWORK_TAGS];
    /**
     * Certificates of ownership for this network member
     */
    OwnershipCredential certificatesOfOwnership[ZT_MAX_CERTIFICATES_OF_OWNERSHIP];
    /**
     * Network type (currently just public or private)
     */
    ZT_VirtualNetworkType type;
    /**
     * Network short name or empty string if not defined
     */
    char name[ZT_MAX_NETWORK_SHORT_NAME_LENGTH + 1];
    /**
     * Certificate of membership (for private networks)
     */
    MembershipCredential com;
};
} // namespace ZeroTier
#endif

View file

@ -1,97 +0,0 @@
/*
* Copyright (c)2013-2021 ZeroTier, Inc.
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
*/
/****/
#ifndef ZT_NETWORKCONFIGMASTER_HPP
#define ZT_NETWORKCONFIGMASTER_HPP
#include "Address.hpp"
#include "Constants.hpp"
#include "Dictionary.hpp"
#include "NetworkConfig.hpp"
#include "RevocationCredential.hpp"
namespace ZeroTier {
class Identity;
struct InetAddress;
/**
* Interface for network controller implementations
*/
class NetworkController {
public:
    /** Result codes for network configuration requests. */
    enum ErrorCode { NC_ERROR_NONE = 0, NC_ERROR_OBJECT_NOT_FOUND = 1, NC_ERROR_ACCESS_DENIED = 2, NC_ERROR_INTERNAL_SERVER_ERROR = 3 };
    /**
     * Interface for sender used to send pushes and replies
     */
    class Sender {
    public:
        /**
         * Send a configuration to a remote peer
         *
         * @param nwid Network ID
         * @param requestPacketId Request packet ID to send OK(NETWORK_CONFIG_REQUEST) or 0 to send NETWORK_CONFIG
         * (push)
         * @param destination Destination peer Address
         * @param nc Network configuration to send
         * @param sendLegacyFormatConfig If true, send an old-format network config
         */
        virtual void ncSendConfig(void *tPtr, int64_t clock, int64_t ticks, uint64_t nwid, uint64_t requestPacketId, const Address &destination, const NetworkConfig &nc, bool sendLegacyFormatConfig) = 0;
        /**
         * Send revocation to a node
         *
         * @param destination Destination node address
         * @param rev Revocation to send
         */
        virtual void ncSendRevocation(void *tPtr, int64_t clock, int64_t ticks, const Address &destination, const RevocationCredential &rev) = 0;
        /**
         * Send a network configuration request error
         *
         * @param nwid Network ID
         * @param requestPacketId Request packet ID or 0 if none
         * @param destination Destination peer Address
         * @param errorCode Error code
         */
        virtual void ncSendError(void *tPtr, int64_t clock, int64_t ticks, uint64_t nwid, uint64_t requestPacketId, const Address &destination, NetworkController::ErrorCode errorCode) = 0;
    };
    NetworkController() {}
    // NOTE(review): virtual destructor is required here since implementations
    // are deleted through this interface type.
    virtual ~NetworkController() {}
    /**
     * Called when this is added to a Node to initialize and supply info
     *
     * @param signingId Identity for signing of network configurations, certs, etc.
     * @param sender Sender implementation for sending replies or config pushes
     */
    virtual void init(const Identity &signingId, Sender *sender) = 0;
    /**
     * Handle a network configuration request
     *
     * @param nwid 64-bit network ID
     * @param fromAddr Originating wire address or null address if packet is not direct (or from self)
     * @param requestPacketId Packet ID of request packet or 0 if not initiated by remote request
     * @param identity ZeroTier identity of originating peer
     * @param metaData Meta-data bundled with request (if any)
     * @return Returns NETCONF_QUERY_OK if result 'nc' is valid, or an error code on error
     */
    virtual void request(uint64_t nwid, const InetAddress &fromAddr, uint64_t requestPacketId, const Identity &identity, const Dictionary &metaData) = 0;
};
} // namespace ZeroTier
#endif

View file

@ -1,715 +0,0 @@
/*
* Copyright (c)2013-2021 ZeroTier, Inc.
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
*/
/****/
#include "Node.hpp"
#include "Address.hpp"
#include "Buf.hpp"
#include "Constants.hpp"
#include "Expect.hpp"
#include "Identity.hpp"
#include "Locator.hpp"
#include "Network.hpp"
#include "NetworkController.hpp"
#include "SelfAwareness.hpp"
#include "SharedPtr.hpp"
#include "Store.hpp"
#include "Topology.hpp"
#include "Trace.hpp"
#include "TrustStore.hpp"
#include "VL1.hpp"
#include "VL2.hpp"
namespace ZeroTier {
namespace {
// Bundles all core per-Node subsystem singletons into one heap object and
// wires back-pointers to them into the shared Context. Member declaration
// order determines construction order; the Context pointers are assigned in
// the constructor body, i.e. only after every member has been constructed.
struct _NodeObjects {
    ZT_INLINE _NodeObjects(Context &ctx, const CallContext &cc) : networks(), t(ctx), expect(), vl2(ctx), vl1(ctx), topology(ctx, cc), sa(ctx), ts()
    {
        // Publish subsystem addresses through the Context so that the rest of
        // the core can reach them without holding a pointer to this struct.
        ctx.networks = &networks;
        ctx.t = &t;
        ctx.expect = &expect;
        ctx.vl2 = &vl2;
        ctx.vl1 = &vl1;
        ctx.topology = &topology;
        ctx.sa = &sa;
        ctx.ts = &ts;
    }
    TinyMap<SharedPtr<Network>> networks;   // fast nwid -> network lookup
    Trace t;                                // tracing/diagnostics
    Expect expect;                          // expected-reply tracking
    VL2 vl2;                                // virtual layer 2 (Ethernet)
    VL1 vl1;                                // virtual layer 1 (peer/packet)
    Topology topology;                      // peers and roots
    SelfAwareness sa;                       // external surface address tracking
    TrustStore ts;                          // certificates
};
} // anonymous namespace
// Construct a Node: load (or generate and persist) its identity, derive the
// local secret cipher key, shuffle the privileged-port probe order, build all
// core subsystem objects (_NodeObjects), then signal ZT_EVENT_UP.
Node::Node(void *uPtr, const struct ZT_Node_Callbacks *callbacks, const CallContext &cc) : m_ctx(this), m_store(m_ctx), m_objects(nullptr), m_lastPeerPulse(0), m_lastHousekeepingRun(0), m_lastNetworkHousekeepingRun(0), m_lastTrustStoreUpdate(0), m_online(false)
{
    ZT_SPEW("Node starting up!");

    Utils::copy<sizeof(ZT_Node_Callbacks)>(&m_ctx.cb, callbacks);
    m_ctx.uPtr = uPtr;
    m_ctx.store = &m_store;

    // Try to load an existing secret identity from the state store.
    Vector<uint8_t> data(m_store.get(cc, ZT_STATE_OBJECT_IDENTITY_SECRET, Utils::ZERO256, 0));
    bool haveIdentity = false;
    if (!data.empty()) {
        data.push_back(0); // zero-terminate string
        if (m_ctx.identity.fromString((const char *)data.data())) {
            m_ctx.identity.toString(false, m_ctx.publicIdentityStr);
            m_ctx.identity.toString(true, m_ctx.secretIdentityStr);
            haveIdentity = true;
            ZT_SPEW("loaded identity %s", m_ctx.identity.toString().c_str());
        }
    }

    if (!haveIdentity) {
        // No stored identity (or it failed to parse): generate a fresh C25519
        // identity and persist both the secret and public forms.
        m_ctx.identity.generate(Identity::C25519);
        m_ctx.identity.toString(false, m_ctx.publicIdentityStr);
        m_ctx.identity.toString(true, m_ctx.secretIdentityStr);
        m_store.put(cc, ZT_STATE_OBJECT_IDENTITY_SECRET, Utils::ZERO256, 0, m_ctx.secretIdentityStr, (unsigned int)strlen(m_ctx.secretIdentityStr));
        m_store.put(cc, ZT_STATE_OBJECT_IDENTITY_PUBLIC, Utils::ZERO256, 0, m_ctx.publicIdentityStr, (unsigned int)strlen(m_ctx.publicIdentityStr));
        ZT_SPEW("no pre-existing identity found, created %s", m_ctx.identity.toString().c_str());
    }
    else {
        // Re-write the stored public identity if it is missing or stale.
        data = m_store.get(cc, ZT_STATE_OBJECT_IDENTITY_PUBLIC, Utils::ZERO256, 0);
        if ((data.empty()) || (memcmp(data.data(), m_ctx.publicIdentityStr, strlen(m_ctx.publicIdentityStr)) != 0))
            m_store.put(cc, ZT_STATE_OBJECT_IDENTITY_PUBLIC, Utils::ZERO256, 0, m_ctx.publicIdentityStr, (unsigned int)strlen(m_ctx.publicIdentityStr));
    }

    // Derive a local secret cipher key: hash that includes the private key,
    // perturbed by one byte and re-hashed so it differs from the fingerprint.
    uint8_t localSecretCipherKey[ZT_FINGERPRINT_HASH_SIZE];
    m_ctx.identity.hashWithPrivate(localSecretCipherKey);
    ++localSecretCipherKey[0];
    SHA384(localSecretCipherKey, localSecretCipherKey, ZT_FINGERPRINT_HASH_SIZE);
    m_ctx.localSecretCipher.init(localSecretCipherKey);

    // Fill the privileged-port table with 1..1023, then randomize its order
    // with 512 pairwise swaps driven by Utils::random().
    for (unsigned int i = 0; i < 1023; ++i)
        m_ctx.randomPrivilegedPortOrder[i] = (uint16_t)(i + 1);
    for (unsigned int i = 0; i < 512; ++i) {
        uint64_t rn = Utils::random();
        const unsigned int a = (unsigned int)rn % 1023;
        const unsigned int b = (unsigned int)(rn >> 32U) % 1023;
        if (a != b) {
            const uint16_t tmp = m_ctx.randomPrivilegedPortOrder[a];
            m_ctx.randomPrivilegedPortOrder[a] = m_ctx.randomPrivilegedPortOrder[b];
            m_ctx.randomPrivilegedPortOrder[b] = tmp;
        }
    }

    // Construct all subsystems; this also wires their pointers into m_ctx.
    m_objects = new _NodeObjects(m_ctx, cc);

    ZT_SPEW("node initialized!");
    postEvent(cc.tPtr, ZT_EVENT_UP);
}
// Destructor: drop all networks under lock, destroy the subsystem bundle,
// then release the global Buf pool.
Node::~Node()
{
    ZT_SPEW("Node shutting down (in destructor).");

    {
        Mutex::Lock locked(m_allNetworks_l);
        m_ctx.networks->clear();
        m_allNetworks.clear();
    }

    delete reinterpret_cast<_NodeObjects *>(m_objects);

    // Let go of cached Buf objects. If other nodes happen to be running in this
    // same process space new Bufs will be allocated as needed, but this is almost
    // never the case. Calling this here saves RAM if we are running inside something
    // that wants to keep running after tearing down its ZeroTier core instance.
    Buf::freePool();
}
void Node::shutdown(const CallContext &cc)
{
m_allNetworks_l.lock();
m_ctx.networks->clear();
m_allNetworks.clear();
m_allNetworks_l.unlock();
postEvent(cc.tPtr, ZT_EVENT_DOWN);
if (m_ctx.topology)
m_ctx.topology->saveAll(cc);
}
// Run all periodic maintenance. Each sub-task fires only when its interval
// has elapsed (measured on cc.ticks). Returns ZT_RESULT_OK or a fatal error
// code if an exception escaped a subsystem.
ZT_ResultCode Node::processBackgroundTasks(const CallContext &cc, volatile int64_t *nextBackgroundTaskDeadline)
{
    // Only one background-task pass may run at a time.
    Mutex::Lock bl(m_backgroundTasksLock);
    try {
        // Updating the trust store means checking certificates and certificate chains
        // against the current time, etc., and also resynchronizing roots as specified by
        // certificates. This also happens on demand when the trust store is changed.
        if ((cc.ticks - m_lastTrustStoreUpdate) >= ZT_TRUSTSTORE_UPDATE_PERIOD) {
            m_lastTrustStoreUpdate = cc.ticks;
            if (unlikely(m_ctx.ts->update(cc.ticks, nullptr)))
                m_ctx.topology->trustStoreChanged(cc);
        }

        // Networks perform housekeeping here such as refreshing configs.
        if ((cc.ticks - m_lastNetworkHousekeepingRun) >= ZT_NETWORK_HOUSEKEEPING_PERIOD) {
            m_lastNetworkHousekeepingRun = cc.ticks;
            ZT_SPEW("running networking housekeeping...");
            Mutex::Lock l(m_allNetworks_l);
            for (Vector<SharedPtr<Network>>::const_iterator n(m_allNetworks.begin()); n != m_allNetworks.end(); ++n)
                (*n)->doPeriodicTasks(cc);
        }

        // Perform general housekeeping for other objects in the system.
        if ((cc.ticks - m_lastHousekeepingRun) >= ZT_HOUSEKEEPING_PERIOD) {
            m_lastHousekeepingRun = cc.ticks;
            ZT_SPEW("running housekeeping...");
            m_ctx.topology->doPeriodicTasks(cc);
            m_ctx.sa->clean(cc);
        }

        // Peers have a "pulse" method that does things like keepalive and path housekeeping.
        // This happens last because keepalives are only necessary if nothing has been sent
        // in a while, and some of the above actions may cause peers to send things which may
        // reduce the need for keepalives. Root ranking also happens here.
        if ((cc.ticks - m_lastPeerPulse) >= ZT_PEER_PULSE_INTERVAL) {
            m_lastPeerPulse = cc.ticks;
            ZT_SPEW("running pulse() on each peer...");
            try {
                Vector<SharedPtr<Peer>> allPeers, rootPeers;
                m_ctx.topology->allPeers(allPeers, rootPeers);
                // rootPeers is sorted so membership can be checked with binary_search below.
                std::sort(rootPeers.begin(), rootPeers.end());

                bool online = false;
                for (Vector<SharedPtr<Peer>>::iterator p(allPeers.begin()); p != allPeers.end(); ++p) {
                    (*p)->pulse(m_ctx, cc);
                    if (!online) {
                        // Online if any root is directly connected; with no roots at
                        // all, any directly connected peer counts.
                        online = ((std::binary_search(rootPeers.begin(), rootPeers.end(), *p) || rootPeers.empty()) && (*p)->directlyConnected());
                    }
                }

                // Emit ONLINE/OFFLINE events only on an actual state transition.
                if (unlikely(m_online.exchange(online, std::memory_order_relaxed) != online))
                    postEvent(cc.tPtr, online ? ZT_EVENT_ONLINE : ZT_EVENT_OFFLINE);

                ZT_SPEW("ranking roots...");
                m_ctx.topology->rankRoots(cc);
            }
            catch (...) {
                return ZT_RESULT_FATAL_ERROR_INTERNAL;
            }
        }

        // Tell the caller when to invoke us again.
        *nextBackgroundTaskDeadline = cc.ticks + ZT_TIMER_TASK_INTERVAL;
    }
    catch (...) {
        return ZT_RESULT_FATAL_ERROR_INTERNAL;
    }
    return ZT_RESULT_OK;
}
// Join a network. Idempotent: joining an already-joined network is a no-op.
// An optional controller fingerprint pins the expected controller identity.
ZT_ResultCode Node::join(uint64_t nwid, const ZT_Fingerprint *controllerFingerprint, void *uptr, const CallContext &cc)
{
    Mutex::Lock locked(m_allNetworks_l);

    Fingerprint fp;
    if (controllerFingerprint) {
        fp = *controllerFingerprint;
        ZT_SPEW("joining network %.16llx with controller fingerprint %s", nwid, fp.toString().c_str());
    }
    else {
        ZT_SPEW("joining network %.16llx", nwid);
    }

    // Already a member? Nothing to do.
    for (Vector<SharedPtr<Network>>::iterator existing(m_allNetworks.begin()); existing != m_allNetworks.end(); ++existing) {
        if ((*existing)->id() == nwid)
            return ZT_RESULT_OK;
    }

    // Create the network and register it in both the iteration vector and
    // the fast lookup map.
    SharedPtr<Network> created(new Network(m_ctx, cc, nwid, fp, uptr, nullptr));
    m_allNetworks.push_back(created);
    m_ctx.networks->set(nwid, created);

    return ZT_RESULT_OK;
}
// Leave a network. Removes it from the lookup map and iteration vector,
// erases its stored config, notifies the external callback with a DESTROY
// operation, and finally destroys the network object. If uptr is non-null
// the network's user pointer is returned through it.
ZT_ResultCode Node::leave(uint64_t nwid, void **uptr, const CallContext &cc)
{
    Mutex::Lock l(m_allNetworks_l);

    ZT_SPEW("leaving network %.16llx", nwid);
    ZT_VirtualNetworkConfig ctmp;

    // Remove from the fast lookup map first, then move the SharedPtr out of
    // the vector so we keep the network alive for the teardown steps below.
    SharedPtr<Network> network;
    m_ctx.networks->erase(nwid);
    for (Vector<SharedPtr<Network>>::iterator n(m_allNetworks.begin()); n != m_allNetworks.end(); ++n) {
        if ((*n)->id() == nwid) {
            network.move(*n);
            m_allNetworks.erase(n);
            break;
        }
    }

    // Erase any persisted network config for this network ID.
    uint64_t tmp[2];
    tmp[0] = nwid;
    tmp[1] = 0;
    m_store.erase(cc, ZT_STATE_OBJECT_NETWORK_CONFIG, tmp, 1);

    if (network) {
        // Hand back the user pointer (if requested), notify the external
        // callback of destruction, then tear down the network object.
        if (uptr)
            *uptr = *network->userPtr();
        network->externalConfig(&ctmp);
        m_ctx.cb.virtualNetworkConfigFunction(reinterpret_cast<ZT_Node *>(this), m_ctx.uPtr, cc.tPtr, nwid, network->userPtr(), ZT_VIRTUAL_NETWORK_CONFIG_OPERATION_DESTROY, &ctmp);
        network->destroy();
        return ZT_RESULT_OK;
    }
    else {
        return ZT_RESULT_ERROR_NETWORK_NOT_FOUND;
    }
}
// Subscribe to a multicast group (MAC + 32-bit ADI) on a joined network.
ZT_ResultCode Node::multicastSubscribe(const CallContext &cc, uint64_t nwid, uint64_t multicastGroup, unsigned long multicastAdi)
{
    ZT_SPEW("multicast subscribe to %s:%lu", MAC(multicastGroup).toString().c_str(), multicastAdi);
    const SharedPtr<Network> network(m_ctx.networks->get(nwid));
    if (!network)
        return ZT_RESULT_ERROR_NETWORK_NOT_FOUND;
    network->multicastSubscribe(cc, MulticastGroup(MAC(multicastGroup), (uint32_t)(multicastAdi & 0xffffffff)));
    return ZT_RESULT_OK;
}
// Unsubscribe from a multicast group (MAC + 32-bit ADI) on a joined network.
ZT_ResultCode Node::multicastUnsubscribe(const CallContext &cc, uint64_t nwid, uint64_t multicastGroup, unsigned long multicastAdi)
{
    ZT_SPEW("multicast unsubscribe from %s:%lu", MAC(multicastGroup).toString().c_str(), multicastAdi);
    const SharedPtr<Network> network(m_ctx.networks->get(nwid));
    if (!network)
        return ZT_RESULT_ERROR_NETWORK_NOT_FOUND;
    network->multicastUnsubscribe(MulticastGroup(MAC(multicastGroup), (uint32_t)(multicastAdi & 0xffffffff)));
    return ZT_RESULT_OK;
}
// Fill a caller-supplied ZT_NodeStatus with this node's identity and
// online state. Pointers returned reference memory owned by this Node.
void Node::status(ZT_NodeStatus *status) const
{
    const Identity &id = m_ctx.identity;
    status->address = id.address().toInt();
    status->identity = reinterpret_cast<const ZT_Identity *>(&id);
    status->publicIdentity = m_ctx.publicIdentityStr;
    status->secretIdentity = m_ctx.secretIdentityStr;
    status->online = m_online ? 1 : 0;
}
// Concrete backing storage for a C API ZT_PeerList. The containers are hidden
// behind the public struct. Elements are added with push_front and their
// addresses are stored in ZT_Peer fields, so node-based ForwardList keeps
// those addresses stable as more elements are added.
struct p_ZT_PeerListPrivate : public ZT_PeerList {
    // Actual containers for the memory, hidden from external users.
    Vector<ZT_Peer> p_peers;
    ForwardList<Vector<ZT_Path>> p_paths;
    ForwardList<Identity> p_identities;
    ForwardList<Blob<ZT_LOCATOR_MARSHAL_SIZE_MAX>> p_locators;
};

// freeFunction installed on every list returned by Node::peers().
static void p_peerListFreeFunction(const void *pl)
{
    if (pl)
        delete reinterpret_cast<p_ZT_PeerListPrivate *>(const_cast<void *>(pl));
}

// Comparator ordering peers by ZeroTier address (for sorted peers() output).
struct p_sortPeerPtrsByAddress {
    ZT_INLINE bool operator()(const SharedPtr<Peer> &a, const SharedPtr<Peer> &b) const noexcept { return (a->address() < b->address()); }
};
// Build a snapshot list of all known peers for the C API, sorted by address.
// The returned list owns its memory and is released via its freeFunction.
// Returns nullptr on allocation failure or internal error.
ZT_PeerList *Node::peers(const CallContext &cc) const
{
    p_ZT_PeerListPrivate *pl = nullptr;
    try {
        pl = new p_ZT_PeerListPrivate;
        pl->freeFunction = p_peerListFreeFunction;

        Vector<SharedPtr<Peer>> peers, rootPeers;
        m_ctx.topology->allPeers(peers, rootPeers);
        std::sort(peers.begin(), peers.end(), p_sortPeerPtrsByAddress());
        std::sort(rootPeers.begin(), rootPeers.end()); // sorted so binary_search works below

        for (Vector<SharedPtr<Peer>>::iterator pi(peers.begin()); pi != peers.end(); ++pi) {
            pl->p_peers.push_back(ZT_Peer());
            ZT_Peer &p = pl->p_peers.back();
            Peer &pp = **pi;

            p.address = pp.address();
            // Identity/fingerprint pointers reference ForwardList nodes whose
            // addresses stay stable while the list lives.
            pl->p_identities.push_front(pp.identity());
            p.identity = reinterpret_cast<const ZT_Identity *>(&(pl->p_identities.front()));
            p.fingerprint = &(pl->p_identities.front().fingerprint());
            uint16_t vProto, vMajor, vMinor, vRevision;
            if (pp.remoteVersion(vProto, vMajor, vMinor, vRevision)) {
                p.versionMajor = (int)vMajor;
                p.versionMinor = (int)vMinor;
                p.versionRev = (int)vRevision;
                p.versionProto = (int)vProto;
            }
            else {
                // Remote version unknown: -1 in every version field.
                p.versionMajor = -1;
                p.versionMinor = -1;
                p.versionRev = -1;
                p.versionProto = -1;
            }
            p.latency = pp.latency();
            p.root = std::binary_search(rootPeers.begin(), rootPeers.end(), *pi) ? 1 : 0;

            p.networks = nullptr;
            p.networkCount = 0; // TODO: networks this peer belongs to

            Vector<SharedPtr<Path>> ztPaths;
            pp.getAllPaths(ztPaths);
            // BUGFIX: this condition was inverted ("if (ztPaths.empty())"),
            // which built the path array only when there were no paths and
            // reported nullptr whenever paths actually existed.
            if (!ztPaths.empty()) {
                pl->p_paths.push_front(Vector<ZT_Path>());
                std::vector<ZT_Path> &apiPaths = pl->p_paths.front();
                apiPaths.resize(ztPaths.size());
                for (unsigned long i = 0; i < (unsigned long)ztPaths.size(); ++i) {
                    SharedPtr<Path> &ztp = ztPaths[i];
                    ZT_Path &apip = apiPaths[i];
                    apip.endpoint.type = ZT_ENDPOINT_TYPE_IP_UDP;
                    Utils::copy<sizeof(struct sockaddr_storage)>(&(apip.endpoint.value.ss), &(ztp->address().as.ss));
                    apip.lastSend = ztp->lastOut();
                    apip.lastReceive = ztp->lastIn();
                    apip.alive = ztp->alive(cc) ? 1 : 0;
                    apip.preferred = (i == 0) ? 1 : 0;
                }
                p.paths = apiPaths.data();
                p.pathCount = (unsigned int)apiPaths.size();
            }
            else {
                p.paths = nullptr;
                p.pathCount = 0;
            }

            const SharedPtr<const Locator> loc(pp.locator());
            if (loc) {
                pl->p_locators.push_front(Blob<ZT_LOCATOR_MARSHAL_SIZE_MAX>());
                Blob<ZT_LOCATOR_MARSHAL_SIZE_MAX> &lb = pl->p_locators.front();
                Utils::zero<ZT_LOCATOR_MARSHAL_SIZE_MAX>(lb.data);
                const int ls = loc->marshal(lb.data);
                if (ls > 0) {
                    p.locatorSize = (unsigned int)ls;
                    p.locator = lb.data;
                }
            }
        }

        pl->peers = pl->p_peers.data();
        pl->peerCount = (unsigned long)pl->p_peers.size();

        return pl;
    }
    catch (...) {
        delete pl;
        return nullptr;
    }
}
// Return a malloc'd copy of a network's external config, or nullptr if the
// network is not joined (or allocation fails). Caller frees with free().
ZT_VirtualNetworkConfig *Node::networkConfig(uint64_t nwid) const
{
    const SharedPtr<Network> nw(m_ctx.networks->get(nwid));
    if (!nw)
        return nullptr;
    ZT_VirtualNetworkConfig *const nc = (ZT_VirtualNetworkConfig *)::malloc(sizeof(ZT_VirtualNetworkConfig));
    // BUGFIX: malloc result was previously dereferenced unchecked; networks()
    // already guards its allocation the same way.
    if (!nc)
        return nullptr;
    nw->externalConfig(nc);
    return nc;
}
// Return a list of all joined networks' configs in one malloc'd block
// (header followed by the config array). Freed via the list's freeFunction.
ZT_VirtualNetworkList *Node::networks() const
{
    Mutex::Lock locked(m_allNetworks_l);

    char *const mem = (char *)::malloc(sizeof(ZT_VirtualNetworkList) + (sizeof(ZT_VirtualNetworkConfig) * m_allNetworks.size()));
    if (!mem)
        return nullptr;

    ZT_VirtualNetworkList *const list = (ZT_VirtualNetworkList *)mem;
    list->freeFunction = reinterpret_cast<void (*)(const void *)>(free);
    list->networks = (ZT_VirtualNetworkConfig *)(mem + sizeof(ZT_VirtualNetworkList));

    unsigned long count = 0;
    for (Vector<SharedPtr<Network>>::const_iterator n(m_allNetworks.begin()); n != m_allNetworks.end(); ++n)
        (*n)->externalConfig(&(list->networks[count++]));
    list->networkCount = count;

    return list;
}
// Replace the user-defined pointer associated with a joined network.
void Node::setNetworkUserPtr(uint64_t nwid, void *ptr)
{
    SharedPtr<Network> nw(m_ctx.networks->get(nwid));
    if (nw) {
        // Hold the network list lock to prevent concurrent modification of
        // the user pointer.
        Mutex::Lock locked(m_allNetworks_l);
        *(nw->userPtr()) = ptr;
    }
}
void Node::setInterfaceAddresses(const ZT_InterfaceAddress *addrs, unsigned int addrCount)
{
Mutex::Lock _l(m_localInterfaceAddresses_m);
m_localInterfaceAddresses.clear();
for (unsigned int i = 0; i < addrCount; ++i) {
bool dupe = false;
for (unsigned int j = 0; j < i; ++j) {
if (*(reinterpret_cast<const InetAddress *>(&addrs[j].address)) == *(reinterpret_cast<const InetAddress *>(&addrs[i].address))) {
dupe = true;
break;
}
}
if (!dupe)
m_localInterfaceAddresses.push_back(addrs[i]);
}
}
// Add a certificate to the trust store, either from an already-decoded
// ZT_Certificate or from raw encoded bytes, then re-run the trust store
// update so the certificate's verification state is computed immediately.
// Returns the certificate's resulting error state.
ZT_CertificateError Node::addCertificate(const CallContext &cc, unsigned int localTrust, const ZT_Certificate *cert, const void *certData, unsigned int certSize)
{
    Certificate c;
    if (cert) {
        c = *cert;
    }
    else {
        // No decoded certificate supplied: decode from the raw byte form.
        if ((!certData) || (!certSize))
            return ZT_CERTIFICATE_ERROR_INVALID_FORMAT;
        if (!c.decode(certData, certSize))
            return ZT_CERTIFICATE_ERROR_INVALID_FORMAT;
    }
    m_ctx.ts->add(c, localTrust);
    m_ctx.ts->update(cc.clock, nullptr);
    SharedPtr<TrustStore::Entry> ent(m_ctx.ts->get(c.getSerialNo()));
    return (ent) ? ent->error() : ZT_CERTIFICATE_ERROR_INVALID_FORMAT; // should never be null, but if so it means invalid
}
// Erase a certificate (by 384-bit serial number) from the trust store and
// re-run the trust store update immediately.
ZT_ResultCode Node::deleteCertificate(const CallContext &cc, const void *serialNo)
{
    if (serialNo == nullptr)
        return ZT_RESULT_ERROR_BAD_PARAMETER;
    m_ctx.ts->erase(H384(serialNo));
    m_ctx.ts->update(-1, nullptr);
    return ZT_RESULT_OK;
}
// Backing storage that travels with a ZT_CertificateList allocation: holds
// the SharedPtr entries (keeping certificates alive) plus the raw pointer
// and trust arrays exposed through the public struct.
struct p_certificateListInternal {
    Vector<SharedPtr<TrustStore::Entry>> entries;
    Vector<const ZT_Certificate *> c;
    Vector<unsigned int> t;
};

// freeFunction for lists returned by Node::listCertificates(): explicitly
// destroys the placement-new'd internal struct located just past the public
// header, then frees the single malloc block containing both.
static void p_freeCertificateList(const void *cl)
{
    if (cl) {
        reinterpret_cast<const p_certificateListInternal *>(reinterpret_cast<const uint8_t *>(cl) + sizeof(ZT_CertificateList))->~p_certificateListInternal();
        free(const_cast<void *>(cl));
    }
}
// Return a snapshot of all certificates in the trust store as a C API list.
// Memory layout: one malloc block holding [ZT_CertificateList header]
// [p_certificateListInternal], the latter constructed with placement new and
// destroyed by p_freeCertificateList. Returns nullptr on allocation failure.
ZT_CertificateList *Node::listCertificates()
{
    ZT_CertificateList *const cl = (ZT_CertificateList *)malloc(sizeof(ZT_CertificateList) + sizeof(p_certificateListInternal));
    if (!cl)
        return nullptr;

    // Construct the hidden container struct in place, directly after the header.
    p_certificateListInternal *const clint = reinterpret_cast<p_certificateListInternal *>(reinterpret_cast<uint8_t *>(cl) + sizeof(ZT_CertificateList));
    new (clint) p_certificateListInternal;

    clint->entries = m_ctx.ts->all(false);
    // Reserve up front so c.data()/t.data() below are final (no reallocation).
    clint->c.reserve(clint->entries.size());
    clint->t.reserve(clint->entries.size());
    for (Vector<SharedPtr<TrustStore::Entry>>::const_iterator i(clint->entries.begin()); i != clint->entries.end(); ++i) {
        clint->c.push_back(&((*i)->certificate()));
        clint->t.push_back((*i)->localTrust());
    }

    cl->freeFunction = p_freeCertificateList;
    cl->certs = clint->c.data();
    cl->localTrust = clint->t.data();
    cl->certCount = (unsigned long)clint->c.size();

    return cl;
}
// Send a user-defined message to another node. Currently a stub: the actual
// VERB_USER_MESSAGE wire path has not been ported (see commented-out legacy
// code), so this only rejects sending to ourselves. Returns nonzero on
// (claimed) success, 0 on failure.
int Node::sendUserMessage(const CallContext &cc, uint64_t dest, uint64_t /*typeId*/, const void * /*data*/, unsigned int /*len*/)
{
    try {
        if (m_ctx.identity.address().toInt() != dest) {
            // TODO
            /*
            Packet outp(Address(dest),m_ctx.identity.address(),Packet::VERB_USER_MESSAGE);
            outp.append(typeId);
            outp.append(data,len);
            outp.compress();
            m_ctx.sw->send(tptr,outp,true);
            */
            return 1;
        }
    }
    catch (...) {
        // Best-effort: any exception is reported as a failed send (return 0).
    }
    return 0;
}
void Node::setController(void *networkControllerInstance)
{
m_ctx.localNetworkController = reinterpret_cast<NetworkController *>(networkControllerInstance);
if (networkControllerInstance)
m_ctx.localNetworkController->init(m_ctx.identity, this);
}
bool Node::filterPotentialPath(void *tPtr, const Identity &id, int64_t localSocket, const InetAddress &remoteAddress)
{
{
Mutex::Lock l(m_allNetworks_l);
for (Vector<SharedPtr<Network>>::iterator i(m_allNetworks.begin()); i != m_allNetworks.end(); ++i) {
for (unsigned int k = 0, j = (*i)->config().staticIpCount; k < j; ++k) {
if ((*i)->config().staticIps[k].containsAddress(remoteAddress))
return false;
}
}
}
if (m_ctx.cb.pathCheckFunction) {
return (m_ctx.cb.pathCheckFunction(reinterpret_cast<ZT_Node *>(this), m_ctx.uPtr, tPtr, id.address().toInt(), (const ZT_Identity *)&id, localSocket, reinterpret_cast<const ZT_InetAddress *>(&remoteAddress)) != 0);
}
return true;
}
// Ask the external pathLookupFunction (if configured) for a physical address
// for a peer. Returns true only if the callback filled in addr.
bool Node::externalPathLookup(void *tPtr, const Identity &id, int family, InetAddress &addr)
{
    if (!m_ctx.cb.pathLookupFunction)
        return false;
    return (m_ctx.cb.pathLookupFunction(reinterpret_cast<ZT_Node *>(this), m_ctx.uPtr, tPtr, id.address().toInt(), reinterpret_cast<const ZT_Identity *>(&id), family, reinterpret_cast<ZT_InetAddress *>(&addr)) == ZT_RESULT_OK);
}
// Implementation of NetworkController::Sender ------------------------------------------------------------------------
// NetworkController::Sender implementation: deliver a network config. If the
// destination is this node itself (we are a member of our own controller's
// network) the config is applied locally; otherwise it would be chunked and
// sent over the wire, but that path has not been ported yet (see TODO).
void Node::ncSendConfig(void *tPtr, int64_t clock, int64_t ticks, uint64_t nwid, uint64_t requestPacketId, const Address &destination, const NetworkConfig &nc, bool sendLegacyFormatConfig)
{
    if (destination == m_ctx.identity.address()) {
        // Local loopback: apply the config directly to our own network object.
        SharedPtr<Network> n(m_ctx.networks->get(nwid));
        if (!n)
            return;
        CallContext cc(clock, ticks, tPtr);
        n->setConfiguration(cc, nc, true);
    }
    else {
        Dictionary dconf;
        if (nc.toDictionary(dconf)) {
            // Config update IDs must be nonzero (0 is reserved/meaningless).
            uint64_t configUpdateId = Utils::random();
            if (!configUpdateId)
                ++configUpdateId;

            Vector<uint8_t> ddata;
            dconf.encode(ddata);
            // TODO
            /*
            unsigned int chunkIndex = 0;
            while (chunkIndex < totalSize) {
                const unsigned int chunkLen = std::min(totalSize - chunkIndex,(unsigned int)(ZT_PROTO_MAX_PACKET_LENGTH
            - (ZT_PACKET_IDX_PAYLOAD + 256))); Packet outp(destination,m_ctx.identity.address(),(requestPacketId) ?
            Packet::VERB_OK : Packet::VERB_NETWORK_CONFIG); if (requestPacketId) { outp.append((unsigned
            char)Packet::VERB_NETWORK_CONFIG_REQUEST); outp.append(requestPacketId);
                }

                const unsigned int sigStart = outp.size();
                outp.append(nwid);
                outp.append((uint16_t)chunkLen);
                outp.append((const void *)(dconf->data() + chunkIndex),chunkLen);

                outp.append((uint8_t)0); // no flags
                outp.append((uint64_t)configUpdateId);
                outp.append((uint32_t)totalSize);
                outp.append((uint32_t)chunkIndex);

                uint8_t sig[256];
                const unsigned int siglen = m_ctx.identity.sign(reinterpret_cast<const uint8_t *>(outp.data()) +
            sigStart,outp.size() - sigStart,sig,sizeof(sig)); outp.append((uint8_t)1); outp.append((uint16_t)siglen);
            outp.append(sig,siglen);

                outp.compress();
                m_ctx.sw->send((void *)0,outp,true);
                chunkIndex += chunkLen;
            }
            */
        }
    }
}
// NetworkController::Sender implementation: deliver a credential revocation.
// Applied locally if the destination is this node; remote delivery over the
// wire has not been ported yet (see TODO).
void Node::ncSendRevocation(void *tPtr, int64_t clock, int64_t ticks, const Address &destination, const RevocationCredential &rev)
{
    if (destination == m_ctx.identity.address()) {
        // Local loopback: feed the revocation into our own network object.
        SharedPtr<Network> n(m_ctx.networks->get(rev.networkId()));
        if (!n)
            return;
        CallContext cc(clock, ticks, tPtr);
        n->addCredential(cc, m_ctx.identity, rev);
    }
    else {
        // TODO
        /*
        Packet outp(destination,m_ctx.identity.address(),Packet::VERB_NETWORK_CREDENTIALS);
        outp.append((uint8_t)0x00);
        outp.append((uint16_t)0);
        outp.append((uint16_t)0);
        outp.append((uint16_t)1);
        rev.serialize(outp);
        outp.append((uint16_t)0);
        m_ctx.sw->send((void *)0,outp,true);
        */
    }
}
// NetworkController::Sender implementation: deliver a network config request
// error. Applied locally if the destination is this node; the remote wire
// path has not been ported yet (see TODO). With no request packet ID there is
// nothing to reply to, so remote errors without one are silently discarded.
void Node::ncSendError(void *tPtr, int64_t clock, int64_t ticks, uint64_t nwid, uint64_t requestPacketId, const Address &destination, NetworkController::ErrorCode errorCode)
{
    if (destination == m_ctx.identity.address()) {
        SharedPtr<Network> n(m_ctx.networks->get(nwid));
        if (!n)
            return;
        // Map controller error codes onto local network state flags.
        switch (errorCode) {
            case NetworkController::NC_ERROR_OBJECT_NOT_FOUND:
            case NetworkController::NC_ERROR_INTERNAL_SERVER_ERROR: n->setNotFound(); break;
            case NetworkController::NC_ERROR_ACCESS_DENIED: n->setAccessDenied(); break;

            default: break;
        }
    }
    else if (requestPacketId) {
        // TODO
        /*
        Packet outp(destination,m_ctx.identity.address(),Packet::VERB_ERROR);
        outp.append((unsigned char)Packet::VERB_NETWORK_CONFIG_REQUEST);
        outp.append(requestPacketId);
        switch(errorCode) {
            //case NetworkController::NC_ERROR_OBJECT_NOT_FOUND:
            //case NetworkController::NC_ERROR_INTERNAL_SERVER_ERROR:
            default:
                outp.append((unsigned char)Packet::ERROR_OBJ_NOT_FOUND);
                break;
            case NetworkController::NC_ERROR_ACCESS_DENIED:
                outp.append((unsigned char)Packet::ERROR_NETWORK_ACCESS_DENIED_);
                break;
        }
        outp.append(nwid);
        m_ctx.sw->send((void *)0,outp,true);
        */
    } // else we can't send an ERROR() in response to nothing, so discard
}
} // namespace ZeroTier

View file

@ -1,160 +0,0 @@
/*
* Copyright (c)2013-2021 ZeroTier, Inc.
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
*/
/****/
#ifndef ZT_NODE_HPP
#define ZT_NODE_HPP
#include "Buf.hpp"
#include "CallContext.hpp"
#include "Constants.hpp"
#include "Containers.hpp"
#include "Context.hpp"
#include "InetAddress.hpp"
#include "MAC.hpp"
#include "Mutex.hpp"
#include "Network.hpp"
#include "NetworkController.hpp"
#include "Path.hpp"
#include "Salsa20.hpp"
#include "Store.hpp"
namespace ZeroTier {
/**
* Implementation of Node object as defined in CAPI
*
* The pointer returned by ZT_Node_new() is an instance of this class.
*/
class Node : public NetworkController::Sender {
  public:
    // Get rid of alignment warnings on 32-bit Windows
#ifdef __WINDOWS__
    void *operator new(size_t i) { return _mm_malloc(i, 16); }
    void operator delete(void *p) { _mm_free(p); }
#endif

    Node(void *uPtr, const struct ZT_Node_Callbacks *callbacks, const CallContext &cc);

    virtual ~Node();

    // Graceful shutdown: drop networks, announce DOWN, persist topology.
    void shutdown(const CallContext &cc);

    // Periodic maintenance; sets *nextBackgroundTaskDeadline for the caller.
    ZT_ResultCode processBackgroundTasks(const CallContext &cc, volatile int64_t *nextBackgroundTaskDeadline);

    // Join/leave networks (idempotent join; leave returns the user ptr via uptr).
    ZT_ResultCode join(uint64_t nwid, const ZT_Fingerprint *controllerFingerprint, void *uptr, const CallContext &cc);

    ZT_ResultCode leave(uint64_t nwid, void **uptr, const CallContext &cc);

    // Multicast group membership management on a joined network.
    ZT_ResultCode multicastSubscribe(const CallContext &cc, uint64_t nwid, uint64_t multicastGroup, unsigned long multicastAdi);

    ZT_ResultCode multicastUnsubscribe(const CallContext &cc, uint64_t nwid, uint64_t multicastGroup, unsigned long multicastAdi);

    // Snapshot queries for the C API; returned lists own their memory and
    // are released via their embedded freeFunction (or free() for configs).
    void status(ZT_NodeStatus *status) const;

    ZT_PeerList *peers(const CallContext &cc) const;

    ZT_VirtualNetworkConfig *networkConfig(uint64_t nwid) const;

    ZT_VirtualNetworkList *networks() const;

    void setNetworkUserPtr(uint64_t nwid, void *ptr);

    // Replace the advertised set of local interface addresses (dupes dropped).
    void setInterfaceAddresses(const ZT_InterfaceAddress *addrs, unsigned int addrCount);

    // Trust store / certificate management.
    ZT_CertificateError addCertificate(const CallContext &cc, unsigned int localTrust, const ZT_Certificate *cert, const void *certData, unsigned int certSize);

    ZT_ResultCode deleteCertificate(const CallContext &cc, const void *serialNo);

    ZT_CertificateList *listCertificates();

    // Send a user-defined message to another node (currently a stub).
    int sendUserMessage(const CallContext &cc, uint64_t dest, uint64_t typeId, const void *data, unsigned int len);

    // Attach/detach a local NetworkController instance.
    void setController(void *networkControllerInstance);

    /**
     * Post an event via external callback
     *
     * @param tPtr Thread pointer
     * @param ev Event object
     * @param md Event data or NULL if none
     * @param mdSize Size of event data
     */
    ZT_INLINE void postEvent(void *const tPtr, const ZT_Event ev, const void *const md = nullptr, const unsigned int mdSize = 0) noexcept { m_ctx.cb.eventCallback(reinterpret_cast<ZT_Node *>(this), m_ctx.uPtr, tPtr, ev, md, mdSize); }

    /**
     * Check whether a path should be used for ZeroTier traffic
     *
     * This performs internal checks and also calls out to an external callback if one is defined.
     *
     * @param tPtr Thread pointer
     * @param id Identity of peer
     * @param localSocket Local socket or -1 if unknown
     * @param remoteAddress Remote address
     * @return True if path should be used
     */
    bool filterPotentialPath(void *tPtr, const Identity &id, int64_t localSocket, const InetAddress &remoteAddress);

    /**
     * Query callback for a physical address for a peer
     *
     * @param tPtr Thread pointer
     * @param id Full identity of ZeroTier node
     * @param family Desired address family or -1 for any
     * @param addr Buffer to store address (result paramter)
     * @return True if addr was filled with something
     */
    bool externalPathLookup(void *tPtr, const Identity &id, int family, InetAddress &addr);

    ZT_INLINE const Identity &identity() const noexcept { return m_ctx.identity; }

    ZT_INLINE const Context &context() const noexcept { return m_ctx; }

    // Implementation of NetworkController::Sender interface
    virtual void ncSendConfig(void *tPtr, int64_t clock, int64_t ticks, uint64_t nwid, uint64_t requestPacketId, const Address &destination, const NetworkConfig &nc, bool sendLegacyFormatConfig);

    virtual void ncSendRevocation(void *tPtr, int64_t clock, int64_t ticks, const Address &destination, const RevocationCredential &rev);

    virtual void ncSendError(void *tPtr, int64_t clock, int64_t ticks, uint64_t nwid, uint64_t requestPacketId, const Address &destination, NetworkController::ErrorCode errorCode);

  private:
    Context m_ctx;

    // Data store wrapper
    Store m_store;

    // Pointer to a struct defined in Node that holds instances of core objects.
    void *m_objects;

    // This stores networks for rapid iteration, while RR->networks is the primary lookup.
    Vector<SharedPtr<Network>> m_allNetworks;
    Mutex m_allNetworks_l;

    // These are local interface addresses that have been configured via the API
    // and can be pushed to other nodes.
    Vector<ZT_InterfaceAddress> m_localInterfaceAddresses;
    Mutex m_localInterfaceAddresses_m;

    // This is locked while running processBackgroundTasks().
    Mutex m_backgroundTasksLock;

    // These are locked via _backgroundTasksLock as they're only checked and modified in processBackgroundTasks().
    int64_t m_lastPeerPulse;
    int64_t m_lastHousekeepingRun;
    int64_t m_lastNetworkHousekeepingRun;
    int64_t m_lastTrustStoreUpdate;

    // True if at least one root appears reachable.
    std::atomic<bool> m_online;
};
} // namespace ZeroTier
#endif

View file

@ -1,277 +0,0 @@
/*
* Copyright (c)2013-2021 ZeroTier, Inc.
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
*/
/****/
#ifndef ZT_OS_HPP
#define ZT_OS_HPP
/* Uncomment this to force a whole lot of debug output. */
/* BUGFIX: this opt-in debug macro was left enabled, forcing ZT_SPEW stderr
 * output in every build; per the comment above it should default to off. */
/*#define ZT_DEBUG_SPEW*/
#if !defined(__GNUC__) && (defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1) || defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2) || defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) || defined(__INTEL_COMPILER) || defined(__clang__))
#define __GNUC__ 3
#endif
#if defined(_WIN32) || defined(_WIN64)
#ifndef _WIN32_WINNT
#define _WIN32_WINNT 0x06010000
#endif
#ifdef _MSC_VER
#pragma warning(disable : 4290)
#pragma warning(disable : 4996)
#pragma warning(disable : 4101)
#endif
#ifndef __WINDOWS__
#define __WINDOWS__ 1
#endif
#ifndef NOMINMAX
#define NOMINMAX
#endif
#ifdef __UNIX_LIKE__
#undef __UNIX_LIKE__
#endif
#ifdef __BSD__
#undef __BSD__
#endif
#include <Shlobj.h>
#include <WinSock2.h>
#include <Windows.h>
#include <memoryapi.h>
#include <shlwapi.h>
#include <sys/param.h>
#include <ws2tcpip.h>
#endif /* Microsoft Windows */
#ifndef __WINDOWS__
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <sys/types.h>
#endif /* NOT Microsoft Windows */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#if (defined(__amd64) || defined(__amd64__) || defined(__x86_64) || defined(__x86_64__) || defined(__AMD64) || defined(__AMD64__) || defined(_M_X64))
#define ZT_ARCH_X64 1
#include <emmintrin.h>
#include <immintrin.h>
#include <smmintrin.h>
#include <xmmintrin.h>
#endif
#if defined(ZT_ARCH_X64) || defined(i386) || defined(__i386) || defined(__i386__) || defined(__i486__) || defined(__i586__) || defined(__i686__) || defined(_M_IX86) || defined(__X86__) || defined(_X86_) || defined(__I86__) || defined(__INTEL__) || defined(__386)
#define ZT_ARCH_X86 1
#endif
#if defined(__ARM_NEON) || defined(__ARM_NEON__) || defined(ZT_ARCH_ARM_HAS_NEON)
#if (defined(__APPLE__) && !defined(__LP64__)) || (defined(__ANDROID__) && defined(__arm__))
#ifdef ZT_ARCH_ARM_HAS_NEON
#undef ZT_ARCH_ARM_HAS_NEON
#endif
#else
#ifndef ZT_ARCH_ARM_HAS_NEON
#define ZT_ARCH_ARM_HAS_NEON 1
#endif
#endif
#include <arm_neon.h>
/*#include <arm_acle.h>*/
#endif
#if !defined(ZT_ARCH_X86) && !defined(__aarch64__)
#ifndef ZT_NO_UNALIGNED_ACCESS
#define ZT_NO_UNALIGNED_ACCESS 1
#endif
#endif
#ifdef __APPLE__
#include <TargetConditionals.h>
#include <machine/endian.h>
#ifndef __UNIX_LIKE__
#define __UNIX_LIKE__ 1
#endif
#ifndef __BSD__
#define __BSD__ 1
#endif
#ifndef __BYTE_ORDER
#define __BYTE_ORDER __DARWIN_BYTE_ORDER
#define __BIG_ENDIAN __DARWIN_BIG_ENDIAN
#define __LITTLE_ENDIAN __DARWIN_LITTLE_ENDIAN
#endif
#endif
#if defined(__linux__) || defined(linux) || defined(__LINUX__) || defined(__linux)
#ifndef __LINUX__
#define __LINUX__ 1
#endif
#ifndef __UNIX_LIKE__
#define __UNIX_LIKE__ 1
#endif
#include <endian.h>
#endif
#if defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__)
#ifndef __UNIX_LIKE__
#define __UNIX_LIKE__ 1
#endif
#ifndef __BSD__
#define __BSD__ 1
#endif
#include <sys/endian.h>
#ifndef RTF_MULTICAST
#define RTF_MULTICAST 0x20000000
#endif
#endif
#ifdef __WINDOWS__
#define ZT_PATH_SEPARATOR '\\'
#define ZT_PATH_SEPARATOR_S "\\"
#define ZT_EOL_S "\r\n"
#else
#define ZT_PATH_SEPARATOR '/'
#define ZT_PATH_SEPARATOR_S "/"
#define ZT_EOL_S "\n"
#endif
#ifdef SOCKET
#define ZT_SOCKET SOCKET
#else
#define ZT_SOCKET int
#endif
#ifdef INVALID_SOCKET
#define ZT_INVALID_SOCKET INVALID_SOCKET
#else
#define ZT_INVALID_SOCKET (-1)
#endif
#ifdef __cplusplus
#if __cplusplus >= 199711L
#include <atomic>
#ifndef __CPP11__
#define __CPP11__ 1
#endif
#if __cplusplus >= 201703L
#define ZT_MAYBE_UNUSED [[maybe_unused]]
#ifndef __CPP17__
#define __CPP17__ 1
#endif
#else
#define ZT_MAYBE_UNUSED
#endif
#endif
#if defined(ZT_ARCH_X64) || defined(__aarch64__)
#ifndef ZT_ARCH_APPEARS_64BIT
#define ZT_ARCH_APPEARS_64BIT 1
#endif
#endif
#ifdef UINTPTR_MAX
#if UINTPTR_MAX == UINT64_MAX
#ifndef ZT_ARCH_APPEARS_64BIT
#define ZT_ARCH_APPEARS_64BIT 1
#endif
#endif
#endif
#ifndef ZT_INLINE
#ifdef ZT_DEBUG
#define ZT_INLINE
#else
#if defined(__GNUC__) || defined(__clang__)
#define ZT_INLINE inline __attribute__((always_inline))
#else
#define ZT_INLINE inline
#endif
#endif
#endif
/* Right now we fail if no C++11. The core could be ported to old C++ compilers
* if a shim for <atomic> were included. */
#ifndef __CPP11__
#error TODO: to build on pre-c++11 compilers we will need to make a subset of std::atomic for integers
#define nullptr (0)
#define constexpr ZT_INLINE
#define noexcept throw()
#define explicit
#endif
#endif
#ifndef restrict
#if defined(__GNUC__) || defined(__clang__)
#define restrict __restrict__
#else
#define restrict
#endif
#endif
#ifndef likely
#if defined(__GNUC__) || defined(__clang__)
#define likely(x) __builtin_expect((x), 1)
#else
#define likely(x) x
#endif
#endif
#ifndef unlikely
#if defined(__GNUC__) || defined(__clang__)
#define unlikely(x) __builtin_expect((x), 0)
#else
#define unlikely(x) x
#endif
#endif
#if defined(__SIZEOF_INT128__) || ((defined(ZT_ARCH_X64) || defined(__aarch64__)) && defined(__GNUC__))
#ifdef __SIZEOF_INT128__
#define ZT_HAVE_UINT128 1
typedef unsigned __int128 uint128_t;
#else
#define ZT_HAVE_UINT128 1
typedef unsigned uint128_t __attribute__((mode(TI)));
#endif
#endif
#if !defined(__BYTE_ORDER) && defined(__BYTE_ORDER__)
#define __BYTE_ORDER __BYTE_ORDER__
#define __LITTLE_ENDIAN __ORDER_LITTLE_ENDIAN__
#define __BIG_ENDIAN __ORDER_BIG_ENDIAN__
#endif
#if !defined(__BYTE_ORDER) && defined(BYTE_ORDER)
#define __BYTE_ORDER BYTE_ORDER
#define __LITTLE_ENDIAN LITTLE_ENDIAN
#define __BIG_ENDIAN BIG_ENDIAN
#endif
#if !defined(__BYTE_ORDER) && defined(_BYTE_ORDER)
#define __BYTE_ORDER _BYTE_ORDER
#define __LITTLE_ENDIAN _LITTLE_ENDIAN
#define __BIG_ENDIAN _BIG_ENDIAN
#endif
#define ZT_VA_ARGS(...) , ##__VA_ARGS__
#ifdef ZT_DEBUG_SPEW
#define ZT_SPEW(f, ...) fprintf(stderr, "%s:%d(%s): " f ZT_EOL_S, __FILE__, __LINE__, __FUNCTION__ ZT_VA_ARGS(__VA_ARGS__))
#else
#define ZT_SPEW(f, ...)
#endif
#endif

View file

@ -1,127 +0,0 @@
/*
* Copyright (c)2013-2021 ZeroTier, Inc.
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
*/
/****/
#include "OwnershipCredential.hpp"
namespace ZeroTier {
void OwnershipCredential::addThing(const InetAddress &ip)
{
    // Silently ignore the request once the fixed-size thing table is full.
    if (m_thingCount >= ZT_CERTIFICATEOFOWNERSHIP_MAX_THINGS)
        return;
    switch (ip.as.sa.sa_family) {
        case AF_INET:
            // IPv4: record the 4-byte address (stored in network byte order).
            m_thingTypes[m_thingCount] = THING_IPV4_ADDRESS;
            Utils::copy<4>(m_thingValues[m_thingCount], &(reinterpret_cast<const struct sockaddr_in *>(&ip)->sin_addr.s_addr));
            ++m_thingCount;
            break;
        case AF_INET6:
            // IPv6: record the full 16-byte address.
            m_thingTypes[m_thingCount] = THING_IPV6_ADDRESS;
            Utils::copy<16>(m_thingValues[m_thingCount], reinterpret_cast<const struct sockaddr_in6 *>(&ip)->sin6_addr.s6_addr);
            ++m_thingCount;
            break;
        default:
            // Any other address family is not representable; nothing is added.
            break;
    }
}
void OwnershipCredential::addThing(const MAC &mac)
{
    // Add a 48-bit MAC address claim; dropped silently if the table is full.
    if (m_thingCount < ZT_CERTIFICATEOFOWNERSHIP_MAX_THINGS) {
        m_thingTypes[m_thingCount] = THING_MAC_ADDRESS;
        mac.copyTo(m_thingValues[m_thingCount]);
        ++m_thingCount;
    }
}
bool OwnershipCredential::sign(const Identity &signer)
{
uint8_t buf[ZT_CERTIFICATEOFOWNERSHIP_MARSHAL_SIZE_MAX + 16];
if (signer.hasPrivate()) {
m_signedBy = signer.address();
m_signatureLength = signer.sign(buf, (unsigned int)marshal(buf, true), m_signature, sizeof(m_signature));
return true;
}
return false;
}
// Serialize this credential into data; returns bytes written.
// When forSign is true the image is bracketed by 16 bytes of 0x7f on each
// side (so the signed image can never be mistaken for a wire message) and
// the signature fields themselves are omitted.
int OwnershipCredential::marshal(uint8_t data[ZT_CERTIFICATEOFOWNERSHIP_MARSHAL_SIZE_MAX], bool forSign) const noexcept
{
    int p = 0;
    if (forSign) {
        // Leading sentinel padding for the to-be-signed image.
        for (int k = 0; k < 16; ++k)
            data[p++] = 0x7f;
    }
    // 30-byte fixed header: network ID, timestamp, flags, credential ID, thing count.
    Utils::storeBigEndian<uint64_t>(data + p, m_networkId);
    Utils::storeBigEndian<uint64_t>(data + p + 8, (uint64_t)m_ts);
    Utils::storeBigEndian<uint64_t>(data + p + 16, m_flags);
    Utils::storeBigEndian<uint32_t>(data + p + 24, m_id);
    Utils::storeBigEndian<uint16_t>(data + p + 28, (uint16_t)m_thingCount);
    p += 30;
    // Each "thing" is one type byte followed by a fixed-size value field.
    for (unsigned int i = 0, j = m_thingCount; i < j; ++i) {
        data[p++] = m_thingTypes[i];
        Utils::copy<ZT_CERTIFICATEOFOWNERSHIP_MAX_THING_VALUE_SIZE>(data + p, m_thingValues[i]);
        p += ZT_CERTIFICATEOFOWNERSHIP_MAX_THING_VALUE_SIZE;
    }
    m_issuedTo.copyTo(data + p);
    p += ZT_ADDRESS_LENGTH;
    m_signedBy.copyTo(data + p);
    p += ZT_ADDRESS_LENGTH;
    if (!forSign) {
        // Signature marker byte (always 1 here) then length-prefixed signature.
        data[p++] = 1;
        Utils::storeBigEndian<uint16_t>(data + p, (uint16_t)m_signatureLength);
        p += 2;
        Utils::copy(data + p, m_signature, m_signatureLength);
        p += (int)m_signatureLength;
    }
    // Trailing extension field length (16-bit, always zero in this version).
    data[p++] = 0;
    data[p++] = 0;
    if (forSign) {
        // Trailing sentinel padding for the to-be-signed image.
        for (int k = 0; k < 16; ++k)
            data[p++] = 0x7f;
    }
    return p;
}
int OwnershipCredential::unmarshal(const uint8_t *data, int len) noexcept
{
if (len < 30)
return -1;
m_networkId = Utils::loadBigEndian<uint64_t>(data);
m_ts = (int64_t)Utils::loadBigEndian<uint64_t>(data + 8);
m_flags = Utils::loadBigEndian<uint64_t>(data + 16);
m_id = Utils::loadBigEndian<uint32_t>(data + 24);
m_thingCount = Utils::loadBigEndian<uint16_t>(data + 28);
if (m_thingCount > ZT_CERTIFICATEOFOWNERSHIP_MAX_THINGS)
return -1;
int p = 30;
for (unsigned int i = 0, j = m_thingCount; i < j; ++i) {
if ((p + 1 + ZT_CERTIFICATEOFOWNERSHIP_MAX_THING_VALUE_SIZE) > len)
return -1;
m_thingTypes[i] = data[p++];
Utils::copy<ZT_CERTIFICATEOFOWNERSHIP_MAX_THING_VALUE_SIZE>(m_thingValues[i], data + p);
p += ZT_CERTIFICATEOFOWNERSHIP_MAX_THING_VALUE_SIZE;
}
if ((p + ZT_ADDRESS_LENGTH + ZT_ADDRESS_LENGTH + 1 + 2) > len)
return -1;
m_issuedTo.setTo(data + p);
p += ZT_ADDRESS_LENGTH;
m_signedBy.setTo(data + p);
p += ZT_ADDRESS_LENGTH + 1;
p += 2 + Utils::loadBigEndian<uint16_t>(data + p);
if (p > len)
return -1;
return p;
}
} // namespace ZeroTier

View file

@ -1,180 +0,0 @@
/*
* Copyright (c)2013-2021 ZeroTier, Inc.
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
*/
/****/
#ifndef ZT_CERTIFICATEOFOWNERSHIP_HPP
#define ZT_CERTIFICATEOFOWNERSHIP_HPP
#include "Address.hpp"
#include "C25519.hpp"
#include "Constants.hpp"
#include "Credential.hpp"
#include "Identity.hpp"
#include "InetAddress.hpp"
#include "MAC.hpp"
// Max things per CertificateOfOwnership
#define ZT_CERTIFICATEOFOWNERSHIP_MAX_THINGS 16
// Maximum size of a thing's value field in bytes
#define ZT_CERTIFICATEOFOWNERSHIP_MAX_THING_VALUE_SIZE 16
#define ZT_CERTIFICATEOFOWNERSHIP_MARSHAL_SIZE_MAX (8 + 8 + 8 + 4 + 2 + ((1 + ZT_CERTIFICATEOFOWNERSHIP_MAX_THING_VALUE_SIZE) * ZT_CERTIFICATEOFOWNERSHIP_MAX_THINGS) + 5 + 5 + 1 + 2 + ZT_SIGNATURE_BUFFER_SIZE + 2)
namespace ZeroTier {
class Context;
/**
* Certificate indicating ownership of a "thing" such as an IP address
*
* These are used in conjunction with the rules engine to make IP addresses and
* other identifiers un-spoofable.
*/
class OwnershipCredential : public Credential {
    friend class Credential;

public:
    /** @return Credential type ID for certificates of ownership */
    static constexpr ZT_CredentialType credentialType() noexcept { return ZT_CREDENTIAL_TYPE_COO; }

    /** Kind of "thing" a credential entry claims ownership of */
    enum Thing { THING_NULL = 0, THING_MAC_ADDRESS = 1, THING_IPV4_ADDRESS = 2, THING_IPV6_ADDRESS = 3 };

    // Default construction zeroes the whole object (valid since it is a flat POD-style struct).
    ZT_INLINE OwnershipCredential() noexcept { memoryZero(this); }

    /**
     * @param nwid Network ID this credential applies to
     * @param ts Issuance timestamp
     * @param issuedTo Address of the member this credential is issued to
     * @param id Credential ID (used for sorting / replacement)
     */
    ZT_INLINE
    OwnershipCredential(const uint64_t nwid, const int64_t ts, const Address &issuedTo, const uint32_t id) noexcept
    {
        memoryZero(this);
        m_networkId = nwid;
        m_ts = ts;
        m_id = id;
        m_issuedTo = issuedTo;
    }

    /** @return Network ID this credential applies to */
    ZT_INLINE uint64_t networkId() const noexcept { return m_networkId; }
    /** @return Issuance timestamp */
    ZT_INLINE int64_t timestamp() const noexcept { return m_ts; }
    /** @return Revision counter (the timestamp doubles as the revision) */
    ZT_INLINE int64_t revision() const noexcept { return m_ts; }
    /** @return Credential ID */
    ZT_INLINE uint32_t id() const noexcept { return m_id; }
    /** @return Address this credential was issued to */
    ZT_INLINE const Address &issuedTo() const noexcept { return m_issuedTo; }
    /** @return Address of the signing identity */
    ZT_INLINE const Address &signer() const noexcept { return m_signedBy; }
    /** @return Raw signature bytes (length given by signatureLength()) */
    ZT_INLINE const uint8_t *signature() const noexcept { return m_signature; }
    /** @return Length of signature in bytes */
    ZT_INLINE unsigned int signatureLength() const noexcept { return m_signatureLength; }
    /** @return Number of "things" claimed by this credential */
    ZT_INLINE unsigned int thingCount() const noexcept { return (unsigned int)m_thingCount; }
    /** @return Type of the i'th thing (no bounds check; i must be < thingCount()) */
    ZT_INLINE Thing thingType(const unsigned int i) const noexcept { return (Thing)m_thingTypes[i]; }
    /** @return Value bytes of the i'th thing (no bounds check; i must be < thingCount()) */
    ZT_INLINE const uint8_t *thingValue(const unsigned int i) const noexcept { return m_thingValues[i]; }

    /** @return True if this credential claims ownership of the given IP (v4 or v6) */
    ZT_INLINE bool owns(const InetAddress &ip) const noexcept
    {
        if (ip.as.sa.sa_family == AF_INET)
            return this->_owns(THING_IPV4_ADDRESS, &(reinterpret_cast<const struct sockaddr_in *>(&ip)->sin_addr.s_addr), 4);
        else if (ip.as.sa.sa_family == AF_INET6)
            return this->_owns(THING_IPV6_ADDRESS, reinterpret_cast<const struct sockaddr_in6 *>(&ip)->sin6_addr.s6_addr, 16);
        else
            return false;
    }

    /** @return True if this credential claims ownership of the given MAC */
    ZT_INLINE bool owns(const MAC &mac) const noexcept
    {
        uint8_t tmp[6];
        mac.copyTo(tmp);
        return this->_owns(THING_MAC_ADDRESS, tmp, 6);
    }

    /**
     * Add an IP address to this certificate
     *
     * @param ip IPv4 or IPv6 address
     */
    void addThing(const InetAddress &ip);

    /**
     * Add an Ethernet MAC address
     *
     * ZeroTier MAC addresses are always un-spoofable. This could in theory be
     * used to make bridged MAC addresses un-spoofable as well, but it's not
     * currently implemented.
     *
     * @param mac 48-bit MAC address
     */
    void addThing(const MAC &mac);

    /**
     * Sign this certificate
     *
     * @param signer Signing identity, must have private key
     * @return True if signature was successful
     */
    bool sign(const Identity &signer);

    /**
     * Verify certificate signature
     *
     * @return Credential verification result: OK, bad signature, or identity needed
     */
    ZT_INLINE Credential::VerifyResult verify(const Context &ctx, const CallContext &cc) const { return s_verify(ctx, cc, *this); }

    /** @return Maximum possible marshaled size in bytes */
    static constexpr int marshalSizeMax() noexcept { return ZT_CERTIFICATEOFOWNERSHIP_MARSHAL_SIZE_MAX; }

    int marshal(uint8_t data[ZT_CERTIFICATEOFOWNERSHIP_MARSHAL_SIZE_MAX], bool forSign = false) const noexcept;

    int unmarshal(const uint8_t *data, int len) noexcept;

    // Provides natural sort order by ID
    ZT_INLINE bool operator<(const OwnershipCredential &coo) const noexcept { return (m_id < coo.m_id); }

    // Whole-object byte comparison is valid because the object is flat (no pointers)
    // and always fully zeroed before fields are set (memoryZero in constructors).
    ZT_INLINE bool operator==(const OwnershipCredential &coo) const noexcept { return (memcmp(this, &coo, sizeof(OwnershipCredential)) == 0); }
    ZT_INLINE bool operator!=(const OwnershipCredential &coo) const noexcept { return (memcmp(this, &coo, sizeof(OwnershipCredential)) != 0); }

private:
    // Linear scan for a thing of type t whose value bytes equal v[0..l-1].
    ZT_INLINE bool _owns(const Thing &t, const void *v, unsigned int l) const noexcept
    {
        for (unsigned int i = 0, j = m_thingCount; i < j; ++i) {
            if (m_thingTypes[i] == (uint8_t)t) {
                unsigned int k = 0;
                while (k < l) {
                    if (reinterpret_cast<const uint8_t *>(v)[k] != m_thingValues[i][k])
                        break;
                    ++k;
                }
                if (k == l)
                    return true;
            }
        }
        return false;
    }

    uint64_t m_networkId;      // network this credential applies to
    int64_t m_ts;              // issuance timestamp (doubles as revision)
    uint64_t m_flags;          // flags (reserved)
    uint32_t m_id;             // credential ID, natural sort key
    uint16_t m_thingCount;     // number of valid entries in the arrays below
    uint8_t m_thingTypes[ZT_CERTIFICATEOFOWNERSHIP_MAX_THINGS];
    uint8_t m_thingValues[ZT_CERTIFICATEOFOWNERSHIP_MAX_THINGS][ZT_CERTIFICATEOFOWNERSHIP_MAX_THING_VALUE_SIZE];
    Address m_issuedTo;        // member this credential is issued to
    Address m_signedBy;        // signing identity's address
    unsigned int m_signatureLength;
    uint8_t m_signature[ZT_SIGNATURE_BUFFER_SIZE];
};
} // namespace ZeroTier
#endif

View file

@ -1,31 +0,0 @@
/*
* Copyright (c)2013-2021 ZeroTier, Inc.
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
*/
/****/
#include "Path.hpp"
#include "Context.hpp"
#include "Node.hpp"
namespace ZeroTier {
bool Path::send(const Context &ctx, const CallContext &cc, const void *const data, const unsigned int len) noexcept
{
if (likely(ctx.cb.wirePacketSendFunction(reinterpret_cast<ZT_Node *>(ctx.node), ctx.uPtr, cc.tPtr, m_localSocket, reinterpret_cast<const ZT_InetAddress *>(&m_addr), data, len, 0) == 0)) {
m_lastOut = cc.ticks;
m_outMeter.log(cc.ticks, len);
return true;
}
return false;
}
} // namespace ZeroTier

View file

@ -1,213 +0,0 @@
/*
* Copyright (c)2013-2021 ZeroTier, Inc.
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
*/
/****/
#ifndef ZT_PATH_HPP
#define ZT_PATH_HPP
#include "CallContext.hpp"
#include "Constants.hpp"
#include "Containers.hpp"
#include "InetAddress.hpp"
#include "Meter.hpp"
#include "Mutex.hpp"
#include "SharedPtr.hpp"
#include "Utils.hpp"
namespace ZeroTier {
class Context;
template <unsigned int MF, unsigned int MFP, unsigned int GCT, unsigned int GCS, typename P> class Defragmenter;
/**
* A path across the physical network
*/
class Path {
    friend class SharedPtr<Path>;

    // Allow defragmenter to access fragment-in-flight info stored in Path for performance reasons.
    template <unsigned int MF, unsigned int MFP, unsigned int GCT, unsigned int GCS, typename P> friend class Defragmenter;

public:
    /**
     * Map key for paths designed for very fast lookup
     */
    class Key {
    public:
        ZT_INLINE Key() noexcept {}

        // Build a key from an IP endpoint; the hash mixes address, port, and a
        // per-process random nonce (Utils::s_mapNonce) to resist hash flooding.
        ZT_INLINE Key(const InetAddress &ip) noexcept
        {
            const unsigned int family = ip.as.sa.sa_family;
            if (family == AF_INET) {
                const uint16_t p = (uint16_t)ip.as.sa_in.sin_port;
                m_hashCode = Utils::hash64((((uint64_t)ip.as.sa_in.sin_addr.s_addr) << 16U) ^ ((uint64_t)p) ^ Utils::s_mapNonce);
                m_ipv6Net64 = 0; // 0 for IPv4, never 0 for IPv6
                m_port = p;
            }
            else {
                if (likely(family == AF_INET6)) {
                    const uint64_t a = Utils::loadMachineEndian<uint64_t>(reinterpret_cast<const uint8_t *>(ip.as.sa_in6.sin6_addr.s6_addr));
                    const uint64_t b = Utils::loadMachineEndian<uint64_t>(reinterpret_cast<const uint8_t *>(ip.as.sa_in6.sin6_addr.s6_addr) + 8);
                    const uint16_t p = ip.as.sa_in6.sin6_port;
                    m_hashCode = Utils::hash64(a ^ b ^ ((uint64_t)p) ^ Utils::s_mapNonce);
                    m_ipv6Net64 = a; // IPv6 /64
                    m_port = p;
                }
                else {
                    // This is not reachable since InetAddress can only be AF_INET or AF_INET6, but implement something.
                    m_hashCode = Utils::fnv1a32(&ip, sizeof(InetAddress));
                    m_ipv6Net64 = 0;
                    m_port = (uint16_t)family;
                }
            }
        }

        ZT_INLINE unsigned long hashCode() const noexcept { return (unsigned long)m_hashCode; }

        ZT_INLINE bool operator==(const Key &k) const noexcept { return (m_hashCode == k.m_hashCode) && (m_ipv6Net64 == k.m_ipv6Net64) && (m_port == k.m_port); }
        ZT_INLINE bool operator!=(const Key &k) const noexcept { return (!(*this == k)); }

        // Total order: hash code first, then /64, then port (for ordered maps).
        ZT_INLINE bool operator<(const Key &k) const noexcept
        {
            if (m_hashCode < k.m_hashCode) {
                return true;
            }
            else if (m_hashCode == k.m_hashCode) {
                if (m_ipv6Net64 < k.m_ipv6Net64) {
                    return true;
                }
                else if (m_ipv6Net64 == k.m_ipv6Net64) {
                    return (m_port < k.m_port);
                }
            }
            return false;
        }

        ZT_INLINE bool operator>(const Key &k) const noexcept { return (k < *this); }
        ZT_INLINE bool operator<=(const Key &k) const noexcept { return !(k < *this); }
        ZT_INLINE bool operator>=(const Key &k) const noexcept { return !(*this < k); }

    private:
        uint64_t m_hashCode;   // nonce-mixed hash of address and port
        uint64_t m_ipv6Net64;  // first 64 bits of IPv6 address; 0 marks IPv4
        uint16_t m_port;       // transport port (or family for unknown types)
    };

    // l = local socket handle from external code, r = remote physical address.
    ZT_INLINE Path(const int64_t l, const InetAddress &r) noexcept : m_localSocket(l), m_lastIn(0), m_lastOut(0), m_latency(-1), m_addr(r) {}

    /**
     * Send a packet via this path (last out time is also updated)
     *
     * @param data Packet data
     * @param len Packet length
     * @return True if transport reported success
     */
    bool send(const Context &ctx, const CallContext &cc, const void *data, unsigned int len) noexcept;

    /**
     * Explicitly update last sent time
     *
     * @param now Time of send
     * @param bytes Bytes sent
     */
    ZT_INLINE void sent(const CallContext &cc, const unsigned int bytes) noexcept
    {
        m_lastOut.store(cc.ticks, std::memory_order_relaxed);
        m_outMeter.log(cc.ticks, bytes);
    }

    /**
     * Called when a packet is received from this remote path, regardless of content
     *
     * @param now Time of receive
     * @param bytes Bytes received
     */
    ZT_INLINE void received(const CallContext &cc, const unsigned int bytes) noexcept
    {
        m_lastIn.store(cc.ticks, std::memory_order_relaxed);
        m_inMeter.log(cc.ticks, bytes);
    }

    /**
     * Update latency with a new measurement
     *
     * The estimate is a simple running average of the previous estimate and
     * the new sample; the first sample replaces the unknown (-1) state.
     *
     * @param newMeasurement New latency measurement in milliseconds
     */
    ZT_INLINE void updateLatency(const unsigned int newMeasurement) noexcept
    {
        const int lat = m_latency.load(std::memory_order_relaxed);
        if (likely(lat > 0)) {
            m_latency.store((lat + (int)newMeasurement) >> 1U, std::memory_order_relaxed);
        }
        else {
            m_latency.store((int)newMeasurement, std::memory_order_relaxed);
        }
    }

    /**
     * @return Latency in milliseconds or -1 if unknown
     */
    ZT_INLINE int latency() const noexcept { return m_latency.load(std::memory_order_relaxed); }

    /**
     * Check path aliveness
     *
     * @param now Current time
     */
    ZT_INLINE bool alive(const CallContext &cc) const noexcept { return ((cc.ticks - m_lastIn.load(std::memory_order_relaxed)) < ZT_PATH_ALIVE_TIMEOUT); }

    /**
     * @return Physical address
     */
    ZT_INLINE const InetAddress &address() const noexcept { return m_addr; }

    /**
     * @return Local socket as specified by external code
     */
    ZT_INLINE int64_t localSocket() const noexcept { return m_localSocket; }

    /**
     * @return Last time we received anything
     */
    ZT_INLINE int64_t lastIn() const noexcept { return m_lastIn.load(std::memory_order_relaxed); }

    /**
     * @return Last time we sent something
     */
    ZT_INLINE int64_t lastOut() const noexcept { return m_lastOut.load(std::memory_order_relaxed); }

private:
    const int64_t m_localSocket;      // local socket handle supplied by external code
    std::atomic<int64_t> m_lastIn;    // tick time of last inbound packet
    std::atomic<int64_t> m_lastOut;   // tick time of last outbound packet
    std::atomic<int> m_latency;       // running latency estimate (ms), -1 if unknown
    const InetAddress m_addr;         // remote physical address (immutable per Path)
    Meter<> m_inMeter;                // inbound byte rate meter
    Meter<> m_outMeter;               // outbound byte rate meter

    // These fields belong to Defragmenter but are kept in Path for performance
    // as it's much faster this way than having Defragmenter maintain another
    // mapping from paths to inbound message IDs.
    Set<uint64_t> m_inboundFragmentedMessages;
    Mutex m_inboundFragmentedMessages_l;

    std::atomic<int> __refCount;      // reference count managed by SharedPtr<Path>
};
} // namespace ZeroTier
#endif

View file

@ -1,709 +0,0 @@
/*
* Copyright (c)2013-2021 ZeroTier, Inc.
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
*/
/****/
#include "Peer.hpp"
#include "Constants.hpp"
#include "Context.hpp"
#include "Endpoint.hpp"
#include "Expect.hpp"
#include "InetAddress.hpp"
#include "Protocol.hpp"
#include "SelfAwareness.hpp"
#include "Topology.hpp"
#include "Trace.hpp"
namespace ZeroTier {
// An arbitrary byte to send in single byte probes, incremented on each probe.
static uint8_t s_arbitraryByte = (uint8_t)Utils::random();
// Construct an empty Peer: the active key pointer starts at the (not yet
// initialized) identity key, all timestamps/counters start at zero, and the
// version fields are unknown. init() must be called before the Peer is used.
Peer::Peer() : m_key((uintptr_t)&m_identityKey), m_keyRenegotiationNeeded(false), m_lastReceive(0), m_lastSend(0), m_lastSentHello(0), m_lastWhoisRequestReceived(0), m_lastEchoRequestReceived(0), m_lastProbeReceived(0), m_alivePathCount(0), m_bestPath(0), m_vProto(0), m_vMajor(0), m_vMinor(0), m_vRevision(0) {}
// Scrub the HELLO MAC key on destruction so secret material does not linger in freed memory.
Peer::~Peer() { Utils::burn(m_helloMacKey, sizeof(m_helloMacKey)); }
// Initialize this Peer for a given remote identity: performs key agreement
// between our identity and the peer's to derive the long-lived symmetric
// "identity key". Returns false if key agreement fails.
bool Peer::init(const Context &ctx, const CallContext &cc, const Identity &peerIdentity)
{
    RWMutex::Lock l(m_lock);
    m_id = peerIdentity;
    uint8_t k[ZT_SYMMETRIC_KEY_SIZE];
    if (unlikely(!ctx.identity.agree(peerIdentity, k)))
        return false;
    m_identityKey.init(cc.ticks, k);
    // Scrub the plaintext key from the stack once it has been installed.
    Utils::burn(k, sizeof(k));
    m_deriveSecondaryIdentityKeys();
    return true;
}
// Called for every packet received from this peer. Updates receive stats and,
// for direct (0-hop) packets arriving via an unknown path, performs path
// learning: authenticated OK responses are adopted immediately, anything else
// triggers a rate-limited confirmation HELLO.
void Peer::received(const Context &ctx, const CallContext &cc, const SharedPtr<Path> &path, const unsigned int hops, const uint64_t packetId, const unsigned int payloadLength, const Protocol::Verb verb, const Protocol::Verb /*inReVerb*/)
{
    m_lastReceive.store(cc.ticks, std::memory_order_relaxed);
    m_inMeter.log(cc.ticks, payloadLength);

    // NOTE: in the most common scenario we will be talking via the best path.
    // This does a check without a full mutex lock and if so there's nothing more
    // to do, which speeds things up in that case.
    if ((hops == 0) && ((uintptr_t)path.ptr() != m_bestPath.load(std::memory_order_relaxed))) {
        RWMutex::RMaybeWLock l(m_lock);

        // If this matches an existing path, skip path learning stuff. For the small number
        // of paths a peer will have linear scan is the fastest way to do lookup.
        for (unsigned int i = 0; i < m_alivePathCount; ++i) {
            if (m_paths[i] == path)
                return;
        }

        // If we made it here, we don't already know this path.
        if (ctx.node->filterPotentialPath(cc.tPtr, m_id, path->localSocket(), path->address())) {
            // SECURITY: note that if we've made it here we expected this OK, see Expect.hpp.
            // There is replay protection in effect for OK responses.
            if (verb == Protocol::VERB_OK) {
                // Acquire write access to the object and thus path set.
                l.writing();

                // Pick a slot for the new path, evicting the lowest-priority
                // entry if the table is already full.
                unsigned int newPathIdx;
                if (m_alivePathCount == ZT_MAX_PEER_NETWORK_PATHS) {
                    m_prioritizePaths(cc);
                    if (m_alivePathCount == ZT_MAX_PEER_NETWORK_PATHS) {
                        newPathIdx = ZT_MAX_PEER_NETWORK_PATHS - 1;
                    }
                    else {
                        newPathIdx = m_alivePathCount++;
                    }
                }
                else {
                    newPathIdx = m_alivePathCount++;
                }

                // Save a reference to the current path in case we replace it. This
                // should technically never happen, but this ensures safety if it does.
                const SharedPtr<Path> currentBest(reinterpret_cast<Path *>(m_bestPath.load(std::memory_order_acquire)));

                SharedPtr<Path> old;
                old.move(m_paths[newPathIdx]);
                m_paths[newPathIdx] = path;
                m_prioritizePaths(cc);

                ctx.t->learnedNewPath(cc, 0x582fabdd, packetId, m_id, path->address(), (old) ? old->address() : InetAddress());
            }
            else {
                // Not an authenticated OK: do not learn the path, but send a
                // HELLO to try to confirm it, rate limited per endpoint.
                int64_t &lt = m_lastTried[Endpoint(path->address())];
                // BUG FIX: this comparison was inverted ("< ZT_PATH_MIN_TRY_INTERVAL"),
                // which suppressed the confirmation HELLO except in the window right
                // after the endpoint had been tried. Every other m_lastTried check in
                // this file proceeds only after the minimum interval has elapsed.
                if ((cc.ticks - lt) >= ZT_PATH_MIN_TRY_INTERVAL) {
                    lt = cc.ticks;
                    path->sent(cc, m_hello(ctx, cc, path->localSocket(), path->address(), false));
                    ctx.t->tryingNewPath(cc, 0xb7747ddd, m_id, path->address(), path->address(), packetId, (uint8_t)verb, m_id);
                }
            }
        }
    }
}
void Peer::send(const Context &ctx, const CallContext &cc, const void *data, unsigned int len) noexcept
{
SharedPtr<Path> via(reinterpret_cast<Path *>(m_bestPath.load(std::memory_order_acquire)));
if (likely(via)) {
if (likely(via->send(ctx, cc, data, len)))
this->sent(cc, len);
}
else {
const SharedPtr<Peer> root(ctx.topology->root());
if (likely((root) && (root.ptr() != this))) {
via = root->path(cc);
if (likely(via)) {
if (likely(via->send(ctx, cc, data, len))) {
root->relayed(cc, len);
this->sent(cc, len);
}
}
}
}
}
// Periodic maintenance for this peer: decides whether a HELLO (and possibly a
// key renegotiation) is due, refreshes path priorities, works through the
// NAT-traversal try queue, keeps live paths alive, and garbage collects the
// per-endpoint attempt-time map.
void Peer::pulse(const Context &ctx, const CallContext &cc)
{
    RWMutex::Lock l(m_lock);

    // Grab current key (this is never NULL).
    SymmetricKey *const key = reinterpret_cast<SymmetricKey *>(m_key.load(std::memory_order_relaxed));

    // Determine if we need a new ephemeral key pair and if a new HELLO needs
    // to be sent. The latter happens every ZT_PEER_HELLO_INTERVAL or if a new
    // ephemeral key pair is generated.
    bool needHello = (((m_vProto >= 20) && (m_keyRenegotiationNeeded || (key == &m_identityKey) || ((cc.ticks - key->timestamp()) >= (ZT_SYMMETRIC_KEY_TTL / 2)) || (key->odometer() > (ZT_SYMMETRIC_KEY_TTL_MESSAGES / 2)))) || ((cc.ticks - m_lastSentHello) >= ZT_PEER_HELLO_INTERVAL));

    // Prioritize paths and more importantly for here forget dead ones.
    m_prioritizePaths(cc);

    if (m_tryQueue.empty()) {
        if (m_alivePathCount == 0) {
            // If there are no living paths and nothing in the try queue, try addresses
            // from any locator we have on file or that are fetched via the external API
            // callback (if one was supplied).

            if (m_locator) {
                for (Vector<std::pair<Endpoint, SharedPtr<const Locator::EndpointAttributes>>>::const_iterator ep(m_locator->endpoints().begin()); ep != m_locator->endpoints().end(); ++ep) {
                    if (ep->first.type == ZT_ENDPOINT_TYPE_IP_UDP) {
                        if (ctx.node->filterPotentialPath(cc.tPtr, m_id, -1, ep->first.ip())) {
                            // Rate limit attempts per endpoint.
                            int64_t &lt = m_lastTried[ep->first];
                            if ((cc.ticks - lt) > ZT_PATH_MIN_TRY_INTERVAL) {
                                lt = cc.ticks;
                                ctx.t->tryingNewPath(cc, 0x84b22322, m_id, ep->first.ip(), InetAddress::NIL, 0, 0, Identity::NIL);
                                sent(cc, m_sendProbe(ctx, cc, -1, ep->first.ip(), nullptr, 0));
                            }
                        }
                    }
                }
            }

            InetAddress addr;
            if (ctx.node->externalPathLookup(cc.tPtr, m_id, -1, addr)) {
                if ((addr) && ctx.node->filterPotentialPath(cc.tPtr, m_id, -1, addr)) {
                    int64_t &lt = m_lastTried[Endpoint(addr)];
                    if ((cc.ticks - lt) > ZT_PATH_MIN_TRY_INTERVAL) {
                        lt = cc.ticks;
                        ctx.t->tryingNewPath(cc, 0x84a10000, m_id, addr, InetAddress::NIL, 0, 0, Identity::NIL);
                        sent(cc, m_sendProbe(ctx, cc, -1, addr, nullptr, 0));
                    }
                }
            }
        }
    }
    else {
        // Work through the try queue. Each pass processes at most a bounded
        // number of entries; items either advance (requeue) or are discarded.
        unsigned int attempts = 0;
        for (;;) {
            p_TryQueueItem &qi = m_tryQueue.front();

            if (qi.target.isInetAddr()) {
                // Skip entry if it overlaps with any currently active IP.
                for (unsigned int i = 0; i < m_alivePathCount; ++i) {
                    if (m_paths[i]->address().ipsEqual(qi.target.ip()))
                        goto discard_queue_item;
                }
            }

            if (qi.target.type == ZT_ENDPOINT_TYPE_IP_UDP) {
                // TODO: need to send something like a NOP for older target nodes.
                ++attempts;
                if (qi.iteration < 0) {
                    // If iteration is less than zero, try to contact the original address.
                    // It may be set to a larger negative value to try multiple times such
                    // as e.g. -3 to try 3 times.
                    sent(cc, m_sendProbe(ctx, cc, -1, qi.target.ip(), nullptr, 0));
                    ++qi.iteration;
                    goto requeue_item;
                }
                else if (qi.target.ip().isV4() && (m_alivePathCount == 0)) {
                    // When iteration reaches zero the queue item is dropped unless it's
                    // IPv4 and we have no direct paths. In that case some heavier NAT-t
                    // strategies are attempted.
                    if (qi.target.ip().port() < 1024) {
                        // If the source port is privileged, we actually scan every possible
                        // privileged port in random order slowly over multiple iterations
                        // of pulse(). This is done in batches of ZT_NAT_T_PORT_SCAN_MAX.
                        uint16_t ports[ZT_NAT_T_PORT_SCAN_MAX];
                        unsigned int pn = 0;
                        while ((pn < ZT_NAT_T_PORT_SCAN_MAX) && (qi.iteration < 1023)) {
                            const uint16_t p = ctx.randomPrivilegedPortOrder[qi.iteration++];
                            if ((unsigned int)p != qi.target.ip().port())
                                ports[pn++] = p;
                        }
                        if (pn > 0)
                            sent(cc, m_sendProbe(ctx, cc, -1, qi.target.ip(), ports, pn));
                        if (qi.iteration < 1023)
                            goto requeue_item;
                    }
                    else {
                        // For un-privileged ports we'll try ZT_NAT_T_PORT_SCAN_MAX ports
                        // beyond the one we were sent to catch some sequentially assigning
                        // symmetric NATs.
                        InetAddress tmp(qi.target.ip());
                        unsigned int p = tmp.port() + 1 + (unsigned int)qi.iteration++;
                        if (p > 65535)
                            p -= 64512; // wrap back to 1024
                        tmp.setPort(p);
                        sent(cc, m_sendProbe(ctx, cc, -1, tmp, nullptr, 0));
                        if (qi.iteration < ZT_NAT_T_PORT_SCAN_MAX)
                            goto requeue_item;
                    }
                }
            }

            // Discard front item unless the code skips to requeue_item.
        discard_queue_item:
            m_tryQueue.pop_front();
            if (attempts >= std::min((unsigned int)m_tryQueue.size(), (unsigned int)ZT_NAT_T_PORT_SCAN_MAX))
                break;
            else
                continue;

            // If the code skips here the front item is instead moved to the back.
        requeue_item:
            if (m_tryQueue.size() > 1) // no point in doing this splice if there's only one item
                m_tryQueue.splice(m_tryQueue.end(), m_tryQueue, m_tryQueue.begin());
            if (attempts >= std::min((unsigned int)m_tryQueue.size(), (unsigned int)ZT_NAT_T_PORT_SCAN_MAX))
                break;
            else
                continue;
        }
    }

    // Do keepalive on all currently active paths, sending HELLO to the first
    // if needHello is true and sending small keepalives to others.
    for (unsigned int i = 0; i < m_alivePathCount; ++i) {
        if (needHello) {
            needHello = false;
            const unsigned int bytes = m_hello(ctx, cc, m_paths[i]->localSocket(), m_paths[i]->address(), m_keyRenegotiationNeeded);
            if (bytes) {
                m_paths[i]->sent(cc, bytes);
                sent(cc, bytes);
                m_lastSentHello = cc.ticks;
                m_keyRenegotiationNeeded = false;
            }
        }
        else if ((cc.ticks - m_paths[i]->lastOut()) >= ZT_PATH_KEEPALIVE_PERIOD) {
            // Single arbitrary byte keepalive; content is irrelevant.
            m_paths[i]->send(ctx, cc, &s_arbitraryByte, 1);
            ++s_arbitraryByte;
            sent(cc, 1);
        }
    }

    // Send a HELLO indirectly if we were not able to send one via any direct path.
    if (needHello) {
        const SharedPtr<Peer> root(ctx.topology->root());
        if (root) {
            const SharedPtr<Path> via(root->path(cc));
            if (via) {
                const unsigned int bytes = m_hello(ctx, cc, via->localSocket(), via->address(), m_keyRenegotiationNeeded);
                if (bytes) {
                    via->sent(cc, bytes);
                    root->relayed(cc, bytes);
                    sent(cc, bytes);
                    m_lastSentHello = cc.ticks;
                    m_keyRenegotiationNeeded = false;
                }
            }
        }
    }

    // Clean m_lastTried
    for (Map<Endpoint, int64_t>::iterator i(m_lastTried.begin()); i != m_lastTried.end();) {
        if ((cc.ticks - i->second) > (ZT_PATH_MIN_TRY_INTERVAL * 3))
            m_lastTried.erase(i++);
        else
            ++i;
    }
}
// Queue an attempt to contact this peer at the given endpoint. 'tries' is the
// number of initial direct attempts before heavier NAT traversal kicks in
// (stored as a negative iteration counter in the try queue).
void Peer::contact(const Context &ctx, const CallContext &cc, const Endpoint &ep, int tries)
{
    RWMutex::Lock l(m_lock);

    // See if there's already a path to this endpoint and if so ignore it.
    if (ep.isInetAddr()) {
        for (unsigned int i = 0; i < m_alivePathCount; ++i) {
            if (m_paths[i]->address().ipsEqual(ep.ip()))
                return;
        }
    }

    // Check underlying path attempt rate limit.
    int64_t &lt = m_lastTried[ep];
    if ((cc.ticks - lt) < ZT_PATH_MIN_TRY_INTERVAL)
        return;
    lt = cc.ticks;

    // For IPv4 addresses we send a tiny packet with a low TTL, which helps to
    // traverse some NAT types. It has no effect otherwise.
    if (ep.isInetAddr() && ep.ip().isV4()) {
        ctx.cb.wirePacketSendFunction(reinterpret_cast<ZT_Node *>(ctx.node), ctx.uPtr, cc.tPtr, -1, reinterpret_cast<const ZT_InetAddress *>(&ep.ip()), &s_arbitraryByte, 1, 2);
        ++s_arbitraryByte;
    }

    // Make sure address is not already in the try queue. If so just update it.
    for (List<p_TryQueueItem>::iterator i(m_tryQueue.begin()); i != m_tryQueue.end(); ++i) {
        if (i->target.isSameAddress(ep)) {
            i->target = ep;
            i->iteration = -tries;
            return;
        }
    }

    m_tryQueue.push_back(p_TryQueueItem(ep, -tries));
}
// Reset (drop) all live paths whose remote address matches the given family
// and IP scope, probing each one first; all other paths are kept and the
// path array is compacted in place.
void Peer::resetWithinScope(const Context &ctx, const CallContext &cc, InetAddress::IpScope scope, int inetAddressFamily)
{
    RWMutex::Lock l(m_lock);
    unsigned int pc = 0;
    for (unsigned int i = 0; i < m_alivePathCount; ++i) {
        if ((m_paths[i]) && (((int)m_paths[i]->address().as.sa.sa_family == inetAddressFamily) && (m_paths[i]->address().ipScope() == scope))) {
            // Matching path: send a probe to it, then drop it from the array.
            const unsigned int bytes = m_sendProbe(ctx, cc, m_paths[i]->localSocket(), m_paths[i]->address(), nullptr, 0);
            m_paths[i]->sent(cc, bytes);
            sent(cc, bytes);
        }
        else {
            // BUG FIX: the keep case was "else if (pc != i) m_paths[pc++] = m_paths[i];"
            // which failed to advance pc for a path kept in place (pc == i), so the
            // next kept path overwrote it and the tail-zeroing below dropped live
            // paths. Always advance pc for kept paths; copy only when compacting.
            if (pc != i)
                m_paths[pc] = m_paths[i];
            ++pc;
        }
    }
    m_alivePathCount = pc;
    while (pc < ZT_MAX_PEER_NETWORK_PATHS)
        m_paths[pc++].zero();
}
void Peer::save(const Context &ctx, const CallContext &cc) const
{
uint8_t buf[8 + ZT_PEER_MARSHAL_SIZE_MAX];
// Prefix each saved peer with the current timestamp.
Utils::storeBigEndian<uint64_t>(buf, (uint64_t)cc.clock);
const int len = marshal(ctx, buf + 8);
if (len > 0) {
uint64_t id[2];
id[0] = m_id.address().toInt();
id[1] = 0;
ctx.store->put(cc, ZT_STATE_OBJECT_PEER, id, 1, buf, (unsigned int)len + 8);
}
}
// Serialize this peer for local state caching (not for the wire). Returns the
// number of bytes written or -1 on failure.
int Peer::marshal(const Context &ctx, uint8_t data[ZT_PEER_MARSHAL_SIZE_MAX]) const noexcept
{
    RWMutex::RLock l(m_lock);

    data[0] = 16; // serialized peer version

    // Include our identity's address to detect if this changes and require
    // recomputation of m_identityKey.
    ctx.identity.address().copyTo(data + 1);

    // SECURITY: encryption in place is only to protect secrets if they are
    // cached to local storage. It's not used over the wire. Dumb ECB is fine
    // because secret keys are random and have no structure to reveal.
    ctx.localSecretCipher.encrypt(m_identityKey.key(), data + 1 + ZT_ADDRESS_LENGTH);
    ctx.localSecretCipher.encrypt(m_identityKey.key() + 16, data + 1 + ZT_ADDRESS_LENGTH + 16);
    ctx.localSecretCipher.encrypt(m_identityKey.key() + 32, data + 1 + ZT_ADDRESS_LENGTH + 32);

    int p = 1 + ZT_ADDRESS_LENGTH + 48;

    int s = m_id.marshal(data + p, false);
    if (s < 0)
        return -1;
    p += s;

    // Optional locator: presence byte (0/1) followed by the marshaled locator.
    if (m_locator) {
        data[p++] = 1;
        s = m_locator->marshal(data + p);
        if (s <= 0)
            return s;
        p += s;
    }
    else {
        data[p++] = 0;
    }

    // Remote software version: protocol, major, minor, revision.
    Utils::storeBigEndian(data + p, (uint16_t)m_vProto);
    p += 2;
    Utils::storeBigEndian(data + p, (uint16_t)m_vMajor);
    p += 2;
    Utils::storeBigEndian(data + p, (uint16_t)m_vMinor);
    p += 2;
    Utils::storeBigEndian(data + p, (uint16_t)m_vRevision);
    p += 2;

    // Zero-length trailing extension field for forward compatibility.
    data[p++] = 0;
    data[p++] = 0;

    return p;
}
// Restore a peer from a record produced by marshal(). Returns the number of
// bytes consumed or a negative value on any parse/version/crypto failure.
int Peer::unmarshal(const Context &ctx, const int64_t ticks, const uint8_t *restrict data, const int len) noexcept
{
RWMutex::Lock l(m_lock);
// Reject records too short for the fixed prefix or with a different
// serialization version byte than marshal() writes (16).
if ((len <= (1 + ZT_ADDRESS_LENGTH + 48)) || (data[0] != 16))
return -1;
// Ephemeral keys/sessions are never persisted; reset them all.
for (unsigned int i = 0; i < ZT_PEER_EPHEMERAL_KEY_BUFFER_SIZE; ++i) {
m_ephemeralKeysSent[i].creationTime = -1;
m_ephemeralSessions[i].established = false;
}
// Until an ephemeral session is established the active key is the
// long-lived identity key.
m_key.store((uintptr_t)&m_identityKey, std::memory_order_relaxed);
bool identityKeyRestored = false;
// The cached identity key is only usable if our own identity address is
// unchanged since the record was written; otherwise it is recomputed below.
if (Address(data + 1) == ctx.identity.address()) {
uint8_t k[ZT_SYMMETRIC_KEY_SIZE];
static_assert(ZT_SYMMETRIC_KEY_SIZE == 48, "marshal() and unmarshal() must be revisited if ZT_SYMMETRIC_KEY_SIZE is changed");
ctx.localSecretCipher.decrypt(data + 1 + ZT_ADDRESS_LENGTH, k);
ctx.localSecretCipher.decrypt(data + 1 + ZT_ADDRESS_LENGTH + 16, k + 16);
ctx.localSecretCipher.decrypt(data + 1 + ZT_ADDRESS_LENGTH + 32, k + 32);
m_identityKey.init(ticks, k);
Utils::burn(k, sizeof(k)); // wipe plaintext key from the stack
identityKeyRestored = true;
}
int p = 1 + ZT_ADDRESS_LENGTH + 48;
int s = m_id.unmarshal(data + p, len - p);
if (s < 0)
return s;
p += s;
// Fall back to a fresh key agreement with the peer's identity if the
// cached key could not be used.
if (!identityKeyRestored) {
uint8_t k[ZT_SYMMETRIC_KEY_SIZE];
if (!ctx.identity.agree(m_id, k))
return -1;
m_identityKey.init(ticks, k);
Utils::burn(k, sizeof(k));
}
if (p >= len)
return -1;
// Locator flag byte: 0 = none, 1 = marshaled locator follows.
if (data[p] == 0) {
++p;
m_locator.zero();
}
else if (data[p] == 1) {
++p;
Locator *const loc = new Locator();
s = loc->unmarshal(data + p, len - p);
// SharedPtr takes ownership immediately so loc is freed even on error.
m_locator.set(loc);
if (s < 0)
return s;
p += s;
}
else {
return -1;
}
// 8 bytes of version fields plus the 2-byte extra-data length must remain.
if ((p + 10) > len)
return -1;
m_vProto = Utils::loadBigEndian<uint16_t>(data + p);
p += 2;
m_vMajor = Utils::loadBigEndian<uint16_t>(data + p);
p += 2;
m_vMinor = Utils::loadBigEndian<uint16_t>(data + p);
p += 2;
m_vRevision = Utils::loadBigEndian<uint16_t>(data + p);
p += 2;
// Skip any trailing extension data (length-prefixed; currently written as 0).
p += 2 + (int)Utils::loadBigEndian<uint16_t>(data + p);
// Re-derive HELLO dictionary cipher and HMAC key from the identity key.
m_deriveSecondaryIdentityKeys();
return (p > len) ? -1 : p;
}
struct _PathPriorityComparisonOperator {
    // Strict-weak ordering for sorting paths: non-NULL paths sort before NULL
    // ones, and among non-NULL paths the one that most recently received a
    // packet (greater lastIn()) comes first.
    ZT_INLINE bool operator()(const SharedPtr<Path> &a, const SharedPtr<Path> &b) const noexcept
    {
        if (!a)
            return false;
        if (!b)
            return true;
        return a->lastIn() > b->lastIn();
    }
};
// Drop dead paths, sort the survivors by recency of inbound traffic, and
// publish the new best path pointer for lock-free readers.
void Peer::m_prioritizePaths(const CallContext &cc)
{
// assumes m_lock is locked
// Need to hold the current best just in case we drop it before changing the atomic.
const SharedPtr<Path> oldBest(reinterpret_cast<Path *>(m_bestPath.load(std::memory_order_acquire)));
// Clean and reprioritize paths.
if (m_alivePathCount != 0) {
unsigned int newCnt = 0;
// Compact surviving (non-NULL, alive) paths toward the front of m_paths.
for (unsigned int i = 0; i < m_alivePathCount; ++i) {
if ((m_paths[i]) && (m_paths[i]->alive(cc))) {
if (i != newCnt)
m_paths[newCnt].move(m_paths[i]);
++newCnt;
}
}
// Release references held by the now-unused tail slots.
for (unsigned int i = newCnt; i < m_alivePathCount; ++i)
m_paths[i].zero();
m_alivePathCount = newCnt;
// Most recently active path first (see _PathPriorityComparisonOperator).
std::sort(m_paths, m_paths + newCnt, _PathPriorityComparisonOperator());
}
// Update atomic holding pointer to best path.
m_bestPath.store((m_alivePathCount != 0) ? (uintptr_t)m_paths[0].ptr() : (uintptr_t)0, std::memory_order_release);
}
unsigned int Peer::m_sendProbe(const Context &ctx, const CallContext &cc, int64_t localSocket, const InetAddress &atAddress, const uint16_t *ports, const unsigned int numPorts)
{
// Assumes m_lock is locked
// SECURITY: we use the long-lived identity key here since this is used for
// trial contacts, etc. It contains no meaningful payload so who cares if
// some future attacker compromises it.
uint8_t p[ZT_PROTO_MIN_PACKET_LENGTH];
Utils::storeMachineEndian<uint64_t>(p + ZT_PROTO_PACKET_ID_INDEX, m_identityKey.nextMessage(ctx.identity.address(), m_id.address()));
m_id.address().copyTo(p + ZT_PROTO_PACKET_DESTINATION_INDEX);
ctx.identity.address().copyTo(p + ZT_PROTO_PACKET_SOURCE_INDEX);
p[ZT_PROTO_PACKET_FLAGS_INDEX] = 0;
p[ZT_PROTO_PACKET_VERB_INDEX] = Protocol::VERB_NOP;
ctx.expect->sending(Protocol::armor(p, ZT_PROTO_MIN_PACKET_LENGTH, m_identityKey, cipher()), cc.ticks);
if (numPorts > 0) {
InetAddress tmp(atAddress);
for (unsigned int i = 0; i < numPorts; ++i) {
tmp.setPort(ports[i]);
ctx.cb.wirePacketSendFunction(reinterpret_cast<ZT_Node *>(ctx.node), ctx.uPtr, cc.tPtr, -1, reinterpret_cast<const ZT_InetAddress *>(&tmp), p, ZT_PROTO_MIN_PACKET_LENGTH, 0);
}
return ZT_PROTO_MIN_PACKET_LENGTH * numPorts;
}
else {
ctx.cb.wirePacketSendFunction(reinterpret_cast<ZT_Node *>(ctx.node), ctx.uPtr, cc.tPtr, -1, reinterpret_cast<const ZT_InetAddress *>(&atAddress), p, ZT_PROTO_MIN_PACKET_LENGTH, 0);
return ZT_PROTO_MIN_PACKET_LENGTH;
}
}
void Peer::m_deriveSecondaryIdentityKeys() noexcept
{
// This is called in init() and unmarshal() to use KBKDF to derive keys
// for encrypting the dictionary portion of HELLOs and HELLO HMAC from the
// primary long-lived identity key.
uint8_t hk[ZT_SYMMETRIC_KEY_SIZE];
KBKDFHMACSHA384(m_identityKey.key(), ZT_KBKDF_LABEL_HELLO_DICTIONARY_ENCRYPT, 0, 0, hk);
m_helloCipher.init(hk);
Utils::burn(hk, sizeof(hk));
KBKDFHMACSHA384(m_identityKey.key(), ZT_KBKDF_LABEL_PACKET_HMAC, 0, 0, m_helloMacKey);
}
// Build and transmit a HELLO to this peer at a given address.
// Returns the number of bytes sent, or 0 on failure.
unsigned int Peer::m_hello(const Context &ctx, const CallContext &cc, int64_t localSocket, const InetAddress &atAddress, const bool forceNewKey)
{
// assumes m_lock is at least locked for reading
/* SECURITY: note that HELLO is sent mostly in the clear and always uses
 * the long-lived identity key. This allows us to always bootstrap regardless
 * of ephemeral key state. HELLO contains nothing particularly sensitive,
 * though part of the message is encrypted with another derived key just to
 * conceal things like ephemeral public keys for defense in depth. HELLO is
 * always sent with the old salsa/poly algorithm (but minus salsa of course
 * as it's plaintext), but terminates with an additional HMAC-SHA3
 * authenticator to add extra hardness to the key exchange. The use of HMAC
 * here is also needed to satisfy some FIPS/NIST type requirements. */
// Pick or generate an ephemeral key to send with this HELLO.
p_EphemeralPrivate *ephemeral;
{
// Scan the ring of sent keys for the newest (to reuse) and the oldest
// (slot to overwrite if a fresh key is needed).
p_EphemeralPrivate *earliest = m_ephemeralKeysSent;
p_EphemeralPrivate *latest = nullptr;
int64_t earliestEphemeralPrivate = 9223372036854775807LL; // INT64_MAX sentinel
int64_t latestEphemeralPrivate = 0;
for (unsigned int k = 0; k < ZT_PEER_EPHEMERAL_KEY_BUFFER_SIZE; ++k) {
const int64_t ct = m_ephemeralKeysSent[k].creationTime;
if (ct <= earliestEphemeralPrivate) {
earliestEphemeralPrivate = ct;
earliest = m_ephemeralKeysSent + k;
}
else if (ct >= latestEphemeralPrivate) { // creationTime will be -1 if not initialized
latestEphemeralPrivate = ct;
latest = m_ephemeralKeysSent + k;
}
}
// Reuse the newest key if it is still within half its TTL and no rekey
// was forced; otherwise generate a fresh hybrid C25519+P-384 key pair
// in the oldest slot.
if ((latest != nullptr) && (!forceNewKey) && ((cc.ticks - latest->creationTime) < (ZT_SYMMETRIC_KEY_TTL / 2))) {
ephemeral = latest;
}
else {
earliest->creationTime = cc.ticks;
earliest->pub.type = ZT_PROTO_EPHEMERAL_KEY_TYPE_C25519_P384;
C25519::generateC25519(earliest->pub.c25519Public, earliest->c25519Private);
ECC384GenerateKey(earliest->pub.p384Public, earliest->p384Private);
SHA384(earliest->sha384OfPublic, &earliest->pub, sizeof(earliest->pub));
ephemeral = earliest;
}
}
// Initialize packet and add basic fields like identity and sent-to address.
Buf outp;
const uint64_t packetId = m_identityKey.nextMessage(ctx.identity.address(), m_id.address());
int ii = Protocol::newPacket(outp, packetId, m_id.address(), ctx.identity.address(), Protocol::VERB_HELLO);
outp.wI8(ii, ZT_PROTO_VERSION);
outp.wI8(ii, ZEROTIER_VERSION_MAJOR);
outp.wI8(ii, ZEROTIER_VERSION_MINOR);
outp.wI16(ii, ZEROTIER_VERSION_REVISION);
outp.wI64(ii, (uint64_t)cc.clock);
outp.wO(ii, ctx.identity);
outp.wO(ii, atAddress);
// Add 12 random bytes to act as an IV for the encrypted dictionary field.
const int ivStart = ii;
outp.wR(ii, 12);
// LEGACY: the six reserved bytes after the IV exist for legacy compatibility with v1.x nodes.
// Once those are dead they'll become just reserved bytes for future use as flags etc.
outp.wI32(ii, 0); // reserved bytes
void *const legacyMoonCountStart = outp.unsafeData + ii;
outp.wI16(ii, 0);
// v1.x expects this two-byte field Salsa-encrypted; IV is the packet ID
// with its low three bits masked off.
const uint64_t legacySalsaIv = packetId & ZT_CONST_TO_BE_UINT64(0xfffffffffffffff8ULL);
Salsa20(m_identityKey.key(), &legacySalsaIv).crypt12(legacyMoonCountStart, legacyMoonCountStart, 2);
// Append dictionary containinig meta-data and ephemeral key info.
const int cryptSectionStart = ii;
FCV<uint8_t, 2048> md;
Dictionary::append(md, ZT_PROTO_HELLO_NODE_META_INSTANCE_ID, ctx.instanceId);
// TODO: add other fields and ephemeral key info
outp.wI16(ii, (uint16_t)md.size());
outp.wB(ii, md.data(), (unsigned int)md.size());
if (unlikely((ii + ZT_HMACSHA384_LEN) > ZT_BUF_SIZE)) // sanity check, should be impossible
return 0;
// Encrypt the meta-data dictionary using a derived static key and the IV
// we generated above. This isn't strictly necessary as the data in there is
// not "secret," but it's not a bad idea to hide it for defense in depth. In
// particular this means that the public keys exchanged for ephemeral keying
// are concealed from any observer.
AES::CTR ctr(m_helloCipher);
void *const cryptSection = outp.unsafeData + ii;
ctr.init(outp.unsafeData + ivStart, 0, cryptSection);
ctr.crypt(cryptSection, ii - cryptSectionStart);
ctr.finish();
// Add HMAC at the end for strong verification by v2 nodes.
HMACSHA384(m_helloMacKey, outp.unsafeData, ii, outp.unsafeData + ii);
ii += ZT_HMACSHA384_LEN;
// Add poly1305 MAC for v1 nodes.
// NOTE(review): the per-packet Salsa key is derived over the packet
// including the HMAC appended above — presumably intentional so the v1 MAC
// covers the v2 authenticator; confirm against the v1 protocol spec.
uint8_t polyKey[ZT_POLY1305_KEY_SIZE], perPacketKey[ZT_SALSA20_KEY_SIZE];
Protocol::salsa2012DeriveKey(m_identityKey.key(), perPacketKey, outp, ii);
Salsa20(perPacketKey, &packetId).crypt12(Utils::ZERO256, polyKey, sizeof(polyKey));
Poly1305 p1305(polyKey);
p1305.update(outp.unsafeData + ZT_PROTO_PACKET_ENCRYPTED_SECTION_START, ii - ZT_PROTO_PACKET_ENCRYPTED_SECTION_START);
uint64_t polyMac[2];
p1305.finish(polyMac);
Utils::storeMachineEndian<uint64_t>(outp.unsafeData + ZT_PROTO_PACKET_MAC_INDEX, polyMac[0]);
return (likely(ctx.cb.wirePacketSendFunction(reinterpret_cast<ZT_Node *>(ctx.node), ctx.uPtr, cc.tPtr, localSocket, reinterpret_cast<const ZT_InetAddress *>(&atAddress), outp.unsafeData, ii, 0) == 0)) ? (unsigned int)ii : 0U;
}
} // namespace ZeroTier

View file

@ -1,535 +0,0 @@
/*
* Copyright (c)2013-2021 ZeroTier, Inc.
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
*/
/****/
#ifndef ZT_PEER_HPP
#define ZT_PEER_HPP
#include "AES.hpp"
#include "Address.hpp"
#include "Constants.hpp"
#include "Containers.hpp"
#include "Context.hpp"
#include "Endpoint.hpp"
#include "Identity.hpp"
#include "InetAddress.hpp"
#include "Locator.hpp"
#include "Mutex.hpp"
#include "Node.hpp"
#include "Path.hpp"
#include "Protocol.hpp"
#include "SharedPtr.hpp"
#include "SymmetricKey.hpp"
#include "Utils.hpp"
#define ZT_PEER_MARSHAL_SIZE_MAX (1 + ZT_ADDRESS_LENGTH + ZT_SYMMETRIC_KEY_SIZE + ZT_IDENTITY_MARSHAL_SIZE_MAX + 1 + ZT_LOCATOR_MARSHAL_SIZE_MAX + (2 * 4) + 2)
#define ZT_PEER_DEDUP_BUFFER_SIZE 1024
#define ZT_PEER_DEDUP_BUFFER_MASK 1023U
#define ZT_PEER_EPHEMERAL_KEY_BUFFER_SIZE 3
#define ZT_PEER_EPHEMERAL_KEY_COUNT_MAX (ZT_PEER_EPHEMERAL_KEY_BUFFER_SIZE + 1)
namespace ZeroTier {
class Topology;
/**
* Peer on P2P Network (virtual layer 1)
*/
class Peer {
    friend class SharedPtr<Peer>;
    friend class Topology;

  public:
    /**
     * Create an uninitialized peer
     *
     * New peers must be initialized via either init() or unmarshal() prior to
     * use or null pointer dereference may occur.
     */
    Peer();

    ~Peer();

    /**
     * Initialize peer with an identity
     *
     * @param peerIdentity The peer's identity
     * @return True if initialization was successful
     */
    bool init(const Context &ctx, const CallContext &cc, const Identity &peerIdentity);

    /**
     * @return This peer's ZT address (short for identity().address())
     */
    ZT_INLINE Address address() const noexcept { return m_id.address(); }

    /**
     * @return This peer's identity
     */
    ZT_INLINE const Identity &identity() const noexcept { return m_id; }

    /**
     * @return Current locator or NULL if no locator is known
     */
    ZT_INLINE const SharedPtr<const Locator> locator() const noexcept
    {
        RWMutex::RLock l(m_lock);
        return m_locator;
    }

    /**
     * Set or update peer locator
     *
     * This checks the locator's timestamp against the current locator and
     * replace it if newer.
     *
     * @param loc Locator update
     * @param verify If true, verify locator's signature and structure
     * @return New locator or previous if it was not replaced.
     */
    ZT_INLINE SharedPtr<const Locator> setLocator(const SharedPtr<const Locator> &loc, const bool verify) noexcept
    {
        RWMutex::Lock l(m_lock);
        if ((loc) && ((!m_locator) || (m_locator->revision() < loc->revision()))) {
            if ((!verify) || loc->verify(m_id))
                m_locator = loc;
        }
        return m_locator;
    }

    /**
     * Log receipt of an authenticated packet
     *
     * This is called by the decode pipe when a packet is proven to be authentic
     * and appears to be valid.
     *
     * @param path Path over which packet was received
     * @param hops ZeroTier (not IP) hops
     * @param packetId Packet ID
     * @param verb Packet verb
     * @param inReVerb In-reply verb for OK or ERROR verbs
     */
    void received(const Context &ctx, const CallContext &cc, const SharedPtr<Path> &path, unsigned int hops, uint64_t packetId, unsigned int payloadLength, Protocol::Verb verb, Protocol::Verb inReVerb);

    /**
     * Log sent data
     *
     * @param bytes Number of bytes written
     */
    ZT_INLINE void sent(const CallContext &cc, const unsigned int bytes) noexcept
    {
        m_lastSend.store(cc.ticks, std::memory_order_relaxed);
        m_outMeter.log(cc.ticks, bytes);
    }

    /**
     * Called when traffic destined for a different peer is sent to this one
     *
     * @param bytes Number of bytes relayed
     */
    ZT_INLINE void relayed(const CallContext &cc, const unsigned int bytes) noexcept { m_relayedMeter.log(cc.ticks, bytes); }

    /**
     * Get the current best direct path or NULL if none
     *
     * @return Current best path or NULL if there is no direct path
     */
    ZT_INLINE SharedPtr<Path> path(const CallContext &cc) noexcept { return SharedPtr<Path>(reinterpret_cast<Path *>(m_bestPath.load(std::memory_order_acquire))); }

    /**
     * Send data to this peer over a specific path only
     *
     * @param data Data to send
     * @param len Length in bytes
     * @param via Path over which to send data (may or may not be an already-learned path for this peer)
     */
    ZT_INLINE void send(const Context &ctx, const CallContext &cc, const void *data, unsigned int len, const SharedPtr<Path> &via) noexcept
    {
        via->send(ctx, cc, data, len);
        sent(cc, len);
    }

    /**
     * Send data to this peer over the best available path
     *
     * If there is a working direct path it will be used. Otherwise the data will be
     * sent via a root server.
     *
     * @param data Data to send
     * @param len Length in bytes
     */
    void send(const Context &ctx, const CallContext &cc, const void *data, unsigned int len) noexcept;

    /**
     * Do ping, probes, re-keying, and keepalive with this peer, as needed.
     */
    void pulse(const Context &ctx, const CallContext &cc);

    /**
     * Attempt to contact this peer at a given endpoint.
     *
     * The attempt doesn't happen immediately. It's added to a queue for the
     * next invocation of pulse().
     *
     * @param ep Endpoint to attempt to contact
     * @param tries Number of times to try (default: 1)
     */
    void contact(const Context &ctx, const CallContext &cc, const Endpoint &ep, int tries = 1);

    /**
     * Reset paths within a given IP scope and address family
     *
     * Resetting a path involves sending an ECHO to it and then deactivating
     * it until or unless it responds. This is done when we detect a change
     * to our external IP or another system change that might invalidate
     * many or all current paths.
     *
     * @param scope IP scope
     * @param inetAddressFamily Family e.g. AF_INET
     */
    void resetWithinScope(const Context &ctx, const CallContext &cc, InetAddress::IpScope scope, int inetAddressFamily);

    /**
     * @return Time of last receive of anything, whether direct or relayed
     */
    ZT_INLINE int64_t lastReceive() const noexcept { return m_lastReceive.load(std::memory_order_relaxed); }

    /**
     * @return Average latency of all direct paths or -1 if no direct paths or unknown
     */
    ZT_INLINE int latency() const noexcept
    {
        RWMutex::RLock l(m_lock);
        int ltot = 0;
        int lcnt = 0;
        for (unsigned int i = 0; i < m_alivePathCount; ++i) {
            int lat = m_paths[i]->latency();
            if (lat > 0) {
                ltot += lat;
                ++lcnt;
            }
        }
        // FIX: mean is total/count (the previous count/total expression was
        // inverted and truncated to zero under integer division), and the
        // guard must be on the sample count.
        return (lcnt > 0) ? (ltot / lcnt) : -1;
    }

    /**
     * @return Cipher suite that should be used to communicate with this peer
     */
    ZT_INLINE uint8_t cipher() const noexcept
    {
        // if (m_vProto >= 11)
        //    return ZT_PROTO_CIPHER_SUITE__AES_GMAC_SIV;
        return ZT_PROTO_CIPHER_POLY1305_SALSA2012;
    }

    /**
     * @return The permanent shared key for this peer computed by simple identity agreement
     */
    ZT_INLINE SymmetricKey &identityKey() noexcept { return m_identityKey; }

    /**
     * @return AES instance for HELLO dictionary / encrypted section encryption/decryption
     */
    ZT_INLINE const AES &identityHelloDictionaryEncryptionCipher() const noexcept { return m_helloCipher; }

    /**
     * @return Key for HMAC on HELLOs
     */
    ZT_INLINE const uint8_t *identityHelloHmacKey() const noexcept { return m_helloMacKey; }

    /**
     * @return Raw identity key bytes
     */
    ZT_INLINE const uint8_t *rawIdentityKey() const noexcept { return m_identityKey.key(); }

    /**
     * @return Current best key: either the latest ephemeral or the identity key
     */
    ZT_INLINE SymmetricKey &key() noexcept { return *reinterpret_cast<SymmetricKey *>(m_key.load(std::memory_order_relaxed)); }

    /**
     * Get keys other than a key we have already tried.
     *
     * This is used when a packet arrives that doesn't decrypt with the preferred
     * key. It fills notYetTried[] with other keys that haven't been tried yet,
     * which can include the identity key and any older session keys.
     *
     * @param alreadyTried Key we've already tried or NULL if none
     * @param notYetTried All keys known (long lived or session) other than alreadyTried
     * @return Number of pointers written to notYetTried[]
     */
    ZT_INLINE int getOtherKeys(const SymmetricKey *const alreadyTried, SymmetricKey *notYetTried[ZT_PEER_EPHEMERAL_KEY_COUNT_MAX]) noexcept
    {
        RWMutex::RLock l(m_lock);
        int cnt = 0;
        if (alreadyTried != &m_identityKey)
            notYetTried[cnt++] = &m_identityKey;
        for (unsigned int k = 0; k < ZT_PEER_EPHEMERAL_KEY_BUFFER_SIZE; ++k) {
            SymmetricKey *const kk = &m_ephemeralSessions[k].key;
            if (m_ephemeralSessions[k].established && (alreadyTried != kk))
                notYetTried[cnt++] = kk;
        }
        return cnt;
    }

    /**
     * Set a flag ordering a key renegotiation ASAP.
     *
     * This can be called if there's any hint of an issue with the current key.
     * It's also called if any of the secondary possible keys returned by
     * getOtherKeys() decrypt a valid packet, indicating a desynchronization
     * in which key should be used.
     */
    ZT_INLINE void setKeyRenegotiationNeeded() noexcept
    {
        RWMutex::Lock l(m_lock);
        m_keyRenegotiationNeeded = true;
    }

    /**
     * Set the currently known remote version of this peer's client
     *
     * @param vproto Protocol version
     * @param vmaj Major version
     * @param vmin Minor version
     * @param vrev Revision
     */
    ZT_INLINE void setRemoteVersion(unsigned int vproto, unsigned int vmaj, unsigned int vmin, unsigned int vrev) noexcept
    {
        RWMutex::Lock l(m_lock);
        m_vProto = (uint16_t)vproto;
        m_vMajor = (uint16_t)vmaj;
        m_vMinor = (uint16_t)vmin;
        m_vRevision = (uint16_t)vrev;
    }

    /**
     * Get the remote version of this peer.
     *
     * If false is returned, the value of the value-result variables is
     * undefined.
     *
     * @param vProto Set to protocol version
     * @param vMajor Set to major version
     * @param vMinor Set to minor version
     * @param vRevision Set to revision
     * @return True if remote version is known
     */
    ZT_INLINE bool remoteVersion(uint16_t &vProto, uint16_t &vMajor, uint16_t &vMinor, uint16_t &vRevision)
    {
        RWMutex::RLock l(m_lock);
        // All four fields are assigned as a side effect; the OR is non-zero
        // if any field is known (version fields default to zero).
        return (((vProto = m_vProto) | (vMajor = m_vMajor) | (vMinor = m_vMinor) | (vRevision = m_vRevision)) != 0);
    }

    /**
     * @return True if there is at least one alive direct path
     */
    ZT_INLINE bool directlyConnected() const noexcept
    {
        RWMutex::RLock l(m_lock);
        return m_alivePathCount > 0;
    }

    /**
     * Get all paths
     *
     * @param paths Vector of paths with the first path being the current preferred path
     */
    ZT_INLINE void getAllPaths(Vector<SharedPtr<Path>> &paths) const
    {
        RWMutex::RLock l(m_lock);
        paths.assign(m_paths, m_paths + m_alivePathCount);
    }

    /**
     * Save the latest version of this peer to the data store
     */
    void save(const Context &ctx, const CallContext &cc) const;

    static constexpr int marshalSizeMax() noexcept { return ZT_PEER_MARSHAL_SIZE_MAX; }

    int marshal(const Context &ctx, uint8_t data[ZT_PEER_MARSHAL_SIZE_MAX]) const noexcept;

    int unmarshal(const Context &ctx, int64_t ticks, const uint8_t *restrict data, int len) noexcept;

    /**
     * Rate limit gate for inbound WHOIS requests
     */
    ZT_INLINE bool rateGateInboundWhoisRequest(CallContext &cc) noexcept
    {
        if ((cc.ticks - m_lastWhoisRequestReceived.load(std::memory_order_relaxed)) >= ZT_PEER_WHOIS_RATE_LIMIT) {
            m_lastWhoisRequestReceived.store(cc.ticks, std::memory_order_relaxed);
            return true;
        }
        return false;
    }

    /**
     * Rate limit gate for inbound ECHO requests
     */
    ZT_INLINE bool rateGateEchoRequest(CallContext &cc) noexcept
    {
        if ((cc.ticks - m_lastEchoRequestReceived.load(std::memory_order_relaxed)) >= ZT_PEER_GENERAL_RATE_LIMIT) {
            m_lastEchoRequestReceived.store(cc.ticks, std::memory_order_relaxed);
            return true;
        }
        return false;
    }

    /**
     * Rate limit gate for inbound probes
     */
    ZT_INLINE bool rateGateProbeRequest(CallContext &cc) noexcept
    {
        if ((cc.ticks - m_lastProbeReceived.load(std::memory_order_relaxed)) > ZT_PEER_PROBE_RESPONSE_RATE_LIMIT) {
            m_lastProbeReceived.store(cc.ticks, std::memory_order_relaxed);
            return true;
        }
        return false;
    }

    /**
     * Packet deduplication filter for incoming packets
     *
     * This flags a packet ID and returns true if the same packet ID was already
     * flagged. This is done in an atomic operation if supported.
     *
     * @param packetId Packet ID to check/flag
     * @return True if this is a duplicate
     */
    ZT_INLINE bool deduplicateIncomingPacket(const uint64_t packetId) noexcept { return m_dedup[Utils::hash32((uint32_t)packetId) & ZT_PEER_DEDUP_BUFFER_MASK].exchange(packetId, std::memory_order_relaxed) == packetId; }

  private:
    // Ephemeral public key pair (hybrid C25519 + NIST P-384) as sent in HELLO.
    struct p_EphemeralPublic {
        uint8_t type;
        uint8_t c25519Public[ZT_C25519_ECDH_PUBLIC_KEY_SIZE];
        uint8_t p384Public[ZT_ECC384_PUBLIC_KEY_SIZE];
    };

    static_assert(sizeof(p_EphemeralPublic) == (1 + ZT_C25519_ECDH_PUBLIC_KEY_SIZE + ZT_ECC384_PUBLIC_KEY_SIZE), "p_EphemeralPublic has extra padding");

    // An ephemeral key pair we generated, with its public half and private halves.
    struct p_EphemeralPrivate {
        ZT_INLINE p_EphemeralPrivate() noexcept : creationTime(-1) {}

        // SECURITY FIX: burn sizeof(p_EphemeralPrivate), i.e. the whole
        // object including the c25519Private/p384Private key material. The
        // previous sizeof(p_EphemeralPublic) wiped only the first bytes of
        // this struct and left the private keys in freed memory.
        ZT_INLINE ~p_EphemeralPrivate() { Utils::burn(this, sizeof(p_EphemeralPrivate)); }

        int64_t creationTime; // -1 if this slot is uninitialized
        uint64_t sha384OfPublic[6];
        p_EphemeralPublic pub;
        uint8_t c25519Private[ZT_C25519_ECDH_PRIVATE_KEY_SIZE];
        uint8_t p384Private[ZT_ECC384_PRIVATE_KEY_SIZE];
    };

    // An established ephemeral session keyed by the hash of the peer's public key.
    struct p_EphemeralSession {
        ZT_INLINE p_EphemeralSession() noexcept : established(false) {}

        uint64_t sha384OfPeerPublic[6];
        SymmetricKey key;
        bool established;
    };

    void m_prioritizePaths(const CallContext &cc);
    unsigned int m_sendProbe(const Context &ctx, const CallContext &cc, int64_t localSocket, const InetAddress &atAddress, const uint16_t *ports, unsigned int numPorts);
    void m_deriveSecondaryIdentityKeys() noexcept;
    unsigned int m_hello(const Context &ctx, const CallContext &cc, int64_t localSocket, const InetAddress &atAddress, bool forceNewKey);

    // Guards all fields except those otherwise indicated (and atomics of course).
    RWMutex m_lock;

    // Long lived key resulting from agreement with this peer's identity.
    SymmetricKey m_identityKey;

    // Cipher for encrypting or decrypting the encrypted section of HELLO packets.
    AES m_helloCipher;

    // Key for HELLO HMAC-SHA384
    uint8_t m_helloMacKey[ZT_SYMMETRIC_KEY_SIZE];

    // Keys we have generated and sent.
    p_EphemeralPrivate m_ephemeralKeysSent[ZT_PEER_EPHEMERAL_KEY_BUFFER_SIZE];

    // Sessions created when OK(HELLO) is received.
    p_EphemeralSession m_ephemeralSessions[ZT_PEER_EPHEMERAL_KEY_BUFFER_SIZE];

    // Pointer to active key (SymmetricKey).
    std::atomic<uintptr_t> m_key;

    // Flag indicating that we should rekey at next pulse().
    bool m_keyRenegotiationNeeded;

    // This peer's public identity.
    Identity m_id;

    // This peer's most recent (by revision) locator, or NULL if none on file.
    SharedPtr<const Locator> m_locator;

    // The last time something was received or sent.
    std::atomic<int64_t> m_lastReceive;
    std::atomic<int64_t> m_lastSend;

    // The last time we sent a full HELLO to this peer.
    int64_t m_lastSentHello; // only checked while locked

    // The last time a WHOIS request was received from this peer (anti-DOS / anti-flood).
    std::atomic<int64_t> m_lastWhoisRequestReceived;

    // The last time an ECHO request was received from this peer (anti-DOS / anti-flood).
    std::atomic<int64_t> m_lastEchoRequestReceived;

    // The last time we got a probe from this peer.
    std::atomic<int64_t> m_lastProbeReceived;

    // Deduplication buffer.
    std::atomic<uint64_t> m_dedup[ZT_PEER_DEDUP_BUFFER_SIZE];

    // Meters measuring actual bandwidth in, out, and relayed via this peer (mostly if this is a root).
    Meter<> m_inMeter;
    Meter<> m_outMeter;
    Meter<> m_relayedMeter;

    // Direct paths sorted in descending order of preference.
    SharedPtr<Path> m_paths[ZT_MAX_PEER_NETWORK_PATHS];

    // Size of m_paths[] in non-NULL paths (max: MAX_PEER_NETWORK_PATHS).
    unsigned int m_alivePathCount;

    // Current best path (pointer to Path).
    std::atomic<uintptr_t> m_bestPath;

    // For SharedPtr<>
    std::atomic<int> __refCount;

    // A pending outbound contact attempt (endpoint plus retry iteration count).
    struct p_TryQueueItem {
        ZT_INLINE p_TryQueueItem() : target(), iteration(0) {}

        ZT_INLINE p_TryQueueItem(const Endpoint &t, int iter) : target(t), iteration(iter) {}

        Endpoint target;
        int iteration;
    };

    // Queue of endpoints to try.
    List<p_TryQueueItem> m_tryQueue;

    // Time each endpoint was last tried, for rate limiting.
    Map<Endpoint, int64_t> m_lastTried;

    // Version of remote peer, if known.
    uint16_t m_vProto;
    uint16_t m_vMajor;
    uint16_t m_vMinor;
    uint16_t m_vRevision;
};
} // namespace ZeroTier
#endif

View file

@ -1,508 +0,0 @@
/*
20080912
D. J. Bernstein
Public domain.
*/
// Small modifications have been made for ZeroTier, but this code remains in the public domain.
#include "Poly1305.hpp"
#include "Constants.hpp"
#include "Utils.hpp"
#ifdef __WINDOWS__
#pragma warning(disable : 4146)
#endif
#define U8TO64(p) Utils::loadLittleEndian<uint64_t>(p)
#define U64TO8(p, v) Utils::storeLittleEndian<uint64_t>(p, v)
#define U8TO32(p) Utils::loadLittleEndian<uint32_t>(p)
#define U32TO8(p, v) Utils::storeLittleEndian<uint32_t>(p, v)
namespace ZeroTier {
namespace {
// Opaque Poly1305 state container. `aligner` forces suitable alignment for
// the internal state structs that are overlaid onto `opaque` via casts.
typedef struct poly1305_context {
size_t aligner;
unsigned char opaque[136];
} poly1305_context;
#ifdef ZT_HAVE_UINT128
#define MUL(out, x, y) out = ((uint128_t)x * y)
#define ADD(out, in) out += in
#define ADDLO(out, in) out += in
#define SHR(in, shift) (unsigned long long)(in >> (shift))
#define LO(in) (unsigned long long)(in)
#define poly1305_block_size 16
// 64-bit (uint128-capable) Poly1305 state: r and h held as three 44/44/42-bit
// limbs, pad is the second half of the key added at finish time.
typedef struct poly1305_state_internal_t {
unsigned long long r[3];
unsigned long long h[3];
unsigned long long pad[2];
size_t leftover;
unsigned char buffer[poly1305_block_size];
unsigned char final;
} poly1305_state_internal_t;
// Initialize Poly1305 from a 32-byte one-time key: clamp r per the Poly1305
// spec, split it into three limbs, zero the accumulator, and stash the s
// (pad) half of the key for the final addition.
ZT_INLINE void poly1305_init(poly1305_context *ctx, const unsigned char key[32])
{
poly1305_state_internal_t *st = (poly1305_state_internal_t *)ctx;
unsigned long long t0, t1;
/* r &= 0xffffffc0ffffffc0ffffffc0fffffff */
t0 = U8TO64(&key[0]);
t1 = U8TO64(&key[8]);
// Clamping is folded into the limb masks (radix-2^44 representation).
st->r[0] = (t0)&0xffc0fffffff;
st->r[1] = ((t0 >> 44) | (t1 << 20)) & 0xfffffc0ffff;
st->r[2] = ((t1 >> 24)) & 0x00ffffffc0f;
/* h = 0 */
st->h[0] = 0;
st->h[1] = 0;
st->h[2] = 0;
/* save pad for later */
st->pad[0] = U8TO64(&key[16]);
st->pad[1] = U8TO64(&key[24]);
st->leftover = 0;
st->final = 0;
}
// Absorb full 16-byte blocks: for each block compute h = (h + block) * r with
// a partial reduction mod 2^130 - 5. Uses 128-bit intermediates; s1/s2 are
// pre-multiplied by 5*4 for the reduction trick in this 44/44/42-bit radix.
void poly1305_blocks(poly1305_state_internal_t *st, const unsigned char *m, size_t bytes)
{
// The high bit (2^128) is appended to every block except the padded final one.
const unsigned long long hibit = (st->final) ? 0 : ((unsigned long long)1 << 40); /* 1 << 128 */
unsigned long long r0, r1, r2;
unsigned long long s1, s2;
unsigned long long h0, h1, h2;
uint128_t d0, d1, d2, d;
r0 = st->r[0];
r1 = st->r[1];
r2 = st->r[2];
h0 = st->h[0];
h1 = st->h[1];
h2 = st->h[2];
s1 = r1 * (5 << 2);
s2 = r2 * (5 << 2);
while (bytes >= poly1305_block_size) {
unsigned long long t0, t1;
/* h += m[i] */
t0 = U8TO64(&m[0]);
t1 = U8TO64(&m[8]);
h0 += ((t0)&0xfffffffffff);
h1 += (((t0 >> 44) | (t1 << 20)) & 0xfffffffffff);
h2 += (((t1 >> 24)) & 0x3ffffffffff) | hibit;
/* h *= r */
MUL(d0, h0, r0);
MUL(d, h1, s2);
ADD(d0, d);
MUL(d, h2, s1);
ADD(d0, d);
MUL(d1, h0, r1);
MUL(d, h1, r0);
ADD(d1, d);
MUL(d, h2, s2);
ADD(d1, d);
MUL(d2, h0, r2);
MUL(d, h1, r1);
ADD(d2, d);
MUL(d, h2, r0);
ADD(d2, d);
/* (partial) h %= p */
unsigned long long c = SHR(d0, 44);
h0 = LO(d0) & 0xfffffffffff;
ADDLO(d1, c);
c = SHR(d1, 44);
h1 = LO(d1) & 0xfffffffffff;
ADDLO(d2, c);
c = SHR(d2, 42);
h2 = LO(d2) & 0x3ffffffffff;
h0 += c * 5; // carry out of the top limb re-enters as *5 (mod 2^130 - 5)
c = (h0 >> 44);
h0 = h0 & 0xfffffffffff;
h1 += c;
m += poly1305_block_size;
bytes -= poly1305_block_size;
}
st->h[0] = h0;
st->h[1] = h1;
st->h[2] = h2;
}
// Finalize: absorb any buffered partial block (padded with 1 then zeros),
// fully reduce h mod 2^130 - 5 using a branch-free conditional subtraction,
// add the pad (s) half of the key, emit the 16-byte tag, and wipe the state.
ZT_INLINE void poly1305_finish(poly1305_context *ctx, unsigned char mac[16])
{
poly1305_state_internal_t *st = (poly1305_state_internal_t *)ctx;
unsigned long long h0, h1, h2, c;
unsigned long long g0, g1, g2;
unsigned long long t0, t1;
/* process the remaining block */
if (st->leftover) {
size_t i = st->leftover;
st->buffer[i] = 1;
for (i = i + 1; i < poly1305_block_size; i++)
st->buffer[i] = 0;
st->final = 1; // suppresses the 2^128 bit for this padded block
poly1305_blocks(st, st->buffer, poly1305_block_size);
}
/* fully carry h */
h0 = st->h[0];
h1 = st->h[1];
h2 = st->h[2];
c = (h1 >> 44);
h1 &= 0xfffffffffff;
h2 += c;
c = (h2 >> 42);
h2 &= 0x3ffffffffff;
h0 += c * 5;
c = (h0 >> 44);
h0 &= 0xfffffffffff;
h1 += c;
c = (h1 >> 44);
h1 &= 0xfffffffffff;
h2 += c;
c = (h2 >> 42);
h2 &= 0x3ffffffffff;
h0 += c * 5;
c = (h0 >> 44);
h0 &= 0xfffffffffff;
h1 += c;
/* compute h + -p */
g0 = h0 + 5;
c = (g0 >> 44);
g0 &= 0xfffffffffff;
g1 = h1 + c;
c = (g1 >> 44);
g1 &= 0xfffffffffff;
g2 = h2 + c - ((unsigned long long)1 << 42);
/* select h if h < p, or h + -p if h >= p */
// Branch-free select via an all-ones/all-zeros mask derived from the sign
// bit of g2 (constant-time with respect to the secret accumulator).
c = (g2 >> ((sizeof(unsigned long long) * 8) - 1)) - 1;
g0 &= c;
g1 &= c;
g2 &= c;
c = ~c;
h0 = (h0 & c) | g0;
h1 = (h1 & c) | g1;
h2 = (h2 & c) | g2;
/* h = (h + pad) */
t0 = st->pad[0];
t1 = st->pad[1];
h0 += ((t0)&0xfffffffffff);
c = (h0 >> 44);
h0 &= 0xfffffffffff;
h1 += (((t0 >> 44) | (t1 << 20)) & 0xfffffffffff) + c;
c = (h1 >> 44);
h1 &= 0xfffffffffff;
h2 += (((t1 >> 24)) & 0x3ffffffffff) + c;
h2 &= 0x3ffffffffff;
/* mac = h % (2^128) */
h0 = ((h0) | (h1 << 44));
h1 = ((h1 >> 20) | (h2 << 24));
U64TO8(&mac[0], h0);
U64TO8(&mac[8], h1);
/* zero out the state */
st->h[0] = 0;
st->h[1] = 0;
st->h[2] = 0;
st->r[0] = 0;
st->r[1] = 0;
st->r[2] = 0;
st->pad[0] = 0;
st->pad[1] = 0;
}
#else // no uint128_t
#define poly1305_block_size 16
// 32-bit fallback Poly1305 state: r and h held as five 26-bit limbs,
// pad is the second half of the key added at finish time.
typedef struct poly1305_state_internal_t {
unsigned long r[5];
unsigned long h[5];
unsigned long pad[4];
size_t leftover;
unsigned char buffer[poly1305_block_size];
unsigned char final;
} poly1305_state_internal_t;
// 32-bit fallback init: clamp r per the Poly1305 spec, split into five
// 26-bit limbs (overlapping unaligned 32-bit loads), zero the accumulator,
// and save the pad (s) half of the key for the final addition.
ZT_INLINE void poly1305_init(poly1305_context *ctx, const unsigned char key[32])
{
poly1305_state_internal_t *st = (poly1305_state_internal_t *)ctx;
/* r &= 0xffffffc0ffffffc0ffffffc0fffffff */
st->r[0] = (U8TO32(&key[0])) & 0x3ffffff;
st->r[1] = (U8TO32(&key[3]) >> 2) & 0x3ffff03;
st->r[2] = (U8TO32(&key[6]) >> 4) & 0x3ffc0ff;
st->r[3] = (U8TO32(&key[9]) >> 6) & 0x3f03fff;
st->r[4] = (U8TO32(&key[12]) >> 8) & 0x00fffff;
/* h = 0 */
st->h[0] = 0;
st->h[1] = 0;
st->h[2] = 0;
st->h[3] = 0;
st->h[4] = 0;
/* save pad for later */
st->pad[0] = U8TO32(&key[16]);
st->pad[1] = U8TO32(&key[20]);
st->pad[2] = U8TO32(&key[24]);
st->pad[3] = U8TO32(&key[28]);
st->leftover = 0;
st->final = 0;
}
// 32-bit fallback block absorber: h = (h + block) * r per 16-byte block with
// partial reduction mod 2^130 - 5, using 64-bit products of 26-bit limbs.
void poly1305_blocks(poly1305_state_internal_t *st, const unsigned char *m, size_t bytes)
{
// The high bit (2^128) is appended to every block except the padded final one.
const unsigned long hibit = (st->final) ? 0 : (1 << 24); /* 1 << 128 */
unsigned long r0, r1, r2, r3, r4;
unsigned long s1, s2, s3, s4;
unsigned long h0, h1, h2, h3, h4;
r0 = st->r[0];
r1 = st->r[1];
r2 = st->r[2];
r3 = st->r[3];
r4 = st->r[4];
// s[i] = r[i] * 5: wrap-around multiplier for reduction mod 2^130 - 5.
s1 = r1 * 5;
s2 = r2 * 5;
s3 = r3 * 5;
s4 = r4 * 5;
h0 = st->h[0];
h1 = st->h[1];
h2 = st->h[2];
h3 = st->h[3];
h4 = st->h[4];
while (bytes >= poly1305_block_size) {
/* h += m[i] */
h0 += (U8TO32(m + 0)) & 0x3ffffff;
h1 += (U8TO32(m + 3) >> 2) & 0x3ffffff;
h2 += (U8TO32(m + 6) >> 4) & 0x3ffffff;
h3 += (U8TO32(m + 9) >> 6) & 0x3ffffff;
h4 += (U8TO32(m + 12) >> 8) | hibit;
/* h *= r */
unsigned long long d0 = ((unsigned long long)h0 * r0) + ((unsigned long long)h1 * s4) + ((unsigned long long)h2 * s3) + ((unsigned long long)h3 * s2) + ((unsigned long long)h4 * s1);
unsigned long long d1 = ((unsigned long long)h0 * r1) + ((unsigned long long)h1 * r0) + ((unsigned long long)h2 * s4) + ((unsigned long long)h3 * s3) + ((unsigned long long)h4 * s2);
unsigned long long d2 = ((unsigned long long)h0 * r2) + ((unsigned long long)h1 * r1) + ((unsigned long long)h2 * r0) + ((unsigned long long)h3 * s4) + ((unsigned long long)h4 * s3);
unsigned long long d3 = ((unsigned long long)h0 * r3) + ((unsigned long long)h1 * r2) + ((unsigned long long)h2 * r1) + ((unsigned long long)h3 * r0) + ((unsigned long long)h4 * s4);
unsigned long long d4 = ((unsigned long long)h0 * r4) + ((unsigned long long)h1 * r3) + ((unsigned long long)h2 * r2) + ((unsigned long long)h3 * r1) + ((unsigned long long)h4 * r0);
/* (partial) h %= p */
unsigned long c = (unsigned long)(d0 >> 26);
h0 = (unsigned long)d0 & 0x3ffffff;
d1 += c;
c = (unsigned long)(d1 >> 26);
h1 = (unsigned long)d1 & 0x3ffffff;
d2 += c;
c = (unsigned long)(d2 >> 26);
h2 = (unsigned long)d2 & 0x3ffffff;
d3 += c;
c = (unsigned long)(d3 >> 26);
h3 = (unsigned long)d3 & 0x3ffffff;
d4 += c;
c = (unsigned long)(d4 >> 26);
h4 = (unsigned long)d4 & 0x3ffffff;
h0 += c * 5; // carry out of the top limb re-enters as *5 (mod 2^130 - 5)
c = (h0 >> 26);
h0 = h0 & 0x3ffffff;
h1 += c;
m += poly1305_block_size;
bytes -= poly1305_block_size;
}
st->h[0] = h0;
st->h[1] = h1;
st->h[2] = h2;
st->h[3] = h3;
st->h[4] = h4;
}
/*
 * Produce the final 16-byte tag and wipe the state.
 *
 * Any buffered partial block is padded (0x01 then zeros) and absorbed with
 * the 2^128 term suppressed, h is fully carried, reduced modulo 2^130 - 5
 * via a branchless conditional subtract, the pad s is added, and the result
 * is written little-endian to mac.
 */
ZT_INLINE void poly1305_finish(poly1305_context *ctx, unsigned char mac[16])
{
poly1305_state_internal_t *st = (poly1305_state_internal_t *)ctx;
unsigned long h0, h1, h2, h3, h4, c;
unsigned long g0, g1, g2, g3, g4;
unsigned long long f;
unsigned long mask;
/* process the remaining block (pad with 1 then zeros; st->final tells
 * poly1305_blocks not to add the usual 2^128 term) */
if (st->leftover) {
size_t i = st->leftover;
st->buffer[i++] = 1;
for (; i < poly1305_block_size; i++)
st->buffer[i] = 0;
st->final = 1;
poly1305_blocks(st, st->buffer, poly1305_block_size);
}
/* fully carry h */
h0 = st->h[0];
h1 = st->h[1];
h2 = st->h[2];
h3 = st->h[3];
h4 = st->h[4];
c = h1 >> 26;
h1 = h1 & 0x3ffffff;
h2 += c;
c = h2 >> 26;
h2 = h2 & 0x3ffffff;
h3 += c;
c = h3 >> 26;
h3 = h3 & 0x3ffffff;
h4 += c;
c = h4 >> 26;
h4 = h4 & 0x3ffffff;
h0 += c * 5;
c = h0 >> 26;
h0 = h0 & 0x3ffffff;
h1 += c;
/* compute h + -p (i.e. g = h - (2^130 - 5) = h + 5 - 2^130) */
g0 = h0 + 5;
c = g0 >> 26;
g0 &= 0x3ffffff;
g1 = h1 + c;
c = g1 >> 26;
g1 &= 0x3ffffff;
g2 = h2 + c;
c = g2 >> 26;
g2 &= 0x3ffffff;
g3 = h3 + c;
c = g3 >> 26;
g3 &= 0x3ffffff;
g4 = h4 + c - (1 << 26);
/* select h if h < p, or h + -p if h >= p */
/* if the subtract borrowed (h < p) g4's sign bit is set, making mask 0;
 * otherwise mask is all ones. Branchless to keep timing data-independent. */
mask = (g4 >> ((sizeof(unsigned long) * 8) - 1)) - 1;
g0 &= mask;
g1 &= mask;
g2 &= mask;
g3 &= mask;
g4 &= mask;
mask = ~mask;
h0 = (h0 & mask) | g0;
h1 = (h1 & mask) | g1;
h2 = (h2 & mask) | g2;
h3 = (h3 & mask) | g3;
h4 = (h4 & mask) | g4;
/* h = h % (2^128) -- repack five 26-bit limbs into four 32-bit words */
h0 = ((h0) | (h1 << 26)) & 0xffffffff;
h1 = ((h1 >> 6) | (h2 << 20)) & 0xffffffff;
h2 = ((h2 >> 12) | (h3 << 14)) & 0xffffffff;
h3 = ((h3 >> 18) | (h4 << 8)) & 0xffffffff;
/* mac = (h + pad) % (2^128), carries propagated through f */
f = (unsigned long long)h0 + st->pad[0];
h0 = (unsigned long)f;
f = (unsigned long long)h1 + st->pad[1] + (f >> 32);
h1 = (unsigned long)f;
f = (unsigned long long)h2 + st->pad[2] + (f >> 32);
h2 = (unsigned long)f;
f = (unsigned long long)h3 + st->pad[3] + (f >> 32);
h3 = (unsigned long)f;
U32TO8(mac + 0, h0);
U32TO8(mac + 4, h1);
U32TO8(mac + 8, h2);
U32TO8(mac + 12, h3);
/* zero out the state (one-time key material must not linger) */
st->h[0] = 0;
st->h[1] = 0;
st->h[2] = 0;
st->h[3] = 0;
st->h[4] = 0;
st->r[0] = 0;
st->r[1] = 0;
st->r[2] = 0;
st->r[3] = 0;
st->r[4] = 0;
st->pad[0] = 0;
st->pad[1] = 0;
st->pad[2] = 0;
st->pad[3] = 0;
}
#endif // uint128_t or portable version?
/*
 * Feed message bytes into the MAC; may be called any number of times.
 *
 * Input is buffered until a full 16-byte block is available; whole blocks
 * are hashed directly from the caller's buffer and any trailing partial
 * block is stashed for the next update() or finish().
 */
ZT_INLINE void poly1305_update(poly1305_context *ctx, const unsigned char *m, size_t bytes) noexcept
{
poly1305_state_internal_t *const st = (poly1305_state_internal_t *)ctx;

/* First, top up a partially filled block left over from a prior call. */
if (st->leftover != 0) {
size_t take = poly1305_block_size - st->leftover;
if (take > bytes)
take = bytes;
for (size_t i = 0; i < take; ++i)
st->buffer[st->leftover + i] = m[i];
st->leftover += take;
m += take;
bytes -= take;
if (st->leftover < poly1305_block_size)
return; /* still short of a full block */
poly1305_blocks(st, st->buffer, poly1305_block_size);
st->leftover = 0;
}

/* Hash as many whole blocks as possible straight from the input. */
if (bytes >= poly1305_block_size) {
const size_t whole = bytes & ~((size_t)(poly1305_block_size - 1));
poly1305_blocks(st, m, whole);
m += whole;
bytes -= whole;
}

/* Buffer any trailing partial block. */
if (bytes != 0) {
for (size_t i = 0; i < bytes; ++i)
st->buffer[st->leftover + i] = m[i];
st->leftover += bytes;
}
}
} // anonymous namespace
// (Re)initialize with a 32-byte one-time key; forwards to the file-local
// poly1305 implementation, storing state in the class's opaque buffer.
void Poly1305::init(const void *key) noexcept
{
// Guard: the opaque in-class buffer must be able to hold the implementation state.
static_assert(sizeof(ctx) >= sizeof(poly1305_context), "buffer in class smaller than required structure size");
poly1305_init(reinterpret_cast<poly1305_context *>(&ctx), reinterpret_cast<const unsigned char *>(key));
}
// Add data to the MAC computation; thin forwarding wrapper over the
// file-local poly1305_update().
void Poly1305::update(const void *data, unsigned int len) noexcept
{
poly1305_update(reinterpret_cast<poly1305_context *>(&ctx), reinterpret_cast<const unsigned char *>(data), (size_t)len);
}
// Finalize and write the 16-byte tag; thin forwarding wrapper over the
// file-local poly1305_finish().
void Poly1305::finish(void *auth) noexcept
{
poly1305_finish(reinterpret_cast<poly1305_context *>(&ctx), reinterpret_cast<unsigned char *>(auth));
}
} // namespace ZeroTier

View file

@ -1,53 +0,0 @@
/*
* Copyright (c)2013-2021 ZeroTier, Inc.
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
*/
/****/
#ifndef ZT_POLY1305_HPP
#define ZT_POLY1305_HPP
#include "Constants.hpp"
namespace ZeroTier {
#define ZT_POLY1305_KEY_SIZE 32
#define ZT_POLY1305_MAC_SIZE 16
/**
* Poly1305 one-time MAC calculator
*/
class Poly1305 {
public:
/**
 * Construct an uninitialized calculator; init() must be called before use.
 */
ZT_INLINE Poly1305() {}
/**
 * Construct and initialize with a 32-byte (ZT_POLY1305_KEY_SIZE) one-time key.
 */
ZT_INLINE Poly1305(const void *key) { this->init(key); }
/**
 * (Re)initialize with a 32-byte one-time key. Poly1305 keys must never be reused.
 */
void init(const void *key) noexcept;
/**
 * Add data to the MAC computation; may be called repeatedly.
 */
void update(const void *data, unsigned int len) noexcept;
/**
 * Finish and write the 16-byte (ZT_POLY1305_MAC_SIZE) tag to auth.
 */
void finish(void *auth) noexcept;
/**
 * One-shot convenience: compute the MAC of data under key into auth.
 */
static ZT_INLINE void compute(void *const auth, const void *const data, const unsigned int len, const void *const key) noexcept
{
Poly1305 p(key);
p.update(data, len);
p.finish(auth);
}
private:
// Opaque storage for the implementation's internal state; the leading
// size_t member forces alignment. Size is checked by a static_assert in
// Poly1305::init().
struct {
size_t aligner;
unsigned char opaque[136];
} ctx;
};
} // namespace ZeroTier
#endif

View file

@ -1,918 +0,0 @@
/*
* Copyright (c)2013-2021 ZeroTier, Inc.
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
*/
/****/
#ifndef ZT_PROTOCOL_HPP
#define ZT_PROTOCOL_HPP
#include "AES.hpp"
#include "Address.hpp"
#include "Buf.hpp"
#include "Constants.hpp"
#include "Identity.hpp"
#include "LZ4.hpp"
#include "Poly1305.hpp"
#include "Salsa20.hpp"
#include "SymmetricKey.hpp"
/*
* Packet format:
* <[8] 64-bit packet ID / crypto IV>
* <[5] destination ZT address>
* <[5] source ZT address>
* <[1] outer visible flags, cipher, and hop count (bits: FFCCHHH)>
* <[8] 64-bit MAC (or trusted path ID in trusted path mode)>
* [... -- begin encryption envelope -- ...]
* <[1] inner envelope flags (MS 3 bits) and verb (LS 5 bits)>
* [... verb-specific payload ...]
*
* Packets smaller than 28 bytes are invalid and silently discarded.
*
* The hop count field is masked during message authentication computation
* and is thus the only field that is mutable in transit. It's incremented
* when roots or other nodes forward packets and exists to prevent infinite
* forwarding loops and to detect direct paths.
*
* HELLO is normally sent in the clear with the POLY1305_NONE cipher suite
* and with Poly1305 computed on plain text (Salsa20/12 is still used to
* generate a one time use Poly1305 key). As of protocol version 11 HELLO
* also includes a terminating HMAC (last 48 bytes) that significantly
* hardens HELLO authentication beyond what a 64-bit MAC can guarantee.
*
* Fragmented packets begin with a packet header whose fragment bit (bit
* 0x40 in the flags field) is set. This constitutes fragment zero. The
* total number of expected fragments is contained in each subsequent
* fragment packet. Unfragmented packets must not have the fragment bit
* set or the receiver will expect at least one additional fragment.
*
* --
*
* Packet fragment format (fragments beyond 0):
* <[8] packet ID of packet to which this fragment belongs>
* <[5] destination ZT address>
* <[1] 0xff here signals that this is a fragment>
* <[1] total fragments (most significant 4 bits), fragment no (LS 4 bits)>
* <[1] ZT hop count (least significant 3 bits; others are reserved)>
* <[...] fragment data>
*
* The protocol supports a maximum of 16 fragments including fragment 0
* which contains the full packet header (with fragment bit set). Fragments
* thus always carry fragment numbers between 1 and 15. All fragments
* belonging to the same packet must carry the same total fragment count in
* the most significant 4 bits of the fragment numbering field.
*
* All fragments have the same packet ID and destination. The packet ID
* doubles as the grouping identifier for fragment reassembly.
*
* Fragments do not carry their own packet MAC. The entire packet is
* authenticated once it is assembled by the receiver. Incomplete packets
* are discarded after a receiver configured period of time.
*/
/*
* Protocol versions
*
* 1 - 0.2.0 ... 0.2.5
* 2 - 0.3.0 ... 0.4.5
* + Added signature and originating peer to multicast frame
* + Double size of multicast frame bloom filter
* 3 - 0.5.0 ... 0.6.0
* + Yet another multicast redesign
* + New crypto completely changes key agreement cipher
* 4 - 0.6.0 ... 1.0.6
* + BREAKING CHANGE: New identity format based on hashcash design
* 5 - 1.1.0 ... 1.1.5
* + Supports echo
* + Supports in-band world (root server definition) updates
* + Clustering! (Though this will work with protocol v4 clients.)
* + Otherwise backward compatible with protocol v4
* 6 - 1.1.5 ... 1.1.10
* + Network configuration format revisions including binary values
* 7 - 1.1.10 ... 1.1.17
* + Introduce trusted paths for local SDN use
* 8 - 1.1.17 ... 1.2.0
* + Multipart network configurations for large network configs
* + Tags and Capabilities
* + inline push of CertificateOfMembership deprecated
* 9 - 1.2.0 ... 1.2.14
* 10 - 1.4.0 ... 1.4.6
* + Contained early pre-alpha versions of multipath, which are deprecated
* 11 - 1.6.0 ... 2.0.0
* + Supports AES-GMAC-SIV symmetric crypto, backported from v2 tree.
* 20 - 2.0.0 ... CURRENT
* + New more WAN-efficient P2P-assisted multicast algorithm
* + HELLO and OK(HELLO) include an extra HMAC to harden authentication
* + HELLO and OK(HELLO) carry meta-data in a dictionary that's encrypted
* + Forward secrecy, key lifetime management
* + Old planet/moon stuff is DEAD! Independent roots are easier.
* + AES encryption with the SIV construction AES-GMAC-SIV
* + New combined Curve25519/NIST P-384 identity type (type 1)
* + Short probe packets to reduce probe bandwidth
* + More aggressive NAT traversal techniques for IPv4 symmetric NATs
*/
#define ZT_PROTO_VERSION 20
/**
* Minimum supported protocol version
*/
#define ZT_PROTO_VERSION_MIN 9
/**
* Maximum allowed packet size (can technically be increased up to 16384)
*/
#define ZT_PROTO_MAX_PACKET_LENGTH (ZT_MAX_PACKET_FRAGMENTS * ZT_MIN_UDP_MTU)
/**
* Minimum viable packet length (outer header + verb)
*/
#define ZT_PROTO_MIN_PACKET_LENGTH 28
/**
* Index at which the encrypted section of a packet begins
*/
#define ZT_PROTO_PACKET_ENCRYPTED_SECTION_START 27
/**
* Index at which packet payload begins (after verb)
*/
#define ZT_PROTO_PACKET_PAYLOAD_START 28
/**
* Maximum hop count allowed by packet structure (3 bits, 0-7)
*
* This is a protocol constant. It's the maximum allowed by the length
* of the hop counter -- three bits. A lower limit is specified as
* the actual maximum hop count.
*/
#define ZT_PROTO_MAX_HOPS 7
/**
* NONE/Poly1305 (used for HELLO for backward compatibility)
*/
#define ZT_PROTO_CIPHER_POLY1305_NONE 0
/**
* Salsa2012/Poly1305 (legacy)
*/
#define ZT_PROTO_CIPHER_POLY1305_SALSA2012 1
/**
* Deprecated, not currently used.
*/
#define ZT_PROTO_CIPHER_NONE 2
/**
* AES-GMAC-SIV
*/
#define ZT_PROTO_CIPHER_AES_GMAC_SIV 3
/**
* Ephemeral key consisting of both a C25519 and a NIST P-384 key pair.
*/
#define ZT_PROTO_EPHEMERAL_KEY_TYPE_C25519_P384 1
/**
* Minimum viable length for a fragment
*/
#define ZT_PROTO_MIN_FRAGMENT_LENGTH 16
/**
* Magic number indicating a fragment if present at index 13
*/
#define ZT_PROTO_PACKET_FRAGMENT_INDICATOR 0xff
/**
* Index at which packet fragment payload starts
*/
#define ZT_PROTO_PACKET_FRAGMENT_PAYLOAD_START_AT ZT_PROTO_MIN_FRAGMENT_LENGTH
/**
* Outer flag indicating that a packet is fragmented and this is just the head.
*/
#define ZT_PROTO_FLAG_FRAGMENTED 0x40U
/**
* Mask for obtaining hops from the combined flags, cipher, and hops field
*/
#define ZT_PROTO_FLAG_FIELD_HOPS_MASK 0x07U
/**
* Verb flag indicating payload is compressed with LZ4
*/
#define ZT_PROTO_VERB_FLAG_COMPRESSED 0x80U
/**
* Mask to extract just the verb from the verb / verb flags field
*/
#define ZT_PROTO_VERB_MASK 0x1fU
/**
* AES-GMAC-SIV first of two keys
*/
#define ZT_KBKDF_LABEL_AES_GMAC_SIV_K0 '0'
/**
* AES-GMAC-SIV second of two keys
*/
#define ZT_KBKDF_LABEL_AES_GMAC_SIV_K1 '1'
/**
* Key used to encrypt dictionary in HELLO with AES-CTR.
*/
#define ZT_KBKDF_LABEL_HELLO_DICTIONARY_ENCRYPT 'H'
/**
* Key used for extra HMAC-SHA384 authentication on some packets.
*/
#define ZT_KBKDF_LABEL_PACKET_HMAC 'M'
#define ZT_PROTO_PACKET_FRAGMENT_INDICATOR_INDEX 13
#define ZT_PROTO_PACKET_FRAGMENT_COUNTS 14
#define ZT_PROTO_PACKET_ID_INDEX 0
#define ZT_PROTO_PACKET_DESTINATION_INDEX 8
#define ZT_PROTO_PACKET_SOURCE_INDEX 13
#define ZT_PROTO_PACKET_FLAGS_INDEX 18
#define ZT_PROTO_PACKET_MAC_INDEX 19
#define ZT_PROTO_PACKET_VERB_INDEX 27
#define ZT_PROTO_HELLO_NODE_META_INSTANCE_ID "i"
#define ZT_PROTO_HELLO_NODE_META_LOCATOR "l"
#define ZT_PROTO_HELLO_NODE_META_SOFTWARE_VENDOR "s"
#define ZT_PROTO_HELLO_NODE_META_COMPLIANCE "c"
#define ZT_PROTO_HELLO_NODE_META_EPHEMERAL_PUBLIC "e"
#define ZT_PROTO_HELLO_NODE_META_EPHEMERAL_ACK "E"
static_assert(ZT_PROTO_MAX_PACKET_LENGTH < ZT_BUF_MEM_SIZE, "maximum packet length won't fit in Buf");
static_assert(ZT_PROTO_PACKET_ENCRYPTED_SECTION_START == (ZT_PROTO_MIN_PACKET_LENGTH - 1), "encrypted packet section must start right before protocol verb at one less than minimum packet size");
namespace ZeroTier {
namespace Protocol {
/**
* Packet verb (message type)
*/
enum Verb {
/**
 * No operation
 *
 * This packet does nothing, but it is sometimes sent as a probe to
 * trigger a HELLO exchange as the code will attempt HELLO when it
 * receives a packet from an unidentified source.
 */
VERB_NOP = 0x00,
/**
 * Announcement of a node's existence and vitals:
 * <[1] protocol version>
 * <[1] software major version (optional, 0 if unspecified)>
 * <[1] software minor version (optional, 0 if unspecified)>
 * <[2] software revision (optional, 0 if unspecified)>
 * <[8] timestamp>
 * <[...] binary serialized full sender identity>
 * <[...] physical destination of packet>
 * <[12] 96-bit CTR IV>
 * <[6] reserved bytes, currently used for legacy compatibility>
 * [... start of encrypted section ...]
 * <[2] 16-bit length of encrypted dictionary>
 * <[...] encrypted dictionary>
 * [... end of encrypted section ...]
 * <[48] HMAC-SHA384 of packet>
 *
 * HELLO is sent to initiate a new pairing between two nodes and
 * periodically to refresh information.
 *
 * HELLO is the only packet ever sent without whole payload encryption,
 * though an inner encrypted envelope exists to obscure all fields that
 * do not need to be sent in the clear. There is nothing in this
 * encrypted section that would be catastrophic if it leaked, but it's
 * good to proactively limit exposed information.
 *
 * Inner encryption is AES-CTR with a key derived using KBKDF and a
 * label indicating this specific usage. A 96-bit CTR IV precedes this
 * encrypted section.
 *
 * Authentication and encryption in HELLO and OK(HELLO) are always done
 * with the long-lived identity key, not ephemeral shared keys. This
 * is so ephemeral key negotiation can always occur on the first try
 * even if things get out of sync e.g. by one side restarting. Nothing
 * in HELLO is likely to be dangerous if decrypted later.
 *
 * HELLO and OK(HELLO) include an extra HMAC at the end of the packet.
 * This authenticates them to a level of certainty beyond that afforded
 * by regular AEAD. HMAC is computed over the whole packet prior to
 * packet MAC and with the 3-bit hop count field masked as it is
 * with regular packet AEAD, and it is then included in the regular
 * packet MAC.
 *
 * LEGACY: for legacy reasons the MAC field of HELLO is a poly1305
 * MAC initialized in the same manner as 1.x. Since HMAC provides
 * additional full 384-bit strength authentication this should not be
 * a problem for FIPS.
 *
 * Several legacy fields are present as well for the benefit of 1.x nodes.
 * These will go away and become simple reserved space once 1.x is no longer
 * supported. Some are self-explanatory. The "encrypted zero" is rather
 * strange. It's a 16-bit zero value encrypted using Salsa20/12 and the
 * long-lived identity key shared by the two peers. It tells 1.x that an
 * old encrypted field is no longer there and that it should stop parsing
 * the packet at that point.
 *
 * 1.x does not understand the dictionary and HMAC fields, but it will
 * ignore them due to the "encrypted zero" field indicating that the
 * packet contains no more information.
 *
 * Dictionary fields (defines start with ZT_PROTO_HELLO_NODE_META_):
 *
 * INSTANCE_ID - a 64-bit unique value generated on each node start
 * LOCATOR - signed record enumerating this node's trusted contact points
 * EPHEMERAL_PUBLIC - Ephemeral public key(s)
 *
 * OK will contain EPHEMERAL_PUBLIC of the responding node and:
 *
 * EPHEMERAL_ACK - SHA384(EPHEMERAL_PUBLIC from HELLO)
 *
 * The following optional fields may also be present:
 *
 * PREFERRED_CIPHER - preferred symmetric encryption mode
 * HOSTNAME - arbitrary short host name for this node
 * ARCH - system architecture (CPU type, bits, etc.)
 * OSNAME - system operating system name
 * OSVERSION - operating system version
 * CONTACT - arbitrary short contact information string for this node
 * SOFTWARE_VENDOR - short name or description of vendor, such as a URL
 * COMPLIANCE - bit mask containing bits for e.g. a FIPS-compliant node
 *
 * The timestamp field in OK is echoed but the others represent the sender
 * of the OK and are not echoes from HELLO. The dictionary in OK typically
 * only contains the EPHEMERAL fields, allowing the receiver of the OK to
 * confirm that both sides know the correct keys and thus begin using the
 * ephemeral shared secret to send packets.
 *
 * OK is sent encrypted with the usual AEAD, but still includes a full HMAC
 * as well (inside the cryptographic envelope).
 *
 * OK payload:
 * <[8] timestamp echoed from original HELLO>
 * <[1] protocol version of responding node>
 * <[1] software major version (optional)>
 * <[1] software minor version (optional)>
 * <[2] software revision (optional)>
 * <[...] physical destination address of packet>
 * <[2] 16-bit reserved field (zero for legacy compatibility)>
 * <[2] 16-bit length of dictionary>
 * <[...] dictionary>
 * <[48] HMAC-SHA384 of plaintext packet>
 */
VERB_HELLO = 0x01,
/**
 * Error response:
 * <[1] in-re verb>
 * <[8] in-re packet ID>
 * <[1] error code>
 * <[...] error-dependent payload, may be empty>
 *
 * An ERROR that does not pertain to a specific packet will have its verb
 * set to VERB_NOP and its packet ID set to zero.
 */
VERB_ERROR = 0x02,
/**
 * Success response:
 * <[1] in-re verb>
 * <[8] in-re packet ID>
 * <[...] response-specific payload>
 */
VERB_OK = 0x03,
/**
 * Query an identity by address:
 * <[5] address to look up>
 * [<[...] additional addresses to look up>]
 *
 * OK response payload:
 * <[...] identity>
 * <[...] locator>
 * [... additional identity/locator pairs]
 *
 * If the address is not found, no response is generated. The semantics
 * of WHOIS is similar to ARP and NDP in that persistent retrying can
 * be performed.
 *
 * It is possible for an identity with a null/empty locator to be returned
 * if no locator is known for a node. Older versions may omit the locator.
 */
VERB_WHOIS = 0x04,
/**
 * Relay-mediated NAT traversal or firewall punching initiation:
 * <[1] flags>
 * <[5] ZeroTier address of other peer>
 * <[2] 16-bit number of endpoints where peer might be reached>
 * [<[...] endpoints to attempt>]
 *
 * Legacy packet format for pre-2.x peers:
 * <[1] flags (unused, currently 0)>
 * <[5] ZeroTier address of other peer>
 * <[2] 16-bit protocol address port>
 * <[1] protocol address length / type>
 * <[...] protocol address (network byte order)>
 *
 * When a root or other peer is relaying messages, it can periodically send
 * RENDEZVOUS to assist peers in establishing direct communication.
 *
 * Peers also directly exchange information via HELLO, so this serves as
 * a second way for peers to learn about their possible locations.
 *
 * It also serves another function: temporal coordination of NAT traversal
 * attempts. Some NATs traverse better if both sides first send "firewall
 * opener" packets and then send real packets and if this exchange is
 * coordinated in time so that the packets effectively pass each other in
 * flight.
 *
 * No OK or ERROR is generated.
 */
VERB_RENDEZVOUS = 0x05,
/**
 * ZT-to-ZT unicast ethernet frame (shortened EXT_FRAME):
 * <[8] 64-bit network ID>
 * <[2] 16-bit ethertype>
 * <[...] ethernet payload>
 *
 * MAC addresses are derived from the packet's source and destination
 * ZeroTier addresses. This is a shortened EXT_FRAME that elides full
 * Ethernet framing and other optional flags and features when they
 * are not necessary.
 *
 * ERROR may be generated if a membership certificate is needed for a
 * closed network. Payload will be network ID.
 */
VERB_FRAME = 0x06,
/**
 * Full Ethernet frame with MAC addressing and optional fields:
 * <[8] 64-bit network ID>
 * <[1] flags>
 * <[6] destination MAC or all zero for destination node>
 * <[6] source MAC or all zero for node of origin>
 * <[2] 16-bit ethertype>
 * <[...] ethernet payload>
 *
 * Flags:
 * 0x01 - Certificate of network membership attached (DEPRECATED)
 * 0x02 - Most significant bit of subtype (see below)
 * 0x04 - Middle bit of subtype (see below)
 * 0x08 - Least significant bit of subtype (see below)
 * 0x10 - ACK requested in the form of OK(EXT_FRAME)
 *
 * Subtypes (0..7):
 * 0x0 - Normal frame (bridging can be determined by checking MAC)
 * 0x1 - TEEd outbound frame
 * 0x2 - REDIRECTed outbound frame
 * 0x3 - WATCHed outbound frame (TEE with ACK, ACK bit also set)
 * 0x4 - TEEd inbound frame
 * 0x5 - REDIRECTed inbound frame
 * 0x6 - WATCHed inbound frame
 * 0x7 - (reserved for future use)
 *
 * An extended frame carries full MAC addressing, making it a
 * superset of VERB_FRAME. If 0x20 is set then p2p or hub and
 * spoke multicast propagation is requested.
 * (NOTE(review): 0x20 is not in the flag list above -- confirm whether
 * this sentence is stale.)
 *
 * OK payload (if ACK flag is set):
 * <[8] 64-bit network ID>
 * <[1] flags>
 * <[6] destination MAC or all zero for destination node>
 * <[6] source MAC or all zero for node of origin>
 * <[2] 16-bit ethertype>
 */
VERB_EXT_FRAME = 0x07,
/**
 * ECHO request (a.k.a. ping):
 * <[...] arbitrary payload>
 *
 * This generates OK with a copy of the transmitted payload. No ERROR
 * is generated. Response to ECHO requests is optional and ECHO may be
 * ignored if a node detects a possible flood.
 */
VERB_ECHO = 0x08,
/**
 * Announce interest in multicast group(s):
 * <[8] 64-bit network ID>
 * <[6] multicast Ethernet address>
 * <[4] multicast additional distinguishing information (ADI)>
 * [... additional tuples of network/address/adi ...]
 *
 * LIKEs may be sent to any peer, though a good implementation should
 * restrict them to peers on the same network they're for and to network
 * controllers and root servers. In the current network, root servers
 * will provide the service of final multicast cache.
 */
VERB_MULTICAST_LIKE = 0x09,
/**
 * Network credentials push:
 * [<[...] one or more certificates of membership>]
 * <[1] 0x00, null byte marking end of COM array>
 * <[2] 16-bit number of capabilities>
 * <[...] one or more serialized Capability>
 * <[2] 16-bit number of tags>
 * <[...] one or more serialized Tags>
 * <[2] 16-bit number of revocations>
 * <[...] one or more serialized Revocations>
 * <[2] 16-bit number of certificates of ownership>
 * <[...] one or more serialized CertificateOfOwnership>
 *
 * This can be sent by anyone at any time to push network credentials.
 * These will of course only be accepted if they are properly signed.
 * Credentials can be for any number of networks.
 *
 * The use of a zero byte to terminate the COM section is for legacy
 * backward compatibility. Newer fields are prefixed with a length.
 *
 * OK/ERROR are not generated.
 */
VERB_NETWORK_CREDENTIALS = 0x0a,
/**
 * Network configuration request:
 * <[8] 64-bit network ID>
 * <[2] 16-bit length of request meta-data dictionary>
 * <[...] string-serialized request meta-data>
 * <[8] 64-bit revision of netconf we currently have>
 * <[8] 64-bit timestamp of netconf we currently have>
 *
 * This message requests network configuration from a node capable of
 * providing it. Responses can be sent as OK(NETWORK_CONFIG_REQUEST)
 * or NETWORK_CONFIG messages. NETWORK_CONFIG can also be sent by
 * network controllers or other nodes unsolicited.
 *
 * OK response payload:
 * (same as VERB_NETWORK_CONFIG payload)
 *
 * ERROR response payload:
 * <[8] 64-bit network ID>
 */
VERB_NETWORK_CONFIG_REQUEST = 0x0b,
/**
 * Network configuration data push:
 * <[8] 64-bit network ID>
 * <[2] 16-bit length of network configuration dictionary chunk>
 * <[...] network configuration dictionary (may be incomplete)>
 * <[1] 8-bit flags>
 * <[8] 64-bit config update ID (should never be 0)>
 * <[4] 32-bit total length of assembled dictionary>
 * <[4] 32-bit index of chunk>
 * [ ... end signed portion ... ]
 * <[1] 8-bit reserved field (legacy)>
 * <[2] 16-bit length of chunk signature>
 * <[...] chunk signature>
 *
 * Network configurations can come from network controllers or theoretically
 * any other node, but each chunk must be signed by the network controller
 * that generated it originally. The config update ID is arbitrary and is merely
 * used by the receiver to group chunks. Chunk indexes must be sequential and
 * the total delivered chunks must yield a total network config equal to the
 * specified total length.
 *
 * Flags:
 * 0x01 - Use fast propagation -- rumor mill flood this chunk to other members
 *
 * An OK should be sent if the config is successfully received and
 * accepted.
 *
 * OK payload:
 * <[8] 64-bit network ID>
 * <[8] 64-bit config update ID>
 */
VERB_NETWORK_CONFIG = 0x0c,
/**
 * Request endpoints for multicast distribution:
 * <[8] 64-bit network ID>
 * <[1] flags>
 * <[6] MAC address of multicast group being queried>
 * <[4] 32-bit ADI for multicast group being queried>
 * <[4] 32-bit requested max number of multicast peers>
 *
 * This message asks a peer for additional known endpoints that have
 * LIKEd a given multicast group. It's sent when the sender wishes
 * to send multicast but does not have the desired number of recipient
 * peers.
 *
 * OK response payload: (multiple OKs can be generated)
 * <[8] 64-bit network ID>
 * <[6] MAC address of multicast group being queried>
 * <[4] 32-bit ADI for multicast group being queried>
 * <[4] 32-bit total number of known members in this multicast group>
 * <[2] 16-bit number of members enumerated in this packet>
 * <[...] series of 5-byte ZeroTier addresses of enumerated members>
 *
 * ERROR is not generated; queries that return no response are dropped.
 */
VERB_MULTICAST_GATHER = 0x0d,
// Deprecated multicast frame message type.
VERB_MULTICAST_FRAME_deprecated = 0x0e,
/**
 * Push of potential endpoints for direct communication:
 * <[2] 16-bit number of endpoints>
 * <[...] endpoints>
 *
 * If the target node is pre-2.0 path records of the following format
 * are sent instead of post-2.x endpoints:
 * <[1] 8-bit path flags (zero)>
 * <[2] length of extended path characteristics (0)>
 * [<[...] extended path characteristics>]
 * <[1] address type>
 * <[1] address length in bytes>
 * <[...] address>
 *
 * Recipients will add these endpoints to a queue of possible endpoints
 * to try for a given peer.
 *
 * OK and ERROR are not generated.
 */
VERB_PUSH_DIRECT_PATHS = 0x10,
/**
 * A message with arbitrary user-definable content:
 * <[8] 64-bit arbitrary message type ID>
 * [<[...] message payload>]
 *
 * This can be used to send arbitrary messages over VL1. It generates no
 * OK or ERROR and has no special semantics outside of whatever the user
 * (via the ZeroTier core API) chooses to give it.
 *
 * Message type IDs less than or equal to 65535 are reserved for use by
 * ZeroTier, Inc. itself. We recommend making up random ones for your own
 * implementations.
 */
VERB_USER_MESSAGE = 0x14,
// New WAN-efficient P2P-assisted multicast introduced in protocol v20
// (see the protocol version history at the top of this file); payload
// format is not documented in this header.
VERB_MULTICAST = 0x16,
/**
 * Encapsulate a full ZeroTier packet in another:
 * <[...] raw encapsulated packet>
 *
 * Encapsulation exists to enable secure relaying as opposed to the usual
 * "dumb" relaying. The latter is faster but secure relaying has roles
 * where endpoint privacy is desired.
 *
 * Packet hop count is incremented as normal.
 */
VERB_ENCAP = 0x17
// protocol max: 0x1f
};
#ifdef ZT_DEBUG_SPEW
/**
 * Debug-build helper: translate a Verb into a human-readable name for
 * trace/log output. Returns "(unknown)" for unrecognized values.
 */
static ZT_INLINE const char *verbName(const Verb v) noexcept
{
switch (v) {
case VERB_NOP: return "NOP";
case VERB_HELLO: return "HELLO";
case VERB_ERROR: return "ERROR";
case VERB_OK: return "OK";
case VERB_WHOIS: return "WHOIS";
case VERB_RENDEZVOUS: return "RENDEZVOUS";
case VERB_FRAME: return "FRAME";
case VERB_EXT_FRAME: return "EXT_FRAME";
case VERB_ECHO: return "ECHO";
case VERB_MULTICAST_LIKE: return "MULTICAST_LIKE";
case VERB_NETWORK_CREDENTIALS: return "NETWORK_CREDENTIALS";
case VERB_NETWORK_CONFIG_REQUEST: return "NETWORK_CONFIG_REQUEST";
case VERB_NETWORK_CONFIG: return "NETWORK_CONFIG";
case VERB_MULTICAST_GATHER: return "MULTICAST_GATHER";
case VERB_MULTICAST_FRAME_deprecated: return "MULTICAST_FRAME_deprecated";
case VERB_PUSH_DIRECT_PATHS: return "PUSH_DIRECT_PATHS";
case VERB_USER_MESSAGE: return "USER_MESSAGE";
case VERB_MULTICAST: return "MULTICAST";
case VERB_ENCAP: return "ENCAP";
default: return "(unknown)";
}
}
#endif
/**
* Error codes used in ERROR packets.
*/
enum ErrorCode {
    ERROR_INVALID_REQUEST = 0x01,             ///< Request malformed or invalid
    ERROR_BAD_PROTOCOL_VERSION = 0x02,        ///< Peer's protocol version is bad/unsupported
    ERROR_OBJ_NOT_FOUND = 0x03,               ///< Queried object is unknown
    ERROR_UNSUPPORTED_OPERATION = 0x05,       ///< Verb or use case not supported/enabled by this node
    ERROR_NEED_MEMBERSHIP_CERTIFICATE = 0x06, ///< Network access denied; updated credentials needed
    ERROR_NETWORK_ACCESS_DENIED_ = 0x07,      ///< Tried to join network but not a member (trailing '_' avoids a Windows name conflict)
    ERROR_CANNOT_DELIVER = 0x09               ///< Cannot deliver a forwarded ZeroTier packet (for any reason)
};
/**
* EXT_FRAME subtypes, which are packed into three bits in the flags field.
*
* This allows the node to know whether this is a normal frame or one generated
* by a special tee or redirect type flow rule.
*/
enum ExtFrameSubtype {
    EXT_FRAME_SUBTYPE_NORMAL = 0x0,            ///< Ordinary frame (bridging determined by MAC)
    EXT_FRAME_SUBTYPE_TEE_OUTBOUND = 0x1,      ///< Copy produced by an outbound TEE rule
    EXT_FRAME_SUBTYPE_REDIRECT_OUTBOUND = 0x2, ///< Outbound frame redirected by rule
    EXT_FRAME_SUBTYPE_WATCH_OUTBOUND = 0x3,    ///< Outbound TEE with ACK requested
    EXT_FRAME_SUBTYPE_TEE_INBOUND = 0x4,       ///< Copy produced by an inbound TEE rule
    EXT_FRAME_SUBTYPE_REDIRECT_INBOUND = 0x5,  ///< Inbound frame redirected by rule
    EXT_FRAME_SUBTYPE_WATCH_INBOUND = 0x6      ///< Inbound TEE with ACK requested
};
/**
* EXT_FRAME flags
*/
enum ExtFrameFlag {
    /**
     * A certificate of network membership was attached (no longer used but still accepted).
     */
    EXT_FRAME_FLAG_COM_ATTACHED_deprecated = 0x01,

    // Bits 0x02, 0x04, and 0x08 carry the 3-bit ExtFrameSubtype value.

    /**
     * The sender requested an OK(EXT_FRAME) acknowledgement.
     */
    EXT_FRAME_FLAG_ACK_REQUESTED = 0x10
};
/**
 * Flags for NETWORK_CONFIG (or OK(NETWORK_CONFIG_REQUEST)) messages.
 */
enum NetworkConfigFlag {
    /**
     * This network config chunk should be fast propagated via rumor mill flooding.
     */
    NETWORK_CONFIG_FLAG_FAST_PROPAGATE = 0x01
};
/**
 * Deterministically mangle a 256-bit crypto key based on packet characteristics
 *
 * This uses extra data from the packet to mangle the secret, yielding when
 * combined with Salsa20's conventional 64-bit nonce an effective nonce that's
 * more like 68 bits.
 *
 * @param in Input key (32 bytes)
 * @param out Output buffer (32 bytes)
 * @param packet Packet whose leading header bytes are mixed into the key
 * @param packetSize Total raw packet size in bytes
 */
static ZT_INLINE void salsa2012DeriveKey(const uint8_t *const in, uint8_t *const out, const Buf &packet, const unsigned int packetSize) noexcept
{
    // IV and source/destination addresses. Using the addresses divides the
    // key space into two halves -- A->B and B->A (since order will change).
    // (Bytes 0-17 are presumably the 8-byte packet ID/IV plus the two 5-byte
    // ZT addresses -- see newPacket() below.)
#ifdef ZT_NO_UNALIGNED_ACCESS
    // Byte-wise XOR for targets that cannot do unaligned word loads.
    for (int i = 0; i < 18; ++i)
        out[i] = in[i] ^ packet.unsafeData[i];
#else
    // Same 18-byte XOR done as two 64-bit words plus one 16-bit word.
    *reinterpret_cast<uint64_t *>(out) = *reinterpret_cast<const uint64_t *>(in) ^ *reinterpret_cast<const uint64_t *>(packet.unsafeData);
    *reinterpret_cast<uint64_t *>(out + 8) = *reinterpret_cast<const uint64_t *>(in + 8) ^ *reinterpret_cast<const uint64_t *>(packet.unsafeData + 8);
    *reinterpret_cast<uint16_t *>(out + 16) = *reinterpret_cast<const uint16_t *>(in + 16) ^ *reinterpret_cast<const uint16_t *>(packet.unsafeData + 16);
#endif
    // Flags, but with hop count masked off. Hop count is altered by forwarding
    // nodes and is the only field that is mutable by unauthenticated third parties.
    out[18] = in[18] ^ (packet.unsafeData[18] & 0xf8U);
    // Raw packet size in bytes -- thus each packet size defines a new key space.
    out[19] = in[19] ^ (uint8_t)packetSize;
    out[20] = in[20] ^ (uint8_t)(packetSize >> 8U); // little endian
    // Rest of raw key is used unchanged
#ifdef ZT_NO_UNALIGNED_ACCESS
    for (int i = 21; i < 32; ++i)
        out[i] = in[i];
#else
    out[21] = in[21];
    out[22] = in[22];
    out[23] = in[23];
    *reinterpret_cast<uint64_t *>(out + 24) = *reinterpret_cast<const uint64_t *>(in + 24);
#endif
}
/**
 * Fill out packet header fields (except for mac, which is filled out by armor())
 *
 * @param pkt Start of packet buffer (at least 28 bytes)
 * @param packetId Packet IV / cryptographic MAC
 * @param destination Destination ZT address
 * @param source Source (sending) ZT address
 * @param verb Protocol verb
 * @return Index of packet payload start (byte after the verb)
 */
static ZT_INLINE int newPacket(uint8_t pkt[28], const uint64_t packetId, const Address destination, const Address source, const Verb verb) noexcept
{
    // Packet ID / IV, then the two addresses.
    Utils::storeMachineEndian<uint64_t>(pkt + ZT_PROTO_PACKET_ID_INDEX, packetId);
    destination.copyTo(pkt + ZT_PROTO_PACKET_DESTINATION_INDEX);
    source.copyTo(pkt + ZT_PROTO_PACKET_SOURCE_INDEX);
    // Zero the MAC field for now; armor() computes the real MAC later.
    Utils::storeMachineEndian<uint64_t>(pkt + ZT_PROTO_PACKET_MAC_INDEX, 0);
    // Clear flags and set the verb last.
    pkt[ZT_PROTO_PACKET_FLAGS_INDEX] = 0;
    pkt[ZT_PROTO_PACKET_VERB_INDEX] = (uint8_t)verb;
    return ZT_PROTO_PACKET_VERB_INDEX + 1;
}
/// Convenience overload: build the packet header directly in a Buf.
static ZT_INLINE int newPacket(Buf &pkt, const uint64_t packetId, const Address destination, const Address source, const Verb verb) noexcept
{
    return newPacket(pkt.unsafeData, packetId, destination, source, verb);
}
/**
 * Encrypt and compute packet MAC
 *
 * @param pkt Packet data to encrypt (in place)
 * @param packetSize Packet size, must be at least ZT_PROTO_MIN_PACKET_LENGTH or crash will occur
 * @param key Key to use for encryption
 * @param cipherSuite Cipher suite to use for AEAD encryption or just MAC
 * @return Packet ID of packet (which may change!)
 */
static ZT_INLINE uint64_t armor(uint8_t *const pkt, const int packetSize, const SymmetricKey &key, const uint8_t cipherSuite) noexcept
{
    // TODO: not yet implemented in this revision. The previous implementation
    // is preserved (disabled) below for reference.
    // NOTE(review): until this is filled in, callers always receive 0 rather
    // than a real packet ID -- confirm intended before relying on the return.
#if 0
	Protocol::Header &ph = pkt.as<Protocol::Header>(); // NOLINT(hicpp-use-auto,modernize-use-auto)
	ph.flags = (ph.flags & 0xc7U) | ((cipherSuite << 3U) & 0x38U); // flags: FFCCCHHH where CCC is cipher
	switch(cipherSuite) {
		case ZT_PROTO_CIPHER_SUITE__POLY1305_NONE: {
			uint8_t perPacketKey[ZT_SYMMETRIC_KEY_SIZE];
			salsa2012DeriveKey(key,perPacketKey,pkt,packetSize);
			Salsa20 s20(perPacketKey,&ph.packetId);
			uint8_t macKey[ZT_POLY1305_KEY_SIZE];
			s20.crypt12(Utils::ZERO256,macKey,ZT_POLY1305_KEY_SIZE);
			// only difference here is that we don't encrypt the payload
			uint64_t mac[2];
			poly1305(mac,pkt.unsafeData + ZT_PROTO_PACKET_ENCRYPTED_SECTION_START,packetSize - ZT_PROTO_PACKET_ENCRYPTED_SECTION_START,macKey);
			ph.mac = mac[0];
		} break;
		case ZT_PROTO_CIPHER_SUITE__POLY1305_SALSA2012: {
			uint8_t perPacketKey[ZT_SYMMETRIC_KEY_SIZE];
			salsa2012DeriveKey(key,perPacketKey,pkt,packetSize);
			Salsa20 s20(perPacketKey,&ph.packetId);
			uint8_t macKey[ZT_POLY1305_KEY_SIZE];
			s20.crypt12(Utils::ZERO256,macKey,ZT_POLY1305_KEY_SIZE);
			const unsigned int encLen = packetSize - ZT_PROTO_PACKET_ENCRYPTED_SECTION_START;
			s20.crypt12(pkt.unsafeData + ZT_PROTO_PACKET_ENCRYPTED_SECTION_START,pkt.unsafeData + ZT_PROTO_PACKET_ENCRYPTED_SECTION_START,encLen);
			uint64_t mac[2];
			poly1305(mac,pkt.unsafeData + ZT_PROTO_PACKET_ENCRYPTED_SECTION_START,encLen,macKey);
			ph.mac = mac[0];
		} break;
		case ZT_PROTO_CIPHER_SUITE__AES_GMAC_SIV: {
		} break;
	}
#endif
    return 0;
}
/**
 * Attempt to compress packet payload
 *
 * This attempts compression and swaps the pointer in 'pkt' for a buffer holding
 * compressed data on success. If compression did not shrink the packet, the original
 * packet size is returned and 'pkt' remains unchanged. If compression is successful
 * the compressed verb flag is also set.
 *
 * @param pkt Packet buffer value/result parameter: pointer may be swapped if compression is successful
 * @param packetSize Total size of packet in bytes (including headers)
 * @return New size of packet after compression, or original size if compression wasn't helpful
 */
static ZT_INLINE int compress(Buf &pkt, int packetSize) noexcept
{
    // TODO: compression is not yet implemented in this revision; currently a
    // pass-through that leaves the packet untouched.
    (void)pkt; // silence unused-parameter warning until implemented
    return packetSize;
}
} // namespace Protocol
} // namespace ZeroTier
#endif

View file

@ -1,13 +0,0 @@
ZeroTier Network Hypervisor Core
======
This directory contains the *real* ZeroTier: a completely OS-independent global virtual Ethernet switch engine. This is where the magic happens.
Give it wire packets and it gives you Ethernet packets, and vice versa. The core contains absolutely no actual I/O, port configuration, or other OS-specific code (except Utils::getSecureRandom()). It provides a simple C API via [/include/ZeroTierOne.h](../include/ZeroTierOne.h). It's designed to be small and maximally portable for future use on small embedded and special purpose systems.
Code in here follows these guidelines:
- Keep it minimal, especially in terms of code footprint and memory use.
- There should be no OS-dependent code here unless absolutely necessary (e.g. getSecureRandom).
- If it's not part of the core virtual Ethernet switch it does not belong here.
- Minimize the use of complex C++ features since at some point we might end up "minus-minus'ing" this code if doing so proves necessary to port to tiny embedded systems.

View file

@ -1,92 +0,0 @@
/*
* Copyright (c)2013-2021 ZeroTier, Inc.
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
*/
/****/
#include "RevocationCredential.hpp"
namespace ZeroTier {
bool RevocationCredential::sign(const Identity &signer) noexcept
{
uint8_t buf[ZT_REVOCATION_MARSHAL_SIZE_MAX + 32];
if (signer.hasPrivate()) {
m_signedBy = signer.address();
m_signatureLength = signer.sign(buf, (unsigned int)marshal(buf, true), m_signature, sizeof(m_signature));
return true;
}
return false;
}
// Serialize this revocation into 'data'.
//
// Wire layout (offsets for the normal, non-forSign case):
//   0..3   reserved (zero)
//   4..7   id (BE32)
//   8..15  network ID (BE64)
//   16..19 reserved (zero)
//   20..23 credential ID (BE32)
//   24..31 threshold (BE64)
//   32..39 flags (BE64)
//   40..44 target address
//   45..49 signer address
//   50     revoked credential type
//   51     signature marker (1)
//   52..53 signature length (BE16)
//   54..   signature, then a 2-byte zero extension-length field
// When forSign is true the signature section is omitted and the blob is
// bracketed by 8 bytes of 0x7f on each side.
//
// Returns the number of bytes written.
int RevocationCredential::marshal(uint8_t data[ZT_REVOCATION_MARSHAL_SIZE_MAX], bool forSign) const noexcept
{
    int p = 0;
    if (forSign) {
        for (int k = 0; k < 8; ++k)
            data[p++] = 0x7f;
    }
    Utils::storeBigEndian<uint32_t>(data + p, 0);
    Utils::storeBigEndian<uint32_t>(data + p + 4, m_id);
    Utils::storeBigEndian<uint64_t>(data + p + 8, m_networkId);
    Utils::storeBigEndian<uint32_t>(data + p + 16, 0);
    Utils::storeBigEndian<uint32_t>(data + p + 20, m_credentialId);
    Utils::storeBigEndian<uint64_t>(data + p + 24, (uint64_t)m_threshold);
    Utils::storeBigEndian<uint64_t>(data + p + 32, m_flags);
    p += 40;
    m_target.copyTo(data + p);
    p += ZT_ADDRESS_LENGTH;
    m_signedBy.copyTo(data + p);
    p += ZT_ADDRESS_LENGTH;
    data[p++] = (uint8_t)m_type;
    if (!forSign) {
        data[p++] = 1;
        Utils::storeBigEndian<uint16_t>(data + p, (uint16_t)m_signatureLength);
        // BUGFIX: advance past the 2-byte length field before copying the
        // signature; previously the signature overwrote its own length bytes,
        // which contradicts unmarshal() (length at offset 52, signature at 54).
        p += 2;
        Utils::copy(data + p, m_signature, m_signatureLength);
        p += (int)m_signatureLength;
    }
    // Zero-length extension field (reserved for future additions).
    data[p++] = 0;
    data[p++] = 0;
    if (forSign) {
        for (int k = 0; k < 8; ++k)
            data[p++] = 0x7f;
    }
    return p;
}
// Deserialize a revocation from 'data' (layout as produced by marshal()).
// Returns the number of bytes consumed, or -1 on malformed/truncated input.
int RevocationCredential::unmarshal(const uint8_t *restrict data, const int len) noexcept
{
    // 54 bytes is the minimum: fixed fields plus the 2-byte signature length.
    if (len < 54)
        return -1;
    // 4 bytes reserved
    m_id = Utils::loadBigEndian<uint32_t>(data + 4);
    m_networkId = Utils::loadBigEndian<uint64_t>(data + 8);
    // 4 bytes reserved
    m_credentialId = Utils::loadBigEndian<uint32_t>(data + 20);
    m_threshold = (int64_t)Utils::loadBigEndian<uint64_t>(data + 24);
    m_flags = Utils::loadBigEndian<uint64_t>(data + 32);
    m_target.setTo(data + 40);
    m_signedBy.setTo(data + 45);
    m_type = (ZT_CredentialType)data[50];
    // 1 byte reserved
    m_signatureLength = Utils::loadBigEndian<uint16_t>(data + 52);
    int p = 54 + (int)m_signatureLength;
    // Bound-check the signature both against our buffer and the input length
    // before copying it.
    if ((m_signatureLength > ZT_SIGNATURE_BUFFER_SIZE) || (p > len))
        return -1;
    Utils::copy(m_signature, data + 54, m_signatureLength);
    // Trailing extension field: 2-byte big-endian length plus that many bytes,
    // skipped for forward compatibility.
    if ((p + 2) > len)
        return -1;
    p += 2 + Utils::loadBigEndian<uint16_t>(data + p);
    if (p > len)
        return -1;
    return p;
}
} // namespace ZeroTier

View file

@ -1,125 +0,0 @@
/*
* Copyright (c)2013-2021 ZeroTier, Inc.
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
*/
/****/
#ifndef ZT_REVOCATION_HPP
#define ZT_REVOCATION_HPP
#include "Address.hpp"
#include "C25519.hpp"
#include "Constants.hpp"
#include "Credential.hpp"
#include "Identity.hpp"
#include "Utils.hpp"
/**
* Flag: fast propagation via rumor mill algorithm
*/
#define ZT_REVOCATION_FLAG_FAST_PROPAGATE 0x1ULL
#define ZT_REVOCATION_MARSHAL_SIZE_MAX (4 + 4 + 8 + 4 + 4 + 8 + 8 + 5 + 5 + 1 + 1 + 2 + ZT_SIGNATURE_BUFFER_SIZE + 2)
namespace ZeroTier {
class Context;
/**
 * Revocation certificate to instantaneously revoke a COM, capability, or tag
 */
class RevocationCredential : public Credential {
    friend class Credential;

  public:
    // Credential type tag used by the generic Credential machinery.
    static constexpr ZT_CredentialType credentialType() noexcept { return ZT_CREDENTIAL_TYPE_REVOCATION; }

    // Default-construct a zeroed revocation.
    ZT_INLINE RevocationCredential() noexcept { memoryZero(this); }

    /**
     * @param i ID (arbitrary for revocations, currently random)
     * @param nwid Network ID
     * @param cid Credential ID being revoked (0 for all or for COMs, which lack IDs)
     * @param thr Revocation time threshold before which credentials will be revoked
     * @param fl Flags
     * @param tgt Target node whose credential(s) are being revoked
     * @param ct Credential type being revoked
     */
    ZT_INLINE RevocationCredential(
        const uint32_t i, const uint64_t nwid, const uint32_t cid, const uint64_t thr, const uint64_t fl, const Address &tgt,
        const ZT_CredentialType ct) noexcept
        : // NOLINT(cppcoreguidelines-pro-type-member-init,hicpp-member-init)
        m_id(i)
        , m_credentialId(cid)
        , m_networkId(nwid)
        , m_threshold(thr)
        , m_flags(fl)
        , m_target(tgt)
        , m_signedBy()
        , m_type(ct)
        , m_signatureLength(0)
    {
    }

    // Simple field accessors.
    ZT_INLINE uint32_t id() const noexcept { return m_id; }
    ZT_INLINE uint32_t credentialId() const noexcept { return m_credentialId; }
    ZT_INLINE uint64_t networkId() const noexcept { return m_networkId; }
    ZT_INLINE int64_t threshold() const noexcept { return m_threshold; }
    ZT_INLINE const Address &target() const noexcept { return m_target; }
    ZT_INLINE const Address &signer() const noexcept { return m_signedBy; }
    ZT_INLINE ZT_CredentialType typeBeingRevoked() const noexcept { return m_type; }
    ZT_INLINE const uint8_t *signature() const noexcept { return m_signature; }
    ZT_INLINE unsigned int signatureLength() const noexcept { return m_signatureLength; }

    // True if the fast-propagate (rumor mill flooding) flag is set.
    ZT_INLINE bool fastPropagate() const noexcept { return ((m_flags & ZT_REVOCATION_FLAG_FAST_PROPAGATE) != 0); }

    /**
     * @param signer Signing identity, must have private key
     * @return True if signature was successful
     */
    bool sign(const Identity &signer) noexcept;

    /**
     * Verify this revocation's signature
     *
     * @param ctx Runtime context to provide for peer lookup, etc.
     * @param cc Call context handed through to any callbacks invoked as a result of this call
     */
    ZT_INLINE Credential::VerifyResult verify(const Context &ctx, const CallContext &cc) const noexcept { return s_verify(ctx, cc, *this); }

    static constexpr int marshalSizeMax() noexcept { return ZT_REVOCATION_MARSHAL_SIZE_MAX; }
    int marshal(uint8_t data[ZT_REVOCATION_MARSHAL_SIZE_MAX], bool forSign = false) const noexcept;
    int unmarshal(const uint8_t *restrict data, int len) noexcept;

  private:
    uint32_t m_id;                                 // arbitrary (random) revocation ID
    uint32_t m_credentialId;                       // ID of credential being revoked (0 = all / COM)
    uint64_t m_networkId;                          // network this revocation applies to
    int64_t m_threshold;                           // credentials issued before this time are revoked
    uint64_t m_flags;                              // ZT_REVOCATION_FLAG_*
    Address m_target;                              // node whose credentials are revoked
    Address m_signedBy;                            // address of signing identity
    ZT_CredentialType m_type;                      // type of credential being revoked
    unsigned int m_signatureLength;
    uint8_t m_signature[ZT_SIGNATURE_BUFFER_SIZE];
};
} // namespace ZeroTier
#endif

View file

@ -1,260 +0,0 @@
// This code is public domain, taken from a PD crypto source file on GitHub.
#include "SHA512.hpp"
#include "Utils.hpp"
namespace ZeroTier {
#ifndef ZT_HAVE_NATIVE_SHA512
namespace {
// Running SHA-512/384 computation state.
struct sha512_state {
    uint64_t length, state[8];  // total message length in BITS; 8-word chaining value
    unsigned long curlen;       // number of bytes currently staged in buf
    uint8_t buf[128];           // partial input block (SHA-512 block size is 128 bytes)
};
static const uint64_t K[80] = { 0x428a2f98d728ae22ULL, 0x7137449123ef65cdULL, 0xb5c0fbcfec4d3b2fULL, 0xe9b5dba58189dbbcULL, 0x3956c25bf348b538ULL, 0x59f111f1b605d019ULL, 0x923f82a4af194f9bULL, 0xab1c5ed5da6d8118ULL, 0xd807aa98a3030242ULL, 0x12835b0145706fbeULL, 0x243185be4ee4b28cULL, 0x550c7dc3d5ffb4e2ULL, 0x72be5d74f27b896fULL, 0x80deb1fe3b1696b1ULL,
0x9bdc06a725c71235ULL, 0xc19bf174cf692694ULL, 0xe49b69c19ef14ad2ULL, 0xefbe4786384f25e3ULL, 0x0fc19dc68b8cd5b5ULL, 0x240ca1cc77ac9c65ULL, 0x2de92c6f592b0275ULL, 0x4a7484aa6ea6e483ULL, 0x5cb0a9dcbd41fbd4ULL, 0x76f988da831153b5ULL, 0x983e5152ee66dfabULL, 0xa831c66d2db43210ULL, 0xb00327c898fb213fULL, 0xbf597fc7beef0ee4ULL,
0xc6e00bf33da88fc2ULL, 0xd5a79147930aa725ULL, 0x06ca6351e003826fULL, 0x142929670a0e6e70ULL, 0x27b70a8546d22ffcULL, 0x2e1b21385c26c926ULL, 0x4d2c6dfc5ac42aedULL, 0x53380d139d95b3dfULL, 0x650a73548baf63deULL, 0x766a0abb3c77b2a8ULL, 0x81c2c92e47edaee6ULL, 0x92722c851482353bULL, 0xa2bfe8a14cf10364ULL, 0xa81a664bbc423001ULL,
0xc24b8b70d0f89791ULL, 0xc76c51a30654be30ULL, 0xd192e819d6ef5218ULL, 0xd69906245565a910ULL, 0xf40e35855771202aULL, 0x106aa07032bbd1b8ULL, 0x19a4c116b8d2d0c8ULL, 0x1e376c085141ab53ULL, 0x2748774cdf8eeb99ULL, 0x34b0bcb5e19b48a8ULL, 0x391c0cb3c5c95a63ULL, 0x4ed8aa4ae3418acbULL, 0x5b9cca4f7763e373ULL, 0x682e6ff3d6b2b8a3ULL,
0x748f82ee5defb2fcULL, 0x78a5636f43172f60ULL, 0x84c87814a1f0ab72ULL, 0x8cc702081a6439ecULL, 0x90befffa23631e28ULL, 0xa4506cebde82bde9ULL, 0xbef9a3f7b2c67915ULL, 0xc67178f2e372532bULL, 0xca273eceea26619cULL, 0xd186b8c721c0c207ULL, 0xeada7dd6cde0eb1eULL, 0xf57d4f7fee6ed178ULL, 0x06f067aa72176fbaULL, 0x0a637dc5a2c898a6ULL,
0x113f9804bef90daeULL, 0x1b710b35131c471bULL, 0x28db77f523047d84ULL, 0x32caab7b40c72493ULL, 0x3c9ebe0a15c9bebcULL, 0x431d67c49c100d4cULL, 0x4cc5d4becb3e42b6ULL, 0x597f299cfc657e2aULL, 0x5fcb6fab3ad6faecULL, 0x6c44198c4a475817ULL };
#define STORE64H(x, y) Utils::storeBigEndian<uint64_t>(y, x)
#define LOAD64H(x, y) x = Utils::loadBigEndian<uint64_t>(y)
#define ROL64c(x, y) (((x) << (y)) | ((x) >> (64 - (y))))
#define ROR64c(x, y) (((x) >> (y)) | ((x) << (64 - (y))))
#define Ch(x, y, z) (z ^ (x & (y ^ z)))
#define Maj(x, y, z) (((x | y) & z) | (x & y))
#define S(x, n) ROR64c(x, n)
#define R(x, n) ((x) >> (n))
#define Sigma0(x) (S(x, 28) ^ S(x, 34) ^ S(x, 39))
#define Sigma1(x) (S(x, 14) ^ S(x, 18) ^ S(x, 41))
#define Gamma0(x) (S(x, 1) ^ S(x, 8) ^ R(x, 7))
#define Gamma1(x) (S(x, 19) ^ S(x, 61) ^ R(x, 6))
// SHA-512 compression function: fold one 128-byte block into md->state.
static ZT_INLINE void sha512_compress(sha512_state *const md, uint8_t *const buf)
{
    uint64_t S[8], W[80], t0, t1;
    int i;
    // Copy chaining state into working variables.
    for (i = 0; i < 8; i++)
        S[i] = md->state[i];
    // Load the 16 big-endian message words, then expand to the 80-word schedule.
    for (i = 0; i < 16; i++)
        LOAD64H(W[i], buf + (8 * i));
    for (i = 16; i < 80; i++)
        W[i] = Gamma1(W[i - 2]) + W[i - 7] + Gamma0(W[i - 15]) + W[i - 16];
    // One round; successive invocations rotate the role of each working variable.
#define RND(a, b, c, d, e, f, g, h, i) \
    t0 = h + Sigma1(e) + Ch(e, f, g) + K[i] + W[i]; \
    t1 = Sigma0(a) + Maj(a, b, c); \
    d += t0; \
    h = t0 + t1;
    // 80 rounds, unrolled 8 at a time with rotated register assignments.
    for (i = 0; i < 80; i += 8) {
        RND(S[0], S[1], S[2], S[3], S[4], S[5], S[6], S[7], i + 0);
        RND(S[7], S[0], S[1], S[2], S[3], S[4], S[5], S[6], i + 1);
        RND(S[6], S[7], S[0], S[1], S[2], S[3], S[4], S[5], i + 2);
        RND(S[5], S[6], S[7], S[0], S[1], S[2], S[3], S[4], i + 3);
        RND(S[4], S[5], S[6], S[7], S[0], S[1], S[2], S[3], i + 4);
        RND(S[3], S[4], S[5], S[6], S[7], S[0], S[1], S[2], i + 5);
        RND(S[2], S[3], S[4], S[5], S[6], S[7], S[0], S[1], i + 6);
        RND(S[1], S[2], S[3], S[4], S[5], S[6], S[7], S[0], i + 7);
    }
    // Add the compressed chunk back into the chaining state (Davies-Meyer).
    for (i = 0; i < 8; i++)
        md->state[i] = md->state[i] + S[i];
}
// Reset state to the SHA-384 initial hash value (FIPS 180-4, section 5.3.4).
static ZT_INLINE void sha384_init(sha512_state *const md)
{
    static const uint64_t iv384[8] = {
        0xcbbb9d5dc1059ed8ULL, 0x629a292a367cd507ULL, 0x9159015a3070dd17ULL, 0x152fecd8f70e5939ULL,
        0x67332667ffc00b31ULL, 0x8eb44a8768581511ULL, 0xdb0c2e0d64f98fa7ULL, 0x47b5481dbefa4fa4ULL
    };
    md->curlen = 0;
    md->length = 0;
    for (int i = 0; i < 8; ++i)
        md->state[i] = iv384[i];
}
// Reset state to the SHA-512 initial hash value (FIPS 180-4, section 5.3.5).
static ZT_INLINE void sha512_init(sha512_state *const md)
{
    static const uint64_t iv512[8] = {
        0x6a09e667f3bcc908ULL, 0xbb67ae8584caa73bULL, 0x3c6ef372fe94f82bULL, 0xa54ff53a5f1d36f1ULL,
        0x510e527fade682d1ULL, 0x9b05688c2b3e6c1fULL, 0x1f83d9abfb41bd6bULL, 0x5be0cd19137e2179ULL
    };
    md->curlen = 0;
    md->length = 0;
    for (int i = 0; i < 8; ++i)
        md->state[i] = iv512[i];
}
// Absorb inlen input bytes. Full 128-byte blocks are compressed directly from
// the caller's buffer when nothing is staged; otherwise bytes are staged in
// md->buf and flushed whenever it fills.
static void sha512_process(sha512_state *const md, const uint8_t *in, unsigned long inlen)
{
    while (inlen > 0) {
        if ((md->curlen == 0) && (inlen >= 128)) {
            // Fast path: hash a whole block straight out of the input.
            sha512_compress(md, (uint8_t *)in);
            md->length += 128 * 8; // length is tracked in bits
            in += 128;
            inlen -= 128;
        } else {
            // Slow path: top up the staging buffer.
            const unsigned long take = std::min(inlen, (unsigned long)(128 - md->curlen));
            Utils::copy(md->buf + md->curlen, in, take);
            md->curlen += take;
            in += take;
            inlen -= take;
            if (md->curlen == 128) {
                sha512_compress(md, md->buf);
                md->length += 8 * 128;
                md->curlen = 0;
            }
        }
    }
}
// Finalize: apply Merkle-Damgard padding, hash the final block(s), and write
// the 64-byte digest to 'out'.
static ZT_INLINE void sha512_done(sha512_state *const md, uint8_t *out)
{
    int i;
    // Account for any bytes still staged, then append the mandatory 0x80 pad byte.
    md->length += md->curlen * 8ULL;
    md->buf[md->curlen++] = (uint8_t)0x80;
    // If there isn't room for the length field in this block, pad it out,
    // compress, and start a fresh block.
    if (md->curlen > 112) {
        while (md->curlen < 128) {
            md->buf[md->curlen++] = (uint8_t)0;
        }
        sha512_compress(md, md->buf);
        md->curlen = 0;
    }
    // Zero-pad up to the length field. Bytes 112-119 (the high 64 bits of the
    // 128-bit length field) remain zero; only the low 64 bits are stored.
    while (md->curlen < 120) {
        md->buf[md->curlen++] = (uint8_t)0;
    }
    STORE64H(md->length, md->buf + 120);
    sha512_compress(md, md->buf);
    // Emit the chaining value big-endian as the digest.
    for (i = 0; i < 8; i++) {
        STORE64H(md->state[i], out + (8 * i));
    }
}
} // anonymous namespace
void SHA512(void *digest, const void *data, unsigned int len)
{
sha512_state state;
sha512_init(&state);
sha512_process(&state, (uint8_t *)data, (unsigned long)len);
sha512_done(&state, (uint8_t *)digest);
}
void SHA384(void *digest, const void *data, unsigned int len)
{
uint8_t tmp[64];
sha512_state state;
sha384_init(&state);
sha512_process(&state, (uint8_t *)data, (unsigned long)len);
sha512_done(&state, tmp);
Utils::copy<48>(digest, tmp);
}
void SHA384(void *digest, const void *data0, unsigned int len0, const void *data1, unsigned int len1)
{
uint8_t tmp[64];
sha512_state state;
sha384_init(&state);
sha512_process(&state, (uint8_t *)data0, (unsigned long)len0);
sha512_process(&state, (uint8_t *)data1, (unsigned long)len1);
sha512_done(&state, tmp);
Utils::copy<48>(digest, tmp);
}
#endif // !ZT_HAVE_NATIVE_SHA512
// HMAC-SHA-384 with a fixed 48-byte key (RFC 2104 construction with the
// SHA-512-family 128-byte block size).
void HMACSHA384(const uint8_t key[ZT_SYMMETRIC_KEY_SIZE], const void *msg, const unsigned int msglen, uint8_t mac[48])
{
    uint64_t kInPadded[16]; // key XOR ipad, padded to one 128-byte hash block
    uint64_t outer[22];     // (key XOR opad) | H(key XOR ipad | msg)
    const uint64_t ipad = 0x3636363636363636ULL;
    const uint64_t opad = 0x5c5c5c5c5c5c5c5cULL;
    for (unsigned int i = 0; i < 16; ++i) {
        // The first six 64-bit words hold the 48-byte key; the remainder of
        // the block is pad bytes only (key word treated as zero).
        const uint64_t kw = (i < 6) ? Utils::loadMachineEndian<uint64_t>(key + (i * 8)) : 0ULL;
        kInPadded[i] = kw ^ ipad;
        outer[i] = kw ^ opad;
    }
    // Inner hash lands in bytes 128..175 of 'outer'; then hash the whole
    // 176-byte (opadded key | inner digest) buffer to produce the MAC.
    SHA384(reinterpret_cast<uint8_t *>(outer) + 128, kInPadded, 128, msg, msglen);
    SHA384(mac, outer, 176);
}
// Counter-mode key derivation (NIST SP 800-108 style) using HMAC-SHA-384 as
// the PRF. Message layout:
//   [iter (BE32)] 'Z' 'T' [label] 0x00 [context] [output bits = 0x180 (BE32)]
void KBKDFHMACSHA384(const uint8_t key[ZT_SYMMETRIC_KEY_SIZE], const char label, const char context, const uint32_t iter, uint8_t out[ZT_SYMMETRIC_KEY_SIZE])
{
    uint8_t msg[13];
    Utils::storeBigEndian<uint32_t>(msg, (uint32_t)iter);
    msg[4] = (uint8_t)'Z'; // ZT-specific prefix so labels can't collide with other uses
    msg[5] = (uint8_t)'T';
    msg[6] = (uint8_t)label;
    msg[7] = 0; // label/context separator
    msg[8] = (uint8_t)context;
    Utils::storeBigEndian<uint32_t>(msg + 9, 0x00000180); // output length: 384 bits
    static_assert(ZT_SYMMETRIC_KEY_SIZE == ZT_SHA384_DIGEST_SIZE, "sizeof(out) != ZT_SHA384_DIGEST_SIZE");
    HMACSHA384(key, msg, sizeof(msg), out);
}
} // namespace ZeroTier

View file

@ -1,88 +0,0 @@
/*
* Copyright (c)2013-2021 ZeroTier, Inc.
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
*/
/****/
#ifndef ZT_SHA512_HPP
#define ZT_SHA512_HPP
#include "Constants.hpp"
#ifdef __APPLE__
#include <CommonCrypto/CommonDigest.h>
#endif
#define ZT_SHA512_DIGEST_SIZE 64
#define ZT_SHA384_DIGEST_SIZE 48
#define ZT_SHA512_BLOCK_SIZE 128
#define ZT_SHA384_BLOCK_SIZE 128
#define ZT_HMACSHA384_LEN 48
namespace ZeroTier {
// SHA384 and SHA512 are actually in the standard libraries on MacOS and iOS
#ifdef __APPLE__
#define ZT_HAVE_NATIVE_SHA512 1
// SHA-512 via Apple CommonCrypto.
static ZT_INLINE void SHA512(void *digest, const void *data, unsigned int len)
{
    CC_SHA512_CTX sha;
    CC_SHA512_Init(&sha);
    CC_SHA512_Update(&sha, data, len);
    CC_SHA512_Final(reinterpret_cast<unsigned char *>(digest), &sha);
}
// SHA-384 via Apple CommonCrypto (SHA-384 shares the SHA-512 context type).
static ZT_INLINE void SHA384(void *digest, const void *data, unsigned int len)
{
    CC_SHA512_CTX sha;
    CC_SHA384_Init(&sha);
    CC_SHA384_Update(&sha, data, len);
    CC_SHA384_Final(reinterpret_cast<unsigned char *>(digest), &sha);
}
// SHA-384 over the concatenation of two buffers, via Apple CommonCrypto.
static ZT_INLINE void SHA384(void *digest, const void *data0, unsigned int len0, const void *data1, unsigned int len1)
{
    CC_SHA512_CTX sha;
    CC_SHA384_Init(&sha);
    CC_SHA384_Update(&sha, data0, len0);
    CC_SHA384_Update(&sha, data1, len1);
    CC_SHA384_Final(reinterpret_cast<unsigned char *>(digest), &sha);
}
#endif
#ifndef ZT_HAVE_NATIVE_SHA512
void SHA512(void *digest, const void *data, unsigned int len);
void SHA384(void *digest, const void *data, unsigned int len);
void SHA384(void *digest, const void *data0, unsigned int len0, const void *data1, unsigned int len1);
#endif
/**
* Compute HMAC SHA-384 using a 256-bit key
*
* @param key Secret key
* @param msg Message to HMAC
* @param msglen Length of message
* @param mac Buffer to fill with result
*/
void HMACSHA384(const uint8_t key[ZT_SYMMETRIC_KEY_SIZE], const void *msg, unsigned int msglen, uint8_t mac[48]);
/**
* Compute KBKDF (key-based key derivation function) using HMAC-SHA-384 as a PRF
*
* @param key Source master key
* @param label A label indicating the key's purpose in the ZeroTier system
* @param context An arbitrary "context" or zero if not applicable
* @param iter Key iteration for generation of multiple keys for the same label/context
* @param out Output to receive derived key
*/
void KBKDFHMACSHA384(const uint8_t key[ZT_SYMMETRIC_KEY_SIZE], char label, char context, uint32_t iter, uint8_t out[ZT_SYMMETRIC_KEY_SIZE]);
} // namespace ZeroTier
#endif

View file

@ -1,334 +0,0 @@
/*
* Based on public domain code available at: http://cr.yp.to/snuffle.html
*
* Modifications and C-native SSE macro based SSE implementation by
* Adam Ierymenko <adam.ierymenko@zerotier.com>.
*
* Since the original was public domain, this is too.
*/
#include "Salsa20.hpp"
#include "Constants.hpp"
#define ROTATE(v, c) (((v) << (c)) | ((v) >> (32 - (c))))
#define XOR(v, w) ((v) ^ (w))
#define PLUS(v, w) ((uint32_t)((v) + (w)))
#ifndef ZT_SALSA20_SSE
#if __BYTE_ORDER == __LITTLE_ENDIAN
#ifdef ZT_NO_UNALIGNED_ACCESS
// Slower version that does not use type punning
#define U8TO32_LITTLE(p) (((uint32_t)(p)[0]) | ((uint32_t)(p)[1] << 8) | ((uint32_t)(p)[2] << 16) | ((uint32_t)(p)[3] << 24))
static ZT_INLINE void U32TO8_LITTLE(uint8_t *const c, const uint32_t v)
{
c[0] = (uint8_t)v;
c[1] = (uint8_t)(v >> 8);
c[2] = (uint8_t)(v >> 16);
c[3] = (uint8_t)(v >> 24);
}
#else
// Fast version that just does 32-bit load/store
#define U8TO32_LITTLE(p) (*((const uint32_t *)((const void *)(p))))
#define U32TO8_LITTLE(c, v) *((uint32_t *)((void *)(c))) = (v)
#endif // ZT_NO_UNALIGNED_ACCESS
#else // __BYTE_ORDER == __BIG_ENDIAN (we don't support anything else... does MIDDLE_ENDIAN even still exist?)
#ifdef __GNUC__
// Use GNUC builtin bswap macros on big-endian machines if available
#define U8TO32_LITTLE(p) __builtin_bswap32(*((const uint32_t *)((const void *)(p))))
#define U32TO8_LITTLE(c, v) *((uint32_t *)((void *)(c))) = __builtin_bswap32((v))
#else // no __GNUC__
// Otherwise do it the slow, manual way on BE machines
#define U8TO32_LITTLE(p) (((uint32_t)(p)[0]) | ((uint32_t)(p)[1] << 8) | ((uint32_t)(p)[2] << 16) | ((uint32_t)(p)[3] << 24))
static ZT_INLINE void U32TO8_LITTLE(uint8_t *const c, const uint32_t v)
{
c[0] = (uint8_t)v;
c[1] = (uint8_t)(v >> 8);
c[2] = (uint8_t)(v >> 16);
c[3] = (uint8_t)(v >> 24);
}
#endif // __GNUC__ or not
#endif // __BYTE_ORDER little or big?
#endif // !ZT_SALSA20_SSE
#ifdef ZT_SALSA20_SSE
// Precomputed SSE masks, built once at static-initialization time:
// maskLo32 selects the low 32 bits of each 64-bit lane, maskHi32 the high 32.
class _s20sseconsts {
  public:
    _s20sseconsts() noexcept
    {
        // {-1,0,-1,0} as 32-bit lanes == low-half mask of each 64-bit lane.
        maskLo32 = _mm_shuffle_epi32(_mm_cvtsi32_si128(-1), _MM_SHUFFLE(1, 0, 1, 0));
        maskHi32 = _mm_slli_epi64(maskLo32, 32);
    }
    __m128i maskLo32, maskHi32;
};
static const _s20sseconsts s_S20SSECONSTANTS;
#endif
namespace ZeroTier {
// Initialize Salsa20 state from a 256-bit key and 64-bit IV.
void Salsa20::init(const void *key, const void *iv) noexcept
{
#ifdef ZT_SALSA20_SSE
    // SSE kernel: state words are stored in a permuted order expected by the
    // vectorized round function (not the canonical Salsa20 word order).
    // Words 0-3 are the "expand 32-byte k" constants as little-endian uint32s.
    const uint32_t *const k = (const uint32_t *)key;
    _state.i[0] = 0x61707865;
    _state.i[1] = 0x3320646e;
    _state.i[2] = 0x79622d32;
    _state.i[3] = 0x6b206574;
    _state.i[4] = k[3];
    _state.i[5] = 0; // block counter low (starts at 0)
    _state.i[6] = k[7];
    _state.i[7] = k[2];
    _state.i[8] = 0; // block counter high
    _state.i[9] = k[6];
    _state.i[10] = k[1];
    _state.i[11] = ((const uint32_t *)iv)[1];
    _state.i[12] = k[5];
    _state.i[13] = k[0];
    _state.i[14] = ((const uint32_t *)iv)[0];
    _state.i[15] = k[4];
#else
    // Portable kernel: canonical Salsa20 layout -- constants at words
    // 0/5/10/15, key halves at 1-4 and 11-14, IV at 6-7, counter at 8-9.
    const char *const constants = "expand 32-byte k";
    const uint8_t *const k = (const uint8_t *)key;
    _state.i[0] = U8TO32_LITTLE(constants + 0);
    _state.i[1] = U8TO32_LITTLE(k + 0);
    _state.i[2] = U8TO32_LITTLE(k + 4);
    _state.i[3] = U8TO32_LITTLE(k + 8);
    _state.i[4] = U8TO32_LITTLE(k + 12);
    _state.i[5] = U8TO32_LITTLE(constants + 4);
    _state.i[6] = U8TO32_LITTLE(((const uint8_t *)iv) + 0);
    _state.i[7] = U8TO32_LITTLE(((const uint8_t *)iv) + 4);
    _state.i[8] = 0; // 64-bit block counter, starts at 0
    _state.i[9] = 0;
    _state.i[10] = U8TO32_LITTLE(constants + 8);
    _state.i[11] = U8TO32_LITTLE(k + 16);
    _state.i[12] = U8TO32_LITTLE(k + 20);
    _state.i[13] = U8TO32_LITTLE(k + 24);
    _state.i[14] = U8TO32_LITTLE(k + 28);
    _state.i[15] = U8TO32_LITTLE(constants + 12);
#endif
}
// Salsa20 state viewed either as four 128-bit SSE vectors (SSE builds)
// or as sixteen 32-bit words (portable builds).
union p_SalsaState {
#ifdef ZT_SALSA20_SSE
    __m128i v[4];
#endif // ZT_SALSA20_SSE
    uint32_t i[16];
};
template <unsigned int R> static ZT_INLINE void p_salsaCrypt(p_SalsaState *const state, const uint8_t *m, uint8_t *c, unsigned int bytes) noexcept
{
if (unlikely(bytes == 0))
return;
uint8_t tmp[64];
uint8_t *ctarget = c;
#ifdef ZT_SALSA20_SSE
__m128i X0 = state->v[0];
__m128i X1 = state->v[1];
__m128i X2 = state->v[2];
__m128i X3 = state->v[3];
const __m128i maskLo32 = s_S20SSECONSTANTS.maskLo32;
const __m128i maskHi32 = s_S20SSECONSTANTS.maskHi32;
const __m128i add1 = _mm_set_epi32(0, 0, 0, 1);
#else
uint32_t x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15;
uint32_t j0, j1, j2, j3, j4, j5, j6, j7, j8, j9, j10, j11, j12, j13, j14, j15;
j0 = state->i[0];
j1 = state->i[1];
j2 = state->i[2];
j3 = state->i[3];
j4 = state->i[4];
j5 = state->i[5];
j6 = state->i[6];
j7 = state->i[7];
j8 = state->i[8];
j9 = state->i[9];
j10 = state->i[10];
j11 = state->i[11];
j12 = state->i[12];
j13 = state->i[13];
j14 = state->i[14];
j15 = state->i[15];
#endif
for (;;) {
if (unlikely(bytes < 64)) {
for (unsigned int i = 0; i < bytes; ++i)
tmp[i] = m[i];
m = tmp;
ctarget = c;
c = tmp;
}
#ifdef ZT_SALSA20_SSE
__m128i X0s = X0;
__m128i X1s = X1;
__m128i X2s = X2;
__m128i X3s = X3;
__m128i T;
for (unsigned int rr = 0; rr < (R / 2); ++rr) {
T = _mm_add_epi32(X0, X3);
X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
T = _mm_add_epi32(X1, X0);
X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
T = _mm_add_epi32(X2, X1);
X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
T = _mm_add_epi32(X3, X2);
X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
X1 = _mm_shuffle_epi32(X1, 0x93);
X2 = _mm_shuffle_epi32(X2, 0x4E);
X3 = _mm_shuffle_epi32(X3, 0x39);
T = _mm_add_epi32(X0, X1);
X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
T = _mm_add_epi32(X3, X0);
X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
T = _mm_add_epi32(X2, X3);
X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
T = _mm_add_epi32(X1, X2);
X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
X1 = _mm_shuffle_epi32(X1, 0x39);
X2 = _mm_shuffle_epi32(X2, 0x4E);
X3 = _mm_shuffle_epi32(X3, 0x93);
}
X0 = _mm_add_epi32(X0s, X0);
X1 = _mm_add_epi32(X1s, X1);
X2 = _mm_add_epi32(X2s, X2);
X3 = _mm_add_epi32(X3s, X3);
__m128i k02 = _mm_or_si128(_mm_slli_epi64(X0, 32), _mm_srli_epi64(X3, 32));
__m128i k20 = _mm_or_si128(_mm_and_si128(X2, maskLo32), _mm_and_si128(X1, maskHi32));
__m128i k13 = _mm_or_si128(_mm_slli_epi64(X1, 32), _mm_srli_epi64(X0, 32));
__m128i k31 = _mm_or_si128(_mm_and_si128(X3, maskLo32), _mm_and_si128(X2, maskHi32));
k02 = _mm_shuffle_epi32(k02, _MM_SHUFFLE(0, 1, 2, 3));
k13 = _mm_shuffle_epi32(k13, _MM_SHUFFLE(0, 1, 2, 3));
_mm_storeu_si128(reinterpret_cast<__m128i *>(c), _mm_xor_si128(_mm_unpackhi_epi64(k02, k20), _mm_loadu_si128(reinterpret_cast<const __m128i *>(m))));
_mm_storeu_si128(reinterpret_cast<__m128i *>(c) + 1, _mm_xor_si128(_mm_unpackhi_epi64(k13, k31), _mm_loadu_si128(reinterpret_cast<const __m128i *>(m) + 1)));
_mm_storeu_si128(reinterpret_cast<__m128i *>(c) + 2, _mm_xor_si128(_mm_unpacklo_epi64(k20, k02), _mm_loadu_si128(reinterpret_cast<const __m128i *>(m) + 2)));
_mm_storeu_si128(reinterpret_cast<__m128i *>(c) + 3, _mm_xor_si128(_mm_unpacklo_epi64(k31, k13), _mm_loadu_si128(reinterpret_cast<const __m128i *>(m) + 3)));
X0 = X0s;
X1 = X1s;
X2 = _mm_add_epi32(X2s, add1);
X3 = X3s;
#else
x0 = j0;
x1 = j1;
x2 = j2;
x3 = j3;
x4 = j4;
x5 = j5;
x6 = j6;
x7 = j7;
x8 = j8;
x9 = j9;
x10 = j10;
x11 = j11;
x12 = j12;
x13 = j13;
x14 = j14;
x15 = j15;
for (unsigned int rr = 0; rr < (R / 2); ++rr) {
x4 = XOR(x4, ROTATE(PLUS(x0, x12), 7));
x8 = XOR(x8, ROTATE(PLUS(x4, x0), 9));
x12 = XOR(x12, ROTATE(PLUS(x8, x4), 13));
x0 = XOR(x0, ROTATE(PLUS(x12, x8), 18));
x9 = XOR(x9, ROTATE(PLUS(x5, x1), 7));
x13 = XOR(x13, ROTATE(PLUS(x9, x5), 9));
x1 = XOR(x1, ROTATE(PLUS(x13, x9), 13));
x5 = XOR(x5, ROTATE(PLUS(x1, x13), 18));
x14 = XOR(x14, ROTATE(PLUS(x10, x6), 7));
x2 = XOR(x2, ROTATE(PLUS(x14, x10), 9));
x6 = XOR(x6, ROTATE(PLUS(x2, x14), 13));
x10 = XOR(x10, ROTATE(PLUS(x6, x2), 18));
x3 = XOR(x3, ROTATE(PLUS(x15, x11), 7));
x7 = XOR(x7, ROTATE(PLUS(x3, x15), 9));
x11 = XOR(x11, ROTATE(PLUS(x7, x3), 13));
x15 = XOR(x15, ROTATE(PLUS(x11, x7), 18));
x1 = XOR(x1, ROTATE(PLUS(x0, x3), 7));
x2 = XOR(x2, ROTATE(PLUS(x1, x0), 9));
x3 = XOR(x3, ROTATE(PLUS(x2, x1), 13));
x0 = XOR(x0, ROTATE(PLUS(x3, x2), 18));
x6 = XOR(x6, ROTATE(PLUS(x5, x4), 7));
x7 = XOR(x7, ROTATE(PLUS(x6, x5), 9));
x4 = XOR(x4, ROTATE(PLUS(x7, x6), 13));
x5 = XOR(x5, ROTATE(PLUS(x4, x7), 18));
x11 = XOR(x11, ROTATE(PLUS(x10, x9), 7));
x8 = XOR(x8, ROTATE(PLUS(x11, x10), 9));
x9 = XOR(x9, ROTATE(PLUS(x8, x11), 13));
x10 = XOR(x10, ROTATE(PLUS(x9, x8), 18));
x12 = XOR(x12, ROTATE(PLUS(x15, x14), 7));
x13 = XOR(x13, ROTATE(PLUS(x12, x15), 9));
x14 = XOR(x14, ROTATE(PLUS(x13, x12), 13));
x15 = XOR(x15, ROTATE(PLUS(x14, x13), 18));
}
x0 = PLUS(x0, j0);
x1 = PLUS(x1, j1);
x2 = PLUS(x2, j2);
x3 = PLUS(x3, j3);
x4 = PLUS(x4, j4);
x5 = PLUS(x5, j5);
x6 = PLUS(x6, j6);
x7 = PLUS(x7, j7);
x8 = PLUS(x8, j8);
x9 = PLUS(x9, j9);
x10 = PLUS(x10, j10);
x11 = PLUS(x11, j11);
x12 = PLUS(x12, j12);
x13 = PLUS(x13, j13);
x14 = PLUS(x14, j14);
x15 = PLUS(x15, j15);
U32TO8_LITTLE(c + 0, XOR(x0, U8TO32_LITTLE(m + 0)));
U32TO8_LITTLE(c + 4, XOR(x1, U8TO32_LITTLE(m + 4)));
U32TO8_LITTLE(c + 8, XOR(x2, U8TO32_LITTLE(m + 8)));
U32TO8_LITTLE(c + 12, XOR(x3, U8TO32_LITTLE(m + 12)));
U32TO8_LITTLE(c + 16, XOR(x4, U8TO32_LITTLE(m + 16)));
U32TO8_LITTLE(c + 20, XOR(x5, U8TO32_LITTLE(m + 20)));
U32TO8_LITTLE(c + 24, XOR(x6, U8TO32_LITTLE(m + 24)));
U32TO8_LITTLE(c + 28, XOR(x7, U8TO32_LITTLE(m + 28)));
U32TO8_LITTLE(c + 32, XOR(x8, U8TO32_LITTLE(m + 32)));
U32TO8_LITTLE(c + 36, XOR(x9, U8TO32_LITTLE(m + 36)));
U32TO8_LITTLE(c + 40, XOR(x10, U8TO32_LITTLE(m + 40)));
U32TO8_LITTLE(c + 44, XOR(x11, U8TO32_LITTLE(m + 44)));
U32TO8_LITTLE(c + 48, XOR(x12, U8TO32_LITTLE(m + 48)));
U32TO8_LITTLE(c + 52, XOR(x13, U8TO32_LITTLE(m + 52)));
U32TO8_LITTLE(c + 56, XOR(x14, U8TO32_LITTLE(m + 56)));
U32TO8_LITTLE(c + 60, XOR(x15, U8TO32_LITTLE(m + 60)));
++j8;
#endif
if (likely(bytes > 64)) {
bytes -= 64;
c += 64;
m += 64;
}
else {
if (bytes < 64) {
for (unsigned int i = 0; i < bytes; ++i)
ctarget[i] = c[i];
}
#ifdef ZT_SALSA20_SSE
state->v[2] = X2;
#else
state->i[8] = j8;
#endif
return;
}
}
}
// Encrypt/decrypt using the Salsa20/12 (reduced round) variant. This just
// forwards to the shared templated core with the round count fixed at 12.
void Salsa20::crypt12(const void *in, void *out, unsigned int bytes) noexcept
{
    p_salsaCrypt<12>(reinterpret_cast<p_SalsaState *>(&_state), reinterpret_cast<const uint8_t *>(in), reinterpret_cast<uint8_t *>(out), bytes);
}
// Encrypt/decrypt using the full Salsa20/20 variant. This just forwards to
// the shared templated core with the round count fixed at 20.
void Salsa20::crypt20(const void *in, void *out, unsigned int bytes) noexcept
{
    p_salsaCrypt<20>(reinterpret_cast<p_SalsaState *>(&_state), reinterpret_cast<const uint8_t *>(in), reinterpret_cast<uint8_t *>(out), bytes);
}
} // namespace ZeroTier

View file

@ -1,95 +0,0 @@
/*
* Copyright (c)2013-2021 ZeroTier, Inc.
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
*/
/****/
#ifndef ZT_SALSA20_HPP
#define ZT_SALSA20_HPP
#include "Constants.hpp"
#include "TriviallyCopyable.hpp"
#include "Utils.hpp"
#ifdef ZT_ARCH_X64
#define ZT_SALSA20_SSE 1
#endif
#define ZT_SALSA20_KEY_SIZE 32
namespace ZeroTier {
/**
 * Salsa20 stream cipher
 *
 * This supports both the 12-round and 20-round variants.
 *
 * SECURITY: this code only supports up to 2^32 bytes per key. This is
 * a minor optimization done here because ZeroTier messages are
 * nowhere near this large.
 */
class Salsa20 : public TriviallyCopyable {
public:
#ifdef ZT_SALSA20_SSE
// True when this build uses the SSE-accelerated implementation.
static constexpr bool accelerated() noexcept { return true; }
#else
static constexpr bool accelerated() noexcept { return false; }
#endif
ZT_INLINE Salsa20() noexcept {}
// Wipe key material from memory on destruction.
ZT_INLINE ~Salsa20() noexcept { Utils::burn(&_state, sizeof(_state)); }
/**
 * @param key 256-bit (32 byte) key
 * @param iv 64-bit initialization vector
 */
ZT_INLINE Salsa20(const void *key, const void *iv) noexcept { init(key, iv); }
/**
 * Initialize cipher
 *
 * @param key Key bits
 * @param iv 64-bit initialization vector
 */
void init(const void *key, const void *iv) noexcept;
/**
 * Encrypt/decrypt data using Salsa20/12
 *
 * @param in Input data
 * @param out Output buffer
 * @param bytes Length of data
 */
void crypt12(const void *in, void *out, unsigned int bytes) noexcept;
/**
 * Encrypt/decrypt data using Salsa20/20
 *
 * @param in Input data
 * @param out Output buffer
 * @param bytes Length of data
 */
void crypt20(const void *in, void *out, unsigned int bytes) noexcept;
private:
// Cipher state: viewed as four 128-bit vectors when SSE is enabled, or as
// sixteen 32-bit words in the portable implementation.
union {
#ifdef ZT_SALSA20_SSE
__m128i v[4];
#endif // ZT_SALSA20_SSE
uint32_t i[16];
} _state;
};
} // namespace ZeroTier
#endif

View file

@ -1,72 +0,0 @@
/*
* Copyright (c)2013-2021 ZeroTier, Inc.
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
*/
/****/
#ifndef ZT_SCOPEDPTR_HPP
#define ZT_SCOPEDPTR_HPP
#include "Constants.hpp"
#include "TriviallyCopyable.hpp"
namespace ZeroTier {
/**
 * Simple scoped pointer (a minimal unique-ownership smart pointer).
 *
 * Owns a heap-allocated object and deletes it when the scope exits.
 * Not copyable; ownership may only be exchanged via swap().
 */
template <typename T> class ScopedPtr : public TriviallyCopyable {
  public:
    explicit ZT_INLINE ScopedPtr(T *const p) noexcept : m_ptr(p) {}
    ZT_INLINE ~ScopedPtr() { delete m_ptr; }

    ZT_INLINE T *operator->() const noexcept { return m_ptr; }
    ZT_INLINE T &operator*() const noexcept { return *m_ptr; }

    /** @return Raw held pointer (ownership is retained by this object) */
    ZT_INLINE T *ptr() const noexcept { return m_ptr; }

    /**
     * Exchange held pointers with another ScopedPtr.
     *
     * Fix: the parameter was previously 'const ScopedPtr &' and the member
     * 'T *const', so both sides of the exchange wrote through const and the
     * template could not compile when instantiated. Swap mutates both
     * objects, so both must be non-const.
     */
    ZT_INLINE void swap(ScopedPtr &p) noexcept
    {
        T *const tmp = m_ptr;
        m_ptr = p.m_ptr;
        p.m_ptr = tmp;
    }

    explicit ZT_INLINE operator bool() const noexcept { return (m_ptr != nullptr); }

    ZT_INLINE bool operator==(const ScopedPtr &p) const noexcept { return (m_ptr == p.m_ptr); }
    ZT_INLINE bool operator!=(const ScopedPtr &p) const noexcept { return (m_ptr != p.m_ptr); }
    ZT_INLINE bool operator==(T *const p) const noexcept { return (m_ptr == p); }
    ZT_INLINE bool operator!=(T *const p) const noexcept { return (m_ptr != p); }

  private:
    // Not default constructible, copyable, or copy assignable: a ScopedPtr
    // always owns exactly one pointer for its whole lifetime.
    ScopedPtr() = delete;
    ScopedPtr(const ScopedPtr &) = delete;
    ScopedPtr &operator=(const ScopedPtr &) = delete;

    T *m_ptr; // owned object; deleted in destructor (null only if constructed with null)
};
} // namespace ZeroTier
// Let std::swap use ScopedPtr's cheap pointer-exchange swap() via overload resolution.
namespace std {
template <typename T> ZT_INLINE void swap(ZeroTier::ScopedPtr<T> &a, ZeroTier::ScopedPtr<T> &b) noexcept { a.swap(b); }
} // namespace std
#endif

View file

@ -1,104 +0,0 @@
/*
* Copyright (c)2013-2021 ZeroTier, Inc.
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
*/
/****/
#include "SelfAwareness.hpp"
#include "Constants.hpp"
#include "Containers.hpp"
#include "Context.hpp"
#include "Peer.hpp"
#include "Topology.hpp"
#include "Trace.hpp"
// Entry timeout -- make it fairly long since this is just to prevent stale buildup
#define ZT_SELFAWARENESS_ENTRY_TIMEOUT 300000
namespace ZeroTier {
// Holds only a reference to the runtime context; the surface-report map starts empty.
SelfAwareness::SelfAwareness(const Context &ctx) : m_ctx(ctx) {}
// Record an external-address report from a peer. If a trusted peer reports a
// changed surface within a scope, all paths in that scope are reset.
void SelfAwareness::iam(const CallContext &cc, const Identity &reporter, const int64_t receivedOnLocalSocket, const InetAddress &reporterPhysicalAddress, const InetAddress &myPhysicalAddress, bool trusted)
{
const InetAddress::IpScope scope = myPhysicalAddress.ipScope();
// Ignore reports where the claimed address is in a different scope than the
// reporter's own address, or in a scope that can never be an external surface.
if ((scope != reporterPhysicalAddress.ipScope()) || (scope == ZT_IP_SCOPE_NONE) || (scope == ZT_IP_SCOPE_LOOPBACK) || (scope == ZT_IP_SCOPE_MULTICAST))
return;
Mutex::Lock l(m_phy_l);
// operator[] creates a default (zero-timestamp, untrusted) entry if absent.
p_PhySurfaceEntry &entry = m_phy[p_PhySurfaceKey(reporter.address(), receivedOnLocalSocket, reporterPhysicalAddress, scope)];
if ((trusted) && ((cc.ticks - entry.timestampTicks) < ZT_SELFAWARENESS_ENTRY_TIMEOUT) && (!entry.mySurface.ipsEqual(myPhysicalAddress))) {
// Changes to external surface reported by trusted peers causes path reset in this scope
entry.mySurface = myPhysicalAddress;
entry.timestampTicks = cc.ticks;
entry.trusted = trusted;
// Erase all entries in this scope that were not reported from this remote address to prevent 'thrashing'
// due to multiple reports of endpoint change.
// Don't use 'entry' after this since hash table gets modified.
for (Map<p_PhySurfaceKey, p_PhySurfaceEntry>::iterator i(m_phy.begin()); i != m_phy.end();) {
if ((i->first.scope == scope) && (i->first.reporterPhysicalAddress != reporterPhysicalAddress))
m_phy.erase(i++);
else
++i;
}
// Reset all paths within this scope and address family
Vector<SharedPtr<Peer>> peers, rootPeers;
m_ctx.topology->allPeers(peers, rootPeers);
for (Vector<SharedPtr<Peer>>::const_iterator p(peers.begin()); p != peers.end(); ++p)
(*p)->resetWithinScope(m_ctx, cc, (InetAddress::IpScope)scope, myPhysicalAddress.as.sa.sa_family);
// NOTE(review): 'entry' is used here despite the warning above. This is safe
// only if Map::erase leaves references to the surviving elements valid (true
// for node-based std maps) -- confirm Map's erase semantics. At this point
// entry.mySurface has already been set equal to myPhysicalAddress.
m_ctx.t->resettingPathsInScope(cc, 0x9afff100, reporter, reporterPhysicalAddress, entry.mySurface, myPhysicalAddress, scope);
}
else {
// Otherwise just update DB to use to determine external surface info
entry.mySurface = myPhysicalAddress;
entry.timestampTicks = cc.ticks;
entry.trusted = trusted;
}
}
void SelfAwareness::clean(const CallContext &cc)
{
Mutex::Lock l(m_phy_l);
for (Map<p_PhySurfaceKey, p_PhySurfaceEntry>::iterator i(m_phy.begin()); i != m_phy.end();) {
if ((cc.ticks - i->second.timestampTicks) >= ZT_SELFAWARENESS_ENTRY_TIMEOUT)
m_phy.erase(i++);
else
++i;
}
}
// Compute the external-address consensus: how many live surface entries
// reported each external IP/port, returned as a count -> address multimap.
MultiMap<unsigned int, InetAddress> SelfAwareness::externalAddresses(CallContext &cc) const
{
    // Tally how many endpoints reported each IP/port combo (non-expired only).
    Map<InetAddress, unsigned long> surfaceCounts;
    {
        Mutex::Lock l(m_phy_l);
        for (const auto &phy : m_phy) {
            if ((cc.ticks - phy.second.timestampTicks) < ZT_SELFAWARENESS_ENTRY_TIMEOUT)
                ++surfaceCounts[phy.second.mySurface];
        }
    }

    // Invert the tally so callers can read off the statistical "mode".
    MultiMap<unsigned int, InetAddress> result;
    for (const auto &sc : surfaceCounts)
        result.insert(std::pair<unsigned long, InetAddress>(sc.second, sc.first));
    return result;
}
} // namespace ZeroTier

View file

@ -1,118 +0,0 @@
/*
* Copyright (c)2013-2021 ZeroTier, Inc.
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
*/
/****/
#ifndef ZT_SELFAWARENESS_HPP
#define ZT_SELFAWARENESS_HPP
#include "Address.hpp"
#include "CallContext.hpp"
#include "Constants.hpp"
#include "Containers.hpp"
#include "InetAddress.hpp"
#include "Mutex.hpp"
namespace ZeroTier {
class Identity;
class Context;
/**
 * SelfAwareness manages awareness of this peer's external address(es) and NAT situation.
 *
 * Name aside, it shouldn't be capable of achieving sentience.
 */
class SelfAwareness {
public:
explicit SelfAwareness(const Context &ctx);
/**
 * Called when a remote peer informs us of our external network address
 *
 * @param reporter Identity of reporting peer
 * @param receivedOnLocalSocket Local socket on which report was received
 * @param reporterPhysicalAddress Physical address that reporting peer seems to have
 * @param myPhysicalAddress Physical address that peer says we have
 * @param trusted True if this peer is trusted as an authority to inform us of external address changes
 */
void iam(const CallContext &cc, const Identity &reporter, int64_t receivedOnLocalSocket, const InetAddress &reporterPhysicalAddress, const InetAddress &myPhysicalAddress, bool trusted);
/**
 * Clean up database periodically
 */
void clean(const CallContext &cc);
/**
 * Get external address consensus, which is the statistical "mode" of external addresses.
 *
 * @return Map of count to IP/port representing how many endpoints reported each address
 */
MultiMap<unsigned int, InetAddress> externalAddresses(CallContext &cc) const;
private:
// Key identifying one surface report: (reporter, local socket, reporter address, scope).
struct p_PhySurfaceKey {
Address reporter;
int64_t receivedOnLocalSocket;
InetAddress reporterPhysicalAddress;
InetAddress::IpScope scope;
ZT_INLINE p_PhySurfaceKey() noexcept {}
ZT_INLINE
p_PhySurfaceKey(const Address &r, const int64_t rol, const InetAddress &ra, InetAddress::IpScope s) noexcept : reporter(r), receivedOnLocalSocket(rol), reporterPhysicalAddress(ra), scope(s) {}
// Note: reporterPhysicalAddress is not mixed into the hash; collisions are
// acceptable because operator== still compares all four fields.
ZT_INLINE unsigned long hashCode() const noexcept { return ((unsigned long)reporter.toInt() + (unsigned long)receivedOnLocalSocket + (unsigned long)scope); }
ZT_INLINE bool operator==(const p_PhySurfaceKey &k) const noexcept { return ((reporter == k.reporter) && (receivedOnLocalSocket == k.receivedOnLocalSocket) && (reporterPhysicalAddress == k.reporterPhysicalAddress) && (scope == k.scope)); }
ZT_INLINE bool operator!=(const p_PhySurfaceKey &k) const noexcept { return (!(*this == k)); }
// Lexicographic ordering over (reporter, socket, reporter address, scope) for ordered maps.
ZT_INLINE bool operator<(const p_PhySurfaceKey &k) const noexcept
{
if (reporter < k.reporter) {
return true;
}
else if (reporter == k.reporter) {
if (receivedOnLocalSocket < k.receivedOnLocalSocket) {
return true;
}
else if (receivedOnLocalSocket == k.receivedOnLocalSocket) {
if (reporterPhysicalAddress < k.reporterPhysicalAddress) {
return true;
}
else if (reporterPhysicalAddress == k.reporterPhysicalAddress) {
return scope < k.scope;
}
}
}
return false;
}
};
// What a reporter said our surface was, when (in ticks), and whether the report was trusted.
struct p_PhySurfaceEntry {
InetAddress mySurface;
int64_t timestampTicks;
bool trusted;
ZT_INLINE p_PhySurfaceEntry() noexcept : mySurface(), timestampTicks(0), trusted(false) {}
ZT_INLINE p_PhySurfaceEntry(const InetAddress &a, const int64_t t) noexcept : mySurface(a), timestampTicks(t), trusted(false) {}
};
const Context &m_ctx;
Map<p_PhySurfaceKey, p_PhySurfaceEntry> m_phy; // surface report database
Mutex m_phy_l; // guards m_phy
};
} // namespace ZeroTier
#endif

View file

@ -1,182 +0,0 @@
/*
* Copyright (c)2013-2021 ZeroTier, Inc.
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
*/
/****/
#ifndef ZT_SHAREDPTR_HPP
#define ZT_SHAREDPTR_HPP
#include "Constants.hpp"
#include "TriviallyCopyable.hpp"
namespace ZeroTier {
/**
 * An intrusive reference counted pointer.
 *
 * Classes must have an atomic<int> field called __refCount and set this class
 * as a friend to be used with this.
 */
template <typename T> class SharedPtr : public TriviallyCopyable {
  public:
    ZT_INLINE SharedPtr() noexcept : m_ptr(nullptr) {}

    explicit ZT_INLINE SharedPtr(T *obj) noexcept : m_ptr(obj)
    {
        if (likely(obj != nullptr))
            const_cast<std::atomic<int> *>(&(obj->__refCount))->fetch_add(1, std::memory_order_acquire);
    }

    ZT_INLINE SharedPtr(const SharedPtr &sp) noexcept : m_ptr(sp.m_acquire()) {}

    ZT_INLINE ~SharedPtr() { m_release(); }

    ZT_INLINE SharedPtr &operator=(const SharedPtr &sp)
    {
        if (likely(m_ptr != sp.m_ptr)) {
            T *const p = sp.m_acquire();
            m_release();
            m_ptr = p;
        }
        return *this;
    }

    /**
     * Replace the held pointer, releasing the old object and referencing the new.
     *
     * @param ptr New pointer to hold (may be NULL)
     */
    ZT_INLINE void set(T *ptr) noexcept
    {
        m_release();
        m_ptr = ptr;
        // Fix: guard against NULL like the pointer constructor does; the old
        // code unconditionally dereferenced 'ptr' to bump its refcount.
        if (likely(ptr != nullptr))
            const_cast<std::atomic<int> *>(&(ptr->__refCount))->fetch_add(1, std::memory_order_acquire);
    }

    /**
     * Swap with another pointer.
     *
     * This is much faster than using assignment as it requires no atomic
     * operations at all.
     *
     * @param with Pointer to swap with
     */
    ZT_INLINE void swap(SharedPtr &with) noexcept
    {
        T *const tmp = m_ptr;
        m_ptr = with.m_ptr;
        with.m_ptr = tmp;
    }

    /**
     * Move pointer from another SharedPtr to this one, zeroing target.
     *
     * This is faster than assignment as it saves one atomically synchronized
     * increment. If this pointer is null there are no atomic operations at
     * all.
     *
     * @param from Source pointer; will be changed to NULL
     */
    ZT_INLINE void move(SharedPtr &from)
    {
        m_release();
        m_ptr = from.m_ptr;
        from.m_ptr = nullptr;
    }

    ZT_INLINE operator bool() const noexcept { return (m_ptr != nullptr); }

    ZT_INLINE T &operator*() const noexcept { return *m_ptr; }

    ZT_INLINE T *operator->() const noexcept { return m_ptr; }

    /**
     * @return Raw pointer to held object
     */
    ZT_INLINE T *ptr() const noexcept { return m_ptr; }

    /**
     * Set this pointer to NULL
     */
    ZT_INLINE void zero()
    {
        m_release();
        m_ptr = nullptr;
    }

    /**
     * Return held object and null this pointer if reference count is one.
     *
     * If the reference count is one, the reference count is changed to zero
     * and the object is returned. It is not deleted; the caller must do that
     * if that is desired. This pointer will be set to NULL. If the reference
     * count is not one nothing happens and NULL is returned.
     *
     * @return Pointer or NULL if more than one reference
     */
    ZT_INLINE T *weakGC()
    {
        if (likely(m_ptr != nullptr)) {
            int one = 1;
            if (const_cast<std::atomic<int> *>(&(m_ptr->__refCount))->compare_exchange_strong(one, (int)0)) {
                T *const ptr = m_ptr;
                m_ptr = nullptr;
                return ptr;
            }
            else {
                return nullptr;
            }
        }
        else {
            return nullptr;
        }
    }

    // Fix: route the pointer through uintptr_t before narrowing to uint32_t;
    // a direct T* -> uint32_t cast is ill-formed on 64-bit targets.
    ZT_INLINE unsigned long hashCode() const noexcept { return (unsigned long)((uintptr_t)m_ptr + (uintptr_t)Utils::hash32((uint32_t)(uintptr_t)m_ptr)); }

    ZT_INLINE bool operator==(const SharedPtr &sp) const noexcept { return (m_ptr == sp.m_ptr); }
    ZT_INLINE bool operator!=(const SharedPtr &sp) const noexcept { return (m_ptr != sp.m_ptr); }
    ZT_INLINE bool operator>(const SharedPtr &sp) const noexcept { return (reinterpret_cast<const uint8_t *>(m_ptr) > reinterpret_cast<const uint8_t *>(sp.m_ptr)); }
    ZT_INLINE bool operator<(const SharedPtr &sp) const noexcept { return (reinterpret_cast<const uint8_t *>(m_ptr) < reinterpret_cast<const uint8_t *>(sp.m_ptr)); }
    ZT_INLINE bool operator>=(const SharedPtr &sp) const noexcept { return (reinterpret_cast<const uint8_t *>(m_ptr) >= reinterpret_cast<const uint8_t *>(sp.m_ptr)); }
    ZT_INLINE bool operator<=(const SharedPtr &sp) const noexcept { return (reinterpret_cast<const uint8_t *>(m_ptr) <= reinterpret_cast<const uint8_t *>(sp.m_ptr)); }

  private:
    // Bump the refcount (if non-null) and return the pointer.
    ZT_INLINE T *m_acquire() const noexcept
    {
        if (likely(m_ptr != nullptr))
            const_cast<std::atomic<int> *>(&(m_ptr->__refCount))->fetch_add(1, std::memory_order_acquire);
        return m_ptr;
    }

    // Drop one reference; delete the object when the count reaches zero.
    ZT_INLINE void m_release() const noexcept
    {
        if (likely(m_ptr != nullptr)) {
            if (unlikely(const_cast<std::atomic<int> *>(&(m_ptr->__refCount))->fetch_sub(1, std::memory_order_release) <= 1)) {
                // Fix: acquire fence pairs with the release decrements above so
                // all writes by other releasing threads are visible before the
                // destructor runs (standard refcount release pattern).
                std::atomic_thread_fence(std::memory_order_acquire);
                delete m_ptr;
            }
        }
    }

    T *m_ptr;
};
} // namespace ZeroTier
// Augment std::swap to speed up some operations with SharedPtr.
// NOTE(review): adding a new 'move' overload to namespace std (as opposed to
// specializing a std template) is technically non-conforming -- confirm this
// is intended and works on all target toolchains.
namespace std {
template <typename T> ZT_MAYBE_UNUSED ZT_INLINE void swap(ZeroTier::SharedPtr<T> &a, ZeroTier::SharedPtr<T> &b) noexcept { a.swap(b); }
template <typename T> ZT_MAYBE_UNUSED ZT_INLINE void move(ZeroTier::SharedPtr<T> &a, ZeroTier::SharedPtr<T> &b) noexcept { a.move(b); }
} // namespace std
#endif

View file

@ -1,72 +0,0 @@
/*
* Copyright (c)2013-2021 ZeroTier, Inc.
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
*/
/****/
#ifndef ZT_SPINLOCK_HPP
#define ZT_SPINLOCK_HPP
#include "Constants.hpp"
#include "Mutex.hpp"
#ifdef __LINUX__
#include <sched.h>
#else
#include <thread>
#endif
/**
 * Simple spinlock
 *
 * This can be used in place of Mutex to lock things that are extremely fast
 * to access. It should be used very sparingly.
 */
class Spinlock {
public:
/**
 * Pause current thread using whatever methods might be available
 *
 * This is broken out since it's used in a few other places where
 * spinlock-like constructions are used.
 */
ZT_INLINE static void pause() noexcept
{
#ifdef ZT_ARCH_X64
// CPU-level hint to reduce power/contention inside spin loops.
_mm_pause();
#endif
// Also yield to the scheduler so a spinning thread doesn't starve the holder.
#ifdef __LINUX__
sched_yield();
#else
std::this_thread::yield();
#endif
}
// NOTE(review): std::atomic_flag has no bool constructor in standard C++
// before C++20; 'm_locked(false)' relies on an implementation extension --
// verify this compiles on all target toolchains (ATOMIC_FLAG_INIT is the
// portable alternative).
ZT_INLINE Spinlock() noexcept : m_locked(false) {}
// Spin (with pause()) until the flag is acquired.
ZT_INLINE void lock() noexcept
{
if (unlikely(m_locked.test_and_set(std::memory_order_acquire))) {
do {
Spinlock::pause();
} while (m_locked.test_and_set(std::memory_order_acquire));
}
}
ZT_INLINE void unlock() noexcept { m_locked.clear(std::memory_order_release); }
private:
// Private copy operations make the lock non-copyable (pre-C++11 idiom).
ZT_INLINE Spinlock(const Spinlock &) noexcept {}
ZT_INLINE const Spinlock &operator=(const Spinlock &) noexcept { return *this; }
std::atomic_flag m_locked; // set while held
};
#endif

View file

@ -1,78 +0,0 @@
/*
* Copyright (c)2013-2021 ZeroTier, Inc.
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
*/
/****/
#ifndef ZT_STORE_HPP
#define ZT_STORE_HPP
#include "CallContext.hpp"
#include "Constants.hpp"
#include "Containers.hpp"
#include "Context.hpp"
namespace ZeroTier {
/**
 * Wrapper around API callbacks for data store
 *
 * Thin convenience layer over the state get/put callbacks supplied by the
 * host application through the Context's callback table.
 */
class Store {
  public:
    ZT_INLINE Store(const Context &ctx) : m_ctx(ctx) {}

    /**
     * Get a state object
     *
     * @param type Object type
     * @param id Object ID
     * @param idSize Size of object ID in qwords
     * @return Data or empty vector if not found
     */
    ZT_INLINE Vector<uint8_t> get(const CallContext &cc, ZT_StateObjectType type, const uint64_t *const id, unsigned int idSize) const
    {
        Vector<uint8_t> dv;
        void *data = nullptr;
        void (*freeFunc)(void *) = nullptr;
        const int r = m_ctx.cb.stateGetFunction(reinterpret_cast<ZT_Node *>(m_ctx.node), m_ctx.uPtr, cc.tPtr, type, id, idSize, &data, &freeFunc);
        if (r > 0)
            dv.assign(reinterpret_cast<const uint8_t *>(data), reinterpret_cast<const uint8_t *>(data) + r);
        // The callback may return an allocation plus its deleter even on
        // failure; always free it to avoid leaks.
        if ((data) && (freeFunc))
            freeFunc(data);
        return dv;
    }

    /**
     * Store a state object
     *
     * @param type Object type
     * @param id Object ID
     * @param idSize Size of object ID in qwords
     * @param data Data to store
     * @param len Length of data
     */
    ZT_INLINE void put(const CallContext &cc, ZT_StateObjectType type, const uint64_t *const id, const unsigned int idSize, const void *const data, const unsigned int len) noexcept
    {
        // Fix: pass the node handle as get() does; the old code passed 'this'
        // (a Store *) where the callback expects the ZT_Node pointer.
        m_ctx.cb.statePutFunction(reinterpret_cast<ZT_Node *>(m_ctx.node), m_ctx.uPtr, cc.tPtr, type, id, idSize, data, (int)len);
    }

    /**
     * Erase a state object from the object store
     *
     * @param type Object type
     * @param id Object ID
     * @param idSize Size of object ID in qwords
     */
    ZT_INLINE void erase(const CallContext &cc, ZT_StateObjectType type, const uint64_t *const id, const unsigned int idSize) noexcept
    {
        // A negative length signals deletion; node handle fixed as in put().
        m_ctx.cb.statePutFunction(reinterpret_cast<ZT_Node *>(m_ctx.node), m_ctx.uPtr, cc.tPtr, type, id, idSize, nullptr, -1);
    }

  private:
    const Context &m_ctx;
};
} // namespace ZeroTier
#endif

View file

@ -1,118 +0,0 @@
/*
* Copyright (c)2013-2021 ZeroTier, Inc.
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
*/
/****/
#ifndef ZT_SYMMETRICKEY_HPP
#define ZT_SYMMETRICKEY_HPP
#include "AES.hpp"
#include "Address.hpp"
#include "Constants.hpp"
#include "Utils.hpp"
namespace ZeroTier {
/**
 * Container for symmetric keys and ciphers initialized with them.
 */
class SymmetricKey {
public:
/**
 * Construct an uninitialized key (init() must be called)
 */
ZT_INLINE SymmetricKey() : m_secret(), m_ts(-1), m_initialNonce(0), m_cipher(), m_nonce(0) {}
/**
 * Construct a new symmetric key
 *
 * SECURITY: the MSB of the nonce is always 0 because this bit is set to 0
 * or 1 depending on which "direction" data is moving. See nextMessage().
 *
 * @param ts Key timestamp
 * @param key Key (must be 48 bytes / 384 bits)
 */
ZT_INLINE SymmetricKey(const int64_t ts, const void *const key) noexcept : m_secret(key), m_ts(ts), m_initialNonce(Utils::getSecureRandomU64() >> 1U), m_cipher(key), m_nonce(m_initialNonce) {}
// Copy keys the AES context from the copied secret and snapshots the other
// key's current nonce with a relaxed load (exact ordering is not required).
ZT_INLINE SymmetricKey(const SymmetricKey &k) noexcept : m_secret(k.m_secret), m_ts(k.m_ts), m_initialNonce(k.m_initialNonce), m_cipher(k.m_secret.data), m_nonce(k.m_nonce.load(std::memory_order_relaxed)) {}
// Wipe secret key material from memory on destruction.
ZT_INLINE ~SymmetricKey() noexcept { Utils::burn(m_secret.data, ZT_SYMMETRIC_KEY_SIZE); }
ZT_INLINE SymmetricKey &operator=(const SymmetricKey &k) noexcept
{
m_secret = k.m_secret;
m_ts = k.m_ts;
m_initialNonce = k.m_initialNonce;
// Re-key the AES context from the newly copied secret.
m_cipher.init(k.m_secret.data);
m_nonce.store(k.m_nonce.load(std::memory_order_relaxed), std::memory_order_relaxed);
return *this;
}
/**
 * Initialize or re-initialize a symmetric key
 *
 * @param ts Key timestamp
 * @param key Key (must be 48 bytes / 384 bits)
 */
ZT_INLINE void init(const int64_t ts, const void *const key) noexcept
{
Utils::copy<ZT_SYMMETRIC_KEY_SIZE>(m_secret.data, key);
m_ts = ts;
// MSB cleared so nextMessage() can encode direction in the top bit.
m_initialNonce = Utils::getSecureRandomU64() >> 1U;
m_cipher.init(key);
m_nonce.store(m_initialNonce, std::memory_order_relaxed);
}
/**
 * Advance usage counter by one and return the next IV / packet ID.
 *
 * @param sender Sending ZeroTier address
 * @param receiver Receiving ZeroTier address
 * @return Next unique IV for next message
 */
ZT_INLINE uint64_t nextMessage(const Address sender, const Address receiver) noexcept { return m_nonce.fetch_add(1, std::memory_order_relaxed) ^ (((uint64_t)(sender > receiver)) << 63U); }
/**
 * Get the number of times this key has been used.
 *
 * This is used along with the key's initial timestamp to determine key age
 * for ephemeral key rotation.
 *
 * @return Number of times nextMessage() has been called since object creation
 */
ZT_INLINE uint64_t odometer() const noexcept { return m_nonce.load(std::memory_order_relaxed) - m_initialNonce; }
/**
 * @return Key creation timestamp or -1 if this is a long-lived key
 */
ZT_INLINE int64_t timestamp() const noexcept { return m_ts; }
/**
 * @return 48-byte / 384-bit secret key
 */
ZT_INLINE const uint8_t *key() const noexcept { return m_secret.data; }
/**
 * @return AES cipher (already initialized with secret key)
 */
ZT_INLINE const AES &aes() const noexcept { return m_cipher; }
private:
Blob<ZT_SYMMETRIC_KEY_SIZE> m_secret; // raw 384-bit secret, burned on destruction
int64_t m_ts; // creation timestamp or -1
uint64_t m_initialNonce; // starting nonce value (MSB clear)
AES m_cipher; // AES context keyed with m_secret
std::atomic<uint64_t> m_nonce; // monotonically increasing usage counter
};
} // namespace ZeroTier
#endif

View file

@ -1,85 +0,0 @@
/*
* Copyright (c)2013-2021 ZeroTier, Inc.
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
*/
/****/
#include "TagCredential.hpp"
namespace ZeroTier {
bool TagCredential::sign(const Identity &signer) noexcept
{
uint8_t buf[ZT_TAG_MARSHAL_SIZE_MAX];
if (signer.hasPrivate()) {
m_signedBy = signer.address();
m_signatureLength = signer.sign(buf, (unsigned int)marshal(buf, true), m_signature, sizeof(m_signature));
return true;
}
return false;
}
// Serialize this tag into 'data'; returns the number of bytes written.
// Wire layout: [8] network ID, [8] timestamp, [4] tag ID, [4] value,
// [5] issuedTo, [5] signedBy, then (unless forSign) a type byte (1), a 2-byte
// signature length and the signature itself, followed by a 2-byte zero
// trailing-field length. When forSign is true the payload is bracketed by
// eight 0x7f bytes on each side instead of carrying the signature fields.
int TagCredential::marshal(uint8_t data[ZT_TAG_MARSHAL_SIZE_MAX], bool forSign) const noexcept
{
int p = 0;
if (forSign) {
for (int k = 0; k < 8; ++k)
data[p++] = 0x7f;
}
Utils::storeBigEndian<uint64_t>(data + p, m_networkId);
Utils::storeBigEndian<uint64_t>(data + p + 8, (uint64_t)m_ts);
Utils::storeBigEndian<uint32_t>(data + p + 16, m_id);
Utils::storeBigEndian<uint32_t>(data + p + 20, m_value);
p += 24;
m_issuedTo.copyTo(data + p);
p += ZT_ADDRESS_LENGTH;
m_signedBy.copyTo(data + p);
p += ZT_ADDRESS_LENGTH;
if (!forSign) {
// Presumably a signature type/version marker (always 1) -- see unmarshal's
// matching "1 byte reserved" skip at offset 34.
data[p++] = 1;
Utils::storeBigEndian<uint16_t>(data + p, (uint16_t)m_signatureLength);
p += 2;
Utils::copy(data + p, m_signature, m_signatureLength);
p += (int)m_signatureLength;
}
// Two-byte zero length for additional (currently unused) trailing fields.
data[p++] = 0;
data[p++] = 0;
if (forSign) {
for (int k = 0; k < 8; ++k)
data[p++] = 0x7f;
}
return p;
}
// Deserialize a tag from 'data' (inverse of marshal(..., false)).
// Returns the number of bytes consumed or -1 on malformed input.
int TagCredential::unmarshal(const uint8_t *data, int len) noexcept
{
    // Fixed header: 24 bytes of fields + 5 + 5 address bytes + 1 reserved + 2
    // signature length = 37 bytes minimum.
    if (len < 37)
        return -1;
    m_networkId = Utils::loadBigEndian<uint64_t>(data);
    m_ts = (int64_t)Utils::loadBigEndian<uint64_t>(data + 8);
    m_id = Utils::loadBigEndian<uint32_t>(data + 16);
    m_value = Utils::loadBigEndian<uint32_t>(data + 20);
    m_issuedTo.setTo(data + 24);
    m_signedBy.setTo(data + 29);
    // 1 byte reserved (signature type; written as 1 by marshal())
    m_signatureLength = Utils::loadBigEndian<uint16_t>(data + 35);
    int p = 37 + (int)m_signatureLength;
    if ((m_signatureLength > ZT_SIGNATURE_BUFFER_SIZE) || (p > len))
        return -1;
    // Fix: the signature bytes start at offset 37; the previous code copied
    // from data + p (== 37 + signatureLength), i.e. from past the end of the
    // signature field, corrupting m_signature.
    Utils::copy(m_signature, data + 37, m_signatureLength);
    // Skip the 2-byte length of trailing (currently unused) fields.
    if ((p + 2) > len)
        return -1;
    p += 2 + Utils::loadBigEndian<uint16_t>(data + p);
    if (p > len)
        return -1;
    return p;
}
} // namespace ZeroTier

View file

@ -1,144 +0,0 @@
/*
* Copyright (c)2013-2021 ZeroTier, Inc.
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
*/
/****/
#ifndef ZT_TAG_HPP
#define ZT_TAG_HPP
#include "Address.hpp"
#include "C25519.hpp"
#include "Constants.hpp"
#include "Credential.hpp"
#include "Identity.hpp"
#define ZT_TAG_MARSHAL_SIZE_MAX (8 + 8 + 4 + 4 + 5 + 5 + 1 + 2 + ZT_SIGNATURE_BUFFER_SIZE + 2)
namespace ZeroTier {
class Context;
/**
* A tag that can be associated with members and matched in rules
*
* Capabilities group rules, while tags group members subject to those
* rules. Tag values can be matched in rules, and tags relevant to a
* capability are presented along with it.
*
* E.g. a capability might be "can speak Samba/CIFS within your
* department." This cap might have a rule to allow TCP/137 but
* only if a given tag ID's value matches between two peers. The
* capability is what members can do, while the tag is who they are.
* Different departments might have tags with the same ID but different
* values.
*
* Unlike capabilities tags are signed only by the issuer and are never
* transferable.
*/
class TagCredential : public Credential {
    friend class Credential;

  public:
    static constexpr ZT_CredentialType credentialType() noexcept { return ZT_CREDENTIAL_TYPE_TAG; }

    ZT_INLINE TagCredential() noexcept { memoryZero(this); }

    /**
     * @param nwid Network ID
     * @param ts Timestamp
     * @param issuedTo Address to which this tag was issued
     * @param id Tag ID
     * @param value Tag value
     */
    ZT_INLINE TagCredential(const uint64_t nwid, const int64_t ts, const Address &issuedTo, const uint32_t id, const uint32_t value) noexcept : m_id(id), m_value(value), m_networkId(nwid), m_ts(ts), m_issuedTo(issuedTo), m_signedBy(), m_signatureLength(0) {}

    ZT_INLINE uint32_t id() const noexcept { return m_id; }
    ZT_INLINE const uint32_t &value() const noexcept { return m_value; }
    ZT_INLINE uint64_t networkId() const noexcept { return m_networkId; }
    ZT_INLINE int64_t timestamp() const noexcept { return m_ts; }
    ZT_INLINE int64_t revision() const noexcept { return m_ts; }
    ZT_INLINE const Address &issuedTo() const noexcept { return m_issuedTo; }
    ZT_INLINE const Address &signer() const noexcept { return m_signedBy; }
    ZT_INLINE const uint8_t *signature() const noexcept { return m_signature; }
    ZT_INLINE unsigned int signatureLength() const noexcept { return m_signatureLength; }

    /**
     * Sign this tag
     *
     * @param signer Signing identity, must have private key
     * @return True if signature was successful
     */
    bool sign(const Identity &signer) noexcept;

    /**
     * Check this tag's signature
     *
     * @param ctx Runtime context to allow identity lookup for signedBy
     * @param cc Call context handed through to any callbacks called as a result of this call
     */
    ZT_INLINE Credential::VerifyResult verify(const Context &ctx, const CallContext &cc) const noexcept { return s_verify(ctx, cc, *this); }

    static constexpr int marshalSizeMax() noexcept { return ZT_TAG_MARSHAL_SIZE_MAX; }
    int marshal(uint8_t data[ZT_TAG_MARSHAL_SIZE_MAX], bool forSign = false) const noexcept;
    int unmarshal(const uint8_t *data, int len) noexcept;

    // Provides natural sort order by ID
    ZT_INLINE bool operator<(const TagCredential &t) const noexcept { return (m_id < t.m_id); }

    // Field-wise equality. The previous implementation compared the whole object
    // with memcmp(), which also compared struct padding and the tail of
    // m_signature beyond m_signatureLength; instances built with the value
    // constructor leave that buffer uninitialized, making memcmp equality
    // nondeterministic. Only the used portion of the signature is compared here.
    ZT_INLINE bool operator==(const TagCredential &t) const noexcept
    {
        return (m_id == t.m_id) && (m_value == t.m_value) && (m_networkId == t.m_networkId) && (m_ts == t.m_ts) && (m_issuedTo == t.m_issuedTo) && (m_signedBy == t.m_signedBy) && (m_signatureLength == t.m_signatureLength)
               && (memcmp(m_signature, t.m_signature, m_signatureLength) == 0);
    }
    ZT_INLINE bool operator!=(const TagCredential &t) const noexcept { return !(*this == t); }

    // For searching sorted arrays or lists of Tags by ID
    struct IdComparePredicate {
        ZT_INLINE bool operator()(const TagCredential &a, const TagCredential &b) const noexcept { return (a.id() < b.id()); }
        ZT_INLINE bool operator()(const uint32_t a, const TagCredential &b) const noexcept { return (a < b.id()); }
        ZT_INLINE bool operator()(const TagCredential &a, const uint32_t b) const noexcept { return (a.id() < b); }
        ZT_INLINE bool operator()(const TagCredential *a, const TagCredential *b) const noexcept { return (a->id() < b->id()); }
        ZT_INLINE bool operator()(const TagCredential *a, const TagCredential &b) const noexcept { return (a->id() < b.id()); }
        ZT_INLINE bool operator()(const TagCredential &a, const TagCredential *b) const noexcept { return (a.id() < b->id()); }
        ZT_INLINE bool operator()(const uint32_t a, const TagCredential *b) const noexcept { return (a < b->id()); }
        ZT_INLINE bool operator()(const TagCredential *a, const uint32_t b) const noexcept { return (a->id() < b); }
        ZT_INLINE bool operator()(const uint32_t a, const uint32_t b) const noexcept { return (a < b); }
    };

  private:
    uint32_t m_id;                  // tag ID (lookup key within a network)
    uint32_t m_value;               // tag value
    uint64_t m_networkId;           // network this tag applies to
    int64_t m_ts;                   // issuance timestamp, also reported as revision()
    Address m_issuedTo;             // member to which this tag was issued
    Address m_signedBy;             // address of the signing (issuing) identity
    unsigned int m_signatureLength; // number of valid bytes in m_signature
    uint8_t m_signature[ZT_SIGNATURE_BUFFER_SIZE];
};
} // namespace ZeroTier
#endif

File diff suppressed because it is too large Load diff

View file

@ -1,81 +0,0 @@
/*
* Copyright (c)2013-2021 ZeroTier, Inc.
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
*/
/****/
/*
* This header and its implementation in Tests.cpp contain assertion tests,
* self-tests, cryptographic tests, and fuzzing for the ZeroTier core.
*
* To build these ensure that ZT_ENABLE_TESTS is defined during build time.
* Otherwise they are omitted.
*
* The macro ZT_STANDALONE_TESTS will also build a main() function for these
* tests for creating a stand-alone test executable. It will return zero if
* all tests pass and non-zero if at least one test fails.
*
* These symbols are defined extern "C" so tests can be called from regular
* C code, which is important for use via CGo or in plain C projects.
*
* The ZT_T_PRINTF macro defaults to printf() but if it's defined at compile
* time (it must be set while building Tests.cpp) it can specify another
* function to use for output. Defining it to a no-op can be used to disable
* output.
*
* Each test function returns NULL if the tests succeeds or an error message
* on test failure.
*
* Be aware that fuzzing tests can and will crash the program if a serious
* error is discovered. This is the point. It's also beneficial to run these
 * in "valgrind" or a similar tool to detect marginally bad behavior.
*/
#ifndef ZT_TESTS_H
#define ZT_TESTS_H
#ifdef ZT_ENABLE_TESTS
#include <stdint.h>
#include <stdio.h>
#ifndef ZT_T_PRINTF
#define ZT_T_PRINTF(fmt, ...) printf((fmt), ##__VA_ARGS__), fflush(stdout)
#endif
#ifdef __cplusplus
extern "C" {
#endif
/**
* Test platform, compiler behavior, utility functions, and core classes
*/
const char *ZTT_general();
/**
* Test crypto using test vectors and simple scenarios
*
* This is not an absolutely exhaustive test, just a sanity check to make sure
* crypto routines are basically working.
*/
const char *ZTT_crypto();
/**
* Run benchmarks of cryptographic routines and common constructions
*/
const char *ZTT_benchmarkCrypto();
#ifdef __cplusplus
}
#endif
#endif // ZT_ENABLE_TESTS
#endif

View file

@ -1,153 +0,0 @@
/*
* Copyright (c)2013-2021 ZeroTier, Inc.
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
*/
/****/
#ifndef ZT_TINYMAP_HPP
#define ZT_TINYMAP_HPP
#include "Constants.hpp"
#include "Containers.hpp"
#include "Network.hpp"
#include "SharedPtr.hpp"
#include "Spinlock.hpp"
// The number of buckets must be a power of two.
#define ZT_TINYMAP_BUCKETS 1024
#define ZT_TINYMAP_BUCKETS_MASK (ZT_TINYMAP_BUCKETS - 1)
#define ZT_TINYMAP_LOCKED_POINTER (~((uintptr_t)0))
namespace ZeroTier {
/**
* A small, simple, and very fast hash map with a fixed bucket count.
*
* This is used where it's necessary to keep small numbers of items indexed by
* an integer, such as networks mapping to network IDs. It's optimized for very
* fast lookup, with lookups sometimes requiring only a few instructions. It
* uses a "lock free" (actually pointer-as-spinlock) design.
*/
template <typename V> class TinyMap {
  private:
    // Bucket payload: a heap-allocated vector of (key, value) entries. Each
    // bucket's atomic word holds 0 (empty), a pointer to one of these, or the
    // ZT_TINYMAP_LOCKED_POINTER sentinel while a thread holds the bucket.
    typedef Vector<std::pair<uint64_t, V>> EV;

  public:
    ZT_INLINE TinyMap() {}

    ZT_INLINE ~TinyMap() { this->clear(); }

    // Remove all entries, deleting each bucket's entry vector and resetting
    // the bucket to the empty (0) state.
    ZT_INLINE void clear()
    {
        for (unsigned int i = 0; i < ZT_TINYMAP_BUCKETS; ++i) {
            for (;;) {
                // Lock the bucket by swapping in the sentinel; if another thread
                // currently holds it (exchange returned the sentinel), spin.
                const uintptr_t vptr = m_buckets[i].exchange(ZT_TINYMAP_LOCKED_POINTER, std::memory_order_acquire);
                if (likely(vptr != ZT_TINYMAP_LOCKED_POINTER)) {
                    if (vptr != 0)
                        delete reinterpret_cast<EV *>(vptr);
                    // Unlock, leaving the bucket empty.
                    m_buckets[i].store(0, std::memory_order_release);
                    break;
                }
                else {
                    Spinlock::pause();
                }
            }
        }
    }

    // Look up a key and return a copy of its value, or a default-constructed V
    // if the key is not present.
    ZT_INLINE V get(const uint64_t key) noexcept
    {
        V tmp;
        // Bucket index folds the high 32 bits of the key into the low 32.
        std::atomic<uintptr_t> &bucket = m_buckets[(key ^ (key >> 32)) & ZT_TINYMAP_BUCKETS_MASK];
        for (;;) {
            const uintptr_t vptr = bucket.exchange(ZT_TINYMAP_LOCKED_POINTER, std::memory_order_acquire);
            if (likely(vptr != ZT_TINYMAP_LOCKED_POINTER)) {
                if (likely(vptr != 0)) {
                    // Linear scan of this bucket's (small) entry vector.
                    for (typename EV::const_iterator n(reinterpret_cast<const EV *>(vptr)->begin()); n != reinterpret_cast<const EV *>(vptr)->end(); ++n) {
                        if (likely(n->first == key)) {
                            tmp = n->second;
                            break;
                        }
                    }
                }
                // Unlock by restoring the original pointer.
                bucket.store(vptr, std::memory_order_release);
                return tmp;
            }
            else {
                Spinlock::pause();
            }
        }
    }

    // Insert a key/value pair, overwriting the value if the key already exists.
    ZT_INLINE void set(const uint64_t key, const V &value)
    {
        std::atomic<uintptr_t> &bucket = m_buckets[(key ^ (key >> 32)) & ZT_TINYMAP_BUCKETS_MASK];
        for (;;) {
            uintptr_t vptr = bucket.exchange(ZT_TINYMAP_LOCKED_POINTER, std::memory_order_acquire);
            if (likely(vptr != ZT_TINYMAP_LOCKED_POINTER)) {
                if (vptr == 0) {
                    // First entry in this bucket: allocate its entry vector.
                    vptr = reinterpret_cast<uintptr_t>(new EV());
                }
                else {
                    // Key already present: overwrite in place, unlock, done.
                    for (typename EV::iterator n(reinterpret_cast<EV *>(vptr)->begin()); n != reinterpret_cast<EV *>(vptr)->end(); ++n) {
                        if (n->first == key) {
                            n->second = value;
                            bucket.store(vptr, std::memory_order_release);
                            return;
                        }
                    }
                }
                // New key: append, then unlock.
                reinterpret_cast<EV *>(vptr)->push_back(std::pair<uint64_t, V>(key, value));
                bucket.store(vptr, std::memory_order_release);
                return;
            }
            else {
                Spinlock::pause();
            }
        }
    }

    // Remove a key if present; the bucket's vector is freed when it becomes empty.
    ZT_INLINE void erase(const uint64_t key)
    {
        std::atomic<uintptr_t> &bucket = m_buckets[(key ^ (key >> 32)) & ZT_TINYMAP_BUCKETS_MASK];
        for (;;) {
            uintptr_t vptr = bucket.exchange(ZT_TINYMAP_LOCKED_POINTER, std::memory_order_acquire);
            if (likely(vptr != ZT_TINYMAP_LOCKED_POINTER)) {
                if (likely(vptr != 0)) {
                    for (typename EV::iterator n(reinterpret_cast<EV *>(vptr)->begin()); n != reinterpret_cast<EV *>(vptr)->end(); ++n) {
                        if (n->first == key) {
                            reinterpret_cast<EV *>(vptr)->erase(n);
                            break;
                        }
                    }
                    if (reinterpret_cast<EV *>(vptr)->empty()) {
                        delete reinterpret_cast<EV *>(vptr);
                        vptr = 0;
                    }
                }
                // Unlock with either the surviving vector or 0 if now empty.
                bucket.store(vptr, std::memory_order_release);
                return;
            }
            else {
                Spinlock::pause();
            }
        }
    }

  private:
    std::atomic<uintptr_t> m_buckets[ZT_TINYMAP_BUCKETS];
};
// ZT_TINYMAP_BUCKETS_MASK (BUCKETS - 1) is only a valid bit mask if the bucket
// count is a power of two. The previous expression checked divisibility by
// (sizeof(uintptr_t) * 8), which e.g. 192 satisfies without being a power of
// two; this is the actual power-of-two test.
static_assert((ZT_TINYMAP_BUCKETS > 0) && ((ZT_TINYMAP_BUCKETS & (ZT_TINYMAP_BUCKETS - 1)) == 0), "ZT_TINYMAP_BUCKETS is not a power of two");
} // namespace ZeroTier
#endif

View file

@ -1,260 +0,0 @@
/*
* Copyright (c)2013-2021 ZeroTier, Inc.
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
*/
/****/
#include "Topology.hpp"
#include "Defaults.hpp"
#include "Locator.hpp"
#include "TrustStore.hpp"
namespace ZeroTier {
// Construct a topology bound to the runtime context; the CallContext parameter
// is accepted for signature consistency but not used during construction.
Topology::Topology(const Context &ctx, const CallContext &cc) : m_ctx(ctx) {}
// Add a peer to the in-memory peer map. If a peer with the same address is
// already present (in memory or resurrectable from cached state), that
// existing peer is returned instead of the one supplied.
SharedPtr<Peer> Topology::add(const CallContext &cc, const SharedPtr<Peer> &peer)
{
    RWMutex::Lock lock(m_peers_l);
    SharedPtr<Peer> &slot = m_peers[peer->address()];
    if (!slot) {
        // Not in memory: try cached state first, then fall back to the new peer.
        m_loadCached(cc, peer->address(), slot);
        if (!slot)
            slot = peer;
    }
    return slot;
}
// Fill the supplied vectors with (1) every peer currently in memory and
// (2) the current root peer set. The two locks are taken one at a time.
void Topology::allPeers(Vector<SharedPtr<Peer>> &allPeers, Vector<SharedPtr<Peer>> &rootPeers) const
{
    allPeers.clear();
    {
        RWMutex::RLock rl(m_peers_l);
        allPeers.reserve(m_peers.size());
        for (Map<Address, SharedPtr<Peer>>::const_iterator p(m_peers.begin()); p != m_peers.end(); ++p)
            allPeers.push_back(p->second);
    }
    {
        Mutex::Lock rootsLock(m_roots_l);
        rootPeers = m_roots;
    }
}
// Periodic maintenance: re-rank roots, then garbage collect stale peers and
// orphaned paths.
void Topology::doPeriodicTasks(const CallContext &cc)
{
    // Get a list of root peer pointer addresses for filtering during peer cleanup.
    Vector<uintptr_t> rootLookup;
    {
        Mutex::Lock l(m_roots_l);
        m_rankRoots();
        rootLookup.reserve(m_roots.size());
        for (Vector<SharedPtr<Peer>>::const_iterator r(m_roots.begin()); r != m_roots.end(); ++r)
            rootLookup.push_back((uintptr_t)r->ptr());
    }
    // Sorted so std::binary_search can be used below.
    std::sort(rootLookup.begin(), rootLookup.end());
    // Cleaning of peers and paths uses a two pass method to avoid write locking
    // m_peers or m_paths for any significant amount of time. This avoids pauses
    // on nodes with large numbers of peers or paths.
    {
        Vector<Address> toDelete;
        {
            // Pass 1: collect addresses of expired peers under a read lock only.
            RWMutex::RLock l1(m_peers_l);
            for (Map<Address, SharedPtr<Peer>>::iterator i(m_peers.begin()); i != m_peers.end(); ++i) {
                // TODO: also delete if the peer has not exchanged meaningful communication in a while, such as a
                // network frame or non-trivial control packet.
                // Roots are never expired regardless of receive time.
                if (((cc.ticks - i->second->lastReceive()) > ZT_PEER_ALIVE_TIMEOUT) && (!std::binary_search(rootLookup.begin(), rootLookup.end(), reinterpret_cast<uintptr_t>(i->second.ptr()))))
                    toDelete.push_back(i->first);
            }
        }
        if (!toDelete.empty()) {
            ZT_SPEW("garbage collecting %u offline or stale peer objects", (unsigned int)toDelete.size());
            for (Vector<Address>::iterator i(toDelete.begin()); i != toDelete.end(); ++i) {
                SharedPtr<Peer> toSave;
                {
                    // Pass 2: take the write lock only for the brief map erase;
                    // the peer is saved to the store after the lock is released.
                    RWMutex::Lock l1(m_peers_l);
                    const Map<Address, SharedPtr<Peer>>::iterator p(m_peers.find(*i));
                    if (p != m_peers.end()) {
                        p->second.swap(toSave);
                        m_peers.erase(p);
                    }
                }
                if (toSave)
                    toSave->save(m_ctx, cc);
            }
        }
    }
    {
        Vector<Path *> toDelete;
        {
            RWMutex::Lock l1(m_paths_l);
            for (Map<Path::Key, SharedPtr<Path>>::iterator i(m_paths.begin()); i != m_paths.end();) {
                // weakGC() returning non-null appears to mean this map held the
                // last reference, so the path can be deleted — confirm against
                // SharedPtr::weakGC() semantics.
                Path *const d = i->second.weakGC();
                if (likely(d == nullptr)) {
                    ++i;
                }
                else {
                    m_paths.erase(i++);
                    try {
                        toDelete.push_back(d);
                    }
                    catch (...) {
                        // push_back failed (allocation): delete immediately rather than leak.
                        delete d;
                    }
                }
            }
        }
        if (!toDelete.empty()) {
            // Actual deletion happens outside the paths lock.
            for (Vector<Path *>::iterator i(toDelete.begin()); i != toDelete.end(); ++i)
                delete *i;
            ZT_SPEW("garbage collected %u orphaned paths", (unsigned int)toDelete.size());
        }
    }
}
void Topology::trustStoreChanged(const CallContext &cc)
{
Map<Identity, SharedPtr<const Locator>> roots(m_ctx.ts->roots());
Vector<SharedPtr<Peer>> newRootList;
newRootList.reserve(roots.size());
for (Map<Identity, SharedPtr<const Locator>>::const_iterator r(roots.begin()); r != roots.end(); ++r) {
SharedPtr<Peer> root(this->peer(cc, r->first.address(), true));
if (!root) {
root.set(new Peer());
if (root->init(m_ctx, cc, r->first)) {
root = this->add(cc, root);
}
else {
root.zero();
}
}
if (root) {
newRootList.push_back(root);
if (r->second)
root->setLocator(r->second, true);
}
}
{
Mutex::Lock l(m_roots_l);
m_roots.swap(newRootList);
m_rankRoots();
}
}
void Topology::saveAll(const CallContext &cc)
{
RWMutex::RLock l(m_peers_l);
for (Map<Address, SharedPtr<Peer>>::iterator i(m_peers.begin()); i != m_peers.end(); ++i)
i->second->save(m_ctx, cc);
}
struct p_RootRankingComparisonOperator {
    ZT_INLINE bool operator()(const SharedPtr<Peer> &a, const SharedPtr<Peer> &b) const noexcept
    {
        // Sort roots first in order of which root has spoken most recently, but
        // only at a resolution of ZT_PATH_KEEPALIVE_PERIOD/2 units of time. This
        // means that living roots that seem responsive are ranked the same. Ties
        // are then broken by ascending latency so the apparently fastest root
        // ranks first (m_rankRoots() takes m_roots.front() as the best root).
        //
        // Fixes over the previous version: (1) the ordering was inverted — it
        // ranked the LEAST recently heard root and the HIGHEST latency first,
        // contradicting both the comment and m_rankRoots(); (2) when both
        // latencies were negative (unknown) it returned true for comp(a,b) AND
        // comp(b,a), violating the strict weak ordering std::sort requires
        // (undefined behavior). Unknown latencies now compare equal and rank last.
        const int64_t alr = a->lastReceive() / (ZT_PATH_KEEPALIVE_PERIOD / 2);
        const int64_t blr = b->lastReceive() / (ZT_PATH_KEEPALIVE_PERIOD / 2);
        if (alr != blr)
            return alr > blr; // most recently heard first
        const int al = a->latency();
        const int bl = b->latency();
        if (al < 0)
            return false; // unknown latency never ranks ahead
        if (bl < 0)
            return true; // known latency ranks ahead of unknown
        return al < bl; // lower latency first
    }
};
void Topology::m_rankRoots()
{
// assumes m_roots is locked
if (unlikely(m_roots.empty())) {
l_bestRoot.lock();
m_bestRoot.zero();
l_bestRoot.unlock();
}
else {
std::sort(m_roots.begin(), m_roots.end(), p_RootRankingComparisonOperator());
l_bestRoot.lock();
m_bestRoot = m_roots.front();
l_bestRoot.unlock();
}
}
// Attempt to load a peer from cached state in the data store. On success
// 'peer' is set; on any failure (missing, unparseable, or expired record) it
// is left/set null. Does not require any locks to be held.
void Topology::m_loadCached(const CallContext &cc, const Address &zta, SharedPtr<Peer> &peer)
{
    try {
        uint64_t id[2];
        id[0] = zta.toInt();
        id[1] = 0;
        Vector<uint8_t> data(m_ctx.store->get(cc, ZT_STATE_OBJECT_PEER, id, 1));
        if (data.size() > 8) {
            const uint8_t *d = data.data();
            int dl = (int)data.size();
            // First 8 bytes are the record's save timestamp; peer data follows.
            const int64_t ts = (int64_t)Utils::loadBigEndian<uint64_t>(d);
            Peer *const p = new Peer();
            int n = p->unmarshal(m_ctx, cc.ticks, d + 8, dl - 8);
            // Discard the peer if it failed to unmarshal OR the record is too
            // old. (The old code leaked 'p' on the expired-record path: it only
            // deleted on unmarshal failure and fell through otherwise.)
            if ((n < 0) || ((cc.ticks - ts) >= ZT_PEER_GLOBAL_TIMEOUT)) {
                delete p;
                return;
            }
            // TODO: handle many peers, same address (?)
            peer.set(p);
        }
    }
    catch (...) {
        peer.zero();
    }
}
// Resurrect a peer from cached state and, if successful, publish it into the
// live peer map — unless another thread inserted one first, in which case the
// already-published peer wins and is returned.
SharedPtr<Peer> Topology::m_peerFromCached(const CallContext &cc, const Address &zta)
{
    SharedPtr<Peer> cached;
    m_loadCached(cc, zta, cached);
    if (!cached)
        return cached;
    RWMutex::Lock lock(m_peers_l);
    SharedPtr<Peer> &slot = m_peers[zta];
    if (slot)
        return slot;
    slot = cached;
    return cached;
}
// Create a new Path and insert it into the path map under a write lock. If a
// concurrent caller already created a path for the same key, ours is discarded
// and the existing canonical path is returned.
SharedPtr<Path> Topology::m_newPath(const int64_t l, const InetAddress &r, const Path::Key &k)
{
    SharedPtr<Path> candidate(new Path(l, r));
    RWMutex::Lock lock(m_paths_l);
    SharedPtr<Path> &slot = m_paths[k];
    if (!slot)
        slot = candidate;
    return slot;
}
} // namespace ZeroTier

View file

@ -1,171 +0,0 @@
/*
* Copyright (c)2013-2021 ZeroTier, Inc.
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
*/
/****/
#ifndef ZT_TOPOLOGY_HPP
#define ZT_TOPOLOGY_HPP
#include "Address.hpp"
#include "CallContext.hpp"
#include "Certificate.hpp"
#include "Constants.hpp"
#include "Containers.hpp"
#include "FCV.hpp"
#include "Fingerprint.hpp"
#include "Identity.hpp"
#include "InetAddress.hpp"
#include "Mutex.hpp"
#include "Path.hpp"
#include "Peer.hpp"
#include "ScopedPtr.hpp"
#include "SharedPtr.hpp"
#include "Spinlock.hpp"
namespace ZeroTier {
class Context;
/**
* Database of network topology
*/
class Topology {
  public:
    Topology(const Context &ctx, const CallContext &cc);

    /**
     * Add peer to database
     *
     * If there's already a peer with this address, the existing peer is
     * returned. Otherwise the new peer is added and returned.
     *
     * @param peer Peer to add
     * @return New or existing peer
     */
    SharedPtr<Peer> add(const CallContext &cc, const SharedPtr<Peer> &peer);

    /**
     * Get a peer from its address
     *
     * @param zta ZeroTier address of peer
     * @param loadFromCached If false do not load from cache if not in memory (default: true)
     * @return Peer or NULL if not found
     */
    ZT_INLINE SharedPtr<Peer> peer(const CallContext &cc, const Address &zta, const bool loadFromCached = true)
    {
        {
            // Fast path: read-locked lookup in the in-memory peer map.
            RWMutex::RLock l(m_peers_l);
            Map<Address, SharedPtr<Peer>>::const_iterator ap(m_peers.find(zta));
            if (likely(ap != m_peers.end()))
                return ap->second;
        }
        // Slow path: optionally try to resurrect the peer from cached state.
        if (loadFromCached)
            return m_peerFromCached(cc, zta);
        return SharedPtr<Peer>();
    }

    /**
     * Get a Path object for a given local and remote physical address, creating if needed
     *
     * @param l Local socket
     * @param r Remote address
     * @return Pointer to canonicalized Path object or NULL on error
     */
    ZT_INLINE SharedPtr<Path> path(const int64_t l, const InetAddress &r)
    {
        const Path::Key k(r);
        {
            // Fast path: read-locked lookup of an existing canonical path.
            RWMutex::RLock lck(m_paths_l);
            Map<Path::Key, SharedPtr<Path>>::const_iterator p(m_paths.find(k));
            if (likely(p != m_paths.end()))
                return p->second;
        }
        // Slow path: create and insert under the write lock.
        return m_newPath(l, r, k);
    }

    /**
     * Get current best root
     *
     * @return Root peer or nullptr if none
     */
    ZT_INLINE SharedPtr<Peer> root()
    {
        l_bestRoot.lock(); // spinlock
        SharedPtr<Peer> r(m_bestRoot);
        l_bestRoot.unlock();
        return r;
    }

    /**
     * Get current best root by setting a result parameter
     *
     * @param root Set to best root or nullptr if none
     */
    ZT_INLINE void root(SharedPtr<Peer> &root)
    {
        l_bestRoot.lock(); // spinlock
        root = m_bestRoot;
        l_bestRoot.unlock();
    }

    /**
     * @param allPeers Vector to fill with all current peers
     * @param rootPeers Vector to fill with peers that are roots
     */
    void allPeers(Vector<SharedPtr<Peer>> &allPeers, Vector<SharedPtr<Peer>> &rootPeers) const;

    /**
     * Do periodic tasks such as database cleanup, cert cleanup, root ranking, etc.
     */
    void doPeriodicTasks(const CallContext &cc);

    /**
     * Rank root servers in descending order of quality
     */
    ZT_INLINE void rankRoots(const CallContext &cc)
    {
        Mutex::Lock l(m_roots_l);
        m_rankRoots();
    }

    /**
     * Perform internal updates based on changes in the trust store
     */
    void trustStoreChanged(const CallContext &cc);

    /**
     * Save all currently known peers to data store
     */
    void saveAll(const CallContext &cc);

  private:
    // Re-sort m_roots and publish the best root; caller must hold m_roots_l.
    void m_rankRoots();
    // Load a peer from cached state in the data store (no locks required).
    void m_loadCached(const CallContext &cc, const Address &zta, SharedPtr<Peer> &peer);
    // Load from cache and publish into m_peers if not already present.
    SharedPtr<Peer> m_peerFromCached(const CallContext &cc, const Address &zta);
    // Create and canonicalize a new path under the write lock.
    SharedPtr<Path> m_newPath(int64_t l, const InetAddress &r, const Path::Key &k);

    const Context &m_ctx;
    Vector<SharedPtr<Peer>> m_roots;
    Map<Address, SharedPtr<Peer>> m_peers;
    Map<Path::Key, SharedPtr<Path>> m_paths;
    RWMutex m_peers_l; // m_peers
    RWMutex m_paths_l; // m_paths
    Mutex m_roots_l;   // m_roots
    SharedPtr<Peer> m_bestRoot; // guarded by l_bestRoot
    Spinlock l_bestRoot;
};
} // namespace ZeroTier
#endif

View file

@ -1,206 +0,0 @@
/*
* Copyright (c)2013-2021 ZeroTier, Inc.
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
*/
/****/
#include "Trace.hpp"

#include "Context.hpp"
#include "FCV.hpp"
#include "InetAddress.hpp"
#include "Node.hpp"
#include "Peer.hpp"

#include <cstdarg>
#include <cstdio>
#include "Peer.hpp"
// NOTE: packet IDs are always handled in network byte order, so no need to convert them.
namespace ZeroTier {
// Construct a tracer bound to the runtime context; all trace flags start
// cleared so no optional trace categories are emitted until enabled.
Trace::Trace(const Context &ctx) : m_ctx(ctx), m_traceFlags(0) {}
// Emit an "unexpected error" trace event. The message is a printf-style format
// string; the previous implementation declared the variadic parameters but
// never consumed them, emitting the raw format string with its %-specifiers
// unexpanded. The arguments are now formatted via vsnprintf (truncated to the
// local buffer size if necessary).
void Trace::unexpectedError(const CallContext &cc, uint32_t codeLocation, const char *message, ...)
{
    char formatted[1024];
    va_list ap;
    va_start(ap, message);
    std::vsnprintf(formatted, sizeof(formatted), message, ap);
    va_end(ap);
    formatted[sizeof(formatted) - 1] = 0; // guarantee NUL termination

    FCV<uint8_t, 4096> buf;
    Dictionary::append(buf, ZT_TRACE_FIELD_TYPE, ZT_TRACE_UNEXPECTED_ERROR);
    Dictionary::append(buf, ZT_TRACE_FIELD_CODE_LOCATION, codeLocation);
    Dictionary::append(buf, ZT_TRACE_FIELD_MESSAGE, formatted);
    buf.push_back(0);
    m_ctx.node->postEvent(cc.tPtr, ZT_EVENT_TRACE, buf.data());
}
void Trace::m_resettingPathsInScope(void *tPtr, uint32_t codeLocation, const Identity &reporter, const InetAddress &from, const InetAddress &oldExternal, const InetAddress &newExternal, ZT_InetAddress_IpScope scope)
{
FCV<uint8_t, 4096> buf;
Dictionary::append(buf, ZT_TRACE_FIELD_TYPE, ZT_TRACE_VL1_RESETTING_PATHS_IN_SCOPE);
Dictionary::append(buf, ZT_TRACE_FIELD_CODE_LOCATION, codeLocation);
if (reporter)
Dictionary::appendObject(buf, ZT_TRACE_FIELD_IDENTITY_FINGERPRINT, reporter.fingerprint());
if (from)
Dictionary::appendObject(buf, ZT_TRACE_FIELD_TRIGGER_FROM_ENDPOINT, Endpoint(from));
if (oldExternal)
Dictionary::appendObject(buf, ZT_TRACE_FIELD_OLD_ENDPOINT, Endpoint(oldExternal));
if (newExternal)
Dictionary::appendObject(buf, ZT_TRACE_FIELD_NEW_ENDPOINT, Endpoint(newExternal));
Dictionary::append(buf, ZT_TRACE_FIELD_RESET_ADDRESS_SCOPE, scope);
buf.push_back(0);
m_ctx.node->postEvent(tPtr, ZT_EVENT_TRACE, buf.data());
}
void Trace::m_tryingNewPath(void *tPtr, uint32_t codeLocation, const Identity &trying, const InetAddress &physicalAddress, const InetAddress &triggerAddress, uint64_t triggeringPacketId, uint8_t triggeringPacketVerb, const Identity &triggeringPeer)
{
if ((trying) && (physicalAddress)) {
FCV<uint8_t, 4096> buf;
Dictionary::append(buf, ZT_TRACE_FIELD_TYPE, ZT_TRACE_VL1_TRYING_NEW_PATH);
Dictionary::append(buf, ZT_TRACE_FIELD_CODE_LOCATION, codeLocation);
Dictionary::appendObject(buf, ZT_TRACE_FIELD_IDENTITY_FINGERPRINT, trying.fingerprint());
Dictionary::appendObject(buf, ZT_TRACE_FIELD_ENDPOINT, physicalAddress);
if (triggerAddress)
Dictionary::appendObject(buf, ZT_TRACE_FIELD_TRIGGER_FROM_ENDPOINT, Endpoint(triggerAddress));
Dictionary::appendPacketId(buf, ZT_TRACE_FIELD_TRIGGER_FROM_PACKET_ID, triggeringPacketId);
Dictionary::append(buf, ZT_TRACE_FIELD_TRIGGER_FROM_PACKET_VERB, triggeringPacketVerb);
if (triggeringPeer)
Dictionary::appendObject(buf, ZT_TRACE_FIELD_TRIGGER_FROM_PEER_FINGERPRINT, triggeringPeer.fingerprint());
buf.push_back(0);
m_ctx.node->postEvent(tPtr, ZT_EVENT_TRACE, buf.data());
}
}
void Trace::m_learnedNewPath(void *tPtr, uint32_t codeLocation, uint64_t packetId, const Identity &peerIdentity, const InetAddress &physicalAddress, const InetAddress &replaced)
{
if (peerIdentity) {
FCV<uint8_t, 4096> buf;
Dictionary::append(buf, ZT_TRACE_FIELD_TYPE, ZT_TRACE_VL1_LEARNED_NEW_PATH);
Dictionary::append(buf, ZT_TRACE_FIELD_CODE_LOCATION, codeLocation);
Dictionary::appendPacketId(buf, ZT_TRACE_FIELD_PACKET_ID, packetId);
Dictionary::appendObject(buf, ZT_TRACE_FIELD_IDENTITY_FINGERPRINT, peerIdentity.fingerprint());
if (physicalAddress)
Dictionary::appendObject(buf, ZT_TRACE_FIELD_ENDPOINT, Endpoint(physicalAddress));
if (replaced)
Dictionary::appendObject(buf, ZT_TRACE_FIELD_OLD_ENDPOINT, Endpoint(replaced));
buf.push_back(0);
m_ctx.node->postEvent(tPtr, ZT_EVENT_TRACE, buf.data());
}
}
// Emit a VL1 trace event when an incoming packet is dropped.
void Trace::m_incomingPacketDropped(void *tPtr, uint32_t codeLocation, uint64_t packetId, uint64_t networkId, const Identity &peerIdentity, const InetAddress &physicalAddress, uint8_t hops, uint8_t verb, ZT_TracePacketDropReason reason)
{
    FCV<uint8_t, 4096> buf;
    Dictionary::append(buf, ZT_TRACE_FIELD_TYPE, ZT_TRACE_VL1_INCOMING_PACKET_DROPPED);
    Dictionary::append(buf, ZT_TRACE_FIELD_CODE_LOCATION, codeLocation);
    Dictionary::appendPacketId(buf, ZT_TRACE_FIELD_PACKET_ID, packetId);
    Dictionary::append(buf, ZT_TRACE_FIELD_NETWORK_ID, networkId);
    if (peerIdentity)
        Dictionary::appendObject(buf, ZT_TRACE_FIELD_IDENTITY_FINGERPRINT, peerIdentity.fingerprint());
    if (physicalAddress)
        // Fixed: was append(); Endpoint is serialized with appendObject()
        // everywhere else in this file, so the field was encoded inconsistently.
        Dictionary::appendObject(buf, ZT_TRACE_FIELD_ENDPOINT, Endpoint(physicalAddress));
    Dictionary::append(buf, ZT_TRACE_FIELD_PACKET_HOPS, hops);
    Dictionary::append(buf, ZT_TRACE_FIELD_PACKET_VERB, verb);
    Dictionary::append(buf, ZT_TRACE_FIELD_REASON, reason);
    buf.push_back(0);
    m_ctx.node->postEvent(tPtr, ZT_EVENT_TRACE, buf.data());
}
void Trace::m_outgoingNetworkFrameDropped(void *tPtr, uint32_t codeLocation, uint64_t networkId, const MAC &sourceMac, const MAC &destMac, uint16_t etherType, uint16_t frameLength, const uint8_t *frameData, ZT_TraceFrameDropReason reason)
{
FCV<uint8_t, 4096> buf;
Dictionary::append(buf, ZT_TRACE_FIELD_TYPE, ZT_TRACE_VL1_INCOMING_PACKET_DROPPED);
Dictionary::append(buf, ZT_TRACE_FIELD_CODE_LOCATION, codeLocation);
Dictionary::append(buf, ZT_TRACE_FIELD_NETWORK_ID, networkId);
Dictionary::append(buf, ZT_TRACE_FIELD_SOURCE_MAC, sourceMac.toInt());
Dictionary::append(buf, ZT_TRACE_FIELD_DEST_MAC, destMac.toInt());
Dictionary::append(buf, ZT_TRACE_FIELD_ETHERTYPE, etherType);
Dictionary::append(buf, ZT_TRACE_FIELD_FRAME_LENGTH, frameLength);
if (frameData)
Dictionary::append(buf, ZT_TRACE_FIELD_FRAME_DATA, frameData, std::min((unsigned int)64, (unsigned int)frameLength));
Dictionary::append(buf, ZT_TRACE_FIELD_REASON, reason);
buf.push_back(0);
m_ctx.node->postEvent(tPtr, ZT_EVENT_TRACE, buf.data());
}
void Trace::m_incomingNetworkFrameDropped(
void *tPtr, uint32_t codeLocation, uint64_t networkId, const MAC &sourceMac, const MAC &destMac, const uint16_t etherType, const Identity &peerIdentity, const InetAddress &physicalAddress, uint8_t hops, uint16_t frameLength, const uint8_t *frameData, uint8_t verb, bool credentialRequestSent, ZT_TraceFrameDropReason reason)
{
FCV<uint8_t, 4096> buf;
Dictionary::append(buf, ZT_TRACE_FIELD_TYPE, ZT_TRACE_VL2_INCOMING_FRAME_DROPPED);
Dictionary::append(buf, ZT_TRACE_FIELD_CODE_LOCATION, codeLocation);
Dictionary::append(buf, ZT_TRACE_FIELD_NETWORK_ID, networkId);
Dictionary::append(buf, ZT_TRACE_FIELD_SOURCE_MAC, sourceMac.toInt());
Dictionary::append(buf, ZT_TRACE_FIELD_DEST_MAC, destMac.toInt());
Dictionary::append(buf, ZT_TRACE_FIELD_ETHERTYPE, etherType);
Dictionary::appendObject(buf, ZT_TRACE_FIELD_IDENTITY_FINGERPRINT, peerIdentity.fingerprint());
if (physicalAddress)
Dictionary::appendObject(buf, ZT_TRACE_FIELD_ENDPOINT, Endpoint(physicalAddress));
Dictionary::append(buf, ZT_TRACE_FIELD_PACKET_HOPS, hops);
Dictionary::append(buf, ZT_TRACE_FIELD_PACKET_VERB, verb);
Dictionary::append(buf, ZT_TRACE_FIELD_FRAME_LENGTH, frameLength);
if (frameData)
Dictionary::append(buf, ZT_TRACE_FIELD_FRAME_DATA, frameData, std::min((unsigned int)64, (unsigned int)frameLength));
Dictionary::append(buf, ZT_TRACE_FIELD_FLAG_CREDENTIAL_REQUEST_SENT, credentialRequestSent);
Dictionary::append(buf, ZT_TRACE_FIELD_REASON, reason);
buf.push_back(0);
m_ctx.node->postEvent(tPtr, ZT_EVENT_TRACE, buf.data());
}
void Trace::m_networkConfigRequestSent(void *tPtr, uint32_t codeLocation, uint64_t networkId)
{
FCV<uint8_t, 4096> buf;
Dictionary::append(buf, ZT_TRACE_FIELD_TYPE, ZT_TRACE_VL2_NETWORK_CONFIG_REQUESTED);
Dictionary::append(buf, ZT_TRACE_FIELD_CODE_LOCATION, codeLocation);
Dictionary::append(buf, ZT_TRACE_FIELD_NETWORK_ID, networkId);
buf.push_back(0);
m_ctx.node->postEvent(tPtr, ZT_EVENT_TRACE, buf.data());
}
void Trace::m_networkFilter(
void *tPtr, uint32_t codeLocation, uint64_t networkId, const uint8_t *primaryRuleSetLog, const uint8_t *matchingCapabilityRuleSetLog, uint32_t matchingCapabilityId, int64_t matchingCapabilityTimestamp, const Address &source, const Address &dest, const MAC &sourceMac, const MAC &destMac, uint16_t frameLength, const uint8_t *frameData, uint16_t etherType,
uint16_t vlanId, bool noTee, bool inbound, int accept)
{
FCV<uint8_t, 4096> buf;
Dictionary::append(buf, ZT_TRACE_FIELD_TYPE, ZT_TRACE_VL2_NETWORK_FILTER);
Dictionary::append(buf, ZT_TRACE_FIELD_CODE_LOCATION, codeLocation);
Dictionary::append(buf, ZT_TRACE_FIELD_NETWORK_ID, networkId);
if ((primaryRuleSetLog) && (!Utils::allZero(primaryRuleSetLog, 512)))
Dictionary::append(buf, ZT_TRACE_FIELD_PRIMARY_RULE_SET_LOG, primaryRuleSetLog, 512);
if ((matchingCapabilityRuleSetLog) && (!Utils::allZero(matchingCapabilityRuleSetLog, 512)))
Dictionary::append(buf, ZT_TRACE_FIELD_MATCHING_CAPABILITY_RULE_SET_LOG, matchingCapabilityRuleSetLog, 512);
Dictionary::append(buf, ZT_TRACE_FIELD_MATCHING_CAPABILITY_ID, matchingCapabilityId);
Dictionary::append(buf, ZT_TRACE_FIELD_MATCHING_CAPABILITY_TIMESTAMP, matchingCapabilityTimestamp);
Dictionary::append(buf, ZT_TRACE_FIELD_SOURCE_ZT_ADDRESS, source);
Dictionary::append(buf, ZT_TRACE_FIELD_DEST_ZT_ADDRESS, dest);
Dictionary::append(buf, ZT_TRACE_FIELD_SOURCE_MAC, sourceMac.toInt());
Dictionary::append(buf, ZT_TRACE_FIELD_DEST_MAC, destMac.toInt());
Dictionary::append(buf, ZT_TRACE_FIELD_FRAME_LENGTH, frameLength);
if (frameData)
Dictionary::append(buf, ZT_TRACE_FIELD_FRAME_DATA, frameData, std::min((unsigned int)64, (unsigned int)frameLength));
Dictionary::append(buf, ZT_TRACE_FIELD_ETHERTYPE, etherType);
Dictionary::append(buf, ZT_TRACE_FIELD_VLAN_ID, vlanId);
Dictionary::append(buf, ZT_TRACE_FIELD_RULE_FLAG_NOTEE, noTee);
Dictionary::append(buf, ZT_TRACE_FIELD_RULE_FLAG_INBOUND, inbound);
Dictionary::append(buf, ZT_TRACE_FIELD_RULE_FLAG_ACCEPT, (int32_t)accept);
buf.push_back(0);
m_ctx.node->postEvent(tPtr, ZT_EVENT_TRACE, buf.data());
}
void Trace::m_credentialRejected(void *tPtr, uint32_t codeLocation, uint64_t networkId, const Identity &identity, uint32_t credentialId, int64_t credentialTimestamp, uint8_t credentialType, ZT_TraceCredentialRejectionReason reason)
{
FCV<uint8_t, 4096> buf;
Dictionary::append(buf, ZT_TRACE_FIELD_TYPE, ZT_TRACE_VL2_NETWORK_CREDENTIAL_REJECTED);
Dictionary::append(buf, ZT_TRACE_FIELD_CODE_LOCATION, codeLocation);
Dictionary::append(buf, ZT_TRACE_FIELD_NETWORK_ID, networkId);
Dictionary::appendObject(buf, ZT_TRACE_FIELD_IDENTITY_FINGERPRINT, identity.fingerprint());
Dictionary::append(buf, ZT_TRACE_FIELD_CREDENTIAL_ID, credentialId);
Dictionary::append(buf, ZT_TRACE_FIELD_CREDENTIAL_TIMESTAMP, credentialTimestamp);
Dictionary::append(buf, ZT_TRACE_FIELD_CREDENTIAL_TYPE, credentialType);
Dictionary::append(buf, ZT_TRACE_FIELD_REASON, reason);
buf.push_back(0);
m_ctx.node->postEvent(tPtr, ZT_EVENT_TRACE, buf.data());
}
} // namespace ZeroTier

View file

@ -1,161 +0,0 @@
/*
* Copyright (c)2013-2021 ZeroTier, Inc.
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
*/
/****/
#ifndef ZT_TRACE_HPP
#define ZT_TRACE_HPP
#include "Address.hpp"
#include "CallContext.hpp"
#include "Constants.hpp"
#include "Containers.hpp"
#include "InetAddress.hpp"
#include "MAC.hpp"
#include "Mutex.hpp"
#include "SharedPtr.hpp"
#include "Utils.hpp"
#define ZT_TRACE_F_VL1 0x01U
#define ZT_TRACE_F_VL2 0x02U
#define ZT_TRACE_F_VL2_FILTER 0x04U
#define ZT_TRACE_F_VL2_MULTICAST 0x08U
namespace ZeroTier {
class Context;
class Identity;
class Peer;
class Path;
class Network;
class MembershipCredential;
class OwnershipCredential;
class RevocationCredential;
class TagCredential;
class CapabilityCredential;
struct NetworkConfig;
/**
* Remote tracing and trace logging handler
*
* These methods are called when things happen that may be of interested to
* someone debugging ZeroTier or its virtual networks. The codeLocation parameter
* is an arbitrary pseudo-random identifier of the form 0xNNNNNNNN that could be
* easily found by searching the code base. This makes it easy to locate the
* specific line where a trace originated without relying on brittle non-portable
* things like source file and line number. The same identifier should be used
* for the same 'place' in the code across versions. These could eventually be
* turned into constants that are semi-official and stored in a database to
* provide extra debug context.
*/
class Trace {
  public:
    /**
     * Per-packet log of filter rule evaluation results.
     *
     * Two 4-bit entries are packed per byte, one per rule number. Results
     * are stored +1 so that a zero nibble distinguishes a never-logged slot
     * (the whole array is zeroed by clear()).
     */
    struct RuleResultLog : public TriviallyCopyable {
        uint8_t l[ZT_MAX_NETWORK_RULES / 2]; // ZT_MAX_NETWORK_RULES 4-bit fields

        // Record both the rule match result and the rule-set match result for rule number rn.
        ZT_INLINE void log(const unsigned int rn, const uint8_t thisRuleMatches, const uint8_t thisSetMatches) noexcept { l[rn >> 1U] |= (((thisRuleMatches + 1U) << 2U) | (thisSetMatches + 1U)) << ((rn & 1U) << 2U); }

        // Record only the rule-set match result for a rule that was skipped.
        ZT_INLINE void logSkipped(const unsigned int rn, const uint8_t thisSetMatches) noexcept { l[rn >> 1U] |= (thisSetMatches + 1U) << ((rn & 1U) << 2U); }

        // Zero the entire log.
        ZT_INLINE void clear() noexcept { memoryZero(this); }
    };

    explicit Trace(const Context &ctx);

    // Report an unexpected internal error (printf-style formatting).
    void unexpectedError(const CallContext &cc, uint32_t codeLocation, const char *message, ...);

    // Each inline method below checks m_traceFlags for its category and only
    // then forwards to the out-of-line m_* implementation, keeping the common
    // tracing-disabled path to a single flag test.

    ZT_INLINE void resettingPathsInScope(const CallContext &cc, const uint32_t codeLocation, const Identity &reporter, const InetAddress &from, const InetAddress &oldExternal, const InetAddress &newExternal, const InetAddress::IpScope scope)
    {
        if (unlikely((m_traceFlags & ZT_TRACE_F_VL1) != 0))
            m_resettingPathsInScope(cc.tPtr, codeLocation, reporter, from, oldExternal, newExternal, scope);
    }

    ZT_INLINE void tryingNewPath(const CallContext &cc, const uint32_t codeLocation, const Identity &trying, const InetAddress &physicalAddress, const InetAddress &triggerAddress, uint64_t triggeringPacketId, uint8_t triggeringPacketVerb, const Identity &triggeringPeer)
    {
        if (unlikely((m_traceFlags & ZT_TRACE_F_VL1) != 0))
            m_tryingNewPath(cc.tPtr, codeLocation, trying, physicalAddress, triggerAddress, triggeringPacketId, triggeringPacketVerb, triggeringPeer);
    }

    ZT_INLINE void learnedNewPath(const CallContext &cc, const uint32_t codeLocation, uint64_t packetId, const Identity &peerIdentity, const InetAddress &physicalAddress, const InetAddress &replaced)
    {
        if (unlikely((m_traceFlags & ZT_TRACE_F_VL1) != 0))
            m_learnedNewPath(cc.tPtr, codeLocation, packetId, peerIdentity, physicalAddress, replaced);
    }

    ZT_INLINE void incomingPacketDropped(const CallContext &cc, const uint32_t codeLocation, uint64_t packetId, uint64_t networkId, const Identity &peerIdentity, const InetAddress &physicalAddress, uint8_t hops, uint8_t verb, const ZT_TracePacketDropReason reason)
    {
        if (unlikely((m_traceFlags & ZT_TRACE_F_VL1) != 0))
            m_incomingPacketDropped(cc.tPtr, codeLocation, packetId, networkId, peerIdentity, physicalAddress, hops, verb, reason);
    }

    ZT_INLINE void outgoingNetworkFrameDropped(const CallContext &cc, const uint32_t codeLocation, uint64_t networkId, const MAC &sourceMac, const MAC &destMac, uint16_t etherType, uint16_t frameLength, const uint8_t *frameData, ZT_TraceFrameDropReason reason)
    {
        if (unlikely((m_traceFlags & ZT_TRACE_F_VL2) != 0))
            m_outgoingNetworkFrameDropped(cc.tPtr, codeLocation, networkId, sourceMac, destMac, etherType, frameLength, frameData, reason);
    }

    ZT_INLINE void incomingNetworkFrameDropped(
        const CallContext &cc, const uint32_t codeLocation, uint64_t networkId, const MAC &sourceMac, const MAC &destMac, const uint16_t etherType, const Identity &peerIdentity, const InetAddress &physicalAddress, uint8_t hops, uint16_t frameLength, const uint8_t *frameData, uint8_t verb, bool credentialRequestSent, ZT_TraceFrameDropReason reason)
    {
        if (unlikely((m_traceFlags & ZT_TRACE_F_VL2) != 0))
            m_incomingNetworkFrameDropped(cc.tPtr, codeLocation, networkId, sourceMac, destMac, etherType, peerIdentity, physicalAddress, hops, frameLength, frameData, verb, credentialRequestSent, reason);
    }

    ZT_INLINE void networkConfigRequestSent(const CallContext &cc, const uint32_t codeLocation, uint64_t networkId)
    {
        if (unlikely((m_traceFlags & ZT_TRACE_F_VL2) != 0))
            m_networkConfigRequestSent(cc.tPtr, codeLocation, networkId);
    }

    ZT_INLINE void networkFilter(
        const CallContext &cc, const uint32_t codeLocation, uint64_t networkId, const uint8_t primaryRuleSetLog[512], const uint8_t matchingCapabilityRuleSetLog[512], uint32_t matchingCapabilityId, int64_t matchingCapabilityTimestamp, const Address &source, const Address &dest, const MAC &sourceMac, const MAC &destMac, uint16_t frameLength,
        const uint8_t *frameData, uint16_t etherType, uint16_t vlanId, bool noTee, bool inbound, int accept)
    {
        if (unlikely((m_traceFlags & ZT_TRACE_F_VL2_FILTER) != 0)) {
            m_networkFilter(cc.tPtr, codeLocation, networkId, primaryRuleSetLog, matchingCapabilityRuleSetLog, matchingCapabilityId, matchingCapabilityTimestamp, source, dest, sourceMac, destMac, frameLength, frameData, etherType, vlanId, noTee, inbound, accept);
        }
    }

    ZT_INLINE void credentialRejected(const CallContext &cc, const uint32_t codeLocation, uint64_t networkId, const Identity &identity, uint32_t credentialId, int64_t credentialTimestamp, uint8_t credentialType, ZT_TraceCredentialRejectionReason reason)
    {
        if (unlikely((m_traceFlags & ZT_TRACE_F_VL2) != 0))
            m_credentialRejected(cc.tPtr, codeLocation, networkId, identity, credentialId, credentialTimestamp, credentialType, reason);
    }

  private:
    // Out-of-line implementations, called only when the matching trace flag is set.
    void m_resettingPathsInScope(void *tPtr, uint32_t codeLocation, const Identity &reporter, const InetAddress &from, const InetAddress &oldExternal, const InetAddress &newExternal, InetAddress::IpScope scope);
    void m_tryingNewPath(void *tPtr, uint32_t codeLocation, const Identity &trying, const InetAddress &physicalAddress, const InetAddress &triggerAddress, uint64_t triggeringPacketId, uint8_t triggeringPacketVerb, const Identity &triggeringPeer);
    void m_learnedNewPath(void *tPtr, uint32_t codeLocation, uint64_t packetId, const Identity &peerIdentity, const InetAddress &physicalAddress, const InetAddress &replaced);
    void m_incomingPacketDropped(void *tPtr, uint32_t codeLocation, uint64_t packetId, uint64_t networkId, const Identity &peerIdentity, const InetAddress &physicalAddress, uint8_t hops, uint8_t verb, ZT_TracePacketDropReason reason);
    void m_outgoingNetworkFrameDropped(void *tPtr, uint32_t codeLocation, uint64_t networkId, const MAC &sourceMac, const MAC &destMac, uint16_t etherType, uint16_t frameLength, const uint8_t *frameData, ZT_TraceFrameDropReason reason);
    void
    m_incomingNetworkFrameDropped(void *tPtr, uint32_t codeLocation, uint64_t networkId, const MAC &sourceMac, const MAC &destMac, const uint16_t etherType, const Identity &peerIdentity, const InetAddress &physicalAddress, uint8_t hops, uint16_t frameLength, const uint8_t *frameData, uint8_t verb, bool credentialRequestSent, ZT_TraceFrameDropReason reason);
    void m_networkConfigRequestSent(void *tPtr, uint32_t codeLocation, uint64_t networkId);
    void m_networkFilter(
        void *tPtr, uint32_t codeLocation, uint64_t networkId, const uint8_t *primaryRuleSetLog, const uint8_t *matchingCapabilityRuleSetLog, uint32_t matchingCapabilityId, int64_t matchingCapabilityTimestamp, const Address &source, const Address &dest, const MAC &sourceMac, const MAC &destMac, uint16_t frameLength, const uint8_t *frameData,
        uint16_t etherType, uint16_t vlanId, bool noTee, bool inbound, int accept);
    void m_credentialRejected(void *tPtr, uint32_t codeLocation, uint64_t networkId, const Identity &identity, uint32_t credentialId, int64_t credentialTimestamp, uint8_t credentialType, ZT_TraceCredentialRejectionReason reason);

    const Context &m_ctx;
    volatile unsigned int m_traceFlags; // faster than atomic, but may not "instantly" change... should be okay
};
} // namespace ZeroTier
#endif

View file

@@ -1,60 +0,0 @@
/*
* Copyright (c)2013-2021 ZeroTier, Inc.
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
*/
/****/
#ifndef ZT_TRIVIALLYCOPYABLE_HPP
#define ZT_TRIVIALLYCOPYABLE_HPP
#include "Constants.hpp"
#include "Utils.hpp"
namespace ZeroTier {
/**
* Classes inheriting from this base class are safe to abuse in C-like ways.
*
* It also includes some static methods to do this conveniently.
*/
/**
 * Tag base class for types that may safely be treated as raw memory.
 *
 * Deriving from this marks a class as safe for C-style manipulation
 * (memset/memcpy) and enables the static helpers below, whose overload
 * resolution enforces the constraint at compile time.
 */
struct TriviallyCopyable {
  public:
    /**
     * Zero a TriviallyCopyable object given a reference.
     *
     * @tparam T Automatically inferred type of object
     * @param obj Any TriviallyCopyable object
     */
    template <typename T> static ZT_INLINE void memoryZero(T &obj) noexcept
    {
        mustBeTriviallyCopyable(obj);
        Utils::zero<sizeof(T)>(&obj);
    }

    /**
     * Zero a TriviallyCopyable object given a pointer.
     *
     * @tparam T Automatically inferred type of object
     * @param obj Pointer to any TriviallyCopyable object
     */
    template <typename T> static ZT_INLINE void memoryZero(T *obj) noexcept
    {
        mustBeTriviallyCopyable(obj);
        Utils::zero<sizeof(T)>(obj);
    }

  private:
    // These accept only (pointers/references to) TriviallyCopyable descendants,
    // so passing anything else fails to compile.
    static ZT_INLINE void mustBeTriviallyCopyable(const TriviallyCopyable &) noexcept {}
    static ZT_INLINE void mustBeTriviallyCopyable(const TriviallyCopyable *) noexcept {}
};
} // namespace ZeroTier
#endif

View file

@@ -1,360 +0,0 @@
/*
* Copyright (c)2013-2021 ZeroTier, Inc.
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
*/
/****/
#include "TrustStore.hpp"
#include "LZ4.hpp"
namespace ZeroTier {
// Construction and destruction are trivial; all containers default-initialize.
TrustStore::TrustStore() = default;

TrustStore::~TrustStore() = default;
// Look up a certificate entry by its serial (SHA384 hash); returns a nil
// pointer if no such certificate is in the store.
SharedPtr<TrustStore::Entry> TrustStore::get(const H384 &serial) const
{
    RWMutex::RLock l(m_lock);
    const auto found = m_bySerial.find(serial);
    if (found == m_bySerial.end())
        return SharedPtr<TrustStore::Entry>();
    return found->second;
}
// Collect the set of root identities named by locally trusted root-set
// certificates, keeping the newest locator seen for each identity.
Map<Identity, SharedPtr<const Locator>> TrustStore::roots()
{
    RWMutex::RLock l(m_lock);
    Map<Identity, SharedPtr<const Locator>> r;

    // Iterate via m_bySubjectIdentity so only certificates with subject
    // identities are scanned; that map also contains no errored or
    // deprecated certificates.
    for (const auto &bySubject : m_bySubjectIdentity) {
        for (const auto &entry : bySubject.second) {
            // A root set cert must be marked for this use and authorized to
            // influence this node's configuration.
            if (((entry->m_certificate.usageFlags & ZT_CERTIFICATE_USAGE_ZEROTIER_ROOT_SET) == 0) || ((entry->m_localTrust & ZT_CERTIFICATE_LOCAL_TRUST_FLAG_CONFIG) == 0))
                continue;

            // Add every identity in the set; when multiple certs supply a
            // locator for the same identity keep the one with the highest revision.
            for (unsigned int j = 0; j < entry->certificate().subject.identityCount; ++j) {
                const auto id = Identity::from(entry->certificate().subject.identities[j].identity);
                if ((!id) || (!*id)) // sanity check
                    continue;
                SharedPtr<const Locator> &existingLoc = r[*id];
                const auto loc = Locator::from(entry->certificate().subject.identities[j].locator);
                if (loc) {
                    if ((!existingLoc) || (existingLoc->revision() < loc->revision()))
                        existingLoc.set(new Locator(*loc));
                }
            }
        }
    }
    return r;
}
// Return every certificate in the store in ascending serial order,
// optionally including those currently carrying an error code.
Vector<SharedPtr<TrustStore::Entry>> TrustStore::all(const bool includeRejectedCertificates) const
{
    RWMutex::RLock l(m_lock);
    Vector<SharedPtr<Entry>> results;
    results.reserve(m_bySerial.size());
    for (const auto &kv : m_bySerial) {
        if ((includeRejectedCertificates) || (kv.second->error() == ZT_CERTIFICATE_ERROR_NONE))
            results.push_back(kv.second);
    }
    return results;
}
void TrustStore::add(const Certificate &cert, const unsigned int localTrust)
{
RWMutex::Lock l(m_lock);
m_addQueue.push_front(SharedPtr<Entry>(new Entry(this->m_lock, cert, localTrust)));
}
// Enqueue a certificate serial for deletion; the actual removal is
// deferred until the next update().
void TrustStore::erase(const H384 &serial)
{
    RWMutex::Lock l(m_lock);
    m_deleteQueue.push_front(serial);
}
/*
 * Process queued adds/deletes and fully re-validate certificate chains.
 *
 * @param clock Current time in milliseconds since epoch, or -1 to skip time checks
 * @param purge If non-NULL, rejected/deprecated certificates are removed and returned here
 * @return True if the store changed
 *
 * FIX: the loop that repopulates m_bySubjectUniqueId iterated m_bySerial
 * without ever advancing its iterator (no ++c in the for-statement or body),
 * which spins forever on any non-empty store. The iterator is now advanced
 * in the for-statement, matching every other non-erasing scan in this file.
 */
bool TrustStore::update(const int64_t clock, Vector<SharedPtr<Entry>> *const purge)
{
    RWMutex::Lock l(m_lock);

    // Check for certificate time validity status changes. If any of these occur then
    // full re-validation is required.
    bool errorStateModified = false;
    for (Map<H384, SharedPtr<Entry>>::const_iterator c(m_bySerial.begin()); c != m_bySerial.end(); ++c) {
        const bool timeValid = c->second->m_certificate.verifyTimeWindow(clock);
        switch (c->second->m_error) {
            case ZT_CERTIFICATE_ERROR_NONE:
            case ZT_CERTIFICATE_ERROR_INVALID_CHAIN:
                if (!timeValid) {
                    c->second->m_error = ZT_CERTIFICATE_ERROR_OUT_OF_VALID_TIME_WINDOW;
                    errorStateModified = true;
                }
                break;
            case ZT_CERTIFICATE_ERROR_OUT_OF_VALID_TIME_WINDOW:
                if (timeValid) {
                    c->second->m_error = c->second->m_certificate.verify(-1, false);
                    errorStateModified = true;
                }
                break;
            default: break;
        }
    }

    // If there were not any such changes and if the add and delete queues are empty,
    // there is nothing more to be done.
    if ((!errorStateModified) && (m_addQueue.empty()) && (m_deleteQueue.empty()))
        return false;

    // Add new certificates to m_bySerial, which is the master certificate set. They still
    // have yet to have their full certificate chains validated. Full signature checking is
    // performed here.
    while (!m_addQueue.empty()) {
        SharedPtr<Entry> &qi = m_addQueue.front();
        qi->m_error = qi->m_certificate.verify(clock, true);
        m_bySerial[H384(qi->m_certificate.serialNo)].move(qi);
        m_addQueue.pop_front();
    }

    // Delete any certificates enqueued to be deleted.
    while (!m_deleteQueue.empty()) {
        m_bySerial.erase(m_deleteQueue.front());
        m_deleteQueue.pop_front();
    }

    // Reset flags for deprecation and a cert being on a trust path, which are
    // recomputed when chain and subjects are checked below.
    for (Map<H384, SharedPtr<Entry>>::const_iterator c(m_bySerial.begin()); c != m_bySerial.end(); ++c) {
        if (c->second->m_error == ZT_CERTIFICATE_ERROR_NONE) {
            c->second->m_subjectDeprecated = false;
            c->second->m_onTrustPath = false;
        }
    }

    // Validate certificate trust paths.
    {
        Vector<Entry *> visited;
        visited.reserve(8);
        for (Map<H384, SharedPtr<Entry>>::iterator c(m_bySerial.begin()); c != m_bySerial.end(); ++c) {
            if (c->second->m_error == ZT_CERTIFICATE_ERROR_NONE) {
                if (c->second->m_certificate.isSelfSigned()) {
                    // If this is a self-signed certificate it's only valid if it's trusted as a CA.
                    if ((c->second->m_localTrust & ZT_CERTIFICATE_LOCAL_TRUST_FLAG_ROOT_CA) == 0) {
                        c->second->m_error = ZT_CERTIFICATE_ERROR_INVALID_CHAIN;
                    }
                }
                else {
                    if ((!c->second->m_onTrustPath) && ((c->second->m_localTrust & ZT_CERTIFICATE_LOCAL_TRUST_FLAG_ROOT_CA) == 0)) {
                        // Trace the path of each certificate all the way back to a trusted CA.
                        unsigned int pathLength = 0;
                        Map<H384, SharedPtr<Entry>>::const_iterator current(c);
                        visited.clear();
                        for (;;) {
                            if (pathLength <= current->second->m_certificate.maxPathLength) {
                                // Check if this cert isn't a CA or already part of a valid trust path. If so then step
                                // upward toward CA.
                                if (((current->second->m_localTrust & ZT_CERTIFICATE_LOCAL_TRUST_FLAG_ROOT_CA) == 0) && (!current->second->m_onTrustPath)) {
                                    // If the issuer (parent) certificiate is (1) valid, (2) not already visited (to
                                    // prevent loops), and (3) has a public key that matches this cert's issuer public
                                    // key (sanity check), proceed up the certificate graph toward a potential CA.
                                    visited.push_back(current->second.ptr());
                                    const Map<H384, SharedPtr<Entry>>::const_iterator prevChild(current);
                                    current = m_bySerial.find(H384(current->second->m_certificate.issuer));
                                    if ((current != m_bySerial.end()) && (std::find(visited.begin(), visited.end(), current->second.ptr()) == visited.end()) && (current->second->m_error == ZT_CERTIFICATE_ERROR_NONE) && (current->second->m_certificate.publicKeySize == prevChild->second->m_certificate.issuerPublicKeySize)
                                        && (memcmp(current->second->m_certificate.publicKey, prevChild->second->m_certificate.issuerPublicKey, current->second->m_certificate.publicKeySize) == 0)) {
                                        ++pathLength;
                                        continue;
                                    }
                                }
                                else {
                                    // If we've traced this to a root CA, flag its parents as also being on a trust
                                    // path. Then break the loop without setting an error. We don't flag the current
                                    // cert as being on a trust path since no other certificates depend on it.
                                    for (Vector<Entry *>::const_iterator v(visited.begin()); v != visited.end(); ++v) {
                                        if (*v != c->second.ptr())
                                            (*v)->m_onTrustPath = true;
                                    }
                                    break;
                                }
                            }
                            // If we made it here without breaking or continuing, no path to a
                            // CA was found and the certificate's chain is invalid.
                            c->second->m_error = ZT_CERTIFICATE_ERROR_INVALID_CHAIN;
                            break;
                        }
                    }
                }
            }
        }
    }

    // Repopulate mapping of subject unique IDs to their certificates, marking older
    // certificates for the same subject as deprecated. A deprecated certificate is not invalid
    // but will be purged if it is also not part of a trust path. Error certificates are ignored.
    m_bySubjectUniqueId.clear();
    for (Map<H384, SharedPtr<Entry>>::const_iterator c(m_bySerial.begin()); c != m_bySerial.end(); ++c) { // FIX: was missing ++c (infinite loop)
        if (c->second->m_error == ZT_CERTIFICATE_ERROR_NONE) {
            const unsigned int uniqueIdSize = c->second->m_certificate.subject.uniqueIdSize;
            if ((uniqueIdSize > 0) && (uniqueIdSize <= ZT_CERTIFICATE_MAX_PUBLIC_KEY_SIZE)) {
                SharedPtr<Entry> &entry = m_bySubjectUniqueId[Blob<ZT_CERTIFICATE_MAX_PUBLIC_KEY_SIZE>(c->second->m_certificate.subject.uniqueId, uniqueIdSize)];
                if (entry) {
                    // If there's already an entry, see if there's a newer certificate for this subject.
                    if (c->second->m_certificate.subject.timestamp > entry->m_certificate.subject.timestamp) {
                        entry->m_subjectDeprecated = true;
                        entry = c->second;
                    }
                    else if (c->second->m_certificate.subject.timestamp < entry->m_certificate.subject.timestamp) {
                        c->second->m_subjectDeprecated = true;
                    }
                    else {
                        // Equal timestamps should never happen, but handle it anyway by comparing serials.
                        if (memcmp(c->second->m_certificate.serialNo, entry->m_certificate.serialNo, ZT_CERTIFICATE_HASH_SIZE) > 0) {
                            entry->m_subjectDeprecated = true;
                            entry = c->second;
                        }
                        else {
                            c->second->m_subjectDeprecated = true;
                        }
                    }
                }
                else {
                    entry = c->second;
                }
            }
        }
    }

    // Populate mapping of identities to certificates whose subjects reference them, ignoring
    // error or deprecated certificates.
    m_bySubjectIdentity.clear();
    for (Map<H384, SharedPtr<Entry>>::const_iterator c(m_bySerial.begin()); c != m_bySerial.end(); ++c) {
        if ((c->second->m_error == ZT_CERTIFICATE_ERROR_NONE) && (!c->second->m_subjectDeprecated)) {
            for (unsigned int i = 0; i < c->second->m_certificate.subject.identityCount; ++i) {
                auto id = Identity::from(c->second->m_certificate.subject.identities[i].identity);
                if ((id) && (*id)) // sanity check
                    m_bySubjectIdentity[id->fingerprint()].push_back(c->second);
            }
        }
    }

    // If purge is set, erase and return error and deprecated certs (that are not on a trust path).
    if (purge) {
        for (Map<H384, SharedPtr<Entry>>::const_iterator c(m_bySerial.begin()); c != m_bySerial.end();) {
            if ((c->second->error() != ZT_CERTIFICATE_ERROR_NONE) || ((c->second->m_subjectDeprecated) && (!c->second->m_onTrustPath))) {
                purge->push_back(c->second);
                m_bySerial.erase(c++);
            }
            else {
                ++c;
            }
        }
    }

    return true;
}
/*
 * Serialize all certificates and their local trust flags.
 *
 * Output layout: <uncompressedSize[4]> <fnv1a32 of uncompressed data[4]>
 * followed by LZ4-compressed data. The uncompressed stream is a version
 * byte then <size[2]> <certificate[...]> <trust[2]> tuples terminated by
 * a zero size. Returns an empty vector on (unexpected) compression failure.
 */
Vector<uint8_t> TrustStore::save() const
{
    Vector<uint8_t> comp;
    int compSize;
    {
        RWMutex::RLock l(m_lock);

        Vector<uint8_t> b;
        b.reserve(4096);

        // A version byte.
        b.push_back(0);

        // <size[2]> <certificate[...]> <trust[2]> tuples terminated by a 0 size.
        for (Map<H384, SharedPtr<Entry>>::const_iterator c(m_bySerial.begin()); c != m_bySerial.end(); ++c) {
            const Vector<uint8_t> cdata(c->second->certificate().encode());
            const unsigned long size = (uint32_t)cdata.size();
            // Oversized or empty encodings are silently skipped (size field is 16 bits).
            if ((size > 0) && (size <= 0xffff)) {
                b.push_back((uint8_t)(size >> 8U));
                b.push_back((uint8_t)size);
                b.insert(b.end(), cdata.begin(), cdata.end());
                const uint32_t localTrust = (uint32_t)c->second->localTrust();
                b.push_back((uint8_t)(localTrust >> 8U));
                b.push_back((uint8_t)localTrust);
            }
        }
        // Terminating zero size.
        b.push_back(0);
        b.push_back(0);

        // Compress into comp after an 8 byte header (size + checksum).
        comp.resize((unsigned long)LZ4_COMPRESSBOUND(b.size()) + 8);
        compSize = LZ4_compress_fast(reinterpret_cast<const char *>(b.data()), reinterpret_cast<char *>(comp.data() + 8), (int)b.size(), (int)(comp.size() - 8));
        if (unlikely(compSize <= 0)) // shouldn't be possible
            return Vector<uint8_t>();

        const uint32_t uncompSize = (uint32_t)b.size();
        Utils::storeBigEndian(comp.data(), uncompSize);
        Utils::storeBigEndian(comp.data() + 4, Utils::fnv1a32(b.data(), (unsigned int)uncompSize));
        compSize += 8;
    }

    comp.resize((unsigned long)compSize);
    comp.shrink_to_fit();

    return comp;
}
/*
 * Decode a blob produced by save() and enqueue its certificates via add().
 *
 * @param data Serialized trust store (header + LZ4 data, see save())
 * @return Number of certificates successfully decoded, or -1 if the input is invalid
 *
 * FIX: previously the read cursor was only advanced past a certificate's
 * bytes and trust flags when decode() succeeded, so a single undecodable
 * certificate desynchronized the stream and caused all subsequent tuples to
 * be misread. The cursor is now always advanced by the declared size.
 */
int TrustStore::load(const Vector<uint8_t> &data)
{
    if (data.size() < 8)
        return -1;

    // Header sanity check: uncompressed size must be non-zero and plausible.
    const unsigned int uncompSize = Utils::loadBigEndian<uint32_t>(data.data());
    if ((uncompSize == 0) || (uncompSize > (unsigned int)(data.size() * 128)))
        return -1;

    Vector<uint8_t> uncomp;
    uncomp.resize(uncompSize);
    if (LZ4_decompress_safe(reinterpret_cast<const char *>(data.data() + 8), reinterpret_cast<char *>(uncomp.data()), (int)(data.size() - 8), (int)uncompSize) != (int)uncompSize)
        return -1;

    // Verify the checksum of the decompressed stream.
    const uint8_t *b = uncomp.data();
    if (Utils::fnv1a32(b, (unsigned int)uncompSize) != Utils::loadBigEndian<uint32_t>(data.data() + 4))
        return -1;
    const uint8_t *const eof = b + uncompSize;

    if (*(b++) != 0) // unrecognized version
        return -1;

    int readCount = 0;
    for (;;) {
        if ((b + 2) > eof)
            break;
        const uint32_t certDataSize = Utils::loadBigEndian<uint16_t>(b);
        b += 2;
        if (certDataSize == 0) // zero size terminates the stream
            break;
        if ((b + certDataSize + 2) > eof) // certificate length + 2 bytes for trust flags
            break;

        Certificate c;
        const bool decoded = c.decode(b, (unsigned int)certDataSize);
        b += certDataSize; // always consume the certificate bytes, even on decode failure
        if (decoded) {
            this->add(c, Utils::loadBigEndian<uint16_t>(b));
            ++readCount;
        }
        b += 2; // always consume the trust flags so a bad cert cannot desync the stream
    }

    return readCount;
}
} // namespace ZeroTier

View file

@@ -1,207 +0,0 @@
/*
* Copyright (c)2013-2021 ZeroTier, Inc.
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
*/
/****/
#ifndef ZT_TRUSTSTORE_HPP
#define ZT_TRUSTSTORE_HPP
#include "Certificate.hpp"
#include "Constants.hpp"
#include "Containers.hpp"
#include "Context.hpp"
#include "Fingerprint.hpp"
#include "Identity.hpp"
#include "Mutex.hpp"
#include "Peer.hpp"
#include "SHA512.hpp"
#include "SharedPtr.hpp"
namespace ZeroTier {
/**
* Certificate store and chain validator.
*
* WARNING: SharedPtr<Entry> entries returned from a trust store are valid
* only as long as the trust store exists. The trust store is a core object
* that lives as long as a Node, so this isn't an issue in the core, but it
* should be remembered when testing.
*
* This is because each Entry includes a reference to its parent's mutex and
* is synchronized by this mutex so its fields are safe to access while the
* parent trust store is being modified or synchronized.
*
* This also means entries can't be moved between TrustStore instances,
* hence there are no methods for doing that. There's only one instance in a
* node anyway.
*/
class TrustStore {
  public:
    /**
     * An entry in the node certificate trust store
     */
    class Entry {
        friend class SharedPtr<TrustStore::Entry>;
        friend class SharedPtr<const TrustStore::Entry>;
        friend class TrustStore;

      public:
        /**
         * @return Reference to held certificate
         */
        ZT_INLINE const Certificate &certificate() const noexcept { return m_certificate; }

        /**
         * Get the local trust for this certificate
         *
         * This value may be changed dynamically by calls to update().
         *
         * @return Local trust bit mask
         */
        ZT_INLINE unsigned int localTrust() const noexcept
        {
            RWMutex::RLock l(m_lock);
            return m_localTrust;
        }

        /**
         * Change the local trust of this entry
         *
         * @param lt New local trust bit mask
         */
        ZT_INLINE void setLocalTrust(const unsigned int lt) noexcept
        {
            RWMutex::Lock l(m_lock);
            m_localTrust = lt;
        }

        /**
         * Get the error code for this certificate
         *
         * @return Error or ZT_CERTIFICATE_ERROR_NONE if none
         */
        ZT_INLINE ZT_CertificateError error() const noexcept
        {
            RWMutex::RLock l(m_lock);
            return m_error;
        }

      private:
        // Assignment is a deliberate no-op; entries are managed only by TrustStore.
        Entry &operator=(const Entry &) { return *this; }

        // Entries share the parent trust store's lock (see class comment above).
        ZT_INLINE Entry(RWMutex &l, const Certificate &cert, const unsigned int lt) noexcept : __refCount(0), m_lock(l), m_certificate(cert), m_localTrust(lt), m_error(ZT_CERTIFICATE_ERROR_NONE), m_subjectDeprecated(false), m_onTrustPath(false) {}

        std::atomic<int> __refCount;     // reference count for SharedPtr
        RWMutex &m_lock;                 // reference to parent TrustStore's lock
        const Certificate m_certificate; // the held certificate (immutable)
        unsigned int m_localTrust;       // ZT_CERTIFICATE_LOCAL_TRUST_FLAG_* bit mask
        ZT_CertificateError m_error;     // current validation status, recomputed by update()
        bool m_subjectDeprecated;        // a newer cert exists for this subject
        bool m_onTrustPath;              // another valid cert's chain passes through this one
    };

    TrustStore();
    ~TrustStore();

    /**
     * Get certificate by certificate serial number
     *
     * Note that the error code should be checked. The certificate may be
     * rejected and may still be in the store unless the store has been
     * purged.
     *
     * @param serial SHA384 hash of certificate
     * @return Entry or empty/nil if not found
     */
    SharedPtr<Entry> get(const H384 &serial) const;

    /**
     * Get roots specified by root set certificates in the local store.
     *
     * If more than one certificate locally trusted as a root set specifies
     * the root, it will be returned once (as per Map behavior) but the latest
     * locator will be returned from among those available.
     *
     * @return Roots and the latest locator specified for each (if any)
     */
    Map<Identity, SharedPtr<const Locator>> roots();

    /**
     * @param includeRejectedCertificates If true, also include certificates with error codes
     * @return All certificates in ascending sort order by serial
     */
    Vector<SharedPtr<Entry>> all(bool includeRejectedCertificates) const;

    /**
     * Add a certificate
     *
     * A copy is made so it's fine if the original is freed after this call. If
     * the certificate already exists its local trust flags are updated.
     *
     * IMPORTANT: The caller MUST also call update() after calling add() one or
     * more times to actually add and revalidate certificates and their signature
     * chains.
     *
     * @param cert Certificate to add
     */
    void add(const Certificate &cert, unsigned int localTrust);

    /**
     * Queue a certificate to be deleted
     *
     * Actual delete does not happen until the next update().
     *
     * @param serial Serial of certificate to delete
     */
    void erase(const H384 &serial);

    /**
     * Validate all certificates and their certificate chains
     *
     * This also processes any certificates added with add() since the last call to update().
     *
     * @param clock Current time in milliseconds since epoch, or -1 to not check times on this pass
     * @param purge If non-NULL, purge rejected certificates and return them in this vector (vector should be empty)
     * @return True if there were changes
     */
    bool update(int64_t clock, Vector<SharedPtr<Entry>> *purge);

    /**
     * Create a compressed binary version of certificates and their local trust
     *
     * @return Binary compressed certificates and local trust info
     */
    Vector<uint8_t> save() const;

    /**
     * Decode a saved trust store
     *
     * Decoded certificates are added to the add queue, so update() must be
     * called after this to actually apply them.
     *
     * @param data Data to decode
     * @return Number of certificates or -1 if input is invalid
     */
    int load(const Vector<uint8_t> &data);

  private:
    Map<H384, SharedPtr<Entry>> m_bySerial;                                               // all certificates
    Map<Blob<ZT_CERTIFICATE_MAX_PUBLIC_KEY_SIZE>, SharedPtr<Entry>> m_bySubjectUniqueId;  // non-rejected certificates only
    Map<Fingerprint, Vector<SharedPtr<Entry>>> m_bySubjectIdentity;                       // non-rejected certificates only
    ForwardList<SharedPtr<Entry>> m_addQueue;                                             // certificates pending insertion by update()
    ForwardList<H384> m_deleteQueue;                                                      // serials pending removal by update()
    RWMutex m_lock;                                                                       // guards all of the above; shared with Entry instances
};
} // namespace ZeroTier
#endif

View file

@@ -1,549 +0,0 @@
/*
* Copyright (c)2013-2021 ZeroTier, Inc.
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
*/
/****/
#include "Utils.hpp"
#include "AES.hpp"
#include "Mutex.hpp"
#include "SHA512.hpp"
#include <time.h>
#ifdef __UNIX_LIKE__
#include <fcntl.h>
#include <sys/uio.h>
#include <unistd.h>
#endif
#ifdef __WINDOWS__
#include <intrin.h>
#include <wincrypt.h>
#endif
#ifdef ZT_ARCH_ARM_HAS_NEON
#ifdef __LINUX__
#include <asm/hwcap.h>
#include <sys/auxv.h>
#endif
#if defined(__FreeBSD__)
#include <elf.h>
#include <sys/auxv.h>
// FreeBSD has no getauxval(); emulate it with elf_aux_info() so the
// HWCAP-based CPU feature detection below works unchanged.
static inline long getauxval(int caps)
{
    long hwcaps = 0;
    elf_aux_info(caps, &hwcaps, sizeof(hwcaps));
    return hwcaps;
}
#endif
// If these are not even defined, obviously they are not supported.
#ifndef HWCAP_AES
#define HWCAP_AES 0
#endif
#ifndef HWCAP_CRC32
#define HWCAP_CRC32 0
#endif
#ifndef HWCAP_PMULL
#define HWCAP_PMULL 0
#endif
#ifndef HWCAP_SHA1
#define HWCAP_SHA1 0
#endif
#ifndef HWCAP_SHA2
#define HWCAP_SHA2 0
#endif
#endif /* ZT_ARCH_ARM_HAS_NEON */
namespace ZeroTier {
namespace Utils {
#ifdef ZT_ARCH_ARM_HAS_NEON
// Detect ARM crypto/CRC instruction set extensions at startup.
// On Apple ARM64 these are unconditionally assumed present; on Linux
// (and FreeBSD via the getauxval shim above) they are read from the
// hardware capability auxiliary vector.
// NOTE(review): on platforms that are neither Apple nor Linux the fields
// are left untouched here — confirm the struct zero-initializes them.
ARMCapabilities::ARMCapabilities() noexcept
{
#ifdef __APPLE__
    this->aes = true;
    this->crc32 = true;
    this->pmull = true;
    this->sha1 = true;
    this->sha2 = true;
#else
#ifdef __LINUX__

#ifdef HWCAP2_AES
    // On 32-bit ARM builds the crypto extension bits are reported in AT_HWCAP2.
    if (sizeof(void *) == 4) {
        const long hwcaps2 = getauxval(AT_HWCAP2);
        this->aes = (hwcaps2 & HWCAP2_AES) != 0;
        this->crc32 = (hwcaps2 & HWCAP2_CRC32) != 0;
        this->pmull = (hwcaps2 & HWCAP2_PMULL) != 0;
        this->sha1 = (hwcaps2 & HWCAP2_SHA1) != 0;
        this->sha2 = (hwcaps2 & HWCAP2_SHA2) != 0;
    }
    else {
#endif
        // 64-bit builds report these bits in AT_HWCAP.
        const long hwcaps = getauxval(AT_HWCAP);
        this->aes = (hwcaps & HWCAP_AES) != 0;
        this->crc32 = (hwcaps & HWCAP_CRC32) != 0;
        this->pmull = (hwcaps & HWCAP_PMULL) != 0;
        this->sha1 = (hwcaps & HWCAP_SHA1) != 0;
        this->sha2 = (hwcaps & HWCAP_SHA2) != 0;
#ifdef HWCAP2_AES
    }
#endif

#endif
#endif
}

const ARMCapabilities ARMCAP; // process-wide singleton, initialized at startup
#endif /* ZT_ARCH_ARM_HAS_NEON */
#ifdef ZT_ARCH_X64
// Query x86 CPUID leaves 1 and 7 once at startup to detect the instruction
// set extensions used to select optimized crypto code paths.
//
// FIX: 'avx' previously tested CPUID.1:ECX bit 25, which is the AES-NI
// feature bit; AVX is CPUID.1:ECX bit 28 (Intel SDM). The old code made
// 'avx' (and therefore vaes/vpclmulqdq/avx2/avx512f gating) track AES-NI
// presence instead of actual AVX support.
CPUIDRegisters::CPUIDRegisters() noexcept
{
    uint32_t eax, ebx, ecx, edx;

#ifdef __WINDOWS__
    int regs[4];
    __cpuid(regs, 1);
    eax = (uint32_t)regs[0];
    ebx = (uint32_t)regs[1];
    ecx = (uint32_t)regs[2];
    edx = (uint32_t)regs[3];
#else
    __asm__ __volatile__("cpuid" : "=a"(eax), "=b"(ebx), "=c"(ecx), "=d"(edx) : "a"(1), "c"(0));
#endif

    // Leaf 1 ECX: bit 30 = RDRAND, bit 25 = AES-NI, bit 19 = SSE4.1,
    // bit 1 = PCLMULQDQ, bit 28 = AVX. The AES fast path needs all of
    // AES-NI, SSE4.1 and PCLMULQDQ.
    rdrand = ((ecx & (1U << 30U)) != 0);
    aes = (((ecx & (1U << 25U)) != 0) && ((ecx & (1U << 19U)) != 0) && ((ecx & (1U << 1U)) != 0));
    avx = ((ecx & (1U << 28U)) != 0); // FIX: was (1U << 25U), the AES-NI bit

#ifdef __WINDOWS__
    __cpuid(regs, 7);
    eax = (uint32_t)regs[0];
    ebx = (uint32_t)regs[1];
    ecx = (uint32_t)regs[2];
    edx = (uint32_t)regs[3];
#else
    __asm__ __volatile__("cpuid" : "=a"(eax), "=b"(ebx), "=c"(ecx), "=d"(edx) : "a"(7), "c"(0));
#endif

    // Leaf 7 (subleaf 0): EBX bit 5 = AVX2, bit 16 = AVX512F, bit 29 = SHA;
    // ECX bit 9 = VAES, bit 10 = VPCLMULQDQ; EDX bit 4 = FSRM.
    vaes = aes && avx && ((ecx & (1U << 9U)) != 0);
    vpclmulqdq = aes && avx && ((ecx & (1U << 10U)) != 0);
    avx2 = avx && ((ebx & (1U << 5U)) != 0);
    avx512f = avx && ((ebx & (1U << 16U)) != 0);
    sha = ((ebx & (1U << 29U)) != 0);
    fsrm = ((edx & (1U << 4U)) != 0);
}

const CPUIDRegisters CPUID; // process-wide singleton, initialized at startup
#endif /* ZT_ARCH_X64 */
// Shared immutable globals.
const std::bad_alloc BadAllocException;
const std::out_of_range OutOfRangeException("access out of range");
const uint64_t ZERO256[4] = { 0, 0, 0, 0 }; // four zero 64-bit words (256 zero bits)
const char HEXCHARS[16] = { '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f' }; // lower-case hex digit lookup table
const uint64_t s_mapNonce = getSecureRandomU64(); // NOTE(review): presumably a per-process salt for map hashing — confirm usage
bool secureEq(const void *const a, const void *const b, const unsigned int len) noexcept
{
uint8_t diff = 0;
for (unsigned int i = 0; i < len; ++i)
diff |= ((reinterpret_cast<const uint8_t *>(a))[i] ^ (reinterpret_cast<const uint8_t *>(b))[i]);
return (diff == 0);
}
// Zero sensitive memory in a way the optimizer cannot elide.
// NOTE(review): reads ptr[0] even when len == 0 — confirm callers never pass
// a zero length.
void burn(volatile void *const ptr, const unsigned int len)
{
    static volatile uintptr_t foo = 0;
    Utils::zero((void *)ptr, len);
    // Force compiler not to optimize this function out by taking a volatile
    // parameter and also updating a volatile variable.
    foo += (uintptr_t)len ^ (uintptr_t) reinterpret_cast<volatile uint8_t *>(ptr)[0];
}
static unsigned long s_decimalRecursive(unsigned long n, char *s)
{
if (n == 0)
return 0;
unsigned long pos = s_decimalRecursive(n / 10, s);
if (pos >= 22) // sanity check,should be impossible
pos = 22;
s[pos] = (char)('0' + (n % 10));
return pos + 1;
}
char *decimal(unsigned long n, char s[24]) noexcept
{
if (n == 0) {
s[0] = '0';
s[1] = (char)0;
return s;
}
s[s_decimalRecursive(n, s)] = (char)0;
return s;
}
/**
 * Render a 64-bit integer as lower-case hex with no leading zeroes.
 *
 * @param i Value to render
 * @param buf Buffer of at least 17 bytes (16 digits + terminator)
 * @return Pointer to buf (null-terminated string, "0" for zero)
 */
char *hex(uint64_t i, char buf[17]) noexcept
{
    const char *const digits = "0123456789abcdef";
    char *out = buf;
    bool leading = true;
    for (int shift = 60; shift >= 0; shift -= 4) {
        const unsigned int nyb = (unsigned int)(i >> (unsigned int)shift) & 0xfU;
        if ((nyb != 0) || (!leading)) {
            leading = false;
            *(out++) = digits[nyb];
        }
    }
    if (leading) // value was zero: emit a single '0'
        *(out++) = '0';
    *out = (char)0;
    return buf;
}
/**
 * Decode up to 16 hex digits from a C string into an unsigned 64-bit integer.
 *
 * Note: characters that are not hex digits are not skipped; they contribute a
 * zero nybble (matching the original behavior).
 *
 * @param s Null-terminated string (may be NULL, in which case 0 is returned)
 * @return Decoded integer
 */
uint64_t unhex(const char *s) noexcept
{
    uint64_t v = 0;
    if (!s)
        return v;
    for (int digits = 0; digits < 16; ++digits) {
        const char ch = *(s++);
        if (!ch)
            break;
        uint8_t nyb = 0;
        if ((ch >= '0') && (ch <= '9'))
            nyb = (uint8_t)(ch - '0');
        else if ((ch >= 'a') && (ch <= 'f'))
            nyb = (uint8_t)((ch - 'a') + 10);
        else if ((ch >= 'A') && (ch <= 'F'))
            nyb = (uint8_t)((ch - 'A') + 10);
        v = (v << 4U) | (uint64_t)nyb;
    }
    return v;
}
/**
 * Encode a byte array as lower-case hex.
 *
 * @param d Bytes to encode
 * @param l Number of bytes
 * @param s Output buffer, must be at least (l*2)+1 bytes
 * @return Pointer to the filled, null-terminated buffer
 */
char *hex(const void *d, unsigned int l, char *s) noexcept
{
    static const char *const kHex = "0123456789abcdef"; // same digits as HEXCHARS
    const uint8_t *const in = reinterpret_cast<const uint8_t *>(d);
    char *const start = s;
    for (unsigned int i = 0; i < l; ++i) {
        const unsigned int byte = in[i];
        *(s++) = kHex[byte >> 4U];
        *(s++) = kHex[byte & 0xfU];
    }
    *s = (char)0;
    return start;
}
/**
 * Decode a hex string into a byte buffer.
 *
 * Decoding stops at a terminating NUL, at hlen input characters, or when the
 * output buffer is full. A trailing lone nybble is dropped. Non-hex input
 * characters contribute zero nybbles (matching the original behavior).
 *
 * @param h Hex string
 * @param hlen Maximum number of input characters to examine
 * @param buf Output buffer
 * @param buflen Output buffer size in bytes
 * @return Number of bytes written
 */
unsigned int unhex(const char *h, unsigned int hlen, void *buf, unsigned int buflen) noexcept
{
    // Decode a single hex digit; anything else yields a zero nybble.
    auto nyb = [](const uint8_t c) noexcept -> uint8_t {
        if ((c >= '0') && (c <= '9'))
            return (uint8_t)(c - '0');
        if ((c >= 'a') && (c <= 'f'))
            return (uint8_t)((c - 'a') + 10);
        if ((c >= 'A') && (c <= 'F'))
            return (uint8_t)((c - 'A') + 10);
        return 0;
    };
    const char *const end = h + hlen;
    uint8_t *const out = reinterpret_cast<uint8_t *>(buf);
    unsigned int written = 0;
    while (written < buflen) {
        if (h == end)
            break;
        const uint8_t hi = *(reinterpret_cast<const uint8_t *>(h++));
        if (!hi)
            break;
        if (h == end)
            break; // odd digit count: drop the trailing nybble
        const uint8_t lo = *(reinterpret_cast<const uint8_t *>(h++));
        if (!lo)
            break;
        out[written++] = (uint8_t)((nyb(hi) << 4U) | nyb(lo));
    }
    return written;
}
// Size of the CSPRNG state in 64-bit words (512 bytes total).
#define ZT_GETSECURERANDOM_STATE_SIZE 64
// Number of output bytes between state re-seeds/re-hashes.
#define ZT_GETSECURERANDOM_ITERATIONS_PER_GENERATOR 1048576
#if defined(__GNUC__) && defined(ZT_ARCH_X64)
__attribute__((__target__("sse,sse2,rdrnd")))
#endif
// Fill 'buf' with 'bytes' cryptographically secure random bytes.
// The generator is AES in counter mode, keyed from a SHA-512 hash of a state
// that is seeded from the OS entropy source (WinCrypt or /dev/urandom) and,
// when available, hardware RDRAND. A global mutex makes this thread-safe.
void getSecureRandom(void *const buf, unsigned int bytes) noexcept
{
    static Mutex globalLock;
    static bool initialized = false;
    static uint64_t randomState[ZT_GETSECURERANDOM_STATE_SIZE];
    static unsigned int randomByteCounter = ZT_GETSECURERANDOM_ITERATIONS_PER_GENERATOR; // init on first run
    static AES randomGen;
    Mutex::Lock gl(globalLock);
    // Re-initialize the PRNG every ZT_GETSECURERANDOM_ITERATIONS_PER_GENERATOR bytes. Note that
    // if 'bytes' is larger than ZT_GETSECURERANDOM_ITERATIONS_PER_GENERATOR we can generate more
    // than this, but this isn't an issue. ZT_GETSECURERANDOM_ITERATIONS_PER_GENERATOR could be
    // much larger if we wanted and this would still be safe.
    randomByteCounter += bytes;
    if (unlikely(randomByteCounter >= ZT_GETSECURERANDOM_ITERATIONS_PER_GENERATOR)) {
        randomByteCounter = 0;
        if (unlikely(!initialized)) {
            // First call in this process: gather the initial entropy pool.
            // Any failure to obtain OS entropy is fatal by design.
            initialized = true;
            Utils::zero<sizeof(randomState)>(randomState);
#ifdef __WINDOWS__
            HCRYPTPROV cryptProvider = NULL;
            if (!CryptAcquireContextA(&cryptProvider, NULL, NULL, PROV_RSA_FULL, CRYPT_VERIFYCONTEXT | CRYPT_SILENT)) {
                fprintf(stderr, "FATAL: Utils::getSecureRandom() unable to obtain WinCrypt context!\r\n");
                exit(1);
            }
            if (!CryptGenRandom(cryptProvider, (DWORD)sizeof(randomState), (BYTE *)randomState)) {
                fprintf(stderr, "FATAL: Utils::getSecureRandom() CryptGenRandom failed!\r\n");
                exit(1);
            }
            CryptReleaseContext(cryptProvider, 0);
#else
            int devURandomFd = ::open("/dev/urandom", O_RDONLY);
            if (devURandomFd < 0) {
                fprintf(stderr, "FATAL: Utils::getSecureRandom() unable to open /dev/urandom\n");
                exit(1);
            }
            if ((long)::read(devURandomFd, randomState, sizeof(randomState)) != (long)sizeof(randomState)) {
                ::close(devURandomFd);
                fprintf(stderr, "FATAL: Utils::getSecureRandom() unable to read from /dev/urandom\n");
                exit(1);
            }
            close(devURandomFd);
#endif
#ifdef __UNIX_LIKE__
            // Mix in process identifiers so forked children diverge.
            randomState[0] += (uint64_t)getpid();
            randomState[1] += (uint64_t)getppid();
#endif
#ifdef ZT_ARCH_X64
            // XOR in hardware randomness when RDRAND is available; XOR means
            // a malfunctioning RDRAND cannot reduce existing entropy.
            if (CPUID.rdrand) {
                uint64_t tmp = 0;
                for (unsigned long i = 0; i < ZT_GETSECURERANDOM_STATE_SIZE; ++i) {
                    _rdrand64_step((unsigned long long *)&tmp);
                    randomState[i] ^= tmp;
                }
            }
#endif
        }
        // Initialize or re-initialize generator by hashing the full state,
        // replacing the first 64 bytes with this hash, and then re-initializing
        // AES with the first 32 bytes.
        randomState[0] += (uint64_t)time(nullptr);
        SHA512(randomState, randomState, sizeof(randomState));
        randomGen.init(randomState);
    }
    // Generate random bytes using AES and bytes 32-48 of randomState as an in-place
    // AES-CTR counter. Counter can be machine endian; we don't care about portability
    // for a random generator.
    uint64_t *const ctr = randomState + 4;
    uint8_t *out = reinterpret_cast<uint8_t *>(buf);
    while (bytes >= 16) {
        ++*ctr;
        randomGen.encrypt(ctr, out);
        out += 16;
        bytes -= 16;
    }
    if (bytes > 0) {
        // Partial trailing block: encrypt into a scratch buffer and copy out.
        uint8_t tmp[16];
        ++*ctr;
        randomGen.encrypt(ctr, tmp);
        for (unsigned int i = 0; i < bytes; ++i)
            out[i] = tmp[i];
        Utils::burn(tmp, sizeof(tmp)); // don't leave used cryptographic randomness lying around!
    }
}
/**
 * Convenience wrapper: one secure random 64-bit integer.
 *
 * @return Secure random 64-bit integer
 */
uint64_t getSecureRandomU64() noexcept
{
    uint64_t out = 0;
    getSecureRandom(&out, sizeof(out));
    return out;
}
/**
 * Encode bytes as unpadded lower-case base32.
 *
 * @param data Bytes to encode
 * @param length Number of input bytes (must be 0..2^28)
 * @param result Output buffer (null-terminated on success)
 * @param bufSize Output buffer size
 * @return Number of characters written, or -1 on bad length / buffer overflow
 */
int b32e(const uint8_t *data, int length, char *result, int bufSize) noexcept
{
    // Reject negative or absurdly large inputs up front.
    if ((length < 0) || (length > (1 << 28U))) {
        result[0] = (char)0;
        return -1;
    }
    static const char *const kAlphabet = "abcdefghijklmnopqrstuvwxyz234567";
    int written = 0;
    if (length > 0) {
        int acc = data[0];  // bit accumulator
        int nextIn = 1;     // index of the next input byte
        int accBits = 8;    // bits currently held in the accumulator
        while ((written < bufSize) && ((accBits > 0) || (nextIn < length))) {
            if (accBits < 5) {
                if (nextIn < length) {
                    // Pull the next input byte into the accumulator.
                    acc = (acc << 8U) | (data[nextIn++] & 0xffU);
                    accBits += 8;
                }
                else {
                    // Final group: pad with zero bits up to a full 5-bit symbol.
                    acc <<= (5 - accBits);
                    accBits = 5;
                }
            }
            result[written++] = kAlphabet[0x1f & (acc >> (unsigned int)(accBits - 5))];
            accBits -= 5;
        }
    }
    if (written < bufSize) {
        result[written] = (char)0;
        return written;
    }
    // No room for the terminator: signal failure.
    result[0] = (char)0;
    return -1;
}
/**
 * Decode a base32 string (case-insensitive).
 *
 * Whitespace and the separators '-' and '.' are skipped. The commonly
 * confused characters 0/1/8 are treated as O/L/B. Any other non-base32
 * character aborts with -1.
 *
 * @param encoded Null-terminated base32 string
 * @param result Output buffer
 * @param bufSize Output buffer size
 * @return Number of bytes written, or -1 on an invalid character
 */
int b32d(const char *encoded, uint8_t *result, int bufSize) noexcept
{
    int acc = 0;      // bit accumulator
    int accBits = 0;  // bits currently held in the accumulator
    int written = 0;
    for (const uint8_t *in = (const uint8_t *)encoded; (written < bufSize) && (*in != 0); ++in) {
        uint8_t c = *in;
        // Skip whitespace and common separators.
        if ((c == ' ') || (c == '\t') || (c == '\r') || (c == '\n') || (c == '-') || (c == '.'))
            continue;
        acc <<= 5;
        // Map look-alike digits onto the letters they resemble.
        switch (c) {
            case '0': c = 'O'; break;
            case '1': c = 'L'; break;
            case '8': c = 'B'; break;
            default: break;
        }
        if (((c >= 'A') && (c <= 'Z')) || ((c >= 'a') && (c <= 'z')))
            c = (c & 0x1f) - 1;       // A/a..Z/z -> 0..25
        else if ((c >= '2') && (c <= '7'))
            c -= '2' - 26;            // 2..7 -> 26..31
        else
            return -1;                // invalid character
        acc |= c;
        accBits += 5;
        if (accBits >= 8) {
            result[written++] = (uint8_t)(acc >> (accBits - 8));
            accBits -= 8;
        }
    }
    if (written < bufSize)
        result[written] = (uint8_t)0;
    return written;
}
/**
 * Fast non-cryptographic PRNG (xoshiro256**), seeded once from the secure RNG.
 *
 * See https://en.wikipedia.org/wiki/Xorshift#xoshiro256**
 *
 * @return Pseudo-random 64-bit integer (NOT for cryptographic use)
 */
uint64_t random() noexcept
{
    static volatile uint64_t s_s0 = getSecureRandomU64();
    static volatile uint64_t s_s1 = getSecureRandomU64();
    static volatile uint64_t s_s2 = getSecureRandomU64();
    static volatile uint64_t s_s3 = getSecureRandomU64();
    // Work on local copies, then publish the advanced state.
    uint64_t w = s_s0;
    uint64_t x = s_s1;
    uint64_t y = s_s2;
    uint64_t z = s_s3;
    const uint64_t x5 = x * 5ULL;
    const uint64_t out = ((x5 << 7U) | (x5 >> 57U)) * 9ULL; // rotl(x*5, 7) * 9
    const uint64_t t = x << 17U;
    y ^= w;
    z ^= x;
    x ^= y;
    w ^= z;
    y ^= t;
    z = ((z << 45U) | (z >> 19U)); // rotl(z, 45)
    s_s0 = w;
    s_s1 = x;
    s_s2 = y;
    s_s3 = z;
    return out;
}
/**
 * Safe C string copy that ALWAYS null-terminates the destination.
 *
 * @param dest Destination buffer (must not be NULL)
 * @param len Size of dest[] (len == 0 is invalid and returns false)
 * @param src Source string (NULL yields an empty string and true)
 * @return True on success, false on truncation (dest is still terminated)
 */
bool scopy(char *const dest, const unsigned int len, const char *const src) noexcept
{
    if ((len == 0) || (dest == nullptr))
        return false;
    if (src == nullptr) {
        dest[0] = (char)0;
        return true;
    }
    for (unsigned int i = 0; i < len; ++i) {
        if ((dest[i] = src[i]) == (char)0)
            return true;
    }
    // Source longer than dest: truncate and report overflow.
    dest[len - 1] = (char)0;
    return false;
}
/**
 * 32-bit FNV-1a checksum (offset basis 0x811c9dc5, prime 0x01000193).
 *
 * See: http://www.isthe.com/chongo/tech/comp/fnv/
 *
 * @param data Data to checksum
 * @param len Length of data in bytes
 * @return FNV-1a hash of the input
 */
uint32_t fnv1a32(const void *const restrict data, const unsigned int len) noexcept
{
    const uint8_t *const bytes = reinterpret_cast<const uint8_t *>(data);
    uint32_t hash = 0x811c9dc5; // FNV offset basis
    for (unsigned int i = 0; i < len; ++i) {
        hash ^= (uint32_t)bytes[i];
        hash *= 0x01000193; // FNV prime
    }
    return hash;
}
} // namespace Utils
} // namespace ZeroTier

View file

@ -1,680 +0,0 @@
/*
* Copyright (c)2013-2021 ZeroTier, Inc.
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
*/
/****/
#ifndef ZT_UTILS_HPP
#define ZT_UTILS_HPP
#include "Constants.hpp"
#include <algorithm>
#include <memory>
#include <stdarg.h>
#include <stddef.h>
#include <stdexcept>
#include <utility>
namespace ZeroTier {
namespace Utils {
#ifndef __WINDOWS__
#include <sys/mman.h>
#endif
// Macros to convert endian-ness at compile time for constants.
#if __BYTE_ORDER == __LITTLE_ENDIAN
#define ZT_CONST_TO_BE_UINT16(x) ((uint16_t)((uint16_t)((uint16_t)(x) << 8U) | (uint16_t)((uint16_t)(x) >> 8U)))
#define ZT_CONST_TO_BE_UINT64(x) \
((((uint64_t)(x)&0x00000000000000ffULL) << 56U) | (((uint64_t)(x)&0x000000000000ff00ULL) << 40U) | (((uint64_t)(x)&0x0000000000ff0000ULL) << 24U) | (((uint64_t)(x)&0x00000000ff000000ULL) << 8U) | (((uint64_t)(x)&0x000000ff00000000ULL) >> 8U) | (((uint64_t)(x)&0x0000ff0000000000ULL) >> 24U) | (((uint64_t)(x)&0x00ff000000000000ULL) >> 40U) \
| (((uint64_t)(x)&0xff00000000000000ULL) >> 56U))
#else
#define ZT_CONST_TO_BE_UINT16(x) ((uint16_t)(x))
#define ZT_CONST_TO_BE_UINT64(x) ((uint64_t)(x))
#endif
#define ZT_ROR64(x, r) (((x) >> (r)) | ((x) << (64 - (r))))
#define ZT_ROL64(x, r) (((x) << (r)) | ((x) >> (64 - (r))))
#define ZT_ROR32(x, r) (((x) >> (r)) | ((x) << (32 - (r))))
#define ZT_ROL32(x, r) (((x) << (r)) | ((x) >> (32 - (r))))
#ifdef ZT_ARCH_ARM_HAS_NEON
/**
 * Run-time detected ARM CPU capability flags (populated by the constructor).
 * Flags are named after the corresponding CPU features.
 */
struct ARMCapabilities {
    ARMCapabilities() noexcept;
    bool aes, crc32, pmull, sha1, sha2;
};
// Global, computed once at static initialization.
extern const ARMCapabilities ARMCAP;
#endif
#ifdef ZT_ARCH_X64
/**
 * Run-time detected x86-64 CPUID feature flags (populated by the constructor).
 * Flags are named after the corresponding CPUID features.
 */
struct CPUIDRegisters {
    CPUIDRegisters() noexcept;
    bool rdrand, aes, avx, vaes, vpclmulqdq, avx2, avx512f, sha, fsrm;
};
// Global, computed once at static initialization.
extern const CPUIDRegisters CPUID;
#endif
// Preconstructed shared exception instances (defined in Utils.cpp);
// presumably kept global so throw sites need not construct them — see callers.
extern const std::bad_alloc BadAllocException;
extern const std::out_of_range OutOfRangeException;
/**
* 256 zero bits / 32 zero bytes
*/
extern const uint64_t ZERO256[4];
/**
* Hexadecimal characters 0-f
*/
extern const char HEXCHARS[16];
/**
* A random integer generated at startup for Map's hash bucket calculation.
*/
extern const uint64_t s_mapNonce;
/**
* Perform a time-invariant binary comparison
*
* @param a First binary string
* @param b Second binary string
* @param len Length of strings
* @return True if strings are equal
*/
bool secureEq(const void *a, const void *b, unsigned int len) noexcept;
/**
* Be absolutely sure to zero memory
*
* This uses a few tricks to make sure the compiler doesn't optimize it
* out, including passing the memory as volatile.
*
* @param ptr Memory to zero
* @param len Length of memory in bytes
*/
void burn(volatile void *ptr, unsigned int len);
/**
* @param n Number to convert
* @param s Buffer, at least 24 bytes in size
* @return String containing 'n' in base 10 form
*/
char *decimal(unsigned long n, char s[24]) noexcept;
/**
* Convert an unsigned integer into hex
*
* @param i Any unsigned integer
* @param buf Buffer to receive hex, must be at least (2*sizeof(i))+1 in size or overflow will occur.
* @return Pointer to s containing hex string with trailing zero byte
*/
char *hex(uint64_t i, char buf[17]) noexcept;
/**
* Decode an unsigned integer in hex format
*
* @param s String to decode, non-hex chars are ignored
* @return Unsigned integer
*/
uint64_t unhex(const char *s) noexcept;
/**
* Convert a byte array into hex
*
* @param d Bytes
* @param l Length of bytes
* @param s String buffer, must be at least (l*2)+1 in size or overflow will occur
* @return Pointer to filled string buffer
*/
char *hex(const void *d, unsigned int l, char *s) noexcept;
/**
* Decode a hex string
*
* @param h Hex C-string (non hex chars are ignored)
* @param hlen Maximum length of string (will stop at terminating zero)
* @param buf Output buffer
* @param buflen Length of output buffer
* @return Number of written bytes
*/
unsigned int unhex(const char *h, unsigned int hlen, void *buf, unsigned int buflen) noexcept;
/**
* Generate secure random bytes
*
* This will try to use whatever OS sources of entropy are available. It's
* guarded by an internal mutex so it's thread-safe.
*
* @param buf Buffer to fill
* @param bytes Number of random bytes to generate
*/
void getSecureRandom(void *buf, unsigned int bytes) noexcept;
/**
* @return Secure random 64-bit integer
*/
uint64_t getSecureRandomU64() noexcept;
/**
* Encode string to base32
*
* @param data Binary data to encode
* @param length Length of data in bytes
* @param result Result buffer
* @param bufSize Size of result buffer
* @return Number of bytes written
*/
int b32e(const uint8_t *data, int length, char *result, int bufSize) noexcept;
/**
* Decode base32 string
*
* @param encoded C-string in base32 format (non-base32 characters are ignored)
* @param result Result buffer
* @param bufSize Size of result buffer
* @return Number of bytes written or -1 on error
*/
int b32d(const char *encoded, uint8_t *result, int bufSize) noexcept;
/**
* Get a non-cryptographic random integer.
*
* This should never be used for cryptographic use cases, not even for choosing
* message nonce/IV values if they should not repeat. It should only be used when
* a fast and potentially "dirty" random source is needed.
*/
uint64_t random() noexcept;
/**
* Perform a safe C string copy, ALWAYS null-terminating the result
*
* This will never ever EVER result in dest[] not being null-terminated
* regardless of any input parameter (other than len==0 which is invalid).
*
* @param dest Destination buffer (must not be NULL)
* @param len Length of dest[] (if zero, false is returned and nothing happens)
* @param src Source string (if NULL, dest will receive a zero-length string and true is returned)
* @return True on success, false on overflow (buffer will still be 0-terminated)
*/
bool scopy(char *dest, unsigned int len, const char *src) noexcept;
/**
 * Test whether every byte in a buffer is zero.
 *
 * @param b Buffer to scan
 * @param l Length in bytes (l == 0 yields true)
 * @return True if all bytes are zero
 */
static ZT_INLINE bool allZero(const void *const b, const unsigned int l) noexcept
{
    const uint8_t *p = reinterpret_cast<const uint8_t *>(b);
    const uint8_t *const end = p + l;
    while (p != end) {
        if (*(p++) != 0)
            return false;
    }
    return true;
}
/**
* Wrapper around reentrant strtok functions, which differ in name by platform
*
* @param str String to tokenize or NULL for subsequent calls
* @param delim Delimiter
* @param saveptr Pointer to pointer where function can save state
* @return Next token or NULL if none
*/
static ZT_INLINE char *stok(char *str, const char *delim, char **saveptr) noexcept
{
#ifdef __WINDOWS__
return strtok_s(str, delim, saveptr);
#else
return strtok_r(str, delim, saveptr);
#endif
}
// Parse an unsigned base-10 integer from a C string (strtoul semantics).
static ZT_INLINE unsigned int strToUInt(const char *s) noexcept { return (unsigned int)strtoul(s, nullptr, 10); }
// Parse an unsigned base-16 64-bit integer from a C string, using the
// platform's strtoull equivalent.
static ZT_INLINE unsigned long long hexStrToU64(const char *s) noexcept
{
#ifdef __WINDOWS__
    return (unsigned long long)_strtoui64(s, nullptr, 16);
#else
    return strtoull(s, nullptr, 16);
#endif
}
// countBits(): population count (number of set bits) for each unsigned width.
#ifdef __GNUC__
static ZT_INLINE unsigned int countBits(const uint8_t v) noexcept { return (unsigned int)__builtin_popcount((unsigned int)v); }
static ZT_INLINE unsigned int countBits(const uint16_t v) noexcept { return (unsigned int)__builtin_popcount((unsigned int)v); }
static ZT_INLINE unsigned int countBits(const uint32_t v) noexcept { return (unsigned int)__builtin_popcountl((unsigned long)v); }
static ZT_INLINE unsigned int countBits(const uint64_t v) noexcept { return (unsigned int)__builtin_popcountll((unsigned long long)v); }
#else
// Portable fallback: SWAR parallel bit-counting popcount.
template <typename T> static ZT_INLINE unsigned int countBits(T v) noexcept
{
    v = v - ((v >> 1) & (T) ~(T)0 / 3);
    v = (v & (T) ~(T)0 / 15 * 3) + ((v >> 2) & (T) ~(T)0 / 15 * 3);
    v = (v + (v >> 4)) & (T) ~(T)0 / 255 * 15;
    return (unsigned int)((v * ((~((T)0)) / ((T)255))) >> ((sizeof(T) - 1) * 8));
}
#endif
/**
* Unconditionally swap bytes regardless of host byte order
*
* @param n Integer to swap
* @return Integer with bytes reversed
*/
static ZT_INLINE uint64_t swapBytes(const uint64_t n) noexcept
{
#ifdef __GNUC__
return __builtin_bswap64(n);
#else
#ifdef _MSC_VER
return (uint64_t)_byteswap_uint64((unsigned __int64)n);
#else
return (((n & 0x00000000000000ffULL) << 56) | ((n & 0x000000000000ff00ULL) << 40) | ((n & 0x0000000000ff0000ULL) << 24) | ((n & 0x00000000ff000000ULL) << 8) | ((n & 0x000000ff00000000ULL) >> 8) | ((n & 0x0000ff0000000000ULL) >> 24) | ((n & 0x00ff000000000000ULL) >> 40) | ((n & 0xff00000000000000ULL) >> 56));
#endif
#endif
}
/**
* Unconditionally swap bytes regardless of host byte order
*
* @param n Integer to swap
* @return Integer with bytes reversed
*/
static ZT_INLINE uint32_t swapBytes(const uint32_t n) noexcept
{
#if defined(__GNUC__)
return __builtin_bswap32(n);
#else
#ifdef _MSC_VER
return (uint32_t)_byteswap_ulong((unsigned long)n);
#else
return htonl(n);
#endif
#endif
}
/**
* Unconditionally swap bytes regardless of host byte order
*
* @param n Integer to swap
* @return Integer with bytes reversed
*/
static ZT_INLINE uint16_t swapBytes(const uint16_t n) noexcept
{
#if defined(__GNUC__)
return __builtin_bswap16(n);
#else
#ifdef _MSC_VER
return (uint16_t)_byteswap_ushort((unsigned short)n);
#else
return htons(n);
#endif
#endif
}
// These are helper adapters to load and swap integer types special cased by size
// to work with all typedef'd variants, signed/unsigned, etc.
template <typename I, unsigned int S> class _swap_bytes_bysize;
template <typename I> class _swap_bytes_bysize<I, 1> {
public:
static ZT_INLINE I s(const I n) noexcept { return n; }
};
template <typename I> class _swap_bytes_bysize<I, 2> {
public:
static ZT_INLINE I s(const I n) noexcept { return (I)swapBytes((uint16_t)n); }
};
template <typename I> class _swap_bytes_bysize<I, 4> {
public:
static ZT_INLINE I s(const I n) noexcept { return (I)swapBytes((uint32_t)n); }
};
template <typename I> class _swap_bytes_bysize<I, 8> {
public:
static ZT_INLINE I s(const I n) noexcept { return (I)swapBytes((uint64_t)n); }
};
template <typename I, unsigned int S> class _load_be_bysize;
template <typename I> class _load_be_bysize<I, 1> {
public:
static ZT_INLINE I l(const uint8_t *const p) noexcept { return p[0]; }
};
template <typename I> class _load_be_bysize<I, 2> {
public:
static ZT_INLINE I l(const uint8_t *const p) noexcept { return (I)(((unsigned int)p[0] << 8U) | (unsigned int)p[1]); }
};
template <typename I> class _load_be_bysize<I, 4> {
public:
static ZT_INLINE I l(const uint8_t *const p) noexcept { return (I)(((uint32_t)p[0] << 24U) | ((uint32_t)p[1] << 16U) | ((uint32_t)p[2] << 8U) | (uint32_t)p[3]); }
};
template <typename I> class _load_be_bysize<I, 8> {
public:
static ZT_INLINE I l(const uint8_t *const p) noexcept { return (I)(((uint64_t)p[0] << 56U) | ((uint64_t)p[1] << 48U) | ((uint64_t)p[2] << 40U) | ((uint64_t)p[3] << 32U) | ((uint64_t)p[4] << 24U) | ((uint64_t)p[5] << 16U) | ((uint64_t)p[6] << 8U) | (uint64_t)p[7]); }
};
template <typename I, unsigned int S> class _load_le_bysize;
template <typename I> class _load_le_bysize<I, 1> {
public:
static ZT_INLINE I l(const uint8_t *const p) noexcept { return p[0]; }
};
template <typename I> class _load_le_bysize<I, 2> {
public:
static ZT_INLINE I l(const uint8_t *const p) noexcept { return (I)((unsigned int)p[0] | ((unsigned int)p[1] << 8U)); }
};
template <typename I> class _load_le_bysize<I, 4> {
public:
static ZT_INLINE I l(const uint8_t *const p) noexcept { return (I)((uint32_t)p[0] | ((uint32_t)p[1] << 8U) | ((uint32_t)p[2] << 16U) | ((uint32_t)p[3] << 24U)); }
};
template <typename I> class _load_le_bysize<I, 8> {
public:
static ZT_INLINE I l(const uint8_t *const p) noexcept { return (I)((uint64_t)p[0] | ((uint64_t)p[1] << 8U) | ((uint64_t)p[2] << 16U) | ((uint64_t)p[3] << 24U) | ((uint64_t)p[4] << 32U) | ((uint64_t)p[5] << 40U) | ((uint64_t)p[6] << 48U) | ((uint64_t)p[7]) << 56U); }
};
/**
* Convert any signed or unsigned integer type to big-endian ("network") byte order
*
* @tparam I Integer type (usually inferred)
* @param n Value to convert
* @return Value in big-endian order
*/
template <typename I> static ZT_INLINE I hton(const I n) noexcept
{
#if __BYTE_ORDER == __LITTLE_ENDIAN
return _swap_bytes_bysize<I, sizeof(I)>::s(n);
#else
return n;
#endif
}
/**
* Convert any signed or unsigned integer type to host byte order from big-endian ("network") byte order
*
* @tparam I Integer type (usually inferred)
* @param n Value to convert
* @return Value in host byte order
*/
template <typename I> static ZT_INLINE I ntoh(const I n) noexcept
{
#if __BYTE_ORDER == __LITTLE_ENDIAN
return _swap_bytes_bysize<I, sizeof(I)>::s(n);
#else
return n;
#endif
}
/**
* Copy bits from memory into an integer type without modifying their order
*
* @tparam I Type to load
* @param p Byte stream, must be at least sizeof(I) in size
* @return Loaded raw integer
*/
template <typename I> static ZT_INLINE I loadMachineEndian(const void *const restrict p) noexcept
{
#ifdef ZT_NO_UNALIGNED_ACCESS
I tmp;
for (int i = 0; i < (int)sizeof(I); ++i)
reinterpret_cast<uint8_t *>(&tmp)[i] = reinterpret_cast<const uint8_t *>(p)[i];
return tmp;
#else
return *reinterpret_cast<const I *>(p);
#endif
}
/**
* Copy bits from memory into an integer type without modifying their order
*
* @tparam I Type to store
* @param p Byte array (must be at least sizeof(I))
* @param i Integer to store
*/
template <typename I> static ZT_INLINE void storeMachineEndian(void *const restrict p, const I i) noexcept
{
#ifdef ZT_NO_UNALIGNED_ACCESS
for (unsigned int k = 0; k < sizeof(I); ++k)
reinterpret_cast<uint8_t *>(p)[k] = reinterpret_cast<const uint8_t *>(&i)[k];
#else
*reinterpret_cast<I *>(p) = i;
#endif
}
/**
* Decode a big-endian value from a byte stream
*
* @tparam I Type to decode (should be unsigned e.g. uint32_t or uint64_t)
* @param p Byte stream, must be at least sizeof(I) in size
* @return Decoded integer
*/
template <typename I> static ZT_INLINE I loadBigEndian(const void *const restrict p) noexcept
{
#ifdef ZT_NO_UNALIGNED_ACCESS
return _load_be_bysize<I, sizeof(I)>::l(reinterpret_cast<const uint8_t *>(p));
#else
return ntoh(*reinterpret_cast<const I *>(p));
#endif
}
/**
* Save an integer in big-endian format
*
* @tparam I Integer type to store (usually inferred)
* @param p Byte stream to write (must be at least sizeof(I))
* @param i Integer to write
*/
template <typename I> static ZT_INLINE void storeBigEndian(void *const restrict p, I i) noexcept
{
#ifdef ZT_NO_UNALIGNED_ACCESS
storeMachineEndian(p, hton(i));
#else
*reinterpret_cast<I *>(p) = hton(i);
#endif
}
/**
* Decode a little-endian value from a byte stream
*
* @tparam I Type to decode
* @param p Byte stream, must be at least sizeof(I) in size
* @return Decoded integer
*/
template <typename I> static ZT_INLINE I loadLittleEndian(const void *const restrict p) noexcept
{
#if __BYTE_ORDER == __BIG_ENDIAN || defined(ZT_NO_UNALIGNED_ACCESS)
return _load_le_bysize<I, sizeof(I)>::l(reinterpret_cast<const uint8_t *>(p));
#else
return *reinterpret_cast<const I *>(p);
#endif
}
/**
* Save an integer in little-endian format
*
* @tparam I Integer type to store (usually inferred)
* @param p Byte stream to write (must be at least sizeof(I))
* @param i Integer to write
*/
template <typename I> static ZT_INLINE void storeLittleEndian(void *const restrict p, const I i) noexcept
{
#if __BYTE_ORDER == __BIG_ENDIAN
storeMachineEndian(p, _swap_bytes_bysize<I, sizeof(I)>::s(i));
#else
#ifdef ZT_NO_UNALIGNED_ACCESS
storeMachineEndian(p, i);
#else
*reinterpret_cast<I *>(p) = i;
#endif
#endif
}
/**
* Copy memory block whose size is known at compile time.
*
* @tparam L Size of memory
* @param dest Destination memory
* @param src Source memory
*/
template <unsigned long L> static ZT_INLINE void copy(void *dest, const void *src) noexcept
{
#if defined(ZT_ARCH_X64) && defined(__GNUC__)
uintptr_t l = L;
__asm__ __volatile__("cld ; rep movsb" : "+c"(l), "+S"(src), "+D"(dest)::"memory");
#else
memcpy(dest, src, L);
#endif
}
/**
* Copy memory block whose size is known at run time
*
* @param dest Destination memory
* @param src Source memory
* @param len Bytes to copy
*/
static ZT_INLINE void copy(void *dest, const void *src, unsigned long len) noexcept
{
#if defined(ZT_ARCH_X64) && defined(__GNUC__)
__asm__ __volatile__("cld ; rep movsb" : "+c"(len), "+S"(src), "+D"(dest)::"memory");
#else
memcpy(dest, src, len);
#endif
}
/**
* Zero memory block whose size is known at compile time
*
* @tparam L Size in bytes
* @param dest Memory to zero
*/
template <unsigned long L> static ZT_INLINE void zero(void *dest) noexcept
{
#if defined(ZT_ARCH_X64) && defined(__GNUC__)
uintptr_t l = L;
__asm__ __volatile__("cld ; rep stosb" : "+c"(l), "+D"(dest) : "a"(0) : "memory");
#else
memset(dest, 0, L);
#endif
}
/**
* Zero memory block whose size is known at run time
*
* @param dest Memory to zero
* @param len Size in bytes
*/
static ZT_INLINE void zero(void *dest, unsigned long len) noexcept
{
#if defined(ZT_ARCH_X64) && defined(__GNUC__)
__asm__ __volatile__("cld ; rep stosb" : "+c"(len), "+D"(dest) : "a"(0) : "memory");
#else
memset(dest, 0, len);
#endif
}
/**
* Fill memory block with a constant byte value; size known at compile time
*
* @tparam L Size in bytes
* @tparam B Byte value to fill with
* @param dest Memory to fill
*/
template <unsigned long L, uint8_t B> static ZT_INLINE void fill(void *dest) noexcept
{
#if defined(ZT_ARCH_X64) && defined(__GNUC__)
uintptr_t l = L;
__asm__ __volatile__("cld ; rep stosb" : "+c"(l), "+D"(dest) : "a"(B) : "memory");
#else
memset(dest, B, L);
#endif
}
/**
* Fill memory block with a constant byte value; size known at run time
*
* @tparam B Byte value to fill with
* @param dest Memory to fill
* @param len Size in bytes
*/
template <uint8_t B> static ZT_INLINE void fill(void *dest, unsigned long len) noexcept
{
#if defined(ZT_ARCH_X64) && defined(__GNUC__)
__asm__ __volatile__("cld ; rep stosb" : "+c"(len), "+D"(dest) : "a"(B) : "memory");
#else
memset(dest, B, len);
#endif
}
/**
* Compute 32-bit FNV-1a checksum
*
* See: http://www.isthe.com/chongo/tech/comp/fnv/
*
* @param data Data to checksum
* @param len Length of data
* @return FNV1a checksum
*/
uint32_t fnv1a32(const void *restrict data, unsigned int len) noexcept;
/**
* Mix bits in a 64-bit integer (non-cryptographic, for hash tables)
*
* https://nullprogram.com/blog/2018/07/31/
*
* @param x Integer to mix
* @return Hashed value
*/
static ZT_INLINE uint64_t hash64(uint64_t x) noexcept
{
x ^= x >> 30U;
x *= 0xbf58476d1ce4e5b9ULL;
x ^= x >> 27U;
x *= 0x94d049bb133111ebULL;
x ^= x >> 31U;
return x;
}
/**
* Mix bits in a 32-bit integer (non-cryptographic, for hash tables)
*
* https://nullprogram.com/blog/2018/07/31/
*
* @param x Integer to mix
* @return Hashed value
*/
static ZT_INLINE uint32_t hash32(uint32_t x) noexcept
{
x ^= x >> 16U;
x *= 0x7feb352dU;
x ^= x >> 15U;
x *= 0x846ca68bU;
x ^= x >> 16U;
return x;
}
} // namespace Utils
} // namespace ZeroTier
#endif

View file

@ -1,897 +0,0 @@
/*
* Copyright (c)2013-2021 ZeroTier, Inc.
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
*/
/****/
#include "VL1.hpp"
#include "AES.hpp"
#include "Context.hpp"
#include "Expect.hpp"
#include "Identity.hpp"
#include "LZ4.hpp"
#include "Path.hpp"
#include "Peer.hpp"
#include "Poly1305.hpp"
#include "SHA512.hpp"
#include "Salsa20.hpp"
#include "SelfAwareness.hpp"
#include "Topology.hpp"
#include "VL2.hpp"
namespace ZeroTier {
namespace {
ZT_INLINE const Identity &identityFromPeerPtr(const SharedPtr<Peer> &p) { return (p) ? p->identity() : Identity::NIL; }
/**
 * Copy functor that authenticates (Poly1305) and crypts (Salsa20/12) packet
 * payload while passing the cleartext header section through untouched.
 *
 * The first ZT_PROTO_PACKET_ENCRYPTED_SECTION_START bytes streamed through
 * operator() are copied verbatim; every byte after that is added to the
 * Poly1305 MAC and then run through the Salsa20/12 keystream.
 */
struct p_SalsaPolyCopyFunction {
    Salsa20 s20;
    Poly1305 poly1305;
    unsigned int hdrRemaining; // cleartext header bytes still to pass through
    // The one-time Poly1305 key is derived by encrypting 32 zero bytes
    // (Utils::ZERO256) with the leading Salsa20/12 keystream.
    ZT_INLINE p_SalsaPolyCopyFunction(const void *salsaKey, const void *salsaIv) : s20(salsaKey, salsaIv), poly1305(), hdrRemaining(ZT_PROTO_PACKET_ENCRYPTED_SECTION_START)
    {
        uint8_t macKey[ZT_POLY1305_KEY_SIZE];
        s20.crypt12(Utils::ZERO256, macKey, ZT_POLY1305_KEY_SIZE);
        poly1305.init(macKey);
    }
    // Copy 'len' bytes from src to dest. Note the MAC is computed over the
    // *input* (src) bytes before they are passed through the cipher.
    ZT_INLINE void operator()(void *dest, const void *src, unsigned int len) noexcept
    {
        if (hdrRemaining != 0) {
            // Still inside the cleartext header: copy through unmodified.
            unsigned int hdrBytes = (len > hdrRemaining) ? hdrRemaining : len;
            Utils::copy(dest, src, hdrBytes);
            hdrRemaining -= hdrBytes;
            dest = reinterpret_cast<uint8_t *>(dest) + hdrBytes;
            src = reinterpret_cast<const uint8_t *>(src) + hdrBytes;
            len -= hdrBytes;
        }
        poly1305.update(src, len);
        s20.crypt12(src, dest, len);
    }
};
/**
 * Copy functor that authenticates (Poly1305) packet payload WITHOUT crypting
 * it — the MAC-only counterpart of p_SalsaPolyCopyFunction.
 *
 * The first ZT_PROTO_PACKET_ENCRYPTED_SECTION_START bytes streamed through
 * operator() are copied verbatim; every byte after that is added to the
 * Poly1305 MAC and then copied unchanged.
 */
struct p_PolyCopyFunction {
    Poly1305 poly1305;
    unsigned int hdrRemaining; // cleartext header bytes still to pass through
    // The one-time Poly1305 key is derived by encrypting 32 zero bytes
    // (Utils::ZERO256) with a throwaway Salsa20/12 instance.
    ZT_INLINE p_PolyCopyFunction(const void *salsaKey, const void *salsaIv) : poly1305(), hdrRemaining(ZT_PROTO_PACKET_ENCRYPTED_SECTION_START)
    {
        uint8_t macKey[ZT_POLY1305_KEY_SIZE];
        Salsa20(salsaKey, salsaIv).crypt12(Utils::ZERO256, macKey, ZT_POLY1305_KEY_SIZE);
        poly1305.init(macKey);
    }
    // Copy 'len' bytes from src to dest, MACing the non-header portion.
    ZT_INLINE void operator()(void *dest, const void *src, unsigned int len) noexcept
    {
        if (hdrRemaining != 0) {
            // Still inside the cleartext header: copy through unmodified.
            unsigned int hdrBytes = (len > hdrRemaining) ? hdrRemaining : len;
            Utils::copy(dest, src, hdrBytes);
            hdrRemaining -= hdrBytes;
            dest = reinterpret_cast<uint8_t *>(dest) + hdrBytes;
            src = reinterpret_cast<const uint8_t *>(src) + hdrBytes;
            len -= hdrBytes;
        }
        poly1305.update(src, len);
        Utils::copy(dest, src, len);
    }
};
} // anonymous namespace
VL1::VL1(const Context &ctx) : m_ctx(ctx) {}
void VL1::onRemotePacket(CallContext &cc, const int64_t localSocket, const InetAddress &fromAddr, SharedPtr<Buf> &data, const unsigned int len) noexcept
{
const SharedPtr<Path> path(m_ctx.topology->path(localSocket, fromAddr));
ZT_SPEW("%u bytes from %s (local socket %lld)", len, fromAddr.toString().c_str(), localSocket);
path->received(cc, len);
// NOTE: likely/unlikely are used here to highlight the most common code path
// for valid data packets. This may allow the compiler to generate very slightly
// faster code for that path.
try {
if (unlikely(len < ZT_PROTO_MIN_FRAGMENT_LENGTH))
return;
static_assert((ZT_PROTO_PACKET_ID_INDEX + sizeof(uint64_t)) < ZT_PROTO_MIN_FRAGMENT_LENGTH, "overflow");
const uint64_t packetId = Utils::loadMachineEndian<uint64_t>(data->unsafeData + ZT_PROTO_PACKET_ID_INDEX);
static_assert((ZT_PROTO_PACKET_DESTINATION_INDEX + ZT_ADDRESS_LENGTH) < ZT_PROTO_MIN_FRAGMENT_LENGTH, "overflow");
const Address destination(data->unsafeData + ZT_PROTO_PACKET_DESTINATION_INDEX);
if (destination != m_ctx.identity.address()) {
m_relay(cc, path, destination, data, len);
return;
}
// ----------------------------------------------------------------------------------------------------------------
// If we made it this far, the packet is at least MIN_FRAGMENT_LENGTH and is addressed to this node's ZT address
// ----------------------------------------------------------------------------------------------------------------
Buf::PacketVector pktv;
static_assert(ZT_PROTO_PACKET_FRAGMENT_INDICATOR_INDEX <= ZT_PROTO_MIN_FRAGMENT_LENGTH, "overflow");
if (data->unsafeData[ZT_PROTO_PACKET_FRAGMENT_INDICATOR_INDEX] == ZT_PROTO_PACKET_FRAGMENT_INDICATOR) {
// This looks like a fragment (excluding the head) of a larger packet.
static_assert(ZT_PROTO_PACKET_FRAGMENT_COUNTS < ZT_PROTO_MIN_FRAGMENT_LENGTH, "overflow");
const unsigned int totalFragments = (data->unsafeData[ZT_PROTO_PACKET_FRAGMENT_COUNTS] >> 4U) & 0x0fU;
const unsigned int fragmentNo = data->unsafeData[ZT_PROTO_PACKET_FRAGMENT_COUNTS] & 0x0fU;
switch (m_inputPacketAssembler.assemble(packetId, pktv, data, ZT_PROTO_PACKET_FRAGMENT_PAYLOAD_START_AT, len - ZT_PROTO_PACKET_FRAGMENT_PAYLOAD_START_AT, fragmentNo, totalFragments, cc.ticks, path)) {
case Defragmenter<ZT_MAX_PACKET_FRAGMENTS>::COMPLETE: break;
default:
// case Defragmenter<ZT_MAX_PACKET_FRAGMENTS>::OK:
// case Defragmenter<ZT_MAX_PACKET_FRAGMENTS>::ERR_DUPLICATE_FRAGMENT:
// case Defragmenter<ZT_MAX_PACKET_FRAGMENTS>::ERR_INVALID_FRAGMENT:
// case Defragmenter<ZT_MAX_PACKET_FRAGMENTS>::ERR_TOO_MANY_FRAGMENTS_FOR_PATH:
// case Defragmenter<ZT_MAX_PACKET_FRAGMENTS>::ERR_OUT_OF_MEMORY:
return;
}
}
else {
if (unlikely(len < ZT_PROTO_MIN_PACKET_LENGTH))
return;
static_assert(ZT_PROTO_PACKET_FLAGS_INDEX < ZT_PROTO_MIN_PACKET_LENGTH, "overflow");
if ((data->unsafeData[ZT_PROTO_PACKET_FLAGS_INDEX] & ZT_PROTO_FLAG_FRAGMENTED) != 0) {
// This is the head of a series of fragments that we may or may not already have.
switch (m_inputPacketAssembler.assemble(
packetId, pktv, data,
0, // fragment index is 0 since this is the head
len,
0, // always the zero'eth fragment
0, // this is specified in fragments, not in the head
cc.ticks, path)) {
case Defragmenter<ZT_MAX_PACKET_FRAGMENTS>::COMPLETE: break;
default:
// case Defragmenter<ZT_MAX_PACKET_FRAGMENTS>::OK:
// case Defragmenter<ZT_MAX_PACKET_FRAGMENTS>::ERR_DUPLICATE_FRAGMENT:
// case Defragmenter<ZT_MAX_PACKET_FRAGMENTS>::ERR_INVALID_FRAGMENT:
// case Defragmenter<ZT_MAX_PACKET_FRAGMENTS>::ERR_TOO_MANY_FRAGMENTS_FOR_PATH:
// case Defragmenter<ZT_MAX_PACKET_FRAGMENTS>::ERR_OUT_OF_MEMORY:
return;
}
}
else {
// This is a single whole packet with no fragments.
Buf::Slice s = pktv.push();
s.b.swap(data);
s.s = 0;
s.e = len;
}
}
// ----------------------------------------------------------------------------------------------------------------
// If we made it this far without returning, a packet is fully assembled and ready to process.
// ----------------------------------------------------------------------------------------------------------------
const uint8_t *const hdr = pktv[0].b->unsafeData + pktv[0].s;
static_assert((ZT_PROTO_PACKET_SOURCE_INDEX + ZT_ADDRESS_LENGTH) < ZT_PROTO_MIN_PACKET_LENGTH, "overflow");
const Address source(hdr + ZT_PROTO_PACKET_SOURCE_INDEX);
static_assert(ZT_PROTO_PACKET_FLAGS_INDEX < ZT_PROTO_MIN_PACKET_LENGTH, "overflow");
const uint8_t hops = hdr[ZT_PROTO_PACKET_FLAGS_INDEX] & ZT_PROTO_FLAG_FIELD_HOPS_MASK;
const uint8_t cipher = (hdr[ZT_PROTO_PACKET_FLAGS_INDEX] >> 3U) & 3U;
SharedPtr<Buf> pkt(new Buf());
int pktSize = 0;
static_assert(ZT_PROTO_PACKET_VERB_INDEX < ZT_PROTO_MIN_PACKET_LENGTH, "overflow");
if (unlikely(((cipher == ZT_PROTO_CIPHER_POLY1305_NONE) || (cipher == ZT_PROTO_CIPHER_NONE)) && ((hdr[ZT_PROTO_PACKET_VERB_INDEX] & ZT_PROTO_VERB_MASK) == Protocol::VERB_HELLO))) {
// Handle unencrypted HELLO packets.
pktSize = pktv.mergeCopy(*pkt);
if (unlikely(pktSize < ZT_PROTO_MIN_PACKET_LENGTH)) {
ZT_SPEW("discarding packet %.16llx from %s(%s): assembled packet size: %d", packetId, source.toString().c_str(), fromAddr.toString().c_str(), pktSize);
return;
}
const SharedPtr<Peer> peer(m_HELLO(cc, path, *pkt, pktSize));
if (likely(peer))
peer->received(m_ctx, cc, path, hops, packetId, pktSize - ZT_PROTO_PACKET_PAYLOAD_START, Protocol::VERB_HELLO, Protocol::VERB_NOP);
return;
}
// This remains zero if authentication fails. Otherwise it gets set to a bit mask
// indicating authentication and other security flags like encryption and forward
// secrecy status.
unsigned int auth = 0;
SharedPtr<Peer> peer(m_ctx.topology->peer(cc, source));
if (likely(peer)) {
switch (cipher) {
case ZT_PROTO_CIPHER_POLY1305_NONE: {
uint8_t perPacketKey[ZT_SALSA20_KEY_SIZE];
Protocol::salsa2012DeriveKey(peer->rawIdentityKey(), perPacketKey, *pktv[0].b, pktv.totalSize());
p_PolyCopyFunction s20cf(perPacketKey, &packetId);
pktSize = pktv.mergeMap<p_PolyCopyFunction &>(*pkt, ZT_PROTO_PACKET_ENCRYPTED_SECTION_START, s20cf);
if (unlikely(pktSize < ZT_PROTO_MIN_PACKET_LENGTH)) {
ZT_SPEW("discarding packet %.16llx from %s(%s): assembled packet size: %d", packetId, source.toString().c_str(), fromAddr.toString().c_str(), pktSize);
return;
}
uint64_t mac[2];
s20cf.poly1305.finish(mac);
static_assert((ZT_PROTO_PACKET_MAC_INDEX + 8) < ZT_PROTO_MIN_PACKET_LENGTH, "overflow");
if (unlikely(Utils::loadMachineEndian<uint64_t>(hdr + ZT_PROTO_PACKET_MAC_INDEX) != mac[0])) {
ZT_SPEW("discarding packet %.16llx from %s(%s): packet MAC failed (none/poly1305)", packetId, source.toString().c_str(), fromAddr.toString().c_str());
m_ctx.t->incomingPacketDropped(cc, 0xcc89c812, packetId, 0, peer->identity(), path->address(), hops, Protocol::VERB_NOP, ZT_TRACE_PACKET_DROP_REASON_MAC_FAILED);
return;
}
auth = ZT_VL1_AUTH_RESULT_FLAG_AUTHENTICATED;
} break;
case ZT_PROTO_CIPHER_POLY1305_SALSA2012: {
uint8_t perPacketKey[ZT_SALSA20_KEY_SIZE];
Protocol::salsa2012DeriveKey(peer->rawIdentityKey(), perPacketKey, *pktv[0].b, pktv.totalSize());
p_SalsaPolyCopyFunction s20cf(perPacketKey, &packetId);
pktSize = pktv.mergeMap<p_SalsaPolyCopyFunction &>(*pkt, ZT_PROTO_PACKET_ENCRYPTED_SECTION_START, s20cf);
if (unlikely(pktSize < ZT_PROTO_MIN_PACKET_LENGTH)) {
ZT_SPEW("discarding packet %.16llx from %s(%s): assembled packet size: %d", packetId, source.toString().c_str(), fromAddr.toString().c_str(), pktSize);
return;
}
uint64_t mac[2];
s20cf.poly1305.finish(mac);
static_assert((ZT_PROTO_PACKET_MAC_INDEX + 8) < ZT_PROTO_MIN_PACKET_LENGTH, "overflow");
if (unlikely(Utils::loadMachineEndian<uint64_t>(hdr + ZT_PROTO_PACKET_MAC_INDEX) != mac[0])) {
ZT_SPEW("discarding packet %.16llx from %s(%s): packet MAC failed (salsa/poly1305)", packetId, source.toString().c_str(), fromAddr.toString().c_str());
m_ctx.t->incomingPacketDropped(cc, 0xcc89c812, packetId, 0, peer->identity(), path->address(), hops, Protocol::VERB_NOP, ZT_TRACE_PACKET_DROP_REASON_MAC_FAILED);
return;
}
auth = ZT_VL1_AUTH_RESULT_FLAG_AUTHENTICATED | ZT_VL1_AUTH_RESULT_FLAG_ENCRYPTED;
} break;
case ZT_PROTO_CIPHER_NONE: {
// TODO
} break;
case ZT_PROTO_CIPHER_AES_GMAC_SIV: {
// TODO
} break;
default: m_ctx.t->incomingPacketDropped(cc, 0x5b001099, packetId, 0, identityFromPeerPtr(peer), path->address(), hops, Protocol::VERB_NOP, ZT_TRACE_PACKET_DROP_REASON_INVALID_OBJECT); return;
}
}
if (likely(auth != 0)) {
// If authentication was successful go on and process the packet.
if (unlikely(pktSize < ZT_PROTO_MIN_PACKET_LENGTH)) {
ZT_SPEW(
"discarding packet %.16llx from %s(%s): assembled packet size %d is smaller than minimum packet "
"length",
packetId, source.toString().c_str(), fromAddr.toString().c_str(), pktSize);
return;
}
// TODO: should take instance ID into account here once that is fully implemented.
if (unlikely(peer->deduplicateIncomingPacket(packetId))) {
ZT_SPEW("discarding packet %.16llx from %s(%s): duplicate!", packetId, source.toString().c_str(), fromAddr.toString().c_str());
return;
}
static_assert(ZT_PROTO_PACKET_VERB_INDEX < ZT_PROTO_MIN_PACKET_LENGTH, "overflow");
const uint8_t verbFlags = pkt->unsafeData[ZT_PROTO_PACKET_VERB_INDEX];
const Protocol::Verb verb = (Protocol::Verb)(verbFlags & ZT_PROTO_VERB_MASK);
// Decompress packet payload if compressed. For additional safety decompression is
// only performed on packets whose MACs have already been validated. (Only HELLO is
// sent without this, and HELLO doesn't benefit from compression.)
if (((verbFlags & ZT_PROTO_VERB_FLAG_COMPRESSED) != 0) && (pktSize > ZT_PROTO_PACKET_PAYLOAD_START)) {
SharedPtr<Buf> dec(new Buf());
Utils::copy<ZT_PROTO_PACKET_PAYLOAD_START>(dec->unsafeData, pkt->unsafeData);
const int uncompressedLen = LZ4_decompress_safe(reinterpret_cast<const char *>(pkt->unsafeData + ZT_PROTO_PACKET_PAYLOAD_START), reinterpret_cast<char *>(dec->unsafeData + ZT_PROTO_PACKET_PAYLOAD_START), pktSize - ZT_PROTO_PACKET_PAYLOAD_START, ZT_BUF_MEM_SIZE - ZT_PROTO_PACKET_PAYLOAD_START);
if (likely((uncompressedLen >= 0) && (uncompressedLen <= (ZT_BUF_MEM_SIZE - ZT_PROTO_PACKET_PAYLOAD_START)))) {
pkt.swap(dec);
ZT_SPEW("decompressed packet: %d -> %d", pktSize, ZT_PROTO_PACKET_PAYLOAD_START + uncompressedLen);
pktSize = ZT_PROTO_PACKET_PAYLOAD_START + uncompressedLen;
}
else {
m_ctx.t->incomingPacketDropped(cc, 0xee9e4392, packetId, 0, identityFromPeerPtr(peer), path->address(), hops, verb, ZT_TRACE_PACKET_DROP_REASON_INVALID_COMPRESSED_DATA);
return;
}
}
ZT_SPEW("%s from %s(%s) (%d bytes)", Protocol::verbName(verb), source.toString().c_str(), fromAddr.toString().c_str(), pktSize);
// NOTE: HELLO is normally sent in the clear (in terms of our usual AEAD modes) and is handled
// above. We will try to process it here, but if so it'll still get re-authenticated via HELLO's
// own internal authentication logic as usual. It would be abnormal to make it here with HELLO
// but not invalid.
Protocol::Verb inReVerb = Protocol::VERB_NOP;
bool ok = true;
switch (verb) {
case Protocol::VERB_NOP: break;
case Protocol::VERB_HELLO: ok = (bool)(m_HELLO(cc, path, *pkt, pktSize)); break;
case Protocol::VERB_ERROR: ok = m_ERROR(cc, packetId, auth, path, peer, *pkt, pktSize, inReVerb); break;
case Protocol::VERB_OK: ok = m_OK(cc, packetId, auth, path, peer, *pkt, pktSize, inReVerb); break;
case Protocol::VERB_WHOIS: ok = m_WHOIS(cc, packetId, auth, path, peer, *pkt, pktSize); break;
case Protocol::VERB_RENDEZVOUS: ok = m_RENDEZVOUS(cc, packetId, auth, path, peer, *pkt, pktSize); break;
case Protocol::VERB_FRAME: ok = m_ctx.vl2->m_FRAME(cc, packetId, auth, path, peer, *pkt, pktSize); break;
case Protocol::VERB_EXT_FRAME: ok = m_ctx.vl2->m_EXT_FRAME(cc, packetId, auth, path, peer, *pkt, pktSize); break;
case Protocol::VERB_ECHO: ok = m_ECHO(cc, packetId, auth, path, peer, *pkt, pktSize); break;
case Protocol::VERB_MULTICAST_LIKE: ok = m_ctx.vl2->m_MULTICAST_LIKE(cc, packetId, auth, path, peer, *pkt, pktSize); break;
case Protocol::VERB_NETWORK_CREDENTIALS: ok = m_ctx.vl2->m_NETWORK_CREDENTIALS(cc, packetId, auth, path, peer, *pkt, pktSize); break;
case Protocol::VERB_NETWORK_CONFIG_REQUEST: ok = m_ctx.vl2->m_NETWORK_CONFIG_REQUEST(cc, packetId, auth, path, peer, *pkt, pktSize); break;
case Protocol::VERB_NETWORK_CONFIG: ok = m_ctx.vl2->m_NETWORK_CONFIG(cc, packetId, auth, path, peer, *pkt, pktSize); break;
case Protocol::VERB_MULTICAST_GATHER: ok = m_ctx.vl2->m_MULTICAST_GATHER(cc, packetId, auth, path, peer, *pkt, pktSize); break;
case Protocol::VERB_MULTICAST_FRAME_deprecated: ok = m_ctx.vl2->m_MULTICAST_FRAME_deprecated(cc, packetId, auth, path, peer, *pkt, pktSize); break;
case Protocol::VERB_PUSH_DIRECT_PATHS: ok = m_PUSH_DIRECT_PATHS(cc, packetId, auth, path, peer, *pkt, pktSize); break;
case Protocol::VERB_USER_MESSAGE: ok = m_USER_MESSAGE(cc, packetId, auth, path, peer, *pkt, pktSize); break;
case Protocol::VERB_MULTICAST: ok = m_ctx.vl2->m_MULTICAST(cc, packetId, auth, path, peer, *pkt, pktSize); break;
case Protocol::VERB_ENCAP: ok = m_ENCAP(cc, packetId, auth, path, peer, *pkt, pktSize); break;
default: m_ctx.t->incomingPacketDropped(cc, 0xeeeeeff0, packetId, 0, identityFromPeerPtr(peer), path->address(), hops, verb, ZT_TRACE_PACKET_DROP_REASON_UNRECOGNIZED_VERB); break;
}
if (likely(ok))
peer->received(m_ctx, cc, path, hops, packetId, pktSize - ZT_PROTO_PACKET_PAYLOAD_START, verb, inReVerb);
}
else {
// If decryption and authentication were not successful, try to look up identities.
// This is rate limited by virtue of the retry rate limit timer.
if (pktSize <= 0)
pktSize = pktv.mergeCopy(*pkt);
if (likely(pktSize >= ZT_PROTO_MIN_PACKET_LENGTH)) {
ZT_SPEW("authentication failed or no peers match, queueing WHOIS for %s", source.toString().c_str());
bool sendPending;
{
Mutex::Lock wl(m_whoisQueue_l);
p_WhoisQueueItem &wq = m_whoisQueue[source];
const unsigned int wpidx = wq.waitingPacketCount++ % ZT_VL1_MAX_WHOIS_WAITING_PACKETS;
wq.waitingPacketSize[wpidx] = (unsigned int)pktSize;
wq.waitingPacket[wpidx] = pkt;
sendPending = (cc.ticks - wq.lastRetry) >= ZT_WHOIS_RETRY_DELAY;
}
if (sendPending)
m_sendPendingWhois(cc);
}
}
}
catch (...) {
m_ctx.t->unexpectedError(cc, 0xea1b6dea, "unexpected exception in onRemotePacket() parsing packet from %s", path->address().toString().c_str());
}
}
// Relay a packet toward a third-party destination on its behalf.
// TODO(review): not implemented yet -- packets that would require relaying are
// currently dropped silently by this no-op body.
void VL1::m_relay(CallContext &cc, const SharedPtr<Path> &path, Address destination, SharedPtr<Buf> &pkt, int pktSize) {}
/**
 * Send WHOIS queries for all queued addresses whose retry timer has elapsed.
 *
 * Queries are sent to the current root. Addresses are batched: as many 5-byte
 * addresses as fit are packed into each WHOIS packet, and additional packets
 * are emitted until the batch is drained. The queue lock is held only while
 * collecting due addresses, never during network I/O.
 */
void VL1::m_sendPendingWhois(CallContext &cc)
{
    const SharedPtr<Peer> root(m_ctx.topology->root());
    if (unlikely(!root))
        return; // no root to query
    const SharedPtr<Path> rootPath(root->path(cc));
    if (unlikely(!rootPath))
        return; // no usable path to the root

    // Collect addresses due for a (re)try under the lock, then release it.
    Vector<Address> toSend;
    {
        Mutex::Lock wl(m_whoisQueue_l);
        for (Map<Address, p_WhoisQueueItem>::iterator wi(m_whoisQueue.begin()); wi != m_whoisQueue.end(); ++wi) {
            if ((cc.ticks - wi->second.lastRetry) >= ZT_WHOIS_RETRY_DELAY) {
                wi->second.lastRetry = cc.ticks;
                ++wi->second.retries;
                // NOTE(review): entries are never erased here; presumably they are
                // expired elsewhere after too many retries -- confirm.
                toSend.push_back(wi->first);
            }
        }
    }

    if (!toSend.empty()) {
        SymmetricKey &key = root->key();
        uint8_t outp[ZT_DEFAULT_UDP_MTU - ZT_PROTO_MIN_PACKET_LENGTH];
        Vector<Address>::iterator a(toSend.begin());
        while (a != toSend.end()) {
            const uint64_t packetId = key.nextMessage(m_ctx.identity.address(), root->address());
            int p = Protocol::newPacket(outp, packetId, root->address(), m_ctx.identity.address(), Protocol::VERB_WHOIS);
            // Pack as many addresses as will fit. The explicit int cast avoids a
            // signed/unsigned comparison (p is int, sizeof yields size_t), and
            // '<=' fixes an off-by-one that previously left one usable address
            // slot empty per packet.
            while ((a != toSend.end()) && ((p + (int)ZT_ADDRESS_LENGTH) <= (int)sizeof(outp))) {
                a->copyTo(outp + p);
                ++a;
                p += ZT_ADDRESS_LENGTH;
            }
            // Register the armored packet's ID so the OK(WHOIS) reply is expected.
            m_ctx.expect->sending(Protocol::armor(outp, p, key, root->cipher()), cc.ticks);
            root->send(m_ctx, cc, outp, p, rootPath);
        }
    }
}
/**
 * Parse, authenticate, and answer an incoming HELLO.
 *
 * HELLO travels in the clear (no payload encryption). Peers with protocol
 * version >= 11 authenticate it with a trailing HMAC-SHA384; older peers use
 * the legacy Poly1305 packet MAC. On success an OK(HELLO) echoing the packet
 * ID and timestamp is sent back and the (possibly newly learned) peer is
 * returned; on any rejection a NULL SharedPtr is returned.
 */
SharedPtr<Peer> VL1::m_HELLO(CallContext &cc, const SharedPtr<Path> &path, Buf &pkt, int packetSize)
{
    const uint64_t packetId = Utils::loadMachineEndian<uint64_t>(pkt.unsafeData + ZT_PROTO_PACKET_ID_INDEX);
    const uint64_t mac = Utils::loadMachineEndian<uint64_t>(pkt.unsafeData + ZT_PROTO_PACKET_MAC_INDEX);
    const uint8_t hops = pkt.unsafeData[ZT_PROTO_PACKET_FLAGS_INDEX] & ZT_PROTO_FLAG_FIELD_HOPS_MASK;

    const uint8_t protoVersion = pkt.lI8<ZT_PROTO_PACKET_PAYLOAD_START>();
    if (unlikely(protoVersion < ZT_PROTO_VERSION_MIN)) {
        // Sender speaks a protocol version too old to interoperate with.
        m_ctx.t->incomingPacketDropped(cc, 0x907a9891, packetId, 0, Identity::NIL, path->address(), hops, Protocol::VERB_HELLO, ZT_TRACE_PACKET_DROP_REASON_PEER_TOO_OLD);
        return SharedPtr<Peer>();
    }
    // Fixed-position fields: sender software version and its timestamp.
    const unsigned int versionMajor = pkt.lI8<ZT_PROTO_PACKET_PAYLOAD_START + 1>();
    const unsigned int versionMinor = pkt.lI8<ZT_PROTO_PACKET_PAYLOAD_START + 2>();
    const unsigned int versionRev = pkt.lI16<ZT_PROTO_PACKET_PAYLOAD_START + 3>();
    const uint64_t timestamp = pkt.lI64<ZT_PROTO_PACKET_PAYLOAD_START + 5>();
    int ii = ZT_PROTO_PACKET_PAYLOAD_START + 13;

    // Get identity and verify that it matches the sending address in the packet.
    Identity id;
    if (unlikely(pkt.rO(ii, id) < 0)) {
        m_ctx.t->incomingPacketDropped(cc, 0x707a9810, packetId, 0, Identity::NIL, path->address(), hops, Protocol::VERB_HELLO, ZT_TRACE_PACKET_DROP_REASON_INVALID_OBJECT);
        return SharedPtr<Peer>();
    }
    if (unlikely(id.address() != Address(pkt.unsafeData + ZT_PROTO_PACKET_SOURCE_INDEX))) {
        m_ctx.t->incomingPacketDropped(cc, 0x707a9010, packetId, 0, Identity::NIL, path->address(), hops, Protocol::VERB_HELLO, ZT_TRACE_PACKET_DROP_REASON_MAC_FAILED);
        return SharedPtr<Peer>();
    }

    // Get the peer that matches this identity, or learn a new one if we don't know it.
    SharedPtr<Peer> peer(m_ctx.topology->peer(cc, id.address(), true));
    if (peer) {
        // Known peer: the claimed identity must match exactly.
        if (unlikely(peer->identity() != id)) {
            m_ctx.t->incomingPacketDropped(cc, 0x707a9891, packetId, 0, identityFromPeerPtr(peer), path->address(), hops, Protocol::VERB_HELLO, ZT_TRACE_PACKET_DROP_REASON_MAC_FAILED);
            return SharedPtr<Peer>();
        }
        if (unlikely(peer->deduplicateIncomingPacket(packetId))) {
            ZT_SPEW("discarding packet %.16llx from %s(%s): duplicate!", packetId, id.address().toString().c_str(), path->address().toString().c_str());
            return SharedPtr<Peer>();
        }
    }
    else {
        // Unknown peer: fully validate the identity before learning it.
        if (unlikely(!id.locallyValidate())) {
            m_ctx.t->incomingPacketDropped(cc, 0x707a9892, packetId, 0, identityFromPeerPtr(peer), path->address(), hops, Protocol::VERB_HELLO, ZT_TRACE_PACKET_DROP_REASON_INVALID_OBJECT);
            return SharedPtr<Peer>();
        }
        peer.set(new Peer());
        if (unlikely(!peer->init(m_ctx, cc, id))) {
            m_ctx.t->incomingPacketDropped(cc, 0x707a9893, packetId, 0, identityFromPeerPtr(peer), path->address(), hops, Protocol::VERB_HELLO, ZT_TRACE_PACKET_DROP_REASON_UNSPECIFIED);
            return SharedPtr<Peer>();
        }
        // add() may hand back a different instance if another thread won the race.
        peer = m_ctx.topology->add(cc, peer);
    }

    // ------------------------------------------------------------------------------------------------------------------
    // If we made it this far, peer is non-NULL and the identity is valid and matches it.
    // ------------------------------------------------------------------------------------------------------------------

    if (protoVersion >= 11) {
        // V2.x and newer use HMAC-SHA384 for HELLO, which offers a larger security margin
        // to guard key exchange and connection setup than typical AEAD. The packet MAC
        // field is ignored, and eventually it'll be undefined.
        uint8_t hmac[ZT_HMACSHA384_LEN];
        if (unlikely(packetSize < ZT_HMACSHA384_LEN)) {
            m_ctx.t->incomingPacketDropped(cc, 0xab9c9891, packetId, 0, identityFromPeerPtr(peer), path->address(), hops, Protocol::VERB_HELLO, ZT_TRACE_PACKET_DROP_REASON_MAC_FAILED);
            return SharedPtr<Peer>();
        }
        // The trailing HMAC is computed over the packet with the hops and MAC
        // fields zeroed, since those mutate in transit.
        packetSize -= ZT_HMACSHA384_LEN;
        pkt.unsafeData[ZT_PROTO_PACKET_FLAGS_INDEX] &= ~ZT_PROTO_FLAG_FIELD_HOPS_MASK; // mask hops to 0
        Utils::storeMachineEndian<uint64_t>(pkt.unsafeData + ZT_PROTO_PACKET_MAC_INDEX, 0); // set MAC field to 0
        HMACSHA384(peer->identityHelloHmacKey(), pkt.unsafeData, packetSize, hmac);
        if (unlikely(!Utils::secureEq(hmac, pkt.unsafeData + packetSize, ZT_HMACSHA384_LEN))) { // constant-time compare
            m_ctx.t->incomingPacketDropped(cc, 0x707a9891, packetId, 0, identityFromPeerPtr(peer), path->address(), hops, Protocol::VERB_HELLO, ZT_TRACE_PACKET_DROP_REASON_MAC_FAILED);
            return SharedPtr<Peer>();
        }
    }
    else {
        // Older versions use Poly1305 MAC (but no whole packet encryption) for HELLO.
        if (likely(packetSize > ZT_PROTO_PACKET_ENCRYPTED_SECTION_START)) {
            uint8_t perPacketKey[ZT_SALSA20_KEY_SIZE];
            Protocol::salsa2012DeriveKey(peer->rawIdentityKey(), perPacketKey, pkt, packetSize);
            // The Poly1305 one-time key is the first 32 bytes of Salsa20/12 keystream.
            uint8_t macKey[ZT_POLY1305_KEY_SIZE];
            Salsa20(perPacketKey, &packetId).crypt12(Utils::ZERO256, macKey, ZT_POLY1305_KEY_SIZE);
            Poly1305 poly1305(macKey);
            poly1305.update(pkt.unsafeData + ZT_PROTO_PACKET_ENCRYPTED_SECTION_START, packetSize - ZT_PROTO_PACKET_ENCRYPTED_SECTION_START);
            uint64_t polyMac[2];
            poly1305.finish(polyMac);
            if (unlikely(mac != polyMac[0])) {
                m_ctx.t->incomingPacketDropped(cc, 0x11bfff82, packetId, 0, id, path->address(), hops, Protocol::VERB_NOP, ZT_TRACE_PACKET_DROP_REASON_MAC_FAILED);
                return SharedPtr<Peer>();
            }
        }
        else {
            // Too short to contain anything to authenticate.
            m_ctx.t->incomingPacketDropped(cc, 0x11bfff81, packetId, 0, id, path->address(), hops, Protocol::VERB_NOP, ZT_TRACE_PACKET_DROP_REASON_MAC_FAILED);
            return SharedPtr<Peer>();
        }
    }

    // ------------------------------------------------------------------------------------------------------------------
    // This far means we passed MAC (Poly1305 or HMAC-SHA384 for newer peers)
    // ------------------------------------------------------------------------------------------------------------------

    // Physical address the sender says it sent this HELLO to.
    InetAddress sentTo;
    if (unlikely(pkt.rO(ii, sentTo) < 0)) {
        m_ctx.t->incomingPacketDropped(cc, 0x707a9811, packetId, 0, identityFromPeerPtr(peer), path->address(), hops, Protocol::VERB_HELLO, ZT_TRACE_PACKET_DROP_REASON_INVALID_OBJECT);
        return SharedPtr<Peer>();
    }

    SymmetricKey &key = peer->key();

    if (protoVersion >= 11) {
        // V2.x and newer supports an encrypted section and has a new OK format.
        ii += 4; // skip reserved field
        if (likely((ii + 12) < packetSize)) {
            // CTR-decrypt the trailing metadata dictionary in place using a
            // 96-bit nonce carried in the packet.
            AES::CTR ctr(peer->identityHelloDictionaryEncryptionCipher());
            const uint8_t *const ctrNonce = pkt.unsafeData + ii;
            ii += 12;
            ctr.init(ctrNonce, 0, pkt.unsafeData + ii);
            ctr.crypt(pkt.unsafeData + ii, packetSize - ii);
            ctr.finish();

            ii += 2; // skip reserved field
            const unsigned int dictSize = pkt.rI16(ii);
            if (unlikely((ii + dictSize) > packetSize)) {
                // Malformed dictionary, but the HELLO itself authenticated: keep the peer.
                m_ctx.t->incomingPacketDropped(cc, 0x707a9815, packetId, 0, identityFromPeerPtr(peer), path->address(), hops, Protocol::VERB_HELLO, ZT_TRACE_PACKET_DROP_REASON_INVALID_OBJECT);
                return peer;
            }
            Dictionary md;
            if (!md.decode(pkt.unsafeData + ii, dictSize)) {
                m_ctx.t->incomingPacketDropped(cc, 0x707a9816, packetId, 0, identityFromPeerPtr(peer), path->address(), hops, Protocol::VERB_HELLO, ZT_TRACE_PACKET_DROP_REASON_INVALID_OBJECT);
                return peer;
            }

            if (!md.empty()) {
                // TODO
            }
        }
    }

    // Build and send OK(HELLO), echoing the sender's packet ID and timestamp so
    // the peer can measure round trip time. Note this reuses 'pkt' in place.
    Protocol::newPacket(pkt, key.nextMessage(m_ctx.identity.address(), peer->address()), peer->address(), m_ctx.identity.address(), Protocol::VERB_OK);
    ii = ZT_PROTO_PACKET_PAYLOAD_START;
    pkt.wI8(ii, Protocol::VERB_HELLO);
    pkt.wI64(ii, packetId);
    pkt.wI64(ii, timestamp);
    pkt.wI8(ii, ZT_PROTO_VERSION);
    pkt.wI8(ii, ZEROTIER_VERSION_MAJOR);
    pkt.wI8(ii, ZEROTIER_VERSION_MINOR);
    pkt.wI16(ii, ZEROTIER_VERSION_REVISION);
    pkt.wO(ii, path->address()); // tell the sender the physical address we saw it at
    pkt.wI16(ii, 0); // reserved, specifies no "moons" for older versions

    if (protoVersion >= 11) {
        // Newer peers get a metadata dictionary (empty for now) plus an
        // HMAC-SHA384 over the entire OK.
        FCV<uint8_t, 1024> okmd;
        pkt.wI16(ii, (uint16_t)okmd.size());
        pkt.wB(ii, okmd.data(), okmd.size());

        if (unlikely((ii + ZT_HMACSHA384_LEN) > ZT_BUF_MEM_SIZE)) // sanity check, should be impossible
            return SharedPtr<Peer>();

        HMACSHA384(peer->identityHelloHmacKey(), pkt.unsafeData, ii, pkt.unsafeData + ii);
        ii += ZT_HMACSHA384_LEN;
    }

    peer->setRemoteVersion(protoVersion, versionMajor, versionMinor, versionRev);
    peer->send(m_ctx, cc, pkt.unsafeData, ii, path);

    return peer;
}
/**
 * Handle an ERROR packet (a reply indicating a previous request failed).
 *
 * The legacy (pre-V2) handler below is disabled pending a port to the new
 * protocol code; it is preserved verbatim for reference.
 */
bool VL1::m_ERROR(CallContext &cc, const uint64_t packetId, const unsigned int auth, const SharedPtr<Path> &path, const SharedPtr<Peer> &peer, Buf &pkt, int packetSize, Protocol::Verb &inReVerb)
{
#if 0
	if (packetSize < (int)sizeof(Protocol::ERROR::Header)) {
		RR->t->incomingPacketDropped(tPtr,0x3beb1947,0,0,identityFromPeerPtr(peer),path->address(),0,Protocol::VERB_ERROR,ZT_TRACE_PACKET_DROP_REASON_MALFORMED_PACKET);
		return false;
	}
	Protocol::ERROR::Header &eh = pkt.as<Protocol::ERROR::Header>();
	inReVerb = (Protocol::Verb)eh.inReVerb;

	const int64_t now = RR->node->now();
	if (!RR->expect->expecting(eh.inRePacketId,now)) {
		RR->t->incomingPacketDropped(tPtr,0x4c1f1ff7,0,0,identityFromPeerPtr(peer),path->address(),0,Protocol::VERB_OK,ZT_TRACE_PACKET_DROP_REASON_REPLY_NOT_EXPECTED);
		return false;
	}

	switch(eh.error) {

		//case Protocol::ERROR_INVALID_REQUEST:
		//case Protocol::ERROR_BAD_PROTOCOL_VERSION:
		//case Protocol::ERROR_CANNOT_DELIVER:
		default:
			break;

		case Protocol::ERROR_OBJ_NOT_FOUND:
			if (eh.inReVerb == Protocol::VERB_NETWORK_CONFIG_REQUEST) {
			}
			break;

		case Protocol::ERROR_UNSUPPORTED_OPERATION:
			if (eh.inReVerb == Protocol::VERB_NETWORK_CONFIG_REQUEST) {
			}
			break;

		case Protocol::ERROR_NEED_MEMBERSHIP_CERTIFICATE:
			break;

		case Protocol::ERROR_NETWORK_ACCESS_DENIED_:
			if (eh.inReVerb == Protocol::VERB_NETWORK_CONFIG_REQUEST) {
			}
			break;

	}
	return true;
#endif
    // FIX: all return statements were inside the #if 0 block, so control flowed
    // off the end of this value-returning function -- undefined behavior.
    // Report success (no-op) like the other not-yet-implemented handlers here.
    return true;
}
/**
 * Handle an OK reply.
 *
 * Verifies via the "expect" registry that the OK answers a packet this node
 * actually sent (anti-spoofing / anti-replay), then dispatches on the verb it
 * is in reply to. Returns false if the reply is malformed or unexpected.
 */
bool VL1::m_OK(CallContext &cc, const uint64_t packetId, const unsigned int auth, const SharedPtr<Path> &path, const SharedPtr<Peer> &peer, Buf &pkt, int packetSize, Protocol::Verb &inReVerb)
{
    // NOTE(review): this parse offset (PAYLOAD_START + 13) does not match the
    // offset at which m_HELLO() writes the in-re verb (PAYLOAD_START + 0) when
    // building OK(HELLO) -- confirm the intended OK wire layout.
    int ii = ZT_PROTO_PACKET_PAYLOAD_START + 13;
    inReVerb = (Protocol::Verb)pkt.rI8(ii);
    const uint64_t inRePacketId = pkt.rI64(ii);
    if (unlikely(Buf::readOverflow(ii, packetSize))) {
        m_ctx.t->incomingPacketDropped(cc, 0x4c1f1ff7, packetId, 0, identityFromPeerPtr(peer), path->address(), 0, Protocol::VERB_OK, ZT_TRACE_PACKET_DROP_REASON_MALFORMED_PACKET);
        return false;
    }
    if (unlikely(!m_ctx.expect->expecting(inRePacketId, cc.ticks))) {
        m_ctx.t->incomingPacketDropped(cc, 0x4c1f1ff8, packetId, 0, identityFromPeerPtr(peer), path->address(), 0, Protocol::VERB_OK, ZT_TRACE_PACKET_DROP_REASON_REPLY_NOT_EXPECTED);
        return false;
    }

    ZT_SPEW("got OK in-re %s (packet ID %.16llx) from %s(%s)", Protocol::verbName(inReVerb), inRePacketId, peer->address().toString().c_str(), path->address().toString().c_str());

    // Per-verb OK handling (currently all no-ops). The default case silences
    // the compiler's unhandled-enumerator warning (-Wswitch) for other verbs.
    switch (inReVerb) {
        case Protocol::VERB_HELLO: break;
        case Protocol::VERB_WHOIS: break;
        case Protocol::VERB_NETWORK_CONFIG_REQUEST: break;
        case Protocol::VERB_MULTICAST_GATHER: break;
        default: break;
    }
    return true;
}
/**
 * Handle a WHOIS query (a request for identities matching listed addresses).
 *
 * The legacy (pre-V2) handler below is disabled pending a port to the new
 * protocol code; it is preserved verbatim for reference.
 */
bool VL1::m_WHOIS(CallContext &cc, const uint64_t packetId, const unsigned int auth, const SharedPtr<Path> &path, const SharedPtr<Peer> &peer, Buf &pkt, int packetSize)
{
#if 0
	if (packetSize < (int)sizeof(Protocol::OK::Header)) {
		RR->t->incomingPacketDropped(tPtr,0x4c1f1ff7,0,0,identityFromPeerPtr(peer),path->address(),0,Protocol::VERB_OK,ZT_TRACE_PACKET_DROP_REASON_MALFORMED_PACKET);
		return false;
	}
	Protocol::Header &ph = pkt.as<Protocol::Header>();

	if (!peer->rateGateInboundWhoisRequest(RR->node->now())) {
		RR->t->incomingPacketDropped(tPtr,0x19f7194a,ph.packetId,0,peer->identity(),path->address(),Protocol::packetHops(ph),Protocol::VERB_WHOIS,ZT_TRACE_PACKET_DROP_REASON_RATE_LIMIT_EXCEEDED);
		return true;
	}

	Buf outp;
	Protocol::OK::WHOIS &outh = outp.as<Protocol::OK::WHOIS>();
	int ptr = sizeof(Protocol::Header);
	while ((ptr + ZT_ADDRESS_LENGTH) <= packetSize) {
		outh.h.h.packetId = Protocol::getPacketId();
		peer->address().copyTo(outh.h.h.destination);
		RR->identity.address().copyTo(outh.h.h.source);
		outh.h.h.flags = 0;
		outh.h.h.verb = Protocol::VERB_OK;

		outh.h.inReVerb = Protocol::VERB_WHOIS;
		outh.h.inRePacketId = ph.packetId;

		int outl = sizeof(Protocol::OK::WHOIS);
		while ( ((ptr + ZT_ADDRESS_LENGTH) <= packetSize) && ((outl + ZT_IDENTITY_MARSHAL_SIZE_MAX + ZT_LOCATOR_MARSHAL_SIZE_MAX) < ZT_PROTO_MAX_PACKET_LENGTH) ) {
			const SharedPtr<Peer> &wp(RR->topology->peer(tPtr,Address(pkt.unsafeData + ptr)));
			if (wp) {
				outp.wO(outl,wp->identity());
				if (peer->remoteVersionProtocol() >= 11) { // older versions don't know what a locator is
					const Locator loc(wp->locator());
					outp.wO(outl,loc);
				}
				if (Buf::writeOverflow(outl)) { // sanity check, shouldn't be possible
					RR->t->unexpectedError(tPtr,0xabc0f183,"Buf write overflow building OK(WHOIS) to reply to %s",Trace::str(peer->address(),path).s);
					return false;
				}
			}
			ptr += ZT_ADDRESS_LENGTH;
		}

		if (outl > (int)sizeof(Protocol::OK::WHOIS)) {
			Protocol::armor(outp,outl,peer->key(),peer->cipher());
			path->send(RR,tPtr,outp.unsafeData,outl,RR->node->now());
		}
	}

	return true;
#endif
    // FIX: all return statements were inside the #if 0 block, so control flowed
    // off the end of this value-returning function -- undefined behavior.
    // Report success (no-op) like the other not-yet-implemented handlers here.
    return true;
}
/**
 * Handle a RENDEZVOUS (NAT traversal assist sent by a root to introduce peers).
 *
 * The legacy (pre-V2) handler below is disabled pending a port to the new
 * protocol code; it is preserved verbatim for reference.
 */
bool VL1::m_RENDEZVOUS(CallContext &cc, const uint64_t packetId, const unsigned int auth, const SharedPtr<Path> &path, const SharedPtr<Peer> &peer, Buf &pkt, int packetSize)
{
#if 0
	if (RR->topology->isRoot(peer->identity())) {
		if (packetSize < (int)sizeof(Protocol::RENDEZVOUS)) {
			RR->t->incomingPacketDropped(tPtr,0x43e90ab3,Protocol::packetId(pkt,packetSize),0,peer->identity(),path->address(),Protocol::packetHops(pkt,packetSize),Protocol::VERB_RENDEZVOUS,ZT_TRACE_PACKET_DROP_REASON_MALFORMED_PACKET);
			return false;
		}
		Protocol::RENDEZVOUS &rdv = pkt.as<Protocol::RENDEZVOUS>();

		const SharedPtr<Peer> with(RR->topology->peer(tPtr,Address(rdv.peerAddress)));
		if (with) {
			const int64_t now = RR->node->now();
			const unsigned int port = Utils::ntoh(rdv.port);
			if (port != 0) {
				switch(rdv.addressLength) {
					case 4:
					case 16:
						if ((int)(sizeof(Protocol::RENDEZVOUS) + rdv.addressLength) <= packetSize) {
							const InetAddress atAddr(pkt.unsafeData + sizeof(Protocol::RENDEZVOUS),rdv.addressLength,port);
							peer->tryToContactAt(tPtr,Endpoint(atAddr),now,false);
							RR->t->tryingNewPath(tPtr,0x55a19aaa,with->identity(),atAddr,path->address(),Protocol::packetId(pkt,packetSize),Protocol::VERB_RENDEZVOUS,peer->identity(),ZT_TRACE_TRYING_NEW_PATH_REASON_RENDEZVOUS);
						}
						break;
					case 255: {
						Endpoint ep;
						int p = sizeof(Protocol::RENDEZVOUS);
						int epl = pkt.rO(p,ep);
						if ((epl > 0) && (ep) && (!Buf::readOverflow(p,packetSize))) {
							switch (ep.type()) {
								case Endpoint::TYPE_INETADDR_V4:
								case Endpoint::TYPE_INETADDR_V6:
									peer->tryToContactAt(tPtr,ep,now,false);
									RR->t->tryingNewPath(tPtr,0x55a19aab,with->identity(),ep.inetAddr(),path->address(),Protocol::packetId(pkt,packetSize),Protocol::VERB_RENDEZVOUS,peer->identity(),ZT_TRACE_TRYING_NEW_PATH_REASON_RENDEZVOUS);
									break;
								default:
									break;
							}
						}
					}	break;
				}
			}
		}
	}
	return true;
#endif
    // FIX: all return statements were inside the #if 0 block, so control flowed
    // off the end of this value-returning function -- undefined behavior.
    // Report success (no-op) like the other not-yet-implemented handlers here.
    return true;
}
/**
 * Handle an ECHO request (reply with the request's payload, rate gated).
 *
 * The legacy (pre-V2) handler below is disabled pending a port to the new
 * protocol code; it is preserved verbatim for reference.
 */
bool VL1::m_ECHO(CallContext &cc, const uint64_t packetId, const unsigned int auth, const SharedPtr<Path> &path, const SharedPtr<Peer> &peer, Buf &pkt, int packetSize)
{
#if 0
	const uint64_t packetId = Protocol::packetId(pkt,packetSize);
	const uint64_t now = RR->node->now();
	if (packetSize < (int)sizeof(Protocol::Header)) {
		RR->t->incomingPacketDropped(tPtr,0x14d70bb0,packetId,0,peer->identity(),path->address(),Protocol::packetHops(pkt,packetSize),Protocol::VERB_ECHO,ZT_TRACE_PACKET_DROP_REASON_MALFORMED_PACKET);
		return false;
	}

	if (peer->rateGateEchoRequest(now)) {
		Buf outp;
		Protocol::OK::ECHO &outh = outp.as<Protocol::OK::ECHO>();
		outh.h.h.packetId = Protocol::getPacketId();
		peer->address().copyTo(outh.h.h.destination);
		RR->identity.address().copyTo(outh.h.h.source);
		outh.h.h.flags = 0;
		outh.h.h.verb = Protocol::VERB_OK;
		outh.h.inReVerb = Protocol::VERB_ECHO;
		outh.h.inRePacketId = packetId;
		int outl = sizeof(Protocol::OK::ECHO);
		outp.wB(outl,pkt.unsafeData + sizeof(Protocol::Header),packetSize - sizeof(Protocol::Header));

		if (Buf::writeOverflow(outl)) {
			RR->t->incomingPacketDropped(tPtr,0x14d70bb0,packetId,0,peer->identity(),path->address(),Protocol::packetHops(pkt,packetSize),Protocol::VERB_ECHO,ZT_TRACE_PACKET_DROP_REASON_MALFORMED_PACKET);
			return false;
		}

		Protocol::armor(outp,outl,peer->key(),peer->cipher());
		path->send(RR,tPtr,outp.unsafeData,outl,now);
	} else {
		RR->t->incomingPacketDropped(tPtr,0x27878bc1,packetId,0,peer->identity(),path->address(),Protocol::packetHops(pkt,packetSize),Protocol::VERB_ECHO,ZT_TRACE_PACKET_DROP_REASON_RATE_LIMIT_EXCEEDED);
	}

	return true;
#endif
    // FIX: all return statements were inside the #if 0 block, so control flowed
    // off the end of this value-returning function -- undefined behavior.
    // Report success (no-op) like the other not-yet-implemented handlers here.
    return true;
}
/**
 * Handle PUSH_DIRECT_PATHS (a peer advertising physical endpoints to try).
 *
 * The legacy (pre-V2) handler below is disabled pending a port to the new
 * protocol code; it is preserved verbatim for reference.
 */
bool VL1::m_PUSH_DIRECT_PATHS(CallContext &cc, const uint64_t packetId, const unsigned int auth, const SharedPtr<Path> &path, const SharedPtr<Peer> &peer, Buf &pkt, int packetSize)
{
#if 0
	if (packetSize < (int)sizeof(Protocol::PUSH_DIRECT_PATHS)) {
		RR->t->incomingPacketDropped(tPtr,0x1bb1bbb1,Protocol::packetId(pkt,packetSize),0,peer->identity(),path->address(),Protocol::packetHops(pkt,packetSize),Protocol::VERB_PUSH_DIRECT_PATHS,ZT_TRACE_PACKET_DROP_REASON_MALFORMED_PACKET);
		return false;
	}
	Protocol::PUSH_DIRECT_PATHS &pdp = pkt.as<Protocol::PUSH_DIRECT_PATHS>();

	int ptr = sizeof(Protocol::PUSH_DIRECT_PATHS);
	const unsigned int numPaths = Utils::ntoh(pdp.numPaths);
	InetAddress a;
	Endpoint ep;
	for(unsigned int pi=0;pi<numPaths;++pi) {
		/*const uint8_t flags = pkt.rI8(ptr);*/ ++ptr; // flags are not presently used

		const int xas = (int)pkt.rI16(ptr);
		//const uint8_t *const extendedAttrs = pkt.rBnc(ptr,xas);
		ptr += xas;

		const unsigned int addrType = pkt.rI8(ptr);
		const unsigned int addrRecordLen = pkt.rI8(ptr);
		if (addrRecordLen == 0) {
			RR->t->incomingPacketDropped(tPtr,0xaed00118,pdp.h.packetId,0,peer->identity(),path->address(),Protocol::packetHops(pdp.h),Protocol::VERB_PUSH_DIRECT_PATHS,ZT_TRACE_PACKET_DROP_REASON_MALFORMED_PACKET);
			return false;
		}
		if (Buf::readOverflow(ptr,packetSize)) {
			RR->t->incomingPacketDropped(tPtr,0xb450e10f,pdp.h.packetId,0,peer->identity(),path->address(),Protocol::packetHops(pdp.h),Protocol::VERB_PUSH_DIRECT_PATHS,ZT_TRACE_PACKET_DROP_REASON_MALFORMED_PACKET);
			return false;
		}

		const void *addrBytes = nullptr;
		unsigned int addrLen = 0;
		unsigned int addrPort = 0;
		switch(addrType) {
			case 0:
				addrBytes = pkt.rBnc(ptr,addrRecordLen);
				addrLen = addrRecordLen;
				break;
			case 4:
				addrBytes = pkt.rBnc(ptr,4);
				addrLen = 4;
				addrPort = pkt.rI16(ptr);
				break;
			case 6:
				addrBytes = pkt.rBnc(ptr,16);
				addrLen = 16;
				addrPort = pkt.rI16(ptr);
				break;
			//case 200:
				// TODO: this would be a WebRTC SDP offer contained in the extended attrs field
				//break;
			default: break;
		}

		if (Buf::readOverflow(ptr,packetSize)) {
			RR->t->incomingPacketDropped(tPtr,0xb4d0f10f,pdp.h.packetId,0,peer->identity(),path->address(),Protocol::packetHops(pdp.h),Protocol::VERB_PUSH_DIRECT_PATHS,ZT_TRACE_PACKET_DROP_REASON_MALFORMED_PACKET);
			return false;
		}

		if (addrPort) {
			a.set(addrBytes,addrLen,addrPort);
		} else if (addrLen) {
			if (ep.unmarshal(reinterpret_cast<const uint8_t *>(addrBytes),(int)addrLen) <= 0) {
				RR->t->incomingPacketDropped(tPtr,0x00e0f00d,pdp.h.packetId,0,peer->identity(),path->address(),Protocol::packetHops(pdp.h),Protocol::VERB_PUSH_DIRECT_PATHS,ZT_TRACE_PACKET_DROP_REASON_MALFORMED_PACKET);
				return false;
			}
			switch(ep.type()) {
				case Endpoint::TYPE_INETADDR_V4:
				case Endpoint::TYPE_INETADDR_V6:
					a = ep.inetAddr();
					break;
				default: // other types are not supported yet
					break;
			}
		}

		if (a) {
			RR->t->tryingNewPath(tPtr,0xa5ab1a43,peer->identity(),a,path->address(),Protocol::packetId(pkt,packetSize),Protocol::VERB_RENDEZVOUS,peer->identity(),ZT_TRACE_TRYING_NEW_PATH_REASON_RECEIVED_PUSH_DIRECT_PATHS);
		}

		ptr += (int)addrRecordLen;
	}

	// TODO: add to a peer try-queue

	return true;
#endif
    // FIX: all return statements were inside the #if 0 block, so control flowed
    // off the end of this value-returning function -- undefined behavior.
    // Report success (no-op) like the other not-yet-implemented handlers here.
    return true;
}
// Handle a USER_MESSAGE (application-defined payload delivered to this node).
// Currently accepted and ignored; returning true still updates the peer's
// receive statistics via the caller.
bool VL1::m_USER_MESSAGE(CallContext &cc, const uint64_t packetId, const unsigned int auth, const SharedPtr<Path> &path, const SharedPtr<Peer> &peer, Buf &pkt, int packetSize)
{
    // TODO: not implemented yet
    return true;
}
// Handle an ENCAP (an encapsulated packet to be unwrapped and reprocessed).
// Currently accepted and ignored; returning true still updates the peer's
// receive statistics via the caller.
bool VL1::m_ENCAP(CallContext &cc, const uint64_t packetId, const unsigned int auth, const SharedPtr<Path> &path, const SharedPtr<Peer> &peer, Buf &pkt, int packetSize)
{
    // TODO: not implemented yet
    return true;
}
} // namespace ZeroTier

View file

@ -1,101 +0,0 @@
/*
* Copyright (c)2013-2021 ZeroTier, Inc.
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
*/
/****/
#ifndef ZT_VL1_HPP
#define ZT_VL1_HPP
#include "Address.hpp"
#include "Buf.hpp"
#include "CallContext.hpp"
#include "Constants.hpp"
#include "Containers.hpp"
#include "Defragmenter.hpp"
#include "FCV.hpp"
#include "Mutex.hpp"
#include "Protocol.hpp"
#define ZT_VL1_MAX_WHOIS_WAITING_PACKETS 32
#define ZT_VL1_AUTH_RESULT_FLAG_AUTHENTICATED 0x01U
#define ZT_VL1_AUTH_RESULT_FLAG_ENCRYPTED 0x02U
#define ZT_VL1_AUTH_RESULT_FLAG_FORWARD_SECRET 0x04U
namespace ZeroTier {
class Context;
class Peer;
class VL2;
/**
 * VL1 (virtual layer 1) packet I/O and messaging.
 *
 * This class is thread safe.
 */
class VL1 {
public:
// Binds this handler to a node context; the context must outlive this object.
explicit VL1(const Context &ctx);
/**
 * Called when a packet is received from the real network
 *
 * The packet data supplied to this method may be modified. Internal
 * packet handler code may also take possession of it via atomic swap
 * and leave the 'data' pointer NULL. The 'data' pointer and its
 * contents should not be used after this call. Make a copy if the
 * data might still be needed.
 *
 * @param localSocket Local I/O socket as supplied by external code
 * @param fromAddr Internet IP address of origin
 * @param data Packet data
 * @param len Packet length
 */
void onRemotePacket(CallContext &cc, int64_t localSocket, const InetAddress &fromAddr, SharedPtr<Buf> &data, unsigned int len) noexcept;
private:
// Forward a packet on toward 'destination' on behalf of another node.
void m_relay(CallContext &cc, const SharedPtr<Path> &path, Address destination, SharedPtr<Buf> &pkt, int pktSize);
// Send (or retry) any WHOIS requests currently queued in m_whoisQueue.
void m_sendPendingWhois(CallContext &cc);
// Per-verb packet handlers. m_HELLO returns the (possibly newly learned)
// peer; the bool handlers return false when the packet is rejected
// (e.g. malformed) and true when it has been consumed.
SharedPtr<Peer> m_HELLO(CallContext &cc, const SharedPtr<Path> &path, Buf &pkt, int packetSize);
bool m_ERROR(CallContext &cc, uint64_t packetId, unsigned int auth, const SharedPtr<Path> &path, const SharedPtr<Peer> &peer, Buf &pkt, int packetSize, Protocol::Verb &inReVerb);
bool m_OK(CallContext &cc, uint64_t packetId, unsigned int auth, const SharedPtr<Path> &path, const SharedPtr<Peer> &peer, Buf &pkt, int packetSize, Protocol::Verb &inReVerb);
bool m_WHOIS(CallContext &cc, uint64_t packetId, unsigned int auth, const SharedPtr<Path> &path, const SharedPtr<Peer> &peer, Buf &pkt, int packetSize);
bool m_RENDEZVOUS(CallContext &cc, uint64_t packetId, unsigned int auth, const SharedPtr<Path> &path, const SharedPtr<Peer> &peer, Buf &pkt, int packetSize);
bool m_ECHO(CallContext &cc, uint64_t packetId, unsigned int auth, const SharedPtr<Path> &path, const SharedPtr<Peer> &peer, Buf &pkt, int packetSize);
bool m_PUSH_DIRECT_PATHS(CallContext &cc, uint64_t packetId, unsigned int auth, const SharedPtr<Path> &path, const SharedPtr<Peer> &peer, Buf &pkt, int packetSize);
bool m_USER_MESSAGE(CallContext &cc, uint64_t packetId, unsigned int auth, const SharedPtr<Path> &path, const SharedPtr<Peer> &peer, Buf &pkt, int packetSize);
bool m_ENCAP(CallContext &cc, uint64_t packetId, unsigned int auth, const SharedPtr<Path> &path, const SharedPtr<Peer> &peer, Buf &pkt, int packetSize);
const Context &m_ctx;
// Defragmentation engine for handling inbound packets with more than one fragment.
Defragmenter<ZT_MAX_PACKET_FRAGMENTS> m_inputPacketAssembler;
// Queue of outbound WHOIS requests and packets waiting on them.
struct p_WhoisQueueItem {
ZT_INLINE p_WhoisQueueItem() : lastRetry(0), retries(0), waitingPacketCount(0) {}
int64_t lastRetry;           // time of last WHOIS retry (0 == never sent)
unsigned int retries;        // number of WHOIS attempts so far
unsigned int waitingPacketCount; // number of valid entries in the arrays below
unsigned int waitingPacketSize[ZT_VL1_MAX_WHOIS_WAITING_PACKETS];
SharedPtr<Buf> waitingPacket[ZT_VL1_MAX_WHOIS_WAITING_PACKETS];
};
// Pending WHOIS lookups keyed by the address being resolved; guarded by m_whoisQueue_l.
Map<Address, p_WhoisQueueItem> m_whoisQueue;
Mutex m_whoisQueue_l;
};
} // namespace ZeroTier
#endif

View file

@ -1,48 +0,0 @@
/*
* Copyright (c)2013-2021 ZeroTier, Inc.
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
*/
/****/
#include "VL2.hpp"
#include "Context.hpp"
#include "MAC.hpp"
#include "Network.hpp"
#include "Path.hpp"
#include "Peer.hpp"
#include "Topology.hpp"
#include "VL1.hpp"
namespace ZeroTier {
// Construct the VL2 handler bound to a node context; the context must
// outlive this object (held by reference).
VL2::VL2(const Context &ctx) : m_ctx(ctx)
{
}
// Entry point for frames arriving from a local Ethernet tap.
// TODO: not implemented yet; the frame is silently dropped.
void VL2::onLocalEthernet(CallContext &cc, const SharedPtr<Network> &network, const MAC &from, const MAC &to, const unsigned int etherType, unsigned int vlanId, SharedPtr<Buf> &data, unsigned int len)
{
}
// Handler for VERB_FRAME. TODO: not implemented yet.
// Fix: the original body was empty, and flowing off the end of a
// value-returning function is undefined behavior. Returns true (packet
// consumed), mirroring the VL1 stub handlers -- confirm once implemented.
bool VL2::m_FRAME(CallContext &cc, const uint64_t packetId, const unsigned int auth, const SharedPtr<Path> &path, SharedPtr<Peer> &peer, Buf &pkt, int packetSize)
{
return true;
}
// Handler for VERB_EXT_FRAME. TODO: not implemented yet.
// Fix: empty body in a value-returning function is undefined behavior;
// return true (packet consumed) like the VL1 stub handlers.
bool VL2::m_EXT_FRAME(CallContext &cc, const uint64_t packetId, const unsigned int auth, const SharedPtr<Path> &path, SharedPtr<Peer> &peer, Buf &pkt, int packetSize)
{
return true;
}
// Handler for VERB_MULTICAST_LIKE. TODO: not implemented yet.
// Fix: empty body in a value-returning function is undefined behavior;
// return true (packet consumed) like the VL1 stub handlers.
bool VL2::m_MULTICAST_LIKE(CallContext &cc, const uint64_t packetId, const unsigned int auth, const SharedPtr<Path> &path, SharedPtr<Peer> &peer, Buf &pkt, int packetSize)
{
return true;
}
// Handler for VERB_NETWORK_CREDENTIALS. TODO: not implemented yet.
// Fix: empty body in a value-returning function is undefined behavior;
// return true (packet consumed) like the VL1 stub handlers.
bool VL2::m_NETWORK_CREDENTIALS(CallContext &cc, const uint64_t packetId, const unsigned int auth, const SharedPtr<Path> &path, SharedPtr<Peer> &peer, Buf &pkt, int packetSize)
{
return true;
}
// Handler for VERB_NETWORK_CONFIG_REQUEST. TODO: not implemented yet.
// Fix: empty body in a value-returning function is undefined behavior;
// return true (packet consumed) like the VL1 stub handlers.
bool VL2::m_NETWORK_CONFIG_REQUEST(CallContext &cc, const uint64_t packetId, const unsigned int auth, const SharedPtr<Path> &path, SharedPtr<Peer> &peer, Buf &pkt, int packetSize)
{
return true;
}
// Handler for VERB_NETWORK_CONFIG. TODO: not implemented yet.
// Fix: empty body in a value-returning function is undefined behavior;
// return true (packet consumed) like the VL1 stub handlers.
bool VL2::m_NETWORK_CONFIG(CallContext &cc, const uint64_t packetId, const unsigned int auth, const SharedPtr<Path> &path, SharedPtr<Peer> &peer, Buf &pkt, int packetSize)
{
return true;
}
// Handler for VERB_MULTICAST_GATHER. TODO: not implemented yet.
// Fix: empty body in a value-returning function is undefined behavior;
// return true (packet consumed) like the VL1 stub handlers.
bool VL2::m_MULTICAST_GATHER(CallContext &cc, const uint64_t packetId, const unsigned int auth, const SharedPtr<Path> &path, SharedPtr<Peer> &peer, Buf &pkt, int packetSize)
{
return true;
}
// Handler for the deprecated VERB_MULTICAST_FRAME. TODO: not implemented yet.
// Fix: empty body in a value-returning function is undefined behavior;
// return true (packet consumed) like the VL1 stub handlers.
bool VL2::m_MULTICAST_FRAME_deprecated(CallContext &cc, const uint64_t packetId, const unsigned int auth, const SharedPtr<Path> &path, SharedPtr<Peer> &peer, Buf &pkt, int packetSize)
{
return true;
}
// Handler for VERB_MULTICAST. TODO: not implemented yet.
// Fix: empty body in a value-returning function is undefined behavior;
// return true (packet consumed) like the VL1 stub handlers.
bool VL2::m_MULTICAST(CallContext &cc, const uint64_t packetId, const unsigned int auth, const SharedPtr<Path> &path, SharedPtr<Peer> &peer, Buf &pkt, int packetSize)
{
return true;
}
} // namespace ZeroTier

View file

@ -1,71 +0,0 @@
/*
* Copyright (c)2013-2021 ZeroTier, Inc.
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
*/
/****/
#ifndef ZT_VL2_HPP
#define ZT_VL2_HPP
#include "Address.hpp"
#include "Buf.hpp"
#include "CallContext.hpp"
#include "Constants.hpp"
#include "Containers.hpp"
#include "FCV.hpp"
#include "Mutex.hpp"
#include "Protocol.hpp"
namespace ZeroTier {
class Path;
class Peer;
class Context;
class VL1;
class Network;
class MAC;
/**
 * VL2 (virtual layer 2) Ethernet frame and multicast handling.
 *
 * VL1 dispatches VL2-level verbs into the protected handlers below,
 * hence the friend declaration.
 */
class VL2 {
friend class VL1;
public:
// Binds this handler to a node context; the context must outlive this object.
explicit VL2(const Context &ctx);
/**
 * Called when a packet comes from a local Ethernet tap
 *
 * @param network Which network's TAP did this packet come from?
 * @param from Originating MAC address
 * @param to Destination MAC address
 * @param etherType Ethernet packet type
 * @param vlanId VLAN ID or 0 if none
 * @param data Ethernet payload
 * @param len Frame length
 */
void onLocalEthernet(CallContext &cc, const SharedPtr<Network> &network, const MAC &from, const MAC &to, unsigned int etherType, unsigned int vlanId, SharedPtr<Buf> &data, unsigned int len);
protected:
// Per-verb packet handlers invoked by VL1 after authentication;
// 'auth' carries the ZT_VL1_AUTH_RESULT_FLAG_* bits for the packet.
bool m_FRAME(CallContext &cc, uint64_t packetId, unsigned int auth, const SharedPtr<Path> &path, SharedPtr<Peer> &peer, Buf &pkt, int packetSize);
bool m_EXT_FRAME(CallContext &cc, uint64_t packetId, unsigned int auth, const SharedPtr<Path> &path, SharedPtr<Peer> &peer, Buf &pkt, int packetSize);
bool m_MULTICAST_LIKE(CallContext &cc, uint64_t packetId, unsigned int auth, const SharedPtr<Path> &path, SharedPtr<Peer> &peer, Buf &pkt, int packetSize);
bool m_NETWORK_CREDENTIALS(CallContext &cc, uint64_t packetId, unsigned int auth, const SharedPtr<Path> &path, SharedPtr<Peer> &peer, Buf &pkt, int packetSize);
bool m_NETWORK_CONFIG_REQUEST(CallContext &cc, uint64_t packetId, unsigned int auth, const SharedPtr<Path> &path, SharedPtr<Peer> &peer, Buf &pkt, int packetSize);
bool m_NETWORK_CONFIG(CallContext &cc, uint64_t packetId, unsigned int auth, const SharedPtr<Path> &path, SharedPtr<Peer> &peer, Buf &pkt, int packetSize);
bool m_MULTICAST_GATHER(CallContext &cc, uint64_t packetId, unsigned int auth, const SharedPtr<Path> &path, SharedPtr<Peer> &peer, Buf &pkt, int packetSize);
// Handler for the deprecated multicast frame verb (pre-V2 peers).
bool m_MULTICAST_FRAME_deprecated(CallContext &cc, uint64_t packetId, unsigned int auth, const SharedPtr<Path> &path, SharedPtr<Peer> &peer, Buf &pkt, int packetSize);
bool m_MULTICAST(CallContext &cc, uint64_t packetId, unsigned int auth, const SharedPtr<Path> &path, SharedPtr<Peer> &peer, Buf &pkt, int packetSize);
private:
const Context &m_ctx;
};
} // namespace ZeroTier
#endif

Some files were not shown because too many files have changed in this diff Show more