Break out a few things from Node in the core to make code cleaner.

Adam Ierymenko 2021-04-02 15:28:25 -04:00
parent 7ad660c3ef
commit b39d4bed16
No known key found for this signature in database
GPG key ID: C8877CF2D7A5D7F3
24 changed files with 521 additions and 541 deletions


@ -31,12 +31,10 @@ namespace ZeroTier {
class Address : public TriviallyCopyable
{
public:
ZT_INLINE Address() noexcept:
_a(0)
ZT_INLINE Address() noexcept: _a(0)
{}
ZT_INLINE Address(const uint64_t a) noexcept:
_a(a)
ZT_INLINE Address(const uint64_t a) noexcept: _a(a)
{}
explicit ZT_INLINE Address(const uint8_t b[5]) noexcept:


@ -1,78 +0,0 @@
/*
* Copyright (c)2013-2021 ZeroTier, Inc.
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
*/
/****/
#ifndef ZT_BLOB_HPP
#define ZT_BLOB_HPP
#include "Constants.hpp"
#include "Utils.hpp"
#include "TriviallyCopyable.hpp"
#include <algorithm>
// This header contains simple statically sized binary object types.
namespace ZeroTier {
/**
* Blob type for SHA384 hashes
*/
struct SHA384Hash
{
uint64_t data[6];
ZT_INLINE SHA384Hash() noexcept
{ Utils::zero< sizeof(data) >(data); }
explicit ZT_INLINE SHA384Hash(const void *const d) noexcept
{ Utils::copy< 48 >(data, d); }
ZT_INLINE const uint8_t *bytes() const noexcept
{ return reinterpret_cast<const uint8_t *>(data); }
ZT_INLINE unsigned long hashCode() const noexcept
{ return (unsigned long)data[0]; }
ZT_INLINE operator bool() const noexcept
{ return ((data[0] != 0) && (data[1] != 0) && (data[2] != 0) && (data[3] != 0) && (data[4] != 0) && (data[5] != 0)); }
ZT_INLINE bool operator==(const SHA384Hash &b) const noexcept
{ return ((data[0] == b.data[0]) && (data[1] == b.data[1]) && (data[2] == b.data[2]) && (data[3] == b.data[3]) && (data[4] == b.data[4]) && (data[5] == b.data[5])); }
ZT_INLINE bool operator!=(const SHA384Hash &b) const noexcept
{ return !(*this == b); }
ZT_INLINE bool operator<(const SHA384Hash &b) const noexcept
{ return (memcmp(data, b.data, 48) < 0); }
ZT_INLINE bool operator>(const SHA384Hash &b) const noexcept
{ return (memcmp(data, b.data, 48) > 0); }
ZT_INLINE bool operator<=(const SHA384Hash &b) const noexcept
{ return (memcmp(data, b.data, 48) <= 0); }
ZT_INLINE bool operator>=(const SHA384Hash &b) const noexcept
{ return (memcmp(data, b.data, 48) >= 0); }
};
static_assert(sizeof(SHA384Hash) == 48, "SHA384Hash contains unnecessary padding");
template< unsigned long S >
struct Blob
{
uint8_t data[S];
};
} // namespace ZeroTier
#endif


@ -16,6 +16,9 @@
#include "Identity.hpp"
#include "Locator.hpp"
#include "Certificate.hpp"
#include "InetAddress.hpp"
#include "VL1.hpp"
#include "VL2.hpp"
extern "C" {
@ -108,14 +111,14 @@ enum ZT_ResultCode ZT_Node_processWirePacket(
{
try {
ZeroTier::SharedPtr< ZeroTier::Buf > buf((isZtBuffer) ? ZT_PTRTOBUF(packetData) : new ZeroTier::Buf(packetData, packetLength & ZT_BUF_MEM_MASK));
return reinterpret_cast<ZeroTier::Node *>(node)->processWirePacket(tptr, now, localSocket, ZT_InetAddress_ptr_cast_const_sockaddr_storage_ptr(remoteAddress), buf, packetLength, nextBackgroundTaskDeadline);
reinterpret_cast<ZeroTier::Node *>(node)->RR->vl1->onRemotePacket(tptr, localSocket, *ZeroTier::asInetAddress(remoteAddress), buf, packetLength);
} catch (std::bad_alloc &exc) {
return ZT_RESULT_FATAL_ERROR_OUT_OF_MEMORY;
} catch (...) {
// "OK" since invalid packets are simply dropped, but the system is still up.
// We should never make it here, but if we did that would be the interpretation.
return ZT_RESULT_OK;
}
return ZT_RESULT_OK;
}
enum ZT_ResultCode ZT_Node_processVirtualNetworkFrame(
@ -133,8 +136,14 @@ enum ZT_ResultCode ZT_Node_processVirtualNetworkFrame(
volatile int64_t *nextBackgroundTaskDeadline)
{
try {
ZeroTier::SharedPtr< ZeroTier::Buf > buf((isZtBuffer) ? ZT_PTRTOBUF(frameData) : new ZeroTier::Buf(frameData, frameLength & ZT_BUF_MEM_MASK));
return reinterpret_cast<ZeroTier::Node *>(node)->processVirtualNetworkFrame(tptr, now, nwid, sourceMac, destMac, etherType, vlanId, buf, frameLength, nextBackgroundTaskDeadline);
ZeroTier::SharedPtr< ZeroTier::Network > network(reinterpret_cast<ZeroTier::Node *>(node)->RR->networks->get(nwid));
if (likely(network)) {
ZeroTier::SharedPtr< ZeroTier::Buf > buf((isZtBuffer) ? ZT_PTRTOBUF(frameData) : new ZeroTier::Buf(frameData, frameLength & ZT_BUF_MEM_MASK));
reinterpret_cast<ZeroTier::Node *>(node)->RR->vl2->onLocalEthernet(tptr, network, ZeroTier::MAC(sourceMac), ZeroTier::MAC(destMac), etherType, vlanId, buf, frameLength);
return ZT_RESULT_OK;
} else {
return ZT_RESULT_ERROR_NETWORK_NOT_FOUND;
}
} catch (std::bad_alloc &exc) {
return ZT_RESULT_FATAL_ERROR_OUT_OF_MEMORY;
} catch (...) {
@ -198,14 +207,10 @@ enum ZT_ResultCode ZT_Node_multicastUnsubscribe(ZT_Node *node, uint64_t nwid, ui
}
uint64_t ZT_Node_address(ZT_Node *node)
{
return reinterpret_cast<ZeroTier::Node *>(node)->address();
}
{ return reinterpret_cast<ZeroTier::Node *>(node)->RR->identity.address().toInt(); }
const ZT_Identity *ZT_Node_identity(ZT_Node *node)
{
return (const ZT_Identity *)(&(reinterpret_cast<ZeroTier::Node *>(node)->identity()));
}
{ return (const ZT_Identity *)(&(reinterpret_cast<ZeroTier::Node *>(node)->identity())); }
void ZT_Node_status(ZT_Node *node, ZT_NodeStatus *status)
{
@ -920,8 +925,6 @@ int ZT_Dictionary_parse(const void *const dict, const unsigned int len, void *co
/********************************************************************************************************************/
uint64_t ZT_random()
{
return ZeroTier::Utils::random();
}
{ return ZeroTier::Utils::random(); }
} // extern "C"
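
Taken together, these hunks turn the C entry points into thin dispatchers: they no longer call into Node methods but go straight to the VL1/VL2 objects and the network map hanging off the node's RuntimeEnvironment. A condensed, illustrative C++ sketch of the frame path (the helper name dispatchFrame is hypothetical; every call in its body appears verbatim in the hunk above):

// Illustrative only; mirrors what ZT_Node_processVirtualNetworkFrame now does internally.
static ZT_ResultCode dispatchFrame(
    ZeroTier::Node *const node, void *const tptr, const uint64_t nwid,
    const uint64_t srcMac, const uint64_t dstMac, const unsigned int etherType,
    const unsigned int vlanId, ZeroTier::SharedPtr< ZeroTier::Buf > &frame, const unsigned int frameLength)
{
    // O(1), lock-free network lookup via the new TinyMap on the runtime environment.
    ZeroTier::SharedPtr< ZeroTier::Network > network(node->RR->networks->get(nwid));
    if (!network)
        return ZT_RESULT_ERROR_NETWORK_NOT_FOUND;
    // VL2 handles the Ethernet frame directly; Node itself is no longer in the data path.
    node->RR->vl2->onLocalEthernet(tptr, network, ZeroTier::MAC(srcMac), ZeroTier::MAC(dstMac),
        etherType, vlanId, frame, frameLength);
    return ZT_RESULT_OK;
}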


@ -9,7 +9,6 @@ configure_file(
set(core_headers
zerotier.h
Address.hpp
Blob.hpp
Buf.hpp
C25519.hpp
CapabilityCredential.hpp
@ -51,8 +50,10 @@ set(core_headers
SHA512.hpp
SharedPtr.hpp
Spinlock.hpp
Store.hpp
SymmetricKey.hpp
TagCredential.hpp
TinyMap.hpp
Topology.hpp
Trace.hpp
TriviallyCopyable.hpp


@ -163,7 +163,7 @@ ZT_Certificate_Network *Certificate::addSubjectNetwork(const uint64_t id, const
void Certificate::addSubjectCertificate(const uint8_t serialNo[ZT_SHA384_DIGEST_SIZE])
{
// Store local copy of serial in m_serials container.
m_serials.push_front(SHA384Hash(serialNo));
m_serials.push_front(H384(serialNo));
// Enlarge array of uint8_t pointers, set new pointer to local copy of serial, and set
// certificates to point to potentially reallocated array.


@ -22,7 +22,6 @@
#include "Locator.hpp"
#include "Dictionary.hpp"
#include "Utils.hpp"
#include "Blob.hpp"
#include "Containers.hpp"
namespace ZeroTier {
@ -63,11 +62,8 @@ public:
return *this;
}
/**
* @return SHA384Hash containing serial number
*/
ZT_INLINE SHA384Hash getSerialNo() const noexcept
{ return SHA384Hash(this->serialNo); }
ZT_INLINE H384 getSerialNo() const noexcept
{ return H384(this->serialNo); }
/**
* Add a subject node/identity without a locator
@ -223,7 +219,7 @@ private:
ForwardList< Identity > m_identities;
ForwardList< Locator > m_locators;
ForwardList< String > m_strings;
ForwardList< SHA384Hash > m_serials;
ForwardList< H384 > m_serials;
// These are stored in a vector because the memory needs to be contiguous.
Vector< ZT_Certificate_Identity > m_subjectIdentities;


@ -24,6 +24,7 @@
#include <list>
#include <set>
#include <string>
#include <algorithm>
#ifdef __CPP11__
#include <atomic>
@ -119,6 +120,60 @@ class Set : public std::set< V, std::less< V > >
typedef std::string String;
/**
* A 384-bit hash
*/
struct H384
{
uint64_t data[6];
ZT_INLINE H384() noexcept
{ Utils::zero< sizeof(data) >(data); }
explicit ZT_INLINE H384(const void *const d) noexcept
{ Utils::copy< 48 >(data, d); }
ZT_INLINE const uint8_t *bytes() const noexcept
{ return reinterpret_cast<const uint8_t *>(data); }
ZT_INLINE unsigned long hashCode() const noexcept
{ return (unsigned long)data[0]; }
ZT_INLINE operator bool() const noexcept
{ return ((data[0] != 0) && (data[1] != 0) && (data[2] != 0) && (data[3] != 0) && (data[4] != 0) && (data[5] != 0)); }
ZT_INLINE bool operator==(const H384 &b) const noexcept
{ return ((data[0] == b.data[0]) && (data[1] == b.data[1]) && (data[2] == b.data[2]) && (data[3] == b.data[3]) && (data[4] == b.data[4]) && (data[5] == b.data[5])); }
ZT_INLINE bool operator!=(const H384 &b) const noexcept
{ return !(*this == b); }
ZT_INLINE bool operator<(const H384 &b) const noexcept
{ return std::lexicographical_compare(data, data + 6, b.data, b.data + 6); }
ZT_INLINE bool operator>(const H384 &b) const noexcept
{ return (b < *this); }
ZT_INLINE bool operator<=(const H384 &b) const noexcept
{ return !(b < *this); }
ZT_INLINE bool operator>=(const H384 &b) const noexcept
{ return !(*this < b); }
};
static_assert(sizeof(H384) == 48, "H384 contains unnecessary padding");
/**
* A byte array
*
* @tparam S Size in bytes
*/
template< unsigned long S >
struct Blob
{
uint8_t data[S];
};
} // ZeroTier
#endif
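
H384 replaces the old SHA384Hash blob byte for byte: it constructs from any 48-byte buffer, compares wordwise, orders lexicographically, and exposes hashCode() so it can key the container types above. A minimal usage sketch (not part of the commit; indexBySerial is a hypothetical helper, and it assumes SHA384() from SHA512.hpp with the (out, in, len) call pattern used later in this diff):

#include "Containers.hpp"
#include "SHA512.hpp"

using namespace ZeroTier;

// Hypothetical helper: index payloads by their SHA-384 digest, the way TrustStore keys certificates.
static void indexBySerial(Map< H384, Vector< uint8_t > > &bySerial, const void *const payload, const unsigned int len)
{
    uint8_t digest[ZT_SHA384_DIGEST_SIZE];
    SHA384(digest, payload, len);
    const H384 serial(digest);   // copies the 48 bytes into six uint64_t words
    if (serial)                  // operator bool(): false if any of the six words is zero
        bySerial[serial].assign(reinterpret_cast< const uint8_t * >(payload),
                                reinterpret_cast< const uint8_t * >(payload) + len);
}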


@ -562,7 +562,7 @@ Network::Network(const RuntimeEnvironment *renv, void *tPtr, uint64_t nwid, cons
bool got = false;
try {
Dictionary dict;
Vector< uint8_t > nconfData(RR->node->stateObjectGet(tPtr, ZT_STATE_OBJECT_NETWORK_CONFIG, tmp, 1));
Vector< uint8_t > nconfData(RR->store->get(tPtr, ZT_STATE_OBJECT_NETWORK_CONFIG, tmp, 1));
if (nconfData.size() > 2) {
nconfData.push_back(0);
if (dict.decode(nconfData.data(), (unsigned int)nconfData.size())) {
@ -579,7 +579,7 @@ Network::Network(const RuntimeEnvironment *renv, void *tPtr, uint64_t nwid, cons
} catch (...) {}
if (!got)
RR->node->stateObjectPut(tPtr, ZT_STATE_OBJECT_NETWORK_CONFIG, tmp, 1, "\n", 1);
RR->store->put(tPtr, ZT_STATE_OBJECT_NETWORK_CONFIG, tmp, 1, "\n", 1);
}
if (!m_portInitialized) {
@ -1068,7 +1068,7 @@ int Network::setConfiguration(void *tPtr, const NetworkConfig &nconf, bool saveT
tmp[1] = 0;
Vector< uint8_t > d2;
d.encode(d2);
RR->node->stateObjectPut(tPtr, ZT_STATE_OBJECT_NETWORK_CONFIG, tmp, 1, d2.data(), (unsigned int)d2.size());
RR->store->put(tPtr, ZT_STATE_OBJECT_NETWORK_CONFIG, tmp, 1, d2.data(), (unsigned int)d2.size());
}
} catch (...) {}
}
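
Persistence of the cached network config now goes through the new Store wrapper rather than Node's stateObjectGet/Put. The read-side pattern used in the constructor above, pulled out as an illustrative helper (loadCachedConfig is hypothetical; the calls and the two-qword object ID follow the hunk):

// Illustrative only: read a cached network config back through the Store wrapper.
static bool loadCachedConfig(const RuntimeEnvironment *const RR, void *const tPtr, const uint64_t nwid, Dictionary &dict)
{
    uint64_t id[2];
    id[0] = nwid;        // object ID is the network ID followed by a zero qword
    id[1] = 0;
    Vector< uint8_t > data(RR->store->get(tPtr, ZT_STATE_OBJECT_NETWORK_CONFIG, id, 1));
    if (data.size() <= 2)
        return false;
    data.push_back(0);   // Dictionary::decode expects a terminated buffer, as above
    return dict.decode(data.data(), (unsigned int)data.size());
}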


@ -27,17 +27,16 @@
#include "VL2.hpp"
#include "Buf.hpp"
#include "TrustStore.hpp"
#include "Store.hpp"
namespace ZeroTier {
namespace {
/*
* All the core objects of ZeroTier in a single struct to reduce allocations.
*/
struct _NodeObjects
{
ZT_INLINE _NodeObjects(RuntimeEnvironment *const RR, void *const tPtr, const int64_t now) :
ZT_INLINE _NodeObjects(RuntimeEnvironment *const RR, Node *const n, void *const tPtr, const int64_t now) :
networks(),
t(RR),
expect(),
vl2(RR),
@ -46,6 +45,7 @@ struct _NodeObjects
sa(RR),
ts()
{
RR->networks = &networks;
RR->t = &t;
RR->expect = &expect;
RR->vl2 = &vl2;
@ -55,6 +55,7 @@ struct _NodeObjects
RR->ts = &ts;
}
TinyMap< SharedPtr< Network > > networks;
Trace t;
Expect expect;
VL2 vl2;
@ -73,10 +74,8 @@ Node::Node(
int64_t now) :
m_RR(this),
RR(&m_RR),
m_store(&m_RR),
m_objects(nullptr),
m_cb(*callbacks),
m_uPtr(uPtr),
m_networks(),
m_lastPeerPulse(0),
m_lastHousekeepingRun(0),
m_lastNetworkHousekeepingRun(0),
@ -86,64 +85,55 @@ Node::Node(
{
ZT_SPEW("Node starting up!");
// Load this node's identity.
uint64_t idtmp[2];
idtmp[0] = 0;
idtmp[1] = 0;
Vector< uint8_t > data(stateObjectGet(tPtr, ZT_STATE_OBJECT_IDENTITY_SECRET, idtmp, 0));
Utils::copy< sizeof(ZT_Node_Callbacks) >(&m_RR.cb, callbacks);
m_RR.uPtr = uPtr;
m_RR.store = &m_store;
Vector< uint8_t > data(m_store.get(tPtr, ZT_STATE_OBJECT_IDENTITY_SECRET, Utils::ZERO256, 0));
bool haveIdentity = false;
if (!data.empty()) {
data.push_back(0); // zero-terminate string
if (RR->identity.fromString((const char *)data.data())) {
RR->identity.toString(false, RR->publicIdentityStr);
RR->identity.toString(true, RR->secretIdentityStr);
if (m_RR.identity.fromString((const char *)data.data())) {
m_RR.identity.toString(false, m_RR.publicIdentityStr);
m_RR.identity.toString(true, m_RR.secretIdentityStr);
haveIdentity = true;
ZT_SPEW("loaded identity %s", RR->identity.toString().c_str());
}
}
// Generate a new identity if we don't have one.
if (!haveIdentity) {
RR->identity.generate(Identity::C25519);
RR->identity.toString(false, RR->publicIdentityStr);
RR->identity.toString(true, RR->secretIdentityStr);
idtmp[0] = RR->identity.address();
idtmp[1] = 0;
stateObjectPut(tPtr, ZT_STATE_OBJECT_IDENTITY_SECRET, idtmp, 1, RR->secretIdentityStr, (unsigned int)strlen(RR->secretIdentityStr));
stateObjectPut(tPtr, ZT_STATE_OBJECT_IDENTITY_PUBLIC, idtmp, 1, RR->publicIdentityStr, (unsigned int)strlen(RR->publicIdentityStr));
m_RR.identity.generate(Identity::C25519);
m_RR.identity.toString(false, m_RR.publicIdentityStr);
m_RR.identity.toString(true, m_RR.secretIdentityStr);
m_store.put(tPtr, ZT_STATE_OBJECT_IDENTITY_SECRET, Utils::ZERO256, 0, m_RR.secretIdentityStr, (unsigned int)strlen(m_RR.secretIdentityStr));
m_store.put(tPtr, ZT_STATE_OBJECT_IDENTITY_PUBLIC, Utils::ZERO256, 0, m_RR.publicIdentityStr, (unsigned int)strlen(m_RR.publicIdentityStr));
ZT_SPEW("no pre-existing identity found, created %s", RR->identity.toString().c_str());
} else {
idtmp[0] = RR->identity.address();
idtmp[1] = 0;
data = stateObjectGet(tPtr, ZT_STATE_OBJECT_IDENTITY_PUBLIC, idtmp, 1);
if ((data.empty()) || (memcmp(data.data(), RR->publicIdentityStr, strlen(RR->publicIdentityStr)) != 0)) {
stateObjectPut(tPtr, ZT_STATE_OBJECT_IDENTITY_PUBLIC, idtmp, 1, RR->publicIdentityStr, (unsigned int)strlen(RR->publicIdentityStr));
}
data = m_store.get(tPtr, ZT_STATE_OBJECT_IDENTITY_PUBLIC, Utils::ZERO256, 0);
if ((data.empty()) || (memcmp(data.data(), m_RR.publicIdentityStr, strlen(m_RR.publicIdentityStr)) != 0))
m_store.put(tPtr, ZT_STATE_OBJECT_IDENTITY_PUBLIC, Utils::ZERO256, 0, m_RR.publicIdentityStr, (unsigned int)strlen(m_RR.publicIdentityStr));
}
// Create a secret key for encrypting local data at rest.
uint8_t tmph[ZT_SHA384_DIGEST_SIZE];
RR->identity.hashWithPrivate(tmph);
SHA384(tmph, tmph, ZT_SHA384_DIGEST_SIZE);
RR->localCacheSymmetric.init(tmph);
Utils::burn(tmph, ZT_SHA384_DIGEST_SIZE);
uint8_t localSecretCipherKey[ZT_FINGERPRINT_HASH_SIZE];
m_RR.identity.hashWithPrivate(localSecretCipherKey);
++localSecretCipherKey[0];
SHA384(localSecretCipherKey, localSecretCipherKey, ZT_FINGERPRINT_HASH_SIZE);
m_RR.localSecretCipher.init(localSecretCipherKey);
// Generate a random sort order for privileged ports for use in NAT-t algorithms.
for (unsigned int i = 0; i < 1023; ++i)
RR->randomPrivilegedPortOrder[i] = (uint16_t)(i + 1);
m_RR.randomPrivilegedPortOrder[i] = (uint16_t)(i + 1);
for (unsigned int i = 0; i < 512; ++i) {
uint64_t rn = Utils::random();
const unsigned int a = (unsigned int)rn % 1023;
const unsigned int b = (unsigned int)(rn >> 32U) % 1023;
if (a != b) {
const uint16_t tmp = RR->randomPrivilegedPortOrder[a];
RR->randomPrivilegedPortOrder[a] = RR->randomPrivilegedPortOrder[b];
RR->randomPrivilegedPortOrder[b] = tmp;
const uint16_t tmp = m_RR.randomPrivilegedPortOrder[a];
m_RR.randomPrivilegedPortOrder[a] = m_RR.randomPrivilegedPortOrder[b];
m_RR.randomPrivilegedPortOrder[b] = tmp;
}
}
// Create all the things!
m_objects = new _NodeObjects(RR, tPtr, now);
m_objects = new _NodeObjects(&m_RR, this, tPtr, now);
ZT_SPEW("node initialized!");
postEvent(tPtr, ZT_EVENT_UP);
@ -151,11 +141,12 @@ Node::Node(
Node::~Node()
{
ZT_SPEW("Node shutting down (destructor called).");
ZT_SPEW("Node shutting down (in destructor).");
m_networks_l.lock();
m_networks_l.unlock();
RR->networks->clear();
m_networks.clear();
m_networks_l.unlock();
delete reinterpret_cast<_NodeObjects *>(m_objects);
@ -168,47 +159,15 @@ Node::~Node()
void Node::shutdown(void *tPtr)
{
m_networks_l.lock();
RR->networks->clear();
m_networks.clear();
m_networks_l.unlock();
postEvent(tPtr, ZT_EVENT_DOWN);
if (RR->topology)
RR->topology->saveAll(tPtr);
}
ZT_ResultCode Node::processWirePacket(
void *tPtr,
int64_t now,
int64_t localSocket,
const struct sockaddr_storage *remoteAddress,
SharedPtr< Buf > &packetData,
unsigned int packetLength,
volatile int64_t *nextBackgroundTaskDeadline)
{
m_now = now;
RR->vl1->onRemotePacket(tPtr, localSocket, (remoteAddress) ? InetAddress::NIL : *asInetAddress(remoteAddress), packetData, packetLength);
return ZT_RESULT_OK;
}
ZT_ResultCode Node::processVirtualNetworkFrame(
void *tPtr,
int64_t now,
uint64_t nwid,
uint64_t sourceMac,
uint64_t destMac,
unsigned int etherType,
unsigned int vlanId,
SharedPtr< Buf > &frameData,
unsigned int frameLength,
volatile int64_t *nextBackgroundTaskDeadline)
{
m_now = now;
SharedPtr< Network > nw(this->network(nwid));
if (likely(nw)) {
RR->vl2->onLocalEthernet(tPtr, nw, MAC(sourceMac), MAC(destMac), etherType, vlanId, frameData, frameLength);
return ZT_RESULT_OK;
} else {
return ZT_RESULT_ERROR_NETWORK_NOT_FOUND;
}
}
ZT_ResultCode Node::processBackgroundTasks(
void *tPtr,
int64_t now,
@ -242,9 +201,9 @@ ZT_ResultCode Node::processBackgroundTasks(
if ((now - m_lastNetworkHousekeepingRun) >= ZT_NETWORK_HOUSEKEEPING_PERIOD) {
m_lastHousekeepingRun = now;
ZT_SPEW("running networking housekeeping...");
RWMutex::RLock l(m_networks_l);
for (Map< uint64_t, SharedPtr< Network > >::const_iterator i(m_networks.begin()); i != m_networks.end(); ++i) {
i->second->doPeriodicTasks(tPtr, now);
Mutex::Lock l(m_networks_l);
for (Vector< SharedPtr< Network > >::const_iterator i(m_networks.begin()); i != m_networks.end(); ++i) {
(*i)->doPeriodicTasks(tPtr, now);
}
}
@ -252,17 +211,6 @@ ZT_ResultCode Node::processBackgroundTasks(
m_lastHousekeepingRun = now;
ZT_SPEW("running housekeeping...");
// Clean up any old local controller auth memoizations. This is an
// optimization for network controllers to know whether to accept
// or trust nodes without doing an extra cert check.
m_localControllerAuthorizations_l.lock();
for (Map< p_LocalControllerAuth, int64_t >::iterator i(m_localControllerAuthorizations.begin()); i != m_localControllerAuthorizations.end();) { // NOLINT(hicpp-use-auto,modernize-use-auto)
if ((i->second - now) > (ZT_NETWORK_AUTOCONF_DELAY * 3))
m_localControllerAuthorizations.erase(i++);
else ++i;
}
m_localControllerAuthorizations_l.unlock();
RR->topology->doPeriodicTasks(tPtr, now);
RR->sa->clean(now);
}
@ -287,6 +235,8 @@ ZT_ResultCode Node::join(
void *uptr,
void *tptr)
{
Mutex::Lock l(m_networks_l);
Fingerprint fp;
if (controllerFingerprint) {
fp = *controllerFingerprint;
@ -295,11 +245,13 @@ ZT_ResultCode Node::join(
ZT_SPEW("joining network %.16llx", nwid);
}
RWMutex::Lock l(m_networks_l);
SharedPtr< Network > &nw = m_networks[nwid];
if (nw)
return ZT_RESULT_OK;
nw.set(new Network(RR, tptr, nwid, fp, uptr, nullptr));
for (Vector< SharedPtr< Network > >::iterator n(m_networks.begin()); n != m_networks.end(); ++n) {
if ((*n)->id() == nwid)
return ZT_RESULT_OK;
}
SharedPtr< Network > network(new Network(RR, tptr, nwid, fp, uptr, nullptr));
m_networks.push_back(network);
RR->networks->set(nwid, network);
return ZT_RESULT_OK;
}
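
join() now registers each network twice: in the Vector m_networks guarded by a plain Mutex, which is only walked for housekeeping and enumeration, and in the lock-free TinyMap RR->networks keyed by network ID, which serves the per-packet lookups that used to go through Node::network(). Two hypothetical free functions sketching that split (not part of the commit):

// Hot path: constant-time, lock-free lookup, e.g. per inbound frame.
static SharedPtr< Network > fastLookup(const RuntimeEnvironment *const RR, const uint64_t nwid) noexcept
{
    return RR->networks->get(nwid);
}

// Cold path: full iteration under the Mutex, e.g. periodic housekeeping.
static void runPeriodicTasks(Vector< SharedPtr< Network > > &networks, Mutex &lock, void *const tPtr, const int64_t now)
{
    Mutex::Lock l(lock);
    for (Vector< SharedPtr< Network > >::const_iterator n(networks.begin()); n != networks.end(); ++n)
        (*n)->doPeriodicTasks(tPtr, now);
}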
@ -309,34 +261,35 @@ ZT_ResultCode Node::leave(
void **uptr,
void *tptr)
{
Mutex::Lock l(m_networks_l);
ZT_SPEW("leaving network %.16llx", nwid);
ZT_VirtualNetworkConfig ctmp;
m_networks_l.lock();
Map< uint64_t, SharedPtr< Network > >::iterator nwi(m_networks.find(nwid)); // NOLINT(hicpp-use-auto,modernize-use-auto)
if (nwi == m_networks.end()) {
m_networks_l.unlock();
return ZT_RESULT_OK;
SharedPtr< Network > network;
RR->networks->erase(nwid);
for (Vector< SharedPtr< Network > >::iterator n(m_networks.begin()); n != m_networks.end(); ++n) {
if ((*n)->id() == nwid) {
network.move(*n);
m_networks.erase(n);
break;
}
}
SharedPtr< Network > nw(nwi->second);
m_networks.erase(nwi);
m_networks_l.unlock();
if (uptr)
*uptr = *nw->userPtr();
nw->externalConfig(&ctmp);
RR->node->configureVirtualNetworkPort(tptr, nwid, uptr, ZT_VIRTUAL_NETWORK_CONFIG_OPERATION_DESTROY, &ctmp);
nw->destroy();
nw.zero();
uint64_t tmp[2];
tmp[0] = nwid;
tmp[1] = 0;
RR->node->stateObjectDelete(tptr, ZT_STATE_OBJECT_NETWORK_CONFIG, tmp, 1);
m_store.erase(tptr, ZT_STATE_OBJECT_NETWORK_CONFIG, tmp, 1);
return ZT_RESULT_OK;
if (network) {
if (uptr)
*uptr = *network->userPtr();
network->externalConfig(&ctmp);
RR->node->configureVirtualNetworkPort(tptr, nwid, uptr, ZT_VIRTUAL_NETWORK_CONFIG_OPERATION_DESTROY, &ctmp);
network->destroy();
return ZT_RESULT_OK;
}
return ZT_RESULT_ERROR_NETWORK_NOT_FOUND;
}
ZT_ResultCode Node::multicastSubscribe(
@ -346,11 +299,13 @@ ZT_ResultCode Node::multicastSubscribe(
unsigned long multicastAdi)
{
ZT_SPEW("multicast subscribe to %s:%lu", MAC(multicastGroup).toString().c_str(), multicastAdi);
const SharedPtr< Network > nw(this->network(nwid));
const SharedPtr< Network > nw(RR->networks->get(nwid));
if (nw) {
nw->multicastSubscribe(tPtr, MulticastGroup(MAC(multicastGroup), (uint32_t)(multicastAdi & 0xffffffff)));
return ZT_RESULT_OK;
} else return ZT_RESULT_ERROR_NETWORK_NOT_FOUND;
} else {
return ZT_RESULT_ERROR_NETWORK_NOT_FOUND;
}
}
ZT_ResultCode Node::multicastUnsubscribe(
@ -359,16 +314,15 @@ ZT_ResultCode Node::multicastUnsubscribe(
unsigned long multicastAdi)
{
ZT_SPEW("multicast unsubscribe from %s:%lu", MAC(multicastGroup).toString().c_str(), multicastAdi);
const SharedPtr< Network > nw(this->network(nwid));
const SharedPtr< Network > nw(RR->networks->get(nwid));
if (nw) {
nw->multicastUnsubscribe(MulticastGroup(MAC(multicastGroup), (uint32_t)(multicastAdi & 0xffffffff)));
return ZT_RESULT_OK;
} else return ZT_RESULT_ERROR_NETWORK_NOT_FOUND;
} else {
return ZT_RESULT_ERROR_NETWORK_NOT_FOUND;
}
}
uint64_t Node::address() const
{ return RR->identity.address().toInt(); }
void Node::status(ZT_NodeStatus *status) const
{
status->address = RR->identity.address().toInt();
@ -381,11 +335,12 @@ void Node::status(ZT_NodeStatus *status) const
struct p_ZT_PeerListPrivate : public ZT_PeerList
{
// Actual containers for the memory, hidden from external users.
std::vector< ZT_Peer > p_peers;
std::list< std::vector<ZT_Path> > p_paths;
std::list< Identity > p_identities;
std::list< Blob<ZT_LOCATOR_MARSHAL_SIZE_MAX> > p_locators;
Vector< ZT_Peer > p_peers;
List< Vector< ZT_Path > > p_paths;
List< Identity > p_identities;
List< Blob< ZT_LOCATOR_MARSHAL_SIZE_MAX > > p_locators;
};
static void p_peerListFreeFunction(const void *pl)
{
if (pl)
@ -437,10 +392,10 @@ ZT_PeerList *Node::peers() const
p.networks = nullptr;
p.networkCount = 0; // TODO: networks this peer belongs to
Vector< SharedPtr<Path> > ztPaths;
Vector< SharedPtr< Path > > ztPaths;
pp.getAllPaths(ztPaths);
if (ztPaths.empty()) {
pl->p_paths.push_back(std::vector< ZT_Path >());
pl->p_paths.push_back(Vector< ZT_Path >());
std::vector< ZT_Path > &apiPaths = pl->p_paths.back();
apiPaths.resize(ztPaths.size());
for (unsigned long i = 0; i < (unsigned long)ztPaths.size(); ++i) {
@ -477,7 +432,7 @@ ZT_PeerList *Node::peers() const
pl->peerCount = (unsigned long)pl->p_peers.size();
return pl;
} catch ( ... ) {
} catch (...) {
delete pl;
return nullptr;
}
@ -485,18 +440,19 @@ ZT_PeerList *Node::peers() const
ZT_VirtualNetworkConfig *Node::networkConfig(uint64_t nwid) const
{
SharedPtr< Network > nw(network(nwid));
const SharedPtr< Network > nw(RR->networks->get(nwid));
if (nw) {
ZT_VirtualNetworkConfig *const nc = (ZT_VirtualNetworkConfig *)::malloc(sizeof(ZT_VirtualNetworkConfig));
nw->externalConfig(nc);
return nc;
} else {
return nullptr;
}
return nullptr;
}
ZT_VirtualNetworkList *Node::networks() const
{
RWMutex::RLock l(m_networks_l);
Mutex::Lock l(m_networks_l);
char *const buf = (char *)::malloc(sizeof(ZT_VirtualNetworkList) + (sizeof(ZT_VirtualNetworkConfig) * m_networks.size()));
if (!buf)
@ -506,8 +462,8 @@ ZT_VirtualNetworkList *Node::networks() const
nl->networks = (ZT_VirtualNetworkConfig *)(buf + sizeof(ZT_VirtualNetworkList));
nl->networkCount = 0;
for (Map< uint64_t, SharedPtr< Network > >::const_iterator i(m_networks.begin()); i != m_networks.end(); ++i) // NOLINT(modernize-use-auto,modernize-loop-convert,hicpp-use-auto)
i->second->externalConfig(&(nl->networks[nl->networkCount++]));
for (Vector< SharedPtr< Network > >::const_iterator i(m_networks.begin()); i != m_networks.end(); ++i)
(*i)->externalConfig(&(nl->networks[nl->networkCount++]));
return nl;
}
@ -516,9 +472,12 @@ void Node::setNetworkUserPtr(
uint64_t nwid,
void *ptr)
{
SharedPtr< Network > nw(network(nwid));
if (nw)
SharedPtr< Network > nw(RR->networks->get(nwid));
if (nw) {
m_networks_l.lock(); // ensure no concurrent modification of user PTR in network
*(nw->userPtr()) = ptr;
m_networks_l.unlock();
}
}
void Node::setInterfaceAddresses(
@ -600,7 +559,7 @@ ZT_ResultCode Node::deleteCertificate(
{
if (!serialNo)
return ZT_RESULT_ERROR_BAD_PARAMETER;
RR->ts->erase(SHA384Hash(serialNo));
RR->ts->erase(H384(serialNo));
RR->ts->update(-1, nullptr);
return ZT_RESULT_OK;
}
@ -627,12 +586,12 @@ ZT_CertificateList *Node::listCertificates()
return nullptr;
p_certificateListInternal *const clint = reinterpret_cast<p_certificateListInternal *>(reinterpret_cast<uint8_t *>(cl) + sizeof(ZT_CertificateList));
new (clint) p_certificateListInternal;
new(clint) p_certificateListInternal;
clint->entries = RR->ts->all(false);
clint->c.reserve(clint->entries.size());
clint->t.reserve(clint->entries.size());
for(Vector< SharedPtr< TrustStore::Entry > >::const_iterator i(clint->entries.begin()); i!=clint->entries.end(); ++i) {
for (Vector< SharedPtr< TrustStore::Entry > >::const_iterator i(clint->entries.begin()); i != clint->entries.end(); ++i) {
clint->c.push_back(&((*i)->certificate()));
clint->t.push_back((*i)->localTrust());
}
@ -670,52 +629,29 @@ int Node::sendUserMessage(
void Node::setController(void *networkControllerInstance)
{
RR->localNetworkController = reinterpret_cast<NetworkController *>(networkControllerInstance);
m_RR.localNetworkController = reinterpret_cast<NetworkController *>(networkControllerInstance);
if (networkControllerInstance)
RR->localNetworkController->init(RR->identity, this);
m_RR.localNetworkController->init(RR->identity, this);
}
// Methods used only within the core ----------------------------------------------------------------------------------
Vector< uint8_t > Node::stateObjectGet(void *const tPtr, ZT_StateObjectType type, const uint64_t *id, const unsigned int idSize)
{
Vector< uint8_t > r;
if (m_cb.stateGetFunction) {
void *data = nullptr;
void (*freeFunc)(void *) = nullptr;
int l = m_cb.stateGetFunction(
reinterpret_cast<ZT_Node *>(this),
m_uPtr,
tPtr,
type,
id,
idSize,
&data,
&freeFunc);
if ((l > 0) && (data) && (freeFunc)) {
r.assign(reinterpret_cast<const uint8_t *>(data), reinterpret_cast<const uint8_t *>(data) + l);
freeFunc(data);
}
}
return r;
}
bool Node::shouldUsePathForZeroTierTraffic(void *tPtr, const Identity &id, const int64_t localSocket, const InetAddress &remoteAddress)
{
{
RWMutex::RLock l(m_networks_l);
for (Map< uint64_t, SharedPtr< Network > >::iterator i(m_networks.begin()); i != m_networks.end(); ++i) { // NOLINT(hicpp-use-auto,modernize-use-auto,modernize-loop-convert)
for (unsigned int k = 0, j = i->second->config().staticIpCount; k < j; ++k) {
if (i->second->config().staticIps[k].containsAddress(remoteAddress))
Mutex::Lock l(m_networks_l);
for (Vector< SharedPtr< Network > >::iterator i(m_networks.begin()); i != m_networks.end(); ++i) {
for (unsigned int k = 0, j = (*i)->config().staticIpCount; k < j; ++k) {
if ((*i)->config().staticIps[k].containsAddress(remoteAddress))
return false;
}
}
}
if (m_cb.pathCheckFunction) {
return (m_cb.pathCheckFunction(
if (RR->cb.pathCheckFunction) {
return (RR->cb.pathCheckFunction(
reinterpret_cast<ZT_Node *>(this),
m_uPtr,
RR->uPtr,
tPtr,
id.address().toInt(),
(const ZT_Identity *)&id,
@ -728,10 +664,10 @@ bool Node::shouldUsePathForZeroTierTraffic(void *tPtr, const Identity &id, const
bool Node::externalPathLookup(void *tPtr, const Identity &id, int family, InetAddress &addr)
{
if (m_cb.pathLookupFunction) {
return (m_cb.pathLookupFunction(
if (RR->cb.pathLookupFunction) {
return (RR->cb.pathLookupFunction(
reinterpret_cast<ZT_Node *>(this),
m_uPtr,
RR->uPtr,
tPtr,
id.address().toInt(),
reinterpret_cast<const ZT_Identity *>(&id),
@ -741,27 +677,12 @@ bool Node::externalPathLookup(void *tPtr, const Identity &id, int family, InetAd
return false;
}
bool Node::localControllerHasAuthorized(const int64_t now, const uint64_t nwid, const Address &addr) const
{
m_localControllerAuthorizations_l.lock();
Map<Node::p_LocalControllerAuth, int64_t>::const_iterator i(m_localControllerAuthorizations.find(p_LocalControllerAuth(nwid, addr)));
const int64_t at = (i == m_localControllerAuthorizations.end()) ? -1LL : i->second;
m_localControllerAuthorizations_l.unlock();
if (at > 0)
return ((now - at) < (ZT_NETWORK_AUTOCONF_DELAY * 3));
return false;
}
// Implementation of NetworkController::Sender ------------------------------------------------------------------------
void Node::ncSendConfig(uint64_t nwid, uint64_t requestPacketId, const Address &destination, const NetworkConfig &nc, bool sendLegacyFormatConfig)
{
m_localControllerAuthorizations_l.lock();
m_localControllerAuthorizations[p_LocalControllerAuth(nwid, destination)] = now();
m_localControllerAuthorizations_l.unlock();
if (destination == RR->identity.address()) {
SharedPtr< Network > n(network(nwid));
SharedPtr< Network > n(RR->networks->get(nwid));
if (!n)
return;
n->setConfiguration((void *)0, nc, true);
@ -813,8 +734,9 @@ void Node::ncSendConfig(uint64_t nwid, uint64_t requestPacketId, const Address &
void Node::ncSendRevocation(const Address &destination, const RevocationCredential &rev)
{
if (destination == RR->identity.address()) {
SharedPtr< Network > n(network(rev.networkId()));
if (!n) return;
SharedPtr< Network > n(RR->networks->get(rev.networkId()));
if (!n)
return;
n->addCredential(nullptr, RR->identity, rev);
} else {
// TODO
@ -834,8 +756,9 @@ void Node::ncSendRevocation(const Address &destination, const RevocationCredenti
void Node::ncSendError(uint64_t nwid, uint64_t requestPacketId, const Address &destination, NetworkController::ErrorCode errorCode)
{
if (destination == RR->identity.address()) {
SharedPtr< Network > n(network(nwid));
if (!n) return;
SharedPtr< Network > n(RR->networks->get(nwid));
if (!n)
return;
switch (errorCode) {
case NetworkController::NC_ERROR_OBJECT_NOT_FOUND:
case NetworkController::NC_ERROR_INTERNAL_SERVER_ERROR:


@ -25,6 +25,7 @@
#include "NetworkController.hpp"
#include "Buf.hpp"
#include "Containers.hpp"
#include "Store.hpp"
namespace ZeroTier {
@ -50,27 +51,6 @@ public:
// Public API Functions ---------------------------------------------------------------------------------------------
ZT_ResultCode processWirePacket(
void *tPtr,
int64_t now,
int64_t localSocket,
const struct sockaddr_storage *remoteAddress,
SharedPtr< Buf > &packetData,
unsigned int packetLength,
volatile int64_t *nextBackgroundTaskDeadline);
ZT_ResultCode processVirtualNetworkFrame(
void *tPtr,
int64_t now,
uint64_t nwid,
uint64_t sourceMac,
uint64_t destMac,
unsigned int etherType,
unsigned int vlanId,
SharedPtr< Buf > &frameData,
unsigned int frameLength,
volatile int64_t *nextBackgroundTaskDeadline);
ZT_ResultCode processBackgroundTasks(
void *tPtr,
int64_t now,
@ -98,8 +78,6 @@ public:
uint64_t multicastGroup,
unsigned long multicastAdi);
uint64_t address() const;
void status(
ZT_NodeStatus *status) const;
@ -160,72 +138,6 @@ public:
ZT_INLINE int64_t now() const noexcept
{ return m_now; }
/**
* Send packet to the physical wire via callback
*
* @param tPtr Thread pointer
* @param localSocket Local socket or -1 to use all/any
* @param addr Destination address
* @param data Data to send
* @param len Length in bytes
* @param ttl TTL or 0 for default/max
* @return True if send appears successful
*/
ZT_INLINE bool putPacket(void *tPtr, const int64_t localSocket, const InetAddress &addr, const void *data, unsigned int len, unsigned int ttl = 0) noexcept
{
return (m_cb.wirePacketSendFunction(
reinterpret_cast<ZT_Node *>(this),
m_uPtr,
tPtr,
localSocket,
reinterpret_cast<const ZT_InetAddress *>(&addr.as.ss),
data,
len,
ttl) == 0);
}
/**
* Inject frame into virtual Ethernet tap
*
* @param tPtr Thread pointer
* @param nwid Network ID
* @param nuptr Network-associated user pointer
* @param source Source MAC address
* @param dest Destination MAC address
* @param etherType 16-bit Ethernet type
* @param vlanId Ethernet VLAN ID (currently unused)
* @param data Ethernet frame data
* @param len Ethernet frame length in bytes
*/
ZT_INLINE void putFrame(void *tPtr, uint64_t nwid, void **nuptr, const MAC &source, const MAC &dest, unsigned int etherType, unsigned int vlanId, const void *data, unsigned int len) noexcept
{
m_cb.virtualNetworkFrameFunction(
reinterpret_cast<ZT_Node *>(this),
m_uPtr,
tPtr,
nwid,
nuptr,
source.toInt(),
dest.toInt(),
etherType,
vlanId,
data,
len);
}
/**
* @param nwid Network ID
* @return Network associated with ID
*/
ZT_INLINE SharedPtr< Network > network(const uint64_t nwid) const noexcept
{
RWMutex::RLock l(m_networks_l);
Map< uint64_t, SharedPtr< Network > >::const_iterator n(m_networks.find(nwid));
if (likely(n != m_networks.end()))
return n->second;
return SharedPtr< Network >();
}
/**
* @return Known local interface addresses for this node
*/
@ -244,7 +156,7 @@ public:
* @param mdSize Size of event data
*/
ZT_INLINE void postEvent(void *tPtr, ZT_Event ev, const void *md = nullptr, const unsigned int mdSize = 0) noexcept
{ m_cb.eventCallback(reinterpret_cast<ZT_Node *>(this), m_uPtr, tPtr, ev, md, mdSize); }
{ RR->cb.eventCallback(reinterpret_cast<ZT_Node *>(this), RR->uPtr, tPtr, ev, md, mdSize); }
/**
* Post network port configuration via external callback
@ -256,51 +168,7 @@ public:
* @param nc Network config info
*/
ZT_INLINE void configureVirtualNetworkPort(void *tPtr, uint64_t nwid, void **nuptr, ZT_VirtualNetworkConfigOperation op, const ZT_VirtualNetworkConfig *nc) noexcept
{ m_cb.virtualNetworkConfigFunction(reinterpret_cast<ZT_Node *>(this), m_uPtr, tPtr, nwid, nuptr, op, nc); }
/**
* @return True if node appears online
*/
ZT_INLINE bool online() const noexcept
{ return m_online; }
/**
* Get a state object
*
* @param tPtr Thread pointer
* @param type Object type to get
* @param id Object ID or NULL if this type does not use one
* @return Vector containing data or empty vector if not found or empty
*/
Vector< uint8_t > stateObjectGet(void *tPtr, ZT_StateObjectType type, const uint64_t *id, unsigned int idSize);
/**
* Store a state object
*
* @param tPtr Thread pointer
* @param type Object type to get
* @param id Object ID
* @param data Data to store
* @param len Length of data
*/
ZT_INLINE void stateObjectPut(void *const tPtr, ZT_StateObjectType type, const uint64_t *const id, const unsigned int idSize, const void *const data, const unsigned int len) noexcept
{
if (m_cb.statePutFunction)
m_cb.statePutFunction(reinterpret_cast<ZT_Node *>(this), m_uPtr, tPtr, type, id, idSize, data, (int)len);
}
/**
* Delete a state object
*
* @param tPtr Thread pointer
* @param type Object type to delete
* @param id Object ID
*/
ZT_INLINE void stateObjectDelete(void *const tPtr, ZT_StateObjectType type, const uint64_t *const id, const unsigned int idSize) noexcept
{
if (m_cb.statePutFunction)
m_cb.statePutFunction(reinterpret_cast<ZT_Node *>(this), m_uPtr, tPtr, type, id, idSize, nullptr, -1);
}
{ RR->cb.virtualNetworkConfigFunction(reinterpret_cast<ZT_Node *>(this), RR->uPtr, tPtr, nwid, nuptr, op, nc); }
/**
* Check whether a path should be used for ZeroTier traffic
@ -332,70 +200,27 @@ public:
ZT_INLINE const Identity &identity() const noexcept
{ return m_RR.identity; }
/**
* Check whether a local controller has authorized a member on a network
*
* This is used by controllers to avoid needless certificate checks when we already
* know if this has occurred. It's a bit of a hack but saves a massive amount of
* controller CPU. It's easiest to put this here, and it imposes no overhead on
* non-controllers.
*
* @param now Current time
* @param nwid Network ID
* @param addr Member address to check
* @return True if member has been authorized
*/
bool localControllerHasAuthorized(int64_t now, uint64_t nwid, const Address &addr) const;
// Implementation of NetworkController::Sender interface
virtual void ncSendConfig(uint64_t nwid, uint64_t requestPacketId, const Address &destination, const NetworkConfig &nc, bool sendLegacyFormatConfig);
virtual void ncSendRevocation(const Address &destination, const RevocationCredential &rev);
virtual void ncSendError(uint64_t nwid, uint64_t requestPacketId, const Address &destination, NetworkController::ErrorCode errorCode);
private:
RuntimeEnvironment m_RR;
RuntimeEnvironment *const RR;
public:
const RuntimeEnvironment *const RR;
private:
// Data store wrapper
Store m_store;
// Pointer to a struct defined in Node that holds instances of core objects.
void *m_objects;
// Function pointers to C callbacks supplied via the API.
ZT_Node_Callbacks m_cb;
// A user-specified opaque pointer passed back via API callbacks.
void *m_uPtr;
// Cache that remembers whether or not the locally running network controller (if any) has authorized
// someone on their most recent query. This is used by the network controller as a memoization optimization
// to elide unnecessary signature verifications. It might get moved in the future since this is sort of a
// weird place to put it.
struct p_LocalControllerAuth
{
uint64_t nwid, address;
ZT_INLINE p_LocalControllerAuth(const uint64_t nwid_, const Address &address_) noexcept: nwid(nwid_), address(address_.toInt())
{}
ZT_INLINE unsigned long hashCode() const noexcept
{ return (unsigned long)(nwid + address); }
ZT_INLINE bool operator==(const p_LocalControllerAuth &a) const noexcept
{ return ((a.nwid == nwid) && (a.address == address)); }
ZT_INLINE bool operator!=(const p_LocalControllerAuth &a) const noexcept
{ return ((a.nwid != nwid) || (a.address != address)); }
ZT_INLINE bool operator<(const p_LocalControllerAuth &a) const noexcept
{ return ((a.nwid < nwid) || ((a.nwid == nwid) && (a.address < address))); }
};
Map< p_LocalControllerAuth, int64_t > m_localControllerAuthorizations;
Mutex m_localControllerAuthorizations_l;
// Locally joined networks by network ID.
Map< uint64_t, SharedPtr< Network > > m_networks;
RWMutex m_networks_l;
// This isn't the primary network lookup but holds a vector of networks for rapid iteration through all of them.
Vector< SharedPtr< Network > > m_networks;
Mutex m_networks_l;
// These are local interface addresses that have been configured via the API
// and can be pushed to other nodes.


@ -19,7 +19,7 @@ namespace ZeroTier {
bool Path::send(const RuntimeEnvironment *const RR, void *const tPtr, const void *const data, const unsigned int len, const int64_t now) noexcept
{
if (likely(RR->node->putPacket(tPtr, m_localSocket, m_addr, data, len))) {
if (likely(RR->cb.wirePacketSendFunction(reinterpret_cast<ZT_Node *>(RR->node), RR->uPtr, tPtr, m_localSocket, reinterpret_cast<const ZT_InetAddress *>(&m_addr), data, len, 0) == 0)) {
m_lastOut = now;
m_outMeter.log(now, len);
return true;


@ -226,7 +226,7 @@ unsigned int Peer::hello(void *tPtr, int64_t localSocket, const InetAddress &atA
p1305.finish(polyMac);
Utils::storeMachineEndian< uint64_t >(outp.unsafeData + ZT_PROTO_PACKET_MAC_INDEX, polyMac[0]);
return (likely(RR->node->putPacket(tPtr, localSocket, atAddress, outp.unsafeData, ii))) ? ii : 0;
return (likely(RR->cb.wirePacketSendFunction(reinterpret_cast<ZT_Node *>(RR->node), RR->uPtr, tPtr, localSocket, reinterpret_cast<const ZT_InetAddress *>(&atAddress), outp.unsafeData, ii, 0) == 0)) ? ii : 0;
}
void Peer::pulse(void *const tPtr, const int64_t now, const bool isRoot)
@ -443,7 +443,7 @@ void Peer::contact(void *tPtr, const int64_t now, const Endpoint &ep, int tries)
// traverse some NAT types. It has no effect otherwise.
if (ep.isInetAddr() && ep.ip().isV4()) {
++foo;
RR->node->putPacket(tPtr, -1, ep.ip(), &foo, 1, 2);
RR->cb.wirePacketSendFunction(reinterpret_cast<ZT_Node *>(RR->node), RR->uPtr, tPtr, -1, reinterpret_cast<const ZT_InetAddress *>(&ep.ip()), &foo, 1, 2);
}
// Make sure address is not already in the try queue. If so just update it.
@ -508,7 +508,7 @@ void Peer::save(void *tPtr) const
uint64_t id[2];
id[0] = m_id.address().toInt();
id[1] = 0;
RR->node->stateObjectPut(tPtr, ZT_STATE_OBJECT_PEER, id, 1, buf, (unsigned int)len + 8);
RR->store->put(tPtr, ZT_STATE_OBJECT_PEER, id, 1, buf, (unsigned int)len + 8);
}
}
@ -528,9 +528,9 @@ int Peer::marshal(uint8_t data[ZT_PEER_MARSHAL_SIZE_MAX]) const noexcept
// SECURITY: encryption in place is only to protect secrets if they are
// cached to local storage. It's not used over the wire. Dumb ECB is fine
// because secret keys are random and have no structure to reveal.
RR->localCacheSymmetric.encrypt(m_identityKey->secret, data + 1 + ZT_ADDRESS_LENGTH);
RR->localCacheSymmetric.encrypt(m_identityKey->secret + 16, data + 1 + ZT_ADDRESS_LENGTH + 16);
RR->localCacheSymmetric.encrypt(m_identityKey->secret + 32, data + 1 + ZT_ADDRESS_LENGTH + 32);
RR->localSecretCipher.encrypt(m_identityKey->secret, data + 1 + ZT_ADDRESS_LENGTH);
RR->localSecretCipher.encrypt(m_identityKey->secret + 16, data + 1 + ZT_ADDRESS_LENGTH + 16);
RR->localSecretCipher.encrypt(m_identityKey->secret + 32, data + 1 + ZT_ADDRESS_LENGTH + 32);
int p = 1 + ZT_ADDRESS_LENGTH + 48;
@ -593,9 +593,9 @@ int Peer::unmarshal(const uint8_t *restrict data, const int len) noexcept
if (Address(data + 1) == RR->identity.address()) {
uint8_t k[ZT_SYMMETRIC_KEY_SIZE];
static_assert(ZT_SYMMETRIC_KEY_SIZE == 48, "marshal() and unmarshal() must be revisited if ZT_SYMMETRIC_KEY_SIZE is changed");
RR->localCacheSymmetric.decrypt(data + 1 + ZT_ADDRESS_LENGTH, k);
RR->localCacheSymmetric.decrypt(data + 1 + ZT_ADDRESS_LENGTH + 16, k + 16);
RR->localCacheSymmetric.decrypt(data + 1 + ZT_ADDRESS_LENGTH + 32, k + 32);
RR->localSecretCipher.decrypt(data + 1 + ZT_ADDRESS_LENGTH, k);
RR->localSecretCipher.decrypt(data + 1 + ZT_ADDRESS_LENGTH + 16, k + 16);
RR->localSecretCipher.decrypt(data + 1 + ZT_ADDRESS_LENGTH + 32, k + 32);
m_identityKey.set(new SymmetricKey(RR->node->now(), k));
Utils::burn(k, sizeof(k));
}
@ -715,11 +715,11 @@ unsigned int Peer::m_sendProbe(void *tPtr, int64_t localSocket, const InetAddres
InetAddress tmp(atAddress);
for (unsigned int i = 0; i < numPorts; ++i) {
tmp.setPort(ports[i]);
RR->node->putPacket(tPtr, -1, tmp, p, ZT_PROTO_MIN_PACKET_LENGTH);
RR->cb.wirePacketSendFunction(reinterpret_cast<ZT_Node *>(RR->node), RR->uPtr, tPtr, -1, reinterpret_cast<const ZT_InetAddress *>(&tmp), p, ZT_PROTO_MIN_PACKET_LENGTH, 0);
}
return ZT_PROTO_MIN_PACKET_LENGTH * numPorts;
} else {
RR->node->putPacket(tPtr, -1, atAddress, p, ZT_PROTO_MIN_PACKET_LENGTH);
RR->cb.wirePacketSendFunction(reinterpret_cast<ZT_Node *>(RR->node), RR->uPtr, tPtr, -1, reinterpret_cast<const ZT_InetAddress *>(&atAddress), p, ZT_PROTO_MIN_PACKET_LENGTH, 0);
return ZT_PROTO_MIN_PACKET_LENGTH;
}
}
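
With Node::putPacket gone, Path and Peer invoke the wire-send callback directly off the RuntimeEnvironment. The pattern repeated in Path::send, Peer::hello, Peer::contact, and Peer::m_sendProbe, collected into one hypothetical helper for reference (not part of the commit; the callback invocation is exactly as used above):

// Hypothetical convenience wrapper around the raw callback call sites above.
static ZT_INLINE bool sendWire(const RuntimeEnvironment *const RR, void *const tPtr, const int64_t localSocket,
    const InetAddress &addr, const void *const data, const unsigned int len, const unsigned int ttl = 0) noexcept
{
    return RR->cb.wirePacketSendFunction(
        reinterpret_cast< ZT_Node * >(RR->node), RR->uPtr, tPtr, localSocket,
        reinterpret_cast< const ZT_InetAddress * >(&addr), data, len, ttl) == 0;
}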


@ -18,6 +18,8 @@
#include "Utils.hpp"
#include "Identity.hpp"
#include "AES.hpp"
#include "TinyMap.hpp"
#include "SharedPtr.hpp"
namespace ZeroTier {
@ -30,6 +32,8 @@ class SelfAwareness;
class Trace;
class Expect;
class TrustStore;
class Store;
class Network;
/**
* ZeroTier::Node execution context
@ -44,7 +48,9 @@ public:
ZT_INLINE RuntimeEnvironment(Node *const n) noexcept:
instanceId(Utils::getSecureRandomU64()),
node(n),
uPtr(nullptr),
localNetworkController(nullptr),
store(nullptr),
t(nullptr),
expect(nullptr),
vl2(nullptr),
@ -68,9 +74,17 @@ public:
// Node instance that owns this RuntimeEnvironment
Node *const node;
// Callbacks specified by caller who created node
ZT_Node_Callbacks cb;
// User pointer specified by external code via API
void *uPtr;
// This is set externally to an instance of this base class
NetworkController *localNetworkController;
Store *store;
TinyMap< SharedPtr< Network > > *networks;
Trace *t;
Expect *expect;
VL2 *vl2;
@ -84,8 +98,8 @@ public:
char publicIdentityStr[ZT_IDENTITY_STRING_BUFFER_LENGTH];
char secretIdentityStr[ZT_IDENTITY_STRING_BUFFER_LENGTH];
// AES keyed with a hash of this node's identity secret keys for local cache encryption at rest (where needed).
AES localCacheSymmetric;
// Symmetric key for encrypting secrets at rest on this system.
AES localSecretCipher;
// Privileged ports from 1 to 1023 in a random order (for IPv4 NAT traversal)
uint16_t randomPrivilegedPortOrder[1023];
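
Because the callbacks, the user pointer, the Store, the network map, and the identity all live here now, a core component only needs the RR pointer and never has to reach back into Node. A small hypothetical illustration (ExampleComponent is not in the commit):

// Hypothetical component: all external and shared state arrives through RR.
class ExampleComponent
{
public:
    explicit ZT_INLINE ExampleComponent(const RuntimeEnvironment *const renv) noexcept : RR(renv) {}

    ZT_INLINE bool networkKnown(const uint64_t nwid) const
    {
        const SharedPtr< Network > nw(RR->networks->get(nwid));   // shared lock-free network index
        return (bool)nw;
    }

    ZT_INLINE const Identity &self() const noexcept
    { return RR->identity; }                                      // identity loaded/generated by Node's constructor

private:
    const RuntimeEnvironment *const RR;
};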


@ -20,11 +20,10 @@
namespace ZeroTier {
/**
* Simple zero-overhead introspective reference counted pointer
* An introspective reference counted pointer.
*
* This is an introspective shared pointer. Classes that need to be reference
* counted must list this as a 'friend' and must have a private instance of
* atomic<int> called __refCount.
* Classes must have an atomic<int> field called __refCount and set this class
* as a friend to be used with this.
*/
template< typename T >
class SharedPtr : public TriviallyCopyable
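
For reference, the contract restated by the comment above, as a minimal hypothetical example (Widget is not part of the commit; it assumes SharedPtr takes over the count once it adopts the pointer):

#include <atomic>

// Hypothetical reference-counted class satisfying SharedPtr's requirements.
class Widget
{
    friend class SharedPtr< Widget >;   // SharedPtr needs access to __refCount
public:
    ZT_INLINE Widget() noexcept : __refCount(0) {}
    void doWork() {}
private:
    std::atomic< int > __refCount;      // managed by SharedPtr after adoption
};

static void widgetExample()
{
    // Allocate with new and hand ownership over, as core classes such as Network and Buf do.
    SharedPtr< Widget > w(new Widget());
    w->doWork();
}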


@ -32,6 +32,24 @@
class Spinlock
{
public:
/**
* Pause current thread using whatever methods might be available
*
* This is broken out since it's used in a few other places where
* spinlock-like constructions are used.
*/
ZT_INLINE static void pause() noexcept
{
#ifdef ZT_ARCH_X64
_mm_pause();
#endif
#ifdef __LINUX__
sched_yield();
#else
std::this_thread::yield();
#endif
}
ZT_INLINE Spinlock() noexcept: m_locked(false)
{}
@ -39,11 +57,7 @@ public:
{
if (unlikely(m_locked.test_and_set(std::memory_order_acquire))) {
do {
#ifdef __LINUX__
sched_yield();
#else
std::this_thread::yield();
#endif
Spinlock::pause();
} while (m_locked.test_and_set(std::memory_order_acquire));
}
}
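
pause() is broken out as a public static precisely so other spin-wait constructions can share the same backoff; TinyMap, added below, is the first user. A tiny hypothetical example of such a spin-wait:

#include <atomic>

// Hypothetical spin-wait on a flag, reusing Spinlock::pause() for the backoff.
static void spinUntilReady(const std::atomic< bool > &ready) noexcept
{
    while (!ready.load(std::memory_order_acquire))
        Spinlock::pause();   // _mm_pause on x64, sched_yield on Linux, std::this_thread::yield otherwise
}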

core/Store.hpp (new file, 84 lines)

@ -0,0 +1,84 @@
/*
* Copyright (c)2013-2021 ZeroTier, Inc.
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
*/
/****/
#ifndef ZT_STORE_HPP
#define ZT_STORE_HPP
#include "Constants.hpp"
#include "Containers.hpp"
#include "RuntimeEnvironment.hpp"
namespace ZeroTier {
/**
* Wrapper around API callbacks for data store
*/
class Store
{
public:
ZT_INLINE Store(const RuntimeEnvironment *const renv): RR(renv)
{}
/**
* Get a state object
*
* @param tPtr Thread pointer to pass through
* @param type Object type
* @param id Object ID
* @param idSize Size of object ID in qwords
* @return Data or empty vector if not found
*/
ZT_INLINE Vector< uint8_t > get(void *tPtr, ZT_StateObjectType type, const uint64_t *id, unsigned int idSize) const
{
Vector< uint8_t > dv;
void *data = nullptr;
void (*freeFunc)(void *) = nullptr;
const int r = RR->cb.stateGetFunction(reinterpret_cast<ZT_Node *>(RR->node), RR->uPtr, tPtr, type, id, idSize, &data, &freeFunc);
if (r > 0)
dv.assign(reinterpret_cast<const uint8_t *>(data), reinterpret_cast<const uint8_t *>(data) + r);
if ((data) && (freeFunc))
freeFunc(data);
return dv;
}
/**
* Store a state object
*
* @param tPtr Thread pointer to pass through
* @param type Object type
* @param id Object ID
* @param idSize Size of object ID in qwords
* @param data Data to store
* @param len Length of data
*/
ZT_INLINE void put(void *const tPtr, ZT_StateObjectType type, const uint64_t *const id, const unsigned int idSize, const void *const data, const unsigned int len) noexcept
{ RR->cb.statePutFunction(reinterpret_cast<ZT_Node *>(this), RR->uPtr, tPtr, type, id, idSize, data, (int)len); }
/**
* Erase a state object from the object store
*
* @param tPtr Thread pointer to pass through
* @param type Object type
* @param id Object ID
* @param idSize Size of object ID in qwords
*/
ZT_INLINE void erase(void *const tPtr, ZT_StateObjectType type, const uint64_t *const id, const unsigned int idSize) noexcept
{ RR->cb.statePutFunction(reinterpret_cast<ZT_Node *>(this), RR->uPtr, tPtr, type, id, idSize, nullptr, -1); }
private:
const RuntimeEnvironment *RR;
};
} // namespace ZeroTier
#endif
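
Store is a thin pass-through to the stateGetFunction/statePutFunction callbacks the host registered; object IDs are arrays of qwords and idSize counts qwords, not bytes. An illustrative round trip mirroring how Peer::save and Node use it (cachePeerBlob is a hypothetical helper):

// Illustrative only: write, read back, and erase a peer state object via the Store wrapper.
static void cachePeerBlob(const RuntimeEnvironment *const RR, void *const tPtr,
    const uint64_t address, const void *const blob, const unsigned int len)
{
    uint64_t id[2];
    id[0] = address;   // peer address, as in Peer::save()
    id[1] = 0;
    RR->store->put(tPtr, ZT_STATE_OBJECT_PEER, id, 1, blob, len);               // idSize == 1 qword
    const Vector< uint8_t > readBack(RR->store->get(tPtr, ZT_STATE_OBJECT_PEER, id, 1));
    if (readBack.empty()) {
        // Host declined to persist (or returned nothing); drop any stale copy.
        RR->store->erase(tPtr, ZT_STATE_OBJECT_PEER, id, 1);
    }
}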

core/TinyMap.hpp (new file, 148 lines)

@ -0,0 +1,148 @@
/*
* Copyright (c)2013-2021 ZeroTier, Inc.
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
*/
/****/
#ifndef ZT_TINYMAP_HPP
#define ZT_TINYMAP_HPP
#include "Constants.hpp"
#include "Containers.hpp"
#include "SharedPtr.hpp"
#include "Network.hpp"
#include "Spinlock.hpp"
#define ZT_TINYMAP_BUCKETS 1024
#define ZT_TINYMAP_BUCKET_MASK 1023
#define ZT_TINYMAP_LOCKED_POINTER (~((uintptr_t)0))
namespace ZeroTier {
/**
* A small, simple, and very fast hash map with a fixed bucket count.
*
* This is used where it's necessary to keep small numbers of items indexed by
* an integer, such as networks mapping to network IDs. It's optimized for very
* fast lookup, with lookups sometimes requiring only a few instructions. It
* uses a "lock free" (actually pointer-as-spinlock) design.
*/
template< typename V >
class TinyMap
{
private:
typedef Vector< std::pair< uint64_t, V > > EV;
public:
ZT_INLINE TinyMap()
{}
ZT_INLINE ~TinyMap()
{ this->clear(); }
ZT_INLINE void clear()
{
for(unsigned int i=0; i < ZT_TINYMAP_BUCKETS; ++i) {
for(;;) {
const uintptr_t vptr = m_buckets[i].exchange(ZT_TINYMAP_LOCKED_POINTER, std::memory_order_acquire);
if (likely(vptr != ZT_TINYMAP_LOCKED_POINTER)) {
if (vptr != 0)
delete reinterpret_cast<EV *>(vptr);
m_buckets[i].store(0, std::memory_order_release);
break;
} else {
Spinlock::pause();
}
}
}
}
ZT_INLINE V get(const uint64_t key) noexcept
{
V tmp;
std::atomic<uintptr_t> &bucket = m_buckets[(key ^ (key >> 32)) & ZT_TINYMAP_BUCKET_MASK];
for(;;) {
const uintptr_t vptr = bucket.exchange(ZT_TINYMAP_LOCKED_POINTER, std::memory_order_acquire);
if (likely(vptr != ZT_TINYMAP_LOCKED_POINTER)) {
if (likely(vptr != 0)) {
for(typename EV::const_iterator n(reinterpret_cast<const EV *>(vptr)->begin()); n != reinterpret_cast<const EV *>(vptr)->end(); ++n) {
if (likely(n->first == key)) {
tmp = n->second;
break;
}
}
}
bucket.store(vptr, std::memory_order_release);
return tmp;
} else {
Spinlock::pause();
}
}
}
ZT_INLINE void set(const uint64_t key, const V &value)
{
std::atomic<uintptr_t> &bucket = m_buckets[(key ^ (key >> 32)) & ZT_TINYMAP_BUCKET_MASK];
for(;;) {
uintptr_t vptr = bucket.exchange(ZT_TINYMAP_LOCKED_POINTER, std::memory_order_acquire);
if (likely(vptr != ZT_TINYMAP_LOCKED_POINTER)) {
if (vptr == 0) {
vptr = reinterpret_cast<uintptr_t>(new EV());
} else {
for (typename EV::iterator n(reinterpret_cast<EV *>(vptr)->begin()); n != reinterpret_cast<EV *>(vptr)->end(); ++n) {
if (n->first == key) {
n->second = value;
bucket.store(vptr, std::memory_order_release);
return;
}
}
}
reinterpret_cast<EV *>(vptr)->push_back(std::pair< uint64_t, V >(key, value));
bucket.store(vptr, std::memory_order_release);
return;
} else {
Spinlock::pause();
}
}
}
ZT_INLINE void erase(const uint64_t key)
{
std::atomic<uintptr_t> &bucket = m_buckets[(key ^ (key >> 32)) & ZT_TINYMAP_BUCKET_MASK];
for(;;) {
uintptr_t vptr = bucket.exchange(ZT_TINYMAP_LOCKED_POINTER, std::memory_order_acquire);
if (likely(vptr != ZT_TINYMAP_LOCKED_POINTER)) {
if (likely(vptr != 0)) {
for (typename EV::iterator n(reinterpret_cast<EV *>(vptr)->begin()); n != reinterpret_cast<EV *>(vptr)->end(); ++n) {
if (n->first == key) {
reinterpret_cast<EV *>(vptr)->erase(n);
break;
}
}
if (reinterpret_cast<EV *>(vptr)->empty()) {
delete reinterpret_cast<EV *>(vptr);
vptr = 0;
}
}
bucket.store(vptr, std::memory_order_release);
return;
} else {
Spinlock::pause();
}
}
}
private:
std::atomic<uintptr_t> m_buckets[ZT_TINYMAP_BUCKETS];
};
} // namespace ZeroTier
#endif
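
Usage is intentionally minimal: set() inserts or overwrites, get() returns the value by copy (a default-constructed, i.e. nil, value when the key is absent), and erase() frees a bucket's vector once it empties, all synchronized per bucket by swapping the pointer with a sentinel. An illustrative sketch matching how RR->networks is used elsewhere in this commit:

// Illustrative only; this is exactly the shape of RR->networks.
static void tinyMapExample(const uint64_t nwid, const SharedPtr< Network > &network)
{
    TinyMap< SharedPtr< Network > > networks;

    networks.set(nwid, network);                     // register at join()
    SharedPtr< Network > nw(networks.get(nwid));     // per-packet lookup: hash, one bucket spin, short scan
    if (!nw) {
        // absent key: get() hands back a default-constructed (nil) SharedPtr
    }
    networks.erase(nwid);                            // deregister at leave()
}   // ~TinyMap() runs clear(), releasing anything still stored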


@ -170,7 +170,7 @@ void Topology::m_loadCached(void *tPtr, const Address &zta, SharedPtr< Peer > &p
uint64_t id[2];
id[0] = zta.toInt();
id[1] = 0;
Vector< uint8_t > data(RR->node->stateObjectGet(tPtr, ZT_STATE_OBJECT_PEER, id, 1));
Vector< uint8_t > data(RR->store->get(tPtr, ZT_STATE_OBJECT_PEER, id, 1));
if (data.size() > 8) {
const uint8_t *d = data.data();
int dl = (int)data.size();


@ -24,7 +24,6 @@
#include "SharedPtr.hpp"
#include "ScopedPtr.hpp"
#include "Fingerprint.hpp"
#include "Blob.hpp"
#include "FCV.hpp"
#include "Certificate.hpp"
#include "Containers.hpp"


@ -21,10 +21,10 @@ TrustStore::TrustStore()
TrustStore::~TrustStore()
{}
SharedPtr< TrustStore::Entry > TrustStore::get(const SHA384Hash &serial) const
SharedPtr< TrustStore::Entry > TrustStore::get(const H384 &serial) const
{
RWMutex::RLock l(m_lock);
Map< SHA384Hash, SharedPtr< Entry > >::const_iterator i(m_bySerial.find(serial));
Map< H384, SharedPtr< Entry > >::const_iterator i(m_bySerial.find(serial));
return (i != m_bySerial.end()) ? i->second : SharedPtr< TrustStore::Entry >();
}
@ -55,7 +55,7 @@ Vector< SharedPtr< TrustStore::Entry > > TrustStore::all(const bool includeRejec
RWMutex::RLock l(m_lock);
Vector< SharedPtr< Entry > > r;
r.reserve(m_bySerial.size());
for (Map< SHA384Hash, SharedPtr< Entry > >::const_iterator i(m_bySerial.begin()); i != m_bySerial.end(); ++i) {
for (Map< H384, SharedPtr< Entry > >::const_iterator i(m_bySerial.begin()); i != m_bySerial.end(); ++i) {
if ((includeRejectedCertificates) || (i->second->error() == ZT_CERTIFICATE_ERROR_NONE))
r.push_back(i->second);
}
@ -66,7 +66,7 @@ Vector< SharedPtr< TrustStore::Entry > > TrustStore::rejects() const
{
RWMutex::RLock l(m_lock);
Vector< SharedPtr< Entry > > r;
for (Map< SHA384Hash, SharedPtr< Entry > >::const_iterator c(m_bySerial.begin()); c != m_bySerial.end(); ++c) {
for (Map< H384, SharedPtr< Entry > >::const_iterator c(m_bySerial.begin()); c != m_bySerial.end(); ++c) {
if (c->second->error() != ZT_CERTIFICATE_ERROR_NONE)
r.push_back(c->second);
}
@ -79,7 +79,7 @@ void TrustStore::add(const Certificate &cert, const unsigned int localTrust)
m_addQueue.push_front(SharedPtr< Entry >(new Entry(cert, localTrust)));
}
void TrustStore::erase(const SHA384Hash &serial)
void TrustStore::erase(const H384 &serial)
{
RWMutex::Lock l(m_lock);
m_deleteQueue.push_front(serial);
@ -88,12 +88,12 @@ void TrustStore::erase(const SHA384Hash &serial)
// Recursive function to trace a certificate up the chain to a CA, returning true
// if the CA is reached and the path length is less than the maximum. Note that only
// non-rejected (no errors) certificates will be in bySignedCert.
static bool p_validatePath(const Map< SHA384Hash, Vector< SharedPtr< TrustStore::Entry > > > &bySignedCert, const SharedPtr< TrustStore::Entry > &entry, unsigned int pathLength)
static bool p_validatePath(const Map< H384, Vector< SharedPtr< TrustStore::Entry > > > &bySignedCert, const SharedPtr< TrustStore::Entry > &entry, unsigned int pathLength)
{
if (((entry->localTrust() & ZT_CERTIFICATE_LOCAL_TRUST_FLAG_ROOT_CA) != 0) && (pathLength <= entry->certificate().maxPathLength))
return true;
if (pathLength < ZT_CERTIFICATE_MAX_PATH_LENGTH) {
const Map< SHA384Hash, Vector< SharedPtr< TrustStore::Entry > > >::const_iterator signers(bySignedCert.find(SHA384Hash(entry->certificate().serialNo)));
const Map< H384, Vector< SharedPtr< TrustStore::Entry > > >::const_iterator signers(bySignedCert.find(H384(entry->certificate().serialNo)));
if (signers != bySignedCert.end()) {
for (Vector< SharedPtr< TrustStore::Entry > >::const_iterator signer(signers->second.begin()); signer != signers->second.end(); ++signer) {
if ((*signer != entry) && (p_validatePath(bySignedCert, *signer, pathLength + 1)))
@ -111,7 +111,7 @@ void TrustStore::update(const int64_t clock, Vector< SharedPtr< Entry > > *const
// (Re)compute error codes for existing certs, but we don't have to do a full
// signature check here since that's done when they're taken out of the add queue.
bool errorStateModified = false;
for (Map< SHA384Hash, SharedPtr< Entry > >::const_iterator c(m_bySerial.begin()); c != m_bySerial.end(); ++c) {
for (Map< H384, SharedPtr< Entry > >::const_iterator c(m_bySerial.begin()); c != m_bySerial.end(); ++c) {
const ZT_CertificateError err = c->second->m_certificate.verify(clock, false);
errorStateModified |= (c->second->m_error.exchange((int)err, std::memory_order_relaxed) != (int)err);
}
@ -127,7 +127,7 @@ void TrustStore::update(const int64_t clock, Vector< SharedPtr< Entry > > *const
// performed here.
while (!m_addQueue.empty()) {
m_addQueue.front()->m_error.store((int)m_addQueue.front()->m_certificate.verify(clock, true), std::memory_order_relaxed);
m_bySerial[SHA384Hash(m_addQueue.front()->m_certificate.serialNo)].move(m_addQueue.front());
m_bySerial[H384(m_addQueue.front()->m_certificate.serialNo)].move(m_addQueue.front());
m_addQueue.pop_front();
}
@ -137,19 +137,19 @@ void TrustStore::update(const int64_t clock, Vector< SharedPtr< Entry > > *const
m_deleteQueue.pop_front();
}
Map< SHA384Hash, Vector< SharedPtr< Entry > > > bySignedCert;
Map< H384, Vector< SharedPtr< Entry > > > bySignedCert;
for (;;) {
// Create a reverse lookup mapping from signed certs to signer certs for certificate
// path validation. Only include good certificates.
for (Map< SHA384Hash, SharedPtr< Entry > >::const_iterator c(m_bySerial.begin()); c != m_bySerial.end(); ++c) {
for (Map< H384, SharedPtr< Entry > >::const_iterator c(m_bySerial.begin()); c != m_bySerial.end(); ++c) {
if (c->second->error() == ZT_CERTIFICATE_ERROR_NONE) {
for (unsigned int j = 0; j < c->second->m_certificate.subject.certificateCount; ++j)
bySignedCert[SHA384Hash(c->second->m_certificate.subject.certificates[j])].push_back(c->second);
bySignedCert[H384(c->second->m_certificate.subject.certificates[j])].push_back(c->second);
}
}
// Validate certificate paths and reject any certificates that do not trace back to a CA.
for (Map< SHA384Hash, SharedPtr< Entry > >::const_iterator c(m_bySerial.begin()); c != m_bySerial.end(); ++c) {
for (Map< H384, SharedPtr< Entry > >::const_iterator c(m_bySerial.begin()); c != m_bySerial.end(); ++c) {
if (c->second->error() == ZT_CERTIFICATE_ERROR_NONE) {
if (!p_validatePath(bySignedCert, c->second, 0))
c->second->m_error.store((int)ZT_CERTIFICATE_ERROR_INVALID_CHAIN, std::memory_order_relaxed);
@ -160,7 +160,7 @@ void TrustStore::update(const int64_t clock, Vector< SharedPtr< Entry > > *const
// that have been superseded by newly issued certificates with the same subject.
bool exitLoop = true;
m_bySubjectUniqueId.clear();
for (Map< SHA384Hash, SharedPtr< Entry > >::const_iterator c(m_bySerial.begin()); c != m_bySerial.end();) {
for (Map< H384, SharedPtr< Entry > >::const_iterator c(m_bySerial.begin()); c != m_bySerial.end();) {
if (c->second->error() == ZT_CERTIFICATE_ERROR_NONE) {
const unsigned int uniqueIdSize = c->second->m_certificate.subject.uniqueIdSize;
if ((uniqueIdSize > 0) && (uniqueIdSize <= 1024)) { // 1024 is a sanity check value, actual unique IDs are <100 bytes
@ -200,7 +200,7 @@ void TrustStore::update(const int64_t clock, Vector< SharedPtr< Entry > > *const
// Populate mapping of identities to certificates whose subjects reference them.
m_bySubjectIdentity.clear();
for (Map< SHA384Hash, SharedPtr< Entry > >::const_iterator c(m_bySerial.begin()); c != m_bySerial.end(); ++c) {
for (Map< H384, SharedPtr< Entry > >::const_iterator c(m_bySerial.begin()); c != m_bySerial.end(); ++c) {
if (c->second->error() == ZT_CERTIFICATE_ERROR_NONE) {
for (unsigned int i = 0; i < c->second->m_certificate.subject.identityCount; ++i) {
const Identity *const id = reinterpret_cast<const Identity *>(c->second->m_certificate.subject.identities[i].identity);
@ -212,7 +212,7 @@ void TrustStore::update(const int64_t clock, Vector< SharedPtr< Entry > > *const
// Purge and return purged certificates if this option is selected.
if (purge) {
for (Map< SHA384Hash, SharedPtr< Entry > >::const_iterator c(m_bySerial.begin()); c != m_bySerial.end();) {
for (Map< H384, SharedPtr< Entry > >::const_iterator c(m_bySerial.begin()); c != m_bySerial.end();) {
if (c->second->error() != ZT_CERTIFICATE_ERROR_NONE) {
purge->push_back(c->second);
m_bySerial.erase(c++);


@ -18,7 +18,6 @@
#include "RuntimeEnvironment.hpp"
#include "Containers.hpp"
#include "Certificate.hpp"
#include "Blob.hpp"
#include "SHA512.hpp"
#include "SharedPtr.hpp"
#include "Identity.hpp"
@ -106,7 +105,7 @@ public:
* @param serial SHA384 hash of certificate
* @return Entry or empty/nil if not found
*/
SharedPtr< Entry > get(const SHA384Hash &serial) const;
SharedPtr< Entry > get(const H384 &serial) const;
/**
* Get roots specified by root set certificates in the local store.
@ -153,7 +152,7 @@ public:
*
* @param serial Serial of certificate to delete
*/
void erase(const SHA384Hash &serial);
void erase(const H384 &serial);
/**
* Validate all certificates and their certificate chains
@ -166,11 +165,11 @@ public:
void update(int64_t clock, Vector< SharedPtr< Entry > > *purge);
private:
Map< SHA384Hash, SharedPtr< Entry > > m_bySerial; // all certificates
Map< H384, SharedPtr< Entry > > m_bySerial; // all certificates
Map< Vector< uint8_t >, SharedPtr< Entry > > m_bySubjectUniqueId; // non-rejected certificates only
Map< Fingerprint, Vector< SharedPtr< Entry > > > m_bySubjectIdentity; // non-rejected certificates only
ForwardList< SharedPtr< Entry > > m_addQueue;
ForwardList< SHA384Hash > m_deleteQueue;
ForwardList< H384 > m_deleteQueue;
RWMutex m_lock;
};
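
Certificates are keyed by their H384 serial end to end now. For reference, a sketch of deleting one through the trust store, mirroring Node::deleteCertificate earlier in this diff (removeCert is a hypothetical wrapper):

// Illustrative only; same calls Node::deleteCertificate makes above.
static ZT_ResultCode removeCert(const RuntimeEnvironment *const RR, const uint8_t serialNo[ZT_SHA384_DIGEST_SIZE])
{
    if (!serialNo)
        return ZT_RESULT_ERROR_BAD_PARAMETER;
    RR->ts->erase(H384(serialNo));   // queue the serial for removal
    RR->ts->update(-1, nullptr);     // revalidate the store, with the same arguments Node passes
    return ZT_RESULT_OK;
}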


@ -286,7 +286,7 @@ static ZT_INLINE bool allZero(const void *const b, unsigned int l) noexcept
static ZT_INLINE char *stok(char *str, const char *delim, char **saveptr) noexcept
{
#ifdef __WINDOWS__
return strtok_s(str,delim,saveptr);
return strtok_s(str, delim, saveptr);
#else
return strtok_r(str, delim, saveptr);
#endif
@ -319,6 +319,7 @@ static ZT_INLINE unsigned int countBits(const uint64_t v) noexcept
{ return (unsigned int)__builtin_popcountll((unsigned long long)v); }
#else
template<typename T>
static ZT_INLINE unsigned int countBits(T v) noexcept
{
@ -327,6 +328,7 @@ static ZT_INLINE unsigned int countBits(T v) noexcept
v = (v + (v >> 4)) & (T)~(T)0/255*15;
return (unsigned int)((v * ((~((T)0))/((T)255))) >> ((sizeof(T) - 1) * 8));
}
#endif
/**


@ -13,7 +13,6 @@
#include "VL1.hpp"
#include "RuntimeEnvironment.hpp"
#include "Node.hpp"
#include "Topology.hpp"
#include "VL2.hpp"
#include "AES.hpp"


@ -13,7 +13,6 @@
#include "VL2.hpp"
#include "RuntimeEnvironment.hpp"
#include "Node.hpp"
#include "VL1.hpp"
#include "Topology.hpp"
#include "Peer.hpp"