Mirror of https://github.com/zerotier/ZeroTierOne.git (synced 2025-06-07 13:03:45 +02:00)

Commit 407f737212 (parent 0d58865061)
A bunch of Topology simplification; integrate some cert and root changes.

18 changed files with 378 additions and 644 deletions
@@ -60,10 +60,6 @@ Commands:
blacklist cidr <IP/bits> <boolean> Toggle physical path blacklisting
blacklist if <prefix> <boolean> Toggle interface prefix blacklisting
portmap <boolean> Toggle use of uPnP or NAT-PMP
roots List root peers
root [command] - Root management commands
trust <identity | url> [endpoint] Add a root or a set of roots
remove <address | url | serial> Remove a root or set of roots
controller <command> [option] - Local controller management commands
networks List networks run by local controller
new Create a new network
@@ -1,26 +0,0 @@
/*
* Copyright (c)2013-2020 ZeroTier, Inc.
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2024-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
*/
/****/

package cli

func Root(basePath, authToken string, args []string, jsonOutput bool) {
if len(args) > 0 {
switch args[0] {

case "add":

case "remove":

}
}
}
@@ -13,8 +13,6 @@

package main

import "C"

import (
"flag"
"fmt"
@@ -137,8 +135,6 @@ func main() {
authTokenRequired(basePath, *tflag, *tTflag)
case "roots":
cli.Peers(basePath, authTokenRequired(basePath, *tflag, *tTflag), cmdArgs, *jflag, true)
case "root":
cli.Root(basePath, authTokenRequired(basePath, *tflag, *tTflag), cmdArgs, *jflag)
case "controller":
case "set":
cli.Set(basePath, authTokenRequired(basePath, *tflag, *tTflag), cmdArgs)
@@ -33,6 +33,23 @@

namespace ZeroTier {

template< typename V >
class Vector : public std::vector< V >
{
public:
ZT_INLINE Vector()
{}

template< typename I >
ZT_INLINE Vector(I begin,I end) :
std::vector< V >(begin, end)
{}
};

template< typename V >
class List : public std::list< V >
{};

#ifdef __CPP11__

struct intl_MapHasher

@@ -40,12 +57,16 @@ struct intl_MapHasher
template< typename O >
std::size_t operator()(const O &obj) const noexcept
{ return (std::size_t)obj.hashCode(); }

std::size_t operator()(const uint64_t i) const noexcept
{ return (std::size_t)Utils::hash64(i ^ Utils::s_mapNonce); }

std::size_t operator()(const int64_t i) const noexcept
{ return (std::size_t)Utils::hash64((uint64_t)i ^ Utils::s_mapNonce); }

std::size_t operator()(const uint32_t i) const noexcept
{ return (std::size_t)Utils::hash32(i ^ (uint32_t)Utils::s_mapNonce); }

std::size_t operator()(const int32_t i) const noexcept
{ return (std::size_t)Utils::hash32((uint32_t)i ^ (uint32_t)Utils::s_mapNonce); }
};
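Editor's note: intl_MapHasher hashes either an object's hashCode() or a raw integer, always salted with the per-process Utils::s_mapNonce. The actual Map alias that consumes it is not shown in this hunk; the sketch below is only an illustration of how such a hasher typically plugs into std::unordered_map when __CPP11__ is defined.

// Sketch only (not from this diff): wiring intl_MapHasher into an
// unordered_map-backed Map alias. The real Map/SortedMap definitions live
// elsewhere in Containers.hpp.
#include <cstdint>
#include <unordered_map>

namespace ZeroTier {

template< typename K, typename V >
class Map : public std::unordered_map< K, V, intl_MapHasher >
{};

} // namespace ZeroTier

// Usage: integer keys and any type exposing hashCode() hash through the same
// nonce-salted mixers, e.g. ZeroTier::Map< uint64_t, int > m; m[12345ULL] = 1;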
@@ -74,23 +95,6 @@ template< typename K, typename V >
class SortedMap : public std::map< K, V >
{};

template< typename V >
class Vector : public std::vector< V >
{
public:
ZT_INLINE Vector()
{}

template< typename I >
ZT_INLINE Vector(I begin,I end) :
std::vector< V >(begin, end)
{}
};

template< typename V >
class List : public std::list< V >
{};

#ifdef __CPP11__

template< typename V >
@@ -14,4 +14,10 @@
#include "Defaults.hpp"

namespace ZeroTier {
namespace Defaults {

const unsigned int CERTIFICATES_BYTES = 0;
const uint8_t CERTIFICATES[4] = {0,0,0,0};

} // namespace Defaults
} // namespace ZeroTier
@@ -14,8 +14,15 @@
#ifndef ZT_DEFAULTS_HPP
#define ZT_DEFAULTS_HPP

namespace ZeroTier {
#include "Constants.hpp"

namespace ZeroTier {
namespace Defaults {

extern const unsigned int CERTIFICATES_BYTES;
extern const uint8_t CERTIFICATES[];

} // namespace Defaults
} // namespace ZeroTier

#endif
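Editor's note: the new Defaults namespace exposes a compiled-in certificate blob (currently an empty placeholder). A consumer is expected to check CERTIFICATES_BYTES before decoding, which is how the Topology constructor later in this commit uses it. A minimal sketch of that guard, with Dictionary standing in for ZeroTier's own class:

// Sketch: load the baked-in default trust data if any was compiled in.
#include "Defaults.hpp"

static bool loadDefaultTrustData(Dictionary &d)
{
	// A zero byte count means no default certificates were built into this binary.
	if (ZeroTier::Defaults::CERTIFICATES_BYTES == 0)
		return false;
	return d.decode(ZeroTier::Defaults::CERTIFICATES, ZeroTier::Defaults::CERTIFICATES_BYTES);
}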
@@ -241,7 +241,7 @@ ZT_ResultCode Node::processBackgroundTasks(
ZT_SPEW("running pulse() on each peer...");
try {
Vector< SharedPtr< Peer > > allPeers, rootPeers;
RR->topology->getAllPeers(allPeers, rootPeers);
RR->topology->allPeers(allPeers, rootPeers);

bool online = false;
for (Vector< SharedPtr< Peer > >::iterator p(allPeers.begin()); p != allPeers.end(); ++p) {

@@ -250,8 +250,6 @@ ZT_ResultCode Node::processBackgroundTasks(
online |= ((isRoot || rootPeers.empty()) && (*p)->directlyConnected(now));
}

RR->topology->rankRoots();

if (m_online.exchange(online) != online)
postEvent(tPtr, online ? ZT_EVENT_ONLINE : ZT_EVENT_OFFLINE);
} catch (...) {
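Editor's note: the online flag here is edge-triggered: postEvent() fires only when the value returned by the atomic exchange differs from the new value, so an event is emitted once per transition rather than on every background pass. A standalone sketch of the same pattern with std::atomic (names are illustrative, not from the ZeroTier code):

#include <atomic>
#include <cstdio>

// Sketch: report a state transition exactly once per change, never per poll.
static std::atomic<bool> g_online(false);

static void updateOnline(bool nowOnline)
{
	// exchange() returns the previous value; compare it to detect a transition.
	if (g_online.exchange(nowOnline) != nowOnline)
		std::printf("state changed: %s\n", nowOnline ? "ONLINE" : "OFFLINE");
}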
@ -382,21 +380,6 @@ ZT_ResultCode Node::multicastUnsubscribe(
|
|||
} else return ZT_RESULT_ERROR_NETWORK_NOT_FOUND;
|
||||
}
|
||||
|
||||
ZT_ResultCode Node::addRoot(
|
||||
void *tPtr,
|
||||
const ZT_Identity *id)
|
||||
{
|
||||
return (RR->topology->addRoot(tPtr, *reinterpret_cast<const Identity *>(id))) ? ZT_RESULT_OK : ZT_RESULT_ERROR_BAD_PARAMETER;
|
||||
}
|
||||
|
||||
ZT_ResultCode Node::removeRoot(
|
||||
void *tPtr,
|
||||
const uint64_t address)
|
||||
{
|
||||
RR->topology->removeRoot(tPtr, Address(address));
|
||||
return ZT_RESULT_OK;
|
||||
}
|
||||
|
||||
uint64_t Node::address() const
|
||||
{
|
||||
return RR->identity.address().toInt();
|
||||
|
@@ -413,8 +396,9 @@ void Node::status(ZT_NodeStatus *status) const

ZT_PeerList *Node::peers() const
{
Vector< SharedPtr< Peer > > peers;
RR->topology->getAllPeers(peers);
Vector< SharedPtr< Peer > > peers, rootPeers;
RR->topology->allPeers(peers, rootPeers);

std::sort(peers.begin(), peers.end(), _sortPeerPtrsByAddress());

const unsigned int bufSize =

@@ -458,7 +442,7 @@ ZT_PeerList *Node::peers() const
p->versionRev = -1;
}
p->latency = (*pi)->latency();
p->root = RR->topology->isRoot((*pi)->identity()) ? 1 : 0;
p->root = (std::find(rootPeers.begin(), rootPeers.end(), *pi) != rootPeers.end()) ? 1 : 0;

p->networkCount = 0;
// TODO: enumerate network memberships
@@ -988,28 +972,6 @@ enum ZT_ResultCode ZT_Node_multicastUnsubscribe(ZT_Node *node, uint64_t nwid, ui
}
}

enum ZT_ResultCode ZT_Node_addRoot(ZT_Node *node, void *tptr, const ZT_Identity *id)
{
try {
return reinterpret_cast<ZeroTier::Node *>(node)->addRoot(tptr, id);
} catch (std::bad_alloc &exc) {
return ZT_RESULT_FATAL_ERROR_OUT_OF_MEMORY;
} catch (...) {
return ZT_RESULT_ERROR_INTERNAL;
}
}

enum ZT_ResultCode ZT_Node_removeRoot(ZT_Node *node, void *tptr, const uint64_t address)
{
try {
return reinterpret_cast<ZeroTier::Node *>(node)->removeRoot(tptr, address);
} catch (std::bad_alloc &exc) {
return ZT_RESULT_FATAL_ERROR_OUT_OF_MEMORY;
} catch (...) {
return ZT_RESULT_ERROR_INTERNAL;
}
}

uint64_t ZT_Node_address(ZT_Node *node)
{
return reinterpret_cast<ZeroTier::Node *>(node)->address();
@@ -109,14 +109,6 @@ public:
uint64_t multicastGroup,
unsigned long multicastAdi);

ZT_ResultCode addRoot(
void *tptr,
const ZT_Identity *id);

ZT_ResultCode removeRoot(
void *tptr,
const uint64_t address);

uint64_t address() const;

void status(
@@ -156,7 +156,7 @@ void Peer::send(void *tPtr, int64_t now, const void *data, unsigned int len) noe
if (via) {
via->send(RR, tPtr, data, len, now);
} else {
const SharedPtr< Peer > root(RR->topology->root());
const SharedPtr< Peer > root(RR->topology->root(now));
if ((root) && (root.ptr() != this)) {
via = root->path(now);
if (via) {

@@ -397,7 +397,7 @@ void Peer::pulse(void *const tPtr, const int64_t now, const bool isRoot)

// Send a HELLO indirectly if we were not able to send one via any direct path.
if (needHello) {
const SharedPtr< Peer > root(RR->topology->root());
const SharedPtr< Peer > root(RR->topology->root(now));
if (root) {
const SharedPtr< Path > via(root->path(now));
if (via) {
@@ -75,8 +75,10 @@ void SelfAwareness::iam(void *tPtr, const Identity &reporter, const int64_t rece
}

// Reset all paths within this scope and address family
_ResetWithinScope rset(tPtr, now, myPhysicalAddress.family(), (InetAddress::IpScope)scope);
RR->topology->eachPeer< _ResetWithinScope & >(rset);
Vector< SharedPtr< Peer > > peers, rootPeers;
RR->topology->allPeers(peers, rootPeers);
for(Vector< SharedPtr< Peer > >::const_iterator p(peers.begin());p!=peers.end();++p)
(*p)->resetWithinScope(tPtr, (InetAddress::IpScope)scope, myPhysicalAddress.family(), now);

RR->t->resettingPathsInScope(tPtr, 0x9afff100, reporter, reporterPhysicalAddress, entry.mySurface, myPhysicalAddress, scope);
} else {
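Editor's note: this hunk replaces the eachPeer() functor, which iterated while holding the peer-map lock, with an allPeers() snapshot followed by a plain loop, so per-peer reset work runs outside that lock. A self-contained sketch of the snapshot-then-iterate pattern, with generic names rather than ZeroTier's types:

#include <memory>
#include <mutex>
#include <unordered_map>
#include <vector>

// Sketch: copy shared pointers out under the lock, do the per-item work after.
struct Registry {
	std::mutex lock;
	std::unordered_map<uint64_t, std::shared_ptr<int>> items;

	void forEachSnapshot()
	{
		std::vector<std::shared_ptr<int>> snapshot;
		{
			std::lock_guard<std::mutex> l(lock);
			snapshot.reserve(items.size());
			for (auto &kv : items)
				snapshot.push_back(kv.second);
		} // lock released before any per-item work runs
		for (auto &it : snapshot) {
			// ... per-item work that may itself need other locks ...
			(void)it;
		}
	}
};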
@@ -12,21 +12,24 @@
/****/

#include "Topology.hpp"
#include "Defaults.hpp"

namespace ZeroTier {

static const SharedPtr< const Certificate > s_nullCert;

Topology::Topology(const RuntimeEnvironment *renv, void *tPtr, const int64_t now) :
RR(renv)
RR(renv),
m_lastRankedRoots(0)
{
char tmp[32];
Vector< uint8_t > trustData(RR->node->stateObjectGet(tPtr, ZT_STATE_OBJECT_TRUST_STORE, Utils::ZERO256));

Dictionary d;

Vector< uint8_t > trustData(RR->node->stateObjectGet(tPtr, ZT_STATE_OBJECT_TRUST_STORE, Utils::ZERO256));
if (trustData.empty() || (!d.decode(trustData.data(), (unsigned int)trustData.size()))) {
// TODO: import default certificates including default root set
} else {
if (!d.decode(Defaults::CERTIFICATES, Defaults::CERTIFICATES_BYTES))
d.clear();
}

if (!d.empty()) {
const unsigned long certCount = (unsigned long)d.getUI("c$");
for (unsigned long idx = 0; idx < certCount; ++idx) {
uint64_t id[6];

@@ -39,19 +42,9 @@ Topology::Topology(const RuntimeEnvironment *renv, void *tPtr, const int64_t now
addCertificate(tPtr, cert, now, (unsigned int)d.getUI(Dictionary::arraySubscript(tmp, sizeof(tmp), "c$.lt", idx)), false, false, false);
}
}

const unsigned long localRootCount = (unsigned long)d.getUI("lr$");
for (unsigned long idx = 0; idx < localRootCount; ++idx) {
Identity lr;
if (d.getO(Dictionary::arraySubscript(tmp, sizeof(tmp), "lr$.i", idx), lr)) {
if (lr)
m_roots[lr].insert(s_nullCert);
}
}
m_cleanCertificates(tPtr, now);
m_updateRootPeers(tPtr, now);
}

m_cleanCertificates_l_certs(now);
m_updateRootPeers_l_roots_certs(tPtr);
}

SharedPtr< Peer > Topology::add(void *tPtr, const SharedPtr< Peer > &peer)
@@ -67,50 +60,190 @@ SharedPtr< Peer > Topology::add(void *tPtr, const SharedPtr< Peer > &peer)
return peer;
}

SharedPtr< Peer > Topology::addRoot(void *const tPtr, const Identity &id)
void Topology::allPeers(Vector< SharedPtr< Peer > > &allPeers, Vector< SharedPtr< Peer > > &rootPeers) const
{
if ((id != RR->identity) && id.locallyValidate()) {
RWMutex::Lock l1(m_roots_l);

// A null pointer in the set of certificates specifying a root indicates that
// the root has been directly added.
m_roots[id].insert(s_nullCert);

{
Mutex::Lock certsLock(m_certs_l);
m_updateRootPeers_l_roots_certs(tPtr);
m_writeTrustStore_l_roots_certs(tPtr);
}

for (Vector< SharedPtr< Peer > >::const_iterator p(m_rootPeers.begin()); p != m_rootPeers.end(); ++p) {
if ((*p)->identity() == id)
return *p;
}
allPeers.clear();
{
RWMutex::RLock l(m_peers_l);
allPeers.reserve(m_peers.size());
for (Map< Address, SharedPtr< Peer > >::const_iterator i(m_peers.begin()); i != m_peers.end(); ++i)
allPeers.push_back(i->second);
}
{
RWMutex::RLock l(m_roots_l);
rootPeers = m_roots;
}
return SharedPtr< Peer >();
}

bool Topology::removeRoot(void *const tPtr, Address address)
void Topology::doPeriodicTasks(void *tPtr, const int64_t now)
{
RWMutex::Lock l1(m_roots_l);
bool removed = false;
for (Map< Identity, Set< SharedPtr< const Certificate > > >::iterator r(m_roots.begin()); r != m_roots.end();) {
if (r->first.address() == address) {
r->second.erase(s_nullCert);
if (r->second.empty()) {
m_roots.erase(r++);
{
Mutex::Lock certsLock(m_certs_l);
m_updateRootPeers_l_roots_certs(tPtr);
m_writeTrustStore_l_roots_certs(tPtr);
}
removed = true;
} else {
++r;
}
} else ++r;
// Peer and path delete operations are batched to avoid holding write locks on
// these structures for any length of time. A list is compiled in read mode,
// then the write lock is acquired for each delete. This adds overhead if there
// are a lot of deletions, but that's not common.

// Clean any expired certificates
{
Mutex::Lock l1(m_certs_l);
if (m_cleanCertificates(tPtr, now)) {
RWMutex::Lock l3(m_peers_l);
RWMutex::Lock l2(m_roots_l);
m_updateRootPeers(tPtr, now);
}
}
return removed;

// Delete peers that are stale or offline and are not roots.
{
Vector< uintptr_t > rootLookup;
{
RWMutex::RLock l2(m_roots_l);
rootLookup.reserve(m_roots.size());
for (Vector< SharedPtr< Peer > >::const_iterator r(m_roots.begin()); r != m_roots.end(); ++r)
rootLookup.push_back((uintptr_t)r->ptr());
}
std::sort(rootLookup.begin(), rootLookup.end());

Vector< Address > toDelete;
{
RWMutex::RLock l1(m_peers_l);
for (Map< Address, SharedPtr< Peer > >::iterator i(m_peers.begin()); i != m_peers.end(); ++i) {
// TODO: also delete if the peer has not exchanged meaningful communication in a while, such as
// a network frame or non-trivial control packet.
if (((now - i->second->lastReceive()) > ZT_PEER_ALIVE_TIMEOUT) && (!std::binary_search(rootLookup.begin(), rootLookup.end(), (uintptr_t)i->second.ptr())))
toDelete.push_back(i->first);
}
}
for (Vector< Address >::iterator i(toDelete.begin()); i != toDelete.end(); ++i) {
RWMutex::Lock l1(m_peers_l);
const Map< Address, SharedPtr< Peer > >::iterator p(m_peers.find(*i));
if (likely(p != m_peers.end())) {
p->second->save(tPtr);
m_peers.erase(p);
}
}
}

// Delete paths that are no longer held by anyone else ("weak reference" type behavior).
{
Vector< UniqueID > toDelete;
{
RWMutex::RLock l1(m_paths_l);
for (Map< UniqueID, SharedPtr< Path > >::iterator i(m_paths.begin()); i != m_paths.end(); ++i) {
if (i->second.weakGC())
toDelete.push_back(i->first);
}
}
for (Vector< UniqueID >::iterator i(toDelete.begin()); i != toDelete.end(); ++i) {
RWMutex::Lock l1(m_paths_l);
const Map< UniqueID, SharedPtr< Path > >::iterator p(m_paths.find(*i));
if (likely(p != m_paths.end()))
m_paths.erase(p);
}
}
}
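Editor's note: the comment at the top of doPeriodicTasks() describes the batching strategy used throughout it: compile a delete list under a read lock, then take the write lock only briefly per erase. A self-contained sketch of that pattern with standard primitives rather than ZeroTier's RWMutex:

#include <map>
#include <shared_mutex>
#include <vector>

// Sketch: expire entries older than a cutoff without holding the write lock
// while scanning the whole table.
struct ExpiringTable {
	std::shared_mutex lock;
	std::map<uint64_t, int64_t> lastSeen; // key -> last-seen timestamp

	void expire(int64_t now, int64_t timeout)
	{
		std::vector<uint64_t> toDelete;
		{
			std::shared_lock<std::shared_mutex> r(lock); // read mode: scan only
			for (const auto &kv : lastSeen) {
				if ((now - kv.second) > timeout)
					toDelete.push_back(kv.first);
			}
		}
		for (uint64_t k : toDelete) {
			std::unique_lock<std::shared_mutex> w(lock); // write lock per erase, briefly
			lastSeen.erase(k);
		}
	}
};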
void Topology::saveAll(void *tPtr)
{
{
RWMutex::RLock l(m_peers_l);
for (Map< Address, SharedPtr< Peer > >::iterator i(m_peers.begin()); i != m_peers.end(); ++i) {
i->second->save(tPtr);
}
}
{
char tmp[32];
Dictionary d;
{
Mutex::Lock l(m_certs_l);
unsigned long idx = 0;
d.add("c$", (uint64_t)m_certs.size());
for (Map< SHA384Hash, std::pair< SharedPtr< const Certificate >, unsigned int > >::const_iterator c(m_certs.begin()); c != m_certs.end(); ++c) {
d[Dictionary::arraySubscript(tmp, sizeof(tmp), "c$.s", idx)].assign(c->first.data, c->first.data + ZT_SHA384_DIGEST_SIZE);
d.add(Dictionary::arraySubscript(tmp, sizeof(tmp), "c$.lt", idx), (uint64_t)c->second.second);
++idx;
}
}
Vector< uint8_t > trustStore;
d.encode(trustStore);
RR->node->stateObjectPut(tPtr, ZT_STATE_OBJECT_TRUST_STORE, Utils::ZERO256, trustStore.data(), (unsigned int)trustStore.size());
}
}

ZT_CertificateError Topology::addCertificate(void *tPtr, const Certificate &cert, const int64_t now, const unsigned int localTrust, const bool writeToLocalStore, const bool refreshRootSets, const bool verify)
{
{
Mutex::Lock l1(m_certs_l);

// Check to see if we already have this specific certificate.
const SHA384Hash serial(cert.serialNo);
if (m_certs.find(serial) != m_certs.end())
return ZT_CERTIFICATE_ERROR_NONE;

// Verify certificate all the way to a trusted root. This also verifies inner
// signatures such as those of locators or the subject unique ID.
if (verify) {
const ZT_CertificateError err = m_verifyCertificate(cert, now, localTrust, false);
if (err != ZT_CERTIFICATE_ERROR_NONE)
return err;
}

// Create entry containing copy of certificate and trust flags.
const std::pair< SharedPtr< const Certificate >, unsigned int > certEntry(SharedPtr< const Certificate >(new Certificate(cert)), localTrust);

// If the subject contains a unique ID, check if we already have a cert for the
// same uniquely identified subject. If so, check its subject timestamp and keep
// the one we have if newer. Otherwise replace it. Note that the verification
// function will have checked the unique ID proof signature already if a unique
// ID was present.
if ((cert.subject.uniqueId) && (cert.subject.uniqueIdSize > 0)) {
SHA384Hash uniqueIdHash;
SHA384(uniqueIdHash.data, cert.subject.uniqueId, cert.subject.uniqueIdSize);
std::pair< SharedPtr< const Certificate >, unsigned int > &bySubjectUniqueId = m_certsBySubjectUniqueId[uniqueIdHash];
if (bySubjectUniqueId.first) {
if (bySubjectUniqueId.first->subject.timestamp >= cert.subject.timestamp)
return ZT_CERTIFICATE_ERROR_HAVE_NEWER_CERT;
m_eraseCertificate(tPtr, bySubjectUniqueId.first, &uniqueIdHash);
m_certsBySubjectUniqueId[uniqueIdHash] = certEntry;
} else {
bySubjectUniqueId = certEntry;
}
}

// Save certificate by serial number.
m_certs[serial] = certEntry;

// Add certificate to sets of certificates whose subject references a given identity.
for (unsigned int i = 0; i < cert.subject.identityCount; ++i) {
const Identity *const ii = reinterpret_cast<const Identity *>(cert.subject.identities[i].identity);
if (ii)
m_certsBySubjectIdentity[ii->fingerprint()].insert(certEntry);
}

// Clean any certificates whose chains are now broken, which can happen if there was
// an update that replaced an old cert with a given unique ID. Otherwise this generally
// does nothing here. Skip if verify is false since this means we're mindlessly loading
// certificates, which right now only happens on startup when they're loaded from the
// local certificate cache.
if (verify)
m_cleanCertificates(tPtr, now);

// Refresh the root peers lists, since certs may enumerate roots.
if (refreshRootSets) {
RWMutex::Lock l3(m_peers_l);
RWMutex::Lock l2(m_roots_l);
m_updateRootPeers(tPtr, now);
}
}

if (writeToLocalStore) {
// Write certificate data prefixed by local trust flags as a 32-bit integer.
Vector< uint8_t > certData(cert.encode());
uint64_t id[6];
Utils::copy< 48 >(id, cert.serialNo);
RR->node->stateObjectPut(tPtr, ZT_STATE_OBJECT_CERT, id, certData.data(), (unsigned int)certData.size());
}

return ZT_CERTIFICATE_ERROR_NONE;
}

struct p_RootRankingComparisonOperator
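Editor's note: addCertificate() above is the single entry point for both user-supplied and cached certificates; the localTrust flags decide whether a cert may anchor chains (ROOT_CA) or contribute root identities (ZEROTIER_ROOT_SET). The sketch below shows how a caller might feed in a freshly decoded certificate; the flag constants and addCertificate() signature come from this commit, while Certificate::decode() and the INVALID_FORMAT error code are assumptions for illustration only.

// Sketch: register a decoded certificate as a locally trusted root set.
ZT_CertificateError importRootSetCert(Topology &topology, void *tPtr, int64_t now,
                                      const void *certData, unsigned int certLen)
{
	Certificate cert;
	if (!cert.decode(certData, certLen)) // assumed decode API, error handling abbreviated
		return ZT_CERTIFICATE_ERROR_INVALID_FORMAT;

	const unsigned int localTrust =
		ZT_CERTIFICATE_LOCAL_TRUST_FLAG_ROOT_CA |
		ZT_CERTIFICATE_LOCAL_TRUST_FLAG_ZEROTIER_ROOT_SET;

	// writeToLocalStore=true persists it; refreshRootSets and verify default to true.
	return topology.addCertificate(tPtr, cert, now, localTrust, true);
}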
@@ -136,162 +269,24 @@ struct p_RootRankingComparisonOperator
}
};

void Topology::rankRoots()
void Topology::m_rankRoots(const int64_t now)
{
RWMutex::Lock l1(m_roots_l);
std::sort(m_rootPeers.begin(), m_rootPeers.end(), p_RootRankingComparisonOperator());
// assumes m_roots is locked
m_lastRankedRoots = now;
std::sort(m_roots.begin(), m_roots.end(), p_RootRankingComparisonOperator());
}

void Topology::doPeriodicTasks(void *tPtr, const int64_t now)
{
// Peer and path delete operations are batched to avoid holding write locks on
// these structures for any length of time. A list is compiled in read mode,
// then the write lock is acquired for each delete. This adds overhead if there
// are a lot of deletions, but that's not common.

// Clean any expired certificates
{
Mutex::Lock l1(m_certs_l);
if (m_cleanCertificates_l_certs(now)) {
RWMutex::Lock l2(m_roots_l);
m_updateRootPeers_l_roots_certs(tPtr);
}
}

// Delete peers that are stale or offline.
{
Vector< Address > toDelete;
{
RWMutex::RLock l1(m_peers_l);
RWMutex::RLock l2(m_roots_l);
for (Map< Address, SharedPtr< Peer > >::iterator i(m_peers.begin()); i != m_peers.end();
++i) {
// TODO: also delete if the peer has not exchanged meaningful communication in a while, such as
// a network frame or non-trivial control packet.
if (((now - i->second->lastReceive()) > ZT_PEER_ALIVE_TIMEOUT) && (m_roots.find(i->second->identity()) == m_roots.end()))
toDelete.push_back(i->first);
}
}
for (Vector< Address >::iterator i(toDelete.begin()); i != toDelete.end(); ++i) {
RWMutex::Lock l1(m_peers_l);
const Map< Address, SharedPtr< Peer > >::iterator p(m_peers.find(*i));
if (likely(p != m_peers.end())) {
p->second->save(tPtr);
m_peers.erase(p);
}
}
}

// Delete paths that are no longer held by anyone else ("weak reference" type behavior).
{
Vector< UniqueID > toDelete;
{
RWMutex::RLock l1(m_paths_l);
for (Map< UniqueID, SharedPtr< Path > >::iterator i(m_paths.begin()); i != m_paths.end();
++i) {
if (i->second.weakGC())
toDelete.push_back(i->first);
}
}
for (Vector< UniqueID >::iterator i(toDelete.begin()); i != toDelete.end(); ++i) {
RWMutex::Lock l1(m_paths_l);
const Map< UniqueID, SharedPtr< Path > >::iterator p(m_paths.find(*i));
if (likely(p != m_paths.end()))
m_paths.erase(p);
}
}
}

void Topology::saveAll(void *tPtr)
{
RWMutex::RLock l(m_peers_l);
for (Map< Address, SharedPtr< Peer > >::iterator i(m_peers.begin()); i != m_peers.end();
++i)
i->second->save(tPtr);
}

ZT_CertificateError Topology::addCertificate(void *tPtr, const Certificate &cert, const int64_t now, const unsigned int localTrust, const bool writeToLocalStore, const bool refreshRootSets, const bool verify)
{
{
Mutex::Lock certsLock(m_certs_l);

// Check to see if we already have this specific certificate.
const SHA384Hash serial(cert.serialNo);
if (m_certs.find(serial) != m_certs.end())
return ZT_CERTIFICATE_ERROR_NONE;

// Verify certificate all the way to a trusted root. This also verifies inner
// signatures such as those of locators or the subject unique ID.
if (verify) {
const ZT_CertificateError err = m_verifyCertificate_l_certs(cert, now, localTrust, false);
if (err != ZT_CERTIFICATE_ERROR_NONE)
return err;
}

// Create entry containing copy of certificate and trust flags.
const std::pair< SharedPtr< const Certificate >, unsigned int > certEntry(SharedPtr< const Certificate >(new Certificate(cert)), localTrust);

// If the subject contains a unique ID, check if we already have a cert for the
// same uniquely identified subject. If so, check its subject timestamp and keep
// the one we have if newer. Otherwise replace it. Note that the verification
// function will have checked the unique ID proof signature already if a unique
// ID was present.
if ((cert.subject.uniqueId) && (cert.subject.uniqueIdSize > 0)) {
const Vector< uint8_t > uniqueId(cert.subject.uniqueId, cert.subject.uniqueId + cert.subject.uniqueIdSize);
std::pair< SharedPtr< const Certificate >, unsigned int > &bySubjectUniqueId = m_certsBySubjectUniqueId[uniqueId];
if (bySubjectUniqueId.first) {
if (bySubjectUniqueId.first->subject.timestamp >= cert.subject.timestamp)
return ZT_CERTIFICATE_ERROR_HAVE_NEWER_CERT;
m_eraseCertificate_l_certs(bySubjectUniqueId.first);
m_certsBySubjectUniqueId[uniqueId] = certEntry; // reference bySubjectUniqueId no longer valid
} else {
bySubjectUniqueId = certEntry;
}
}

// Save certificate by serial number.
m_certs[serial] = certEntry;

// Add certificate to sets of certificates whose subject references a given identity.
for (unsigned int i = 0; i < cert.subject.identityCount; ++i) {
const Identity *const ii = reinterpret_cast<const Identity *>(cert.subject.identities[i].identity);
m_certsBySubjectIdentity[ii->fingerprint()].insert(certEntry);
}

// Clean any certificates whose chains are now broken, which can happen if there was
// an update that replaced an old cert with a given unique ID. Otherwise this generally
// does nothing here. Skip if verify is false since this means we're mindlessly loading
// certificates, which right now only happens on startup when they're loaded from the
// local certificate cache.
if (verify)
m_cleanCertificates_l_certs(now);

// Refresh the root peers lists, since certs may enumerate roots.
if (refreshRootSets) {
RWMutex::Lock rootsLock(m_roots_l);
m_updateRootPeers_l_roots_certs(tPtr);
}
}

if (writeToLocalStore) {
// Write certificate data prefixed by local trust flags as a 32-bit integer.
Vector< uint8_t > certData(cert.encode());
uint64_t id[6];
Utils::copy< 48 >(id, cert.serialNo);
RR->node->stateObjectPut(tPtr, ZT_STATE_OBJECT_CERT, id, certData.data(), (unsigned int)certData.size());
}

return ZT_CERTIFICATE_ERROR_NONE;
}

void Topology::m_eraseCertificate_l_certs(const SharedPtr< const Certificate > &cert)
void Topology::m_eraseCertificate(void *tPtr, const SharedPtr< const Certificate > &cert, const SHA384Hash *uniqueIdHash)
{
// assumes m_certs is locked for writing

m_certs.erase(SHA384Hash(cert->serialNo));
const SHA384Hash serialNo(cert->serialNo);
m_certs.erase(serialNo);

if (cert->subject.uniqueIdSize > 0)
m_certsBySubjectUniqueId.erase(Vector< uint8_t >(cert->subject.uniqueId, cert->subject.uniqueId + cert->subject.uniqueIdSize));
RR->node->stateObjectDelete(tPtr, ZT_STATE_OBJECT_CERT, serialNo.data);

if (uniqueIdHash)
m_certsBySubjectUniqueId.erase(*uniqueIdHash);

for (unsigned int i = 0; i < cert->subject.identityCount; ++i) {
const Identity *const ii = reinterpret_cast<const Identity *>(cert->subject.identities[i].identity);
@@ -305,7 +300,7 @@ void Topology::m_eraseCertificate_l_certs(const SharedPtr< const Certificate > &
}
}

bool Topology::m_cleanCertificates_l_certs(int64_t now)
bool Topology::m_cleanCertificates(void *tPtr, int64_t now)
{
// assumes m_certs is locked for writing

@@ -316,24 +311,31 @@ bool Topology::m_cleanCertificates_l_certs(int64_t now)
// Verify, but the last boolean option tells it to skip signature checks as this would
// already have been done. This will therefore just check the path and validity times
// of the certificate.
const ZT_CertificateError err = m_verifyCertificate_l_certs(*(c->second.first), now, c->second.second, true);
const ZT_CertificateError err = m_verifyCertificate(*(c->second.first), now, c->second.second, true);
if (err != ZT_CERTIFICATE_ERROR_NONE)
toDelete.push_back(c->second.first);
}

if (toDelete.empty())
break;

deleted = true;
for (Vector< SharedPtr< const Certificate > >::iterator c(toDelete.begin()); c != toDelete.end(); ++c)
m_eraseCertificate_l_certs(*c);

SHA384Hash uniqueIdHash;
for (Vector< SharedPtr< const Certificate > >::iterator c(toDelete.begin()); c != toDelete.end(); ++c) {
if ((*c)->subject.uniqueId) {
SHA384(uniqueIdHash.data, (*c)->subject.uniqueId, (*c)->subject.uniqueIdSize);
m_eraseCertificate(tPtr, *c, &uniqueIdHash);
} else {
m_eraseCertificate(tPtr, *c, nullptr);
}
}
toDelete.clear();
}

return deleted;
}

bool Topology::m_verifyCertificateChain_l_certs(const Certificate *current, const int64_t now) const
bool Topology::m_verifyCertificateChain(const Certificate *current, const int64_t now) const
{
// assumes m_certs is at least locked for reading

@@ -350,7 +352,7 @@ bool Topology::m_verifyCertificateChain_l_certs(const Certificate *current, cons
) {
if ((cc->second & ZT_CERTIFICATE_LOCAL_TRUST_FLAG_ROOT_CA) != 0)
return true;
if (m_verifyCertificateChain_l_certs(cc->first.ptr(), now))
if (m_verifyCertificateChain(cc->first.ptr(), now))
return true;
}
}

@@ -359,7 +361,7 @@ bool Topology::m_verifyCertificateChain_l_certs(const Certificate *current, cons
return false;
}

ZT_CertificateError Topology::m_verifyCertificate_l_certs(const Certificate &cert, const int64_t now, unsigned int localTrust, bool skipSignatureCheck) const
ZT_CertificateError Topology::m_verifyCertificate(const Certificate &cert, const int64_t now, unsigned int localTrust, bool skipSignatureCheck) const
{
// assumes m_certs is at least locked for reading

@@ -378,7 +380,7 @@ ZT_CertificateError Topology::m_verifyCertificate_l_certs(const Certificate &cer
// If this is a root CA, we can skip this as we're already there. Otherwise we
// recurse up the tree until we hit a root CA.
if ((localTrust & ZT_CERTIFICATE_LOCAL_TRUST_FLAG_ROOT_CA) == 0) {
if (!m_verifyCertificateChain_l_certs(&cert, now))
if (!m_verifyCertificateChain(&cert, now))
return ZT_CERTIFICATE_ERROR_INVALID_CHAIN;
}

@@ -430,69 +432,44 @@ SharedPtr< Peer > Topology::m_peerFromCached(void *tPtr, const Address &zta)
return p;
}

void Topology::m_updateRootPeers_l_roots_certs(void *tPtr)
SharedPtr< Path > Topology::m_newPath(const int64_t l, const InetAddress &r, const UniqueID &k)
{
// assumes m_roots_l and m_certs_l are locked for write

// Clear m_roots but preserve locally added roots (indicated by a null cert ptr entry).
for (Map< Identity, Set< SharedPtr< const Certificate > > >::iterator r(m_roots.begin()); r != m_roots.end();) {
if (r->second.find(s_nullCert) == r->second.end()) {
m_roots.erase(r++);
} else {
r->second.clear();
r->second.insert(s_nullCert);
++r;
}
}

// Populate m_roots from certificate subject identities from certificates flagged
// as local root set certificates.
for (SortedMap< Vector< uint8_t >, std::pair< SharedPtr< const Certificate >, unsigned int > >::const_iterator c(m_certsBySubjectUniqueId.begin()); c != m_certsBySubjectUniqueId.end(); ++c) {
if ((c->second.second & ZT_CERTIFICATE_LOCAL_TRUST_FLAG_ZEROTIER_ROOT_SET) != 0) {
for (unsigned int i = 0; i < c->second.first->subject.identityCount; ++i)
m_roots[*reinterpret_cast<const Identity *>(c->second.first->subject.identities[i].identity)].insert(c->second.first);
}
}

// Create a new rootPeers vector and swap.
Vector< SharedPtr< Peer >> newRootPeers;
newRootPeers.reserve(m_roots.size());
for (Map< Identity, Set< SharedPtr< const Certificate > > >::iterator r(m_roots.begin()); r != m_roots.end();) {
const SharedPtr< Peer > p(this->peer(tPtr, r->first.address(), true));
if ((p) && (p->identity() == r->first))
newRootPeers.push_back(p);
}
std::sort(newRootPeers.begin(), newRootPeers.end(), p_RootRankingComparisonOperator());
m_rootPeers.swap(newRootPeers);
SharedPtr< Path > p(new Path(l, r));
RWMutex::Lock lck(m_paths_l);
SharedPtr< Path > &p2 = m_paths[k];
if (p2)
return p2;
p2 = p;
return p;
}

void Topology::m_writeTrustStore_l_roots_certs(void *tPtr) const
void Topology::m_updateRootPeers(void *tPtr, const int64_t now)
{
// assumes m_roots_l and m_certs_l are locked for write
// assumes m_certs_l, m_peers_l, and m_roots_l are locked for write

char tmp[32];
Dictionary d;

d.add("v", (uint64_t)0); // version

unsigned long idx = 0;
d.add("c$", (uint64_t)m_certs.size());
for (Map< SHA384Hash, std::pair< SharedPtr< const Certificate >, unsigned int > >::const_iterator c(m_certs.begin()); c != m_certs.end(); ++c) {
d[Dictionary::arraySubscript(tmp, sizeof(tmp), "c$.s", idx)].assign(c->first.data, c->first.data + ZT_SHA384_DIGEST_SIZE);
d.add(Dictionary::arraySubscript(tmp, sizeof(tmp), "c$.lt", idx), (uint64_t)c->second.second);
++idx;
Set< Identity > rootIdentities;
for (Map< Fingerprint, Map< SharedPtr< const Certificate >, unsigned int > >::const_iterator c(m_certsBySubjectIdentity.begin()); c != m_certsBySubjectIdentity.end(); ++c) {
for (Map< SharedPtr< const Certificate >, unsigned int >::const_iterator cc(c->second.begin()); cc != c->second.end(); ++cc) {
if ((cc->second & ZT_CERTIFICATE_LOCAL_TRUST_FLAG_ZEROTIER_ROOT_SET) != 0) {
for (unsigned int i = 0; i < cc->first->subject.identityCount; ++i) {
if (cc->first->subject.identities[i].identity)
rootIdentities.insert(*reinterpret_cast<const Identity *>(cc->first->subject.identities[i].identity));
}
}
}
}

unsigned long localRootCount = 0;
for (Map< Identity, Set< SharedPtr< const Certificate > > >::const_iterator r(m_roots.begin()); r != m_roots.end();) {
if (r->second.find(s_nullCert) != r->second.end())
d.addO(Dictionary::arraySubscript(tmp, sizeof(tmp), "lr$.i", localRootCount++), r->first);
m_roots.clear();
for (Set< Identity >::const_iterator i(rootIdentities.begin()); i != rootIdentities.end(); ++i) {
SharedPtr< Peer > &p = m_peers[i->address()];
if ((!p) || (p->identity() != *i)) {
p.set(new Peer(RR));
p->init(*i);
}
m_roots.push_back(p);
}
d.add("lr$", (uint64_t)localRootCount);

Vector< uint8_t > trustStore;
d.encode(trustStore);
RR->node->stateObjectPut(tPtr, ZT_STATE_OBJECT_TRUST_STORE, Utils::ZERO256, trustStore.data(), (unsigned int)trustStore.size());
m_rankRoots(now);
}

} // namespace ZeroTier
@@ -90,104 +90,28 @@ public:
if (likely(p != m_paths.end()))
return p->second;
}
{
SharedPtr< Path > p(new Path(l, r));
RWMutex::Lock lck(m_paths_l);
SharedPtr< Path > &p2 = m_paths[k];
if (p2)
return p2;
p2 = p;
return p;
}
return m_newPath(l, r, k);
}

/**
* @return Current best root server
* @return Current best root (lowest latency active root)
*/
ZT_INLINE SharedPtr< Peer > root() const
ZT_INLINE SharedPtr< Peer > root(const int64_t now)
{
RWMutex::RLock l(m_roots_l);
if (unlikely(m_rootPeers.empty()))
RWMutex::RMaybeWLock l(m_roots_l);
if (unlikely(m_roots.empty()))
return SharedPtr< Peer >();
return m_rootPeers.front();
}

/**
* @param id Identity to check
* @return True if this identity corresponds to a root
*/
ZT_INLINE bool isRoot(const Identity &id) const
{
RWMutex::RLock l(m_roots_l);
return (m_roots.find(id) != m_roots.end());
}

/**
* Apply a function or function object to all peers
*
* This locks the peer map during execution, so calls to get() etc. during
* eachPeer() will deadlock.
*
* @param f Function to apply
* @tparam F Function or function object type
*/
template< typename F >
ZT_INLINE void eachPeer(F f) const
{
RWMutex::RLock l(m_peers_l);
for (Map< Address, SharedPtr< Peer > >::const_iterator i(m_peers.begin()); i != m_peers.end(); ++i)
f(i->second);
if (unlikely((now - m_lastRankedRoots) > (ZT_PATH_KEEPALIVE_PERIOD / 2))) {
l.writing();
m_rankRoots(now);
}
return m_roots.front();
}
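Editor's note: root() now re-ranks lazily. Callers normally take only a read lock, and the lock is upgraded to a write lock (via RMaybeWLock::writing()) only when the last ranking is older than half the path keepalive period. RMaybeWLock is ZeroTier-specific; the sketch below shows the same idea with standard primitives, which cannot upgrade in place and must drop and reacquire instead. All names in it are illustrative.

#include <shared_mutex>
#include <vector>

// Sketch: mostly-read accessor that occasionally refreshes a ranking.
struct RankedSet {
	mutable std::shared_mutex lock;
	std::vector<int> ranked;
	long long lastRanked = 0;
	static const long long kRerankIntervalMs = 30000; // illustrative interval

	int best(long long now)
	{
		{
			std::shared_lock<std::shared_mutex> r(lock);
			if ((now - lastRanked) <= kRerankIntervalMs)
				return ranked.empty() ? -1 : ranked.front();
		}
		// Stale: take the write lock and re-rank.
		std::unique_lock<std::shared_mutex> w(lock);
		// ... sort `ranked` by measured latency here ...
		lastRanked = now;
		return ranked.empty() ? -1 : ranked.front();
	}
};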
/**
* @param allPeers vector to fill with all current peers
*/
ZT_INLINE void getAllPeers(Vector< SharedPtr< Peer > > &allPeers) const
{
allPeers.clear();
RWMutex::RLock l(m_peers_l);
allPeers.reserve(m_peers.size());
for (Map< Address, SharedPtr< Peer > >::const_iterator i(m_peers.begin()); i != m_peers.end(); ++i)
allPeers.push_back(i->second);
}

/**
* @param allPeers vector to fill with all current peers
*/
ZT_INLINE void getAllPeers(Vector< SharedPtr< Peer > > &allPeers, Vector< SharedPtr< Peer > > &rootPeers) const
{
allPeers.clear();
RWMutex::RLock l(m_peers_l);
allPeers.reserve(m_peers.size());
for (Map< Address, SharedPtr< Peer > >::const_iterator i(m_peers.begin()); i != m_peers.end(); ++i)
allPeers.push_back(i->second);
rootPeers = m_rootPeers;
}

/**
* Flag a peer as a root, adding the peer if it is not known
*
* @param tPtr Thread pointer
* @param id Root identity (will be locally validated)
* @return Root peer or NULL if some problem occurred
*/
SharedPtr< Peer > addRoot(void *tPtr, const Identity &id);

/**
* Remove a root server's identity from the root server set
*
* @param tPtr Thread pointer
* @param address Root address
* @return True if root found and removed, false if not found
*/
bool removeRoot(void *tPtr, Address address);

/**
* Sort roots in ascending order of apparent latency
*
* @param now Current time
*/
void rankRoots();
void allPeers(Vector< SharedPtr< Peer > > &allPeers, Vector< SharedPtr< Peer > > &rootPeers) const;

/**
* Do periodic tasks such as database cleanup
@@ -219,39 +143,31 @@ public:
ZT_CertificateError addCertificate(void *tPtr, const Certificate &cert, const int64_t now, unsigned int localTrust, bool writeToLocalStore, bool refreshRootSets = true, bool verify = true);

private:
void m_eraseCertificate_l_certs(const SharedPtr< const Certificate > &cert);

bool m_cleanCertificates_l_certs(int64_t now);

bool m_verifyCertificateChain_l_certs(const Certificate *current, const int64_t now) const;

ZT_CertificateError m_verifyCertificate_l_certs(const Certificate &cert, const int64_t now, unsigned int localTrust, bool skipSignatureCheck) const;

void m_rankRoots(int64_t now);
void m_eraseCertificate(void *tPtr, const SharedPtr< const Certificate > &cert, const SHA384Hash *uniqueIdHash);
bool m_cleanCertificates(void *tPtr, int64_t now);
bool m_verifyCertificateChain(const Certificate *current, const int64_t now) const;
ZT_CertificateError m_verifyCertificate(const Certificate &cert, const int64_t now, unsigned int localTrust, bool skipSignatureCheck) const;
void m_loadCached(void *tPtr, const Address &zta, SharedPtr< Peer > &peer);

SharedPtr< Peer > m_peerFromCached(void *tPtr, const Address &zta);

void m_updateRootPeers_l_roots_certs(void *tPtr);

void m_writeTrustStore_l_roots_certs(void *tPtr) const;
SharedPtr< Path > m_newPath(const int64_t l, const InetAddress &r, const UniqueID &k);
void m_updateRootPeers(void *tPtr, int64_t now);

const RuntimeEnvironment *const RR;

RWMutex m_paths_l; // m_paths
RWMutex m_peers_l; // m_peers
RWMutex m_roots_l; // m_roots, m_rootPeers
Mutex m_certs_l; // m_certs, m_certsBySubjectIdentity

Map< UniqueID, SharedPtr< Path > > m_paths;

int64_t m_lastRankedRoots;
Vector< SharedPtr< Peer > > m_roots;
Map< Address, SharedPtr< Peer > > m_peers;

Map< Identity, Set< SharedPtr< const Certificate > > > m_roots;
Vector< SharedPtr< Peer > > m_rootPeers;
Map< UniqueID, SharedPtr< Path > > m_paths;

Map< SHA384Hash, std::pair< SharedPtr< const Certificate >, unsigned int > > m_certs;
Map< Fingerprint, Map< SharedPtr< const Certificate >, unsigned int > > m_certsBySubjectIdentity;
SortedMap< Vector< uint8_t >, std::pair< SharedPtr< const Certificate >, unsigned int > > m_certsBySubjectUniqueId;
Map< SHA384Hash, std::pair< SharedPtr< const Certificate >, unsigned int > > m_certsBySubjectUniqueId;

RWMutex m_paths_l; // m_paths
RWMutex m_peers_l; // m_peers
RWMutex m_roots_l; // m_roots and m_lastRankedRoots
Mutex m_certs_l; // m_certs and friends
};

} // namespace ZeroTier
@@ -454,6 +454,15 @@ bool scopy(char *const dest, const unsigned int len, const char *const src) noex
}
}

uint32_t fnv1a32(const void *const data, const unsigned int len) noexcept
{
uint32_t h = 0x811c9dc5;
const uint32_t p = 0x01000193;
for (unsigned int i = 0; i < len; ++i)
h = (h ^ (uint32_t)reinterpret_cast<const uint8_t *>(data)[i]) * p;
return h;
}

} // namespace Utils

} // namespace ZeroTier
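Editor's note: fnv1a32() moves from an inline header definition into Utils.cpp here; 0x811c9dc5 and 0x01000193 are the standard 32-bit FNV-1a offset basis and prime. A small usage sketch (illustrative only) of the kind of lightweight, non-cryptographic integrity check it suits:

#include <cstdint>

// Sketch: tag a small serialized record with its FNV-1a checksum so a loader
// can cheaply reject truncated or corrupted state blobs. Not cryptographic.
struct TaggedRecord {
	uint8_t payload[64];
	uint32_t checksum;
};

static void sealRecord(TaggedRecord &r) noexcept
{
	r.checksum = ZeroTier::Utils::fnv1a32(r.payload, sizeof(r.payload));
}

static bool recordLooksIntact(const TaggedRecord &r) noexcept
{
	return ZeroTier::Utils::fnv1a32(r.payload, sizeof(r.payload)) == r.checksum;
}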
core/Utils.hpp
@@ -249,42 +249,6 @@ uint64_t random() noexcept;
*/
bool scopy(char *dest, unsigned int len, const char *src) noexcept;

/**
* Mix bits in a 64-bit integer (non-cryptographic, for hash tables)
*
* https://nullprogram.com/blog/2018/07/31/
*
* @param x Integer to mix
* @return Hashed value
*/
static ZT_INLINE uint64_t hash64(uint64_t x) noexcept
{
x ^= x >> 30U;
x *= 0xbf58476d1ce4e5b9ULL;
x ^= x >> 27U;
x *= 0x94d049bb133111ebULL;
x ^= x >> 31U;
return x;
}

/**
* Mix bits in a 32-bit integer (non-cryptographic, for hash tables)
*
* https://nullprogram.com/blog/2018/07/31/
*
* @param x Integer to mix
* @return Hashed value
*/
static ZT_INLINE uint32_t hash32(uint32_t x) noexcept
{
x ^= x >> 16U;
x *= 0x7feb352dU;
x ^= x >> 15U;
x *= 0x846ca68bU;
x ^= x >> 16U;
return x;
}

/**
* Check if a buffer's contents are all zero
*/
@@ -338,24 +302,6 @@ static ZT_INLINE unsigned long long hexStrToU64(const char *s) noexcept
#endif
}

/**
* Compute 32-bit FNV-1a checksum
*
* See: http://www.isthe.com/chongo/tech/comp/fnv/
*
* @param data Data to checksum
* @param len Length of data
* @return FNV1a checksum
*/
static ZT_INLINE uint32_t fnv1a32(const void *const data, const unsigned int len) noexcept
{
uint32_t h = 0x811c9dc5;
const uint32_t p = 0x01000193;
for (unsigned int i = 0; i < len; ++i)
h = (h ^ (uint32_t)reinterpret_cast<const uint8_t *>(data)[i]) * p;
return h;
}

#ifdef __GNUC__

static ZT_INLINE unsigned int countBits(const uint8_t v) noexcept
@@ -698,20 +644,6 @@ static ZT_INLINE void storeLittleEndian(void *const p, const I i) noexcept
#endif
}

/*
* Note on copy() and zero():
*
* On X64, rep/movsb and rep/stosb are almost always faster for small memory
* regions on all but the oldest microarchitectures (and even there the
* difference is not large). While more aggressive memcpy() implementations
* may be faster in micro-benchmarks, these fail to account for real world
* context such as instruction cache and pipeline pressure. A simple
* instruction like rep/movsb takes up only a few spots in caches and pipelines
* and requires no branching or function calls. Specialized memcpy() can still
* be faster for large memory regions, but ZeroTier doesn't copy anything
* much larger than 16KiB.
*/

/**
* Copy memory block whose size is known at compile time.
*
@@ -778,6 +710,53 @@ static ZT_INLINE void zero(void *dest, unsigned long len) noexcept
#endif
}

/**
* Compute 32-bit FNV-1a checksum
*
* See: http://www.isthe.com/chongo/tech/comp/fnv/
*
* @param data Data to checksum
* @param len Length of data
* @return FNV1a checksum
*/
uint32_t fnv1a32(const void *const data, const unsigned int len) noexcept;

/**
* Mix bits in a 64-bit integer (non-cryptographic, for hash tables)
*
* https://nullprogram.com/blog/2018/07/31/
*
* @param x Integer to mix
* @return Hashed value
*/
static ZT_INLINE uint64_t hash64(uint64_t x) noexcept
{
x ^= x >> 30U;
x *= 0xbf58476d1ce4e5b9ULL;
x ^= x >> 27U;
x *= 0x94d049bb133111ebULL;
x ^= x >> 31U;
return x;
}

/**
* Mix bits in a 32-bit integer (non-cryptographic, for hash tables)
*
* https://nullprogram.com/blog/2018/07/31/
*
* @param x Integer to mix
* @return Hashed value
*/
static ZT_INLINE uint32_t hash32(uint32_t x) noexcept
{
x ^= x >> 16U;
x *= 0x7feb352dU;
x ^= x >> 15U;
x *= 0x846ca68bU;
x ^= x >> 16U;
return x;
}

} // namespace Utils

} // namespace ZeroTier
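Editor's note: hash64() and hash32() are the bit-mixing finalizers from the nullprogram.com article referenced in their comments; Containers.hpp's intl_MapHasher feeds them integer keys XORed with a per-process nonce. A minimal sketch of turning such a mixed value into a hash-table bucket index (power-of-two capacity assumed purely for illustration):

#include <cstddef>
#include <cstdint>

// Sketch: derive a bucket index from an integer key. The nonce keeps bucket
// layout unpredictable across processes, which blunts algorithmic-complexity
// attacks on the table.
static inline std::size_t bucketFor(uint64_t key, uint64_t nonce, std::size_t capacityPow2) noexcept
{
	const uint64_t mixed = ZeroTier::Utils::hash64(key ^ nonce);
	return (std::size_t)(mixed & (uint64_t)(capacityPow2 - 1));
}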
@@ -437,7 +437,7 @@ void VL1::m_relay(void *tPtr, const SharedPtr< Path > &path, Address destination

void VL1::m_sendPendingWhois(void *tPtr, int64_t now)
{
const SharedPtr< Peer > root(RR->topology->root());
const SharedPtr< Peer > root(RR->topology->root(now));
if (unlikely(!root))
return;
const SharedPtr< Path > rootPath(root->path(now));
@@ -2230,42 +2230,6 @@ ZT_SDK_API enum ZT_ResultCode ZT_Node_multicastUnsubscribe(
uint64_t multicastGroup,
unsigned long multicastAdi);

/**
* Designate a peer as a root, adding if not already known
*
* ZeroTier does not take possession of the 'id' object. It still must be
* deleted if it was allocated.
*
* @param node Node instance
* @param tptr Thread pointer to pass to functions/callbacks resulting from this call
* @param id Identity of root to add
* @return OK (0) or error code if an error condition has occurred
*/
ZT_SDK_API enum ZT_ResultCode ZT_Node_addRoot(
ZT_Node *node,
void *tptr,
const ZT_Identity *id);

/**
* Un-designate a peer as a root
*
* This doesn't fully remove the peer from the peer list. It just removes
* its root trust flag. If there is no longer any need to communicate with it
* it may gradually time out and be removed.
*
* The removeRoot() only takes an address since the identity is by definition
* already known and pinned.
*
* @param node Node instance
* @param tptr Thread pointer to pass to functions/callbacks resulting from this call
* @param address ZeroTier address to remove
* @return OK (0) or error code if an error condition has occurred
*/
ZT_SDK_API enum ZT_ResultCode ZT_Node_removeRoot(
ZT_Node *node,
void *tptr,
const uint64_t address);

/**
* Get this node's 40-bit ZeroTier address
*
@@ -18,7 +18,6 @@ import (
secrand "crypto/rand"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
@@ -346,22 +345,6 @@ func createAPIServer(basePath string, node *Node) (*http.Server, *http.Server, e
}
apiSetStandardHeaders(out)

if req.URL.Path == "/peer/_addroot" {
if req.Method == http.MethodPost || req.Method == http.MethodPut {
rsdata, err := ioutil.ReadAll(io.LimitReader(req.Body, 16384))
if err != nil || len(rsdata) == 0 {
_ = apiSendObj(out, req, http.StatusBadRequest, &APIErr{"read error"})
} else {
// TODO
_ = apiSendObj(out, req, http.StatusOK, nil)
}
} else {
out.Header().Set("Allow", "POST, PUT")
_ = apiSendObj(out, req, http.StatusMethodNotAllowed, &APIErr{"no root spec supplied"})
}
return
}

var queriedStr string
var queriedID Address
var queriedFP *Fingerprint
@@ -400,22 +383,7 @@ func createAPIServer(basePath string, node *Node) (*http.Server, *http.Server, e
}
}

if req.Method == http.MethodPost || req.Method == http.MethodPut {
if peer != nil {
var posted Peer
if apiReadObj(out, req, &posted) == nil {
if posted.Root && !peer.Root {
_ = apiSendObj(out, req, http.StatusBadRequest, &APIErr{"root spec must be submitted to /peer/_addroot, post to peers can only be used to clear the root flag"})
} else if !posted.Root && peer.Root {
peer.Root = false
node.RemoveRoot(peer.Address)
_ = apiSendObj(out, req, http.StatusOK, peer)
}
}
} else {
_ = apiSendObj(out, req, http.StatusNotFound, &APIErr{"peer not found"})
}
} else if req.Method == http.MethodGet || req.Method == http.MethodHead || req.Method == http.MethodPost || req.Method == http.MethodPut {
if req.Method == http.MethodGet || req.Method == http.MethodHead || req.Method == http.MethodPost || req.Method == http.MethodPut {
if peer != nil {
_ = apiSendObj(out, req, http.StatusOK, peer)
} else if len(queriedStr) > 0 {

@@ -424,7 +392,7 @@ func createAPIServer(basePath string, node *Node) (*http.Server, *http.Server, e
_ = apiSendObj(out, req, http.StatusOK, peers)
}
} else {
out.Header().Set("Allow", "GET, HEAD, POST, PUT")
out.Header().Set("Allow", "GET, HEAD")
_ = apiSendObj(out, req, http.StatusMethodNotAllowed, &APIErr{"unsupported method"})
}
})
@@ -447,24 +447,6 @@ func (n *Node) Leave(nwid NetworkID) error {
return nil
}

// AddRoot designates a peer as root, adding it if missing.
func (n *Node) AddRoot(id *Identity) (*Peer, error) {
rc := C.ZT_Node_addRoot(n.zn, nil, id.cIdentity())
if rc != 0 {
return nil, ErrInvalidParameter
}
p := n.Peer(id.Fingerprint())
if p == nil {
return nil, ErrInvalidParameter
}
return p, nil
}

// RemoveRoot un-designates a peer as root.
func (n *Node) RemoveRoot(address Address) {
C.ZT_Node_removeRoot(n.zn, nil, C.uint64_t(address))
}

// Network looks up a network by ID or returns nil if not joined
func (n *Node) Network(nwid NetworkID) *Network {
n.networksLock.RLock()