Work in progress... clean up memcpy and create an annotation for that, lots more porting to new Buf/Protocol code, etc.

Adam Ierymenko 2020-02-03 13:00:13 -08:00
parent cdc6c42375
commit df346a6df6
42 changed files with 1796 additions and 958 deletions

attic/Fingerprint.hpp (new file, 66 lines)

@ -0,0 +1,66 @@
/*
* Copyright (c)2013-2020 ZeroTier, Inc.
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2024-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
*/
/****/
#ifndef ZT_FINGERPRINT_HPP
#define ZT_FINGERPRINT_HPP
#include "Constants.hpp"
#include "Identity.hpp"
namespace ZeroTier {
/**
* A short address and a longer identity hash for extra confirmation of a node's identity.
*/
struct Fingerprint
{
ZT_ALWAYS_INLINE Fingerprint() : address() { memset(hash,0,ZT_IDENTITY_HASH_SIZE); }
explicit ZT_ALWAYS_INLINE Fingerprint(const Identity &id) : address(id.address()) { memcpy(hash,id.hash(),ZT_IDENTITY_HASH_SIZE); }
ZT_ALWAYS_INLINE Fingerprint &operator=(const Identity &id)
{
address = id.address();
memcpy(hash,id.hash(),ZT_IDENTITY_HASH_SIZE);
return *this;
}
ZT_ALWAYS_INLINE bool operator==(const Fingerprint &fp) const { return ((address == fp.address)&&(memcmp(hash,fp.hash,ZT_IDENTITY_HASH_SIZE) == 0)); }
ZT_ALWAYS_INLINE bool operator!=(const Fingerprint &fp) const { return ((address != fp.address)||(memcmp(hash,fp.hash,ZT_IDENTITY_HASH_SIZE) != 0)); }
ZT_ALWAYS_INLINE bool operator<(const Fingerprint &fp) const { return ((address < fp.address)||((address == fp.address)&&(memcmp(hash,fp.hash,ZT_IDENTITY_HASH_SIZE) < 0))); }
ZT_ALWAYS_INLINE bool operator>(const Fingerprint &fp) const { return (fp < *this); }
ZT_ALWAYS_INLINE bool operator<=(const Fingerprint &fp) const { return !(fp < *this); }
ZT_ALWAYS_INLINE bool operator>=(const Fingerprint &fp) const { return !(*this < fp); }
ZT_ALWAYS_INLINE bool operator==(const Identity &id) const { return ((address == id.address())&&(memcmp(hash,id.hash(),ZT_IDENTITY_HASH_SIZE) == 0)); }
ZT_ALWAYS_INLINE bool operator!=(const Identity &id) const { return ((address != id.address())||(memcmp(hash,id.hash(),ZT_IDENTITY_HASH_SIZE) != 0)); }
ZT_ALWAYS_INLINE bool operator<(const Identity &id) const { return ((address < id.address())||((address == id.address())&&(memcmp(hash,id.hash(),ZT_IDENTITY_HASH_SIZE) < 0))); }
ZT_ALWAYS_INLINE bool operator>(const Identity &id) const { return (Fingerprint(id) < *this); }
ZT_ALWAYS_INLINE bool operator<=(const Identity &id) const { return !(Fingerprint(id) < *this); }
ZT_ALWAYS_INLINE bool operator>=(const Identity &id) const { return !(*this < id); }
ZT_ALWAYS_INLINE operator bool() const { return (address); }
/**
* Short ZeroTier address
*/
Address address;
/**
* SHA-384 hash of public portions of identity key(s)
*/
uint8_t hash[ZT_IDENTITY_HASH_SIZE];
};
} // namespace ZeroTier
#endif
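
For context, a brief usage sketch (hypothetical variable names; a real Identity would come from the node's key store):

// Hypothetical usage: capture and later re-check a node's identity.
Identity id;        // assume obtained and validated elsewhere
Fingerprint fp(id); // copies the short address and SHA-384 identity hash
if (fp == id) {
    // presented identity matches both the address and the full hash
}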

node/Address.hpp

@ -26,13 +26,14 @@
#include "Constants.hpp"
#include "Utils.hpp"
#include "TriviallyCopyable.hpp"
namespace ZeroTier {
/**
* A ZeroTier address
*/
class Address
class Address : public TriviallyCopyable
{
public:
ZT_ALWAYS_INLINE Address() : _a(0) {}
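
Address (and many of the classes below) now derives from TriviallyCopyable. That header itself is not part of this diff, so here is a speculative sketch of the annotation the commit message refers to: a marker base class plus memset/memcpy wrappers (memoryZero, memoryCopy) that only compile for types explicitly marked safe to copy byte-wise. Names match their uses in the files below; the real implementation may differ.

// Speculative sketch only; the actual TriviallyCopyable.hpp is not shown in this diff.
#include <type_traits>
#include <cstring>

struct TriviallyCopyable {};

template<typename T>
static ZT_ALWAYS_INLINE void memoryZero(T *obj)
{
    static_assert(std::is_base_of<TriviallyCopyable,T>::value,"T must be marked TriviallyCopyable");
    memset(reinterpret_cast<void *>(obj),0,sizeof(T));
}

template<typename T>
static ZT_ALWAYS_INLINE void memoryCopy(T *dest,const T *src)
{
    static_assert(std::is_base_of<TriviallyCopyable,T>::value,"T must be marked TriviallyCopyable");
    memcpy(reinterpret_cast<void *>(dest),reinterpret_cast<const void *>(src),sizeof(T));
}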

node/AtomicCounter.hpp

@ -23,40 +23,43 @@
namespace ZeroTier {
/**
* Simple atomic counter supporting increment and decrement
* Simple atomic counter
*
* This is used as the reference counter in reference counted objects that
* work with SharedPtr<>.
* @tparam T Type of underlying integer (default: int)
*/
template<typename T = int>
class AtomicCounter
{
public:
ZT_ALWAYS_INLINE AtomicCounter() : _v(0) {}
explicit ZT_ALWAYS_INLINE AtomicCounter(T iv = T(0)) : _v(iv) {}
ZT_ALWAYS_INLINE int load() const
ZT_ALWAYS_INLINE T load() const
{
#ifdef __GNUC__
return _v;
return __sync_or_and_fetch(&_v,0);
#else
return _v.load();
#endif
}
ZT_ALWAYS_INLINE void zero() { _v = 0; }
ZT_ALWAYS_INLINE void zero()
{
_v = T(0);
}
ZT_ALWAYS_INLINE int operator++()
ZT_ALWAYS_INLINE T operator++()
{
#ifdef __GNUC__
return __sync_add_and_fetch((int *)&_v,1);
return __sync_add_and_fetch(&_v,1);
#else
return ++_v;
#endif
}
ZT_ALWAYS_INLINE int operator--()
ZT_ALWAYS_INLINE T operator--()
{
#ifdef __GNUC__
return __sync_sub_and_fetch((int *)&_v,1);
return __sync_sub_and_fetch(&_v,1);
#else
return --_v;
#endif
@ -67,9 +70,9 @@ private:
ZT_ALWAYS_INLINE const AtomicCounter &operator=(const AtomicCounter &) { return *this; }
#ifdef __GNUC__
volatile int _v;
T _v;
#else
std::atomic_int _v;
typename std::atomic<T> _v;
#endif
};
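
As a sketch of intended use (reference counting for SharedPtr<>, per the comment above; names illustrative):

// Illustrative use as a reference count:
AtomicCounter<int> refs;     // starts at 0 via the default initial value
++refs;                      // atomic increment, returns the new value
if (--refs == 0) {
    // last reference released; safe to free the owning object
}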

node/Buf.cpp

@ -68,7 +68,7 @@ void *_Buf_get()
#endif
b = (Buf<> *)malloc(sizeof(Buf<>));
if (!b)
return nullptr;
throw std::bad_alloc();
} else {
b = (Buf<> *)bb;
#ifdef __GNUC__

node/Buf.hpp

@ -54,6 +54,11 @@ void *_Buf_get();
*/
void freeBufPool();
/**
* Macro to declare and get a new buffer templated with the given type
*/
#define ZT_GET_NEW_BUF(vvv,xxx) SharedPtr< Buf<xxx> > vvv(reinterpret_cast< Buf<xxx> * >(_Buf_get()))
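
This macro is used later in this commit (see Network.cpp below); a minimal illustration:

// Declares 'outp' as a SharedPtr< Buf<Protocol::Header> > backed by a pooled buffer.
ZT_GET_NEW_BUF(outp,Protocol::Header);
outp->data.fields.verb = Protocol::VERB_NETWORK_CONFIG; // typed access via data.fields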
/**
* Buffer and methods for branch-free bounds-checked data assembly and parsing
*
@ -102,28 +107,48 @@ class Buf
friend void *_Buf_get();
friend void freeBufPool();
private:
// Direct construction isn't allowed; use get().
public:
static void operator delete(void *ptr,std::size_t sz) { _Buf_release(ptr,sz); }
/**
* Slice is almost exactly like the built-in slice data structure in Go
*/
struct Slice
{
ZT_ALWAYS_INLINE Slice(const SharedPtr<Buf> &b_,const unsigned int s_,const unsigned int e_) : b(b_),s(s_),e(e_) {}
ZT_ALWAYS_INLINE Slice() : b(),s(0),e(0) {}
ZT_ALWAYS_INLINE operator bool() const { return (b); }
ZT_ALWAYS_INLINE unsigned int size() const { return (e - s); }
ZT_ALWAYS_INLINE void zero() { b.zero(); s = 0; e = 0; }
/**
* Buffer holding slice data
*/
SharedPtr<Buf> b;
/**
* Index of start of data in slice
*/
unsigned int s;
/**
* Index of end of data in slice (make sure it's greater than or equal to 's'!)
*/
unsigned int e;
};
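
An aside on Slice semantics, sketched (index values illustrative):

// A Slice shares ownership of its buffer; no bytes are copied.
SharedPtr< Buf<> > b(Buf<>::get());
Buf<>::Slice payload(b,16,64);    // refers to bytes [16,64) of b
unsigned int n = payload.size();  // 48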
ZT_ALWAYS_INLINE Buf() {}
template<typename X>
ZT_ALWAYS_INLINE Buf(const Buf<X> &b) { memcpy(data.bytes,b.data.bytes,ZT_BUF_MEM_SIZE); }
public:
static void operator delete(void *ptr,std::size_t sz) { _Buf_release(ptr,sz); }
/**
* Obtain a buffer from the pool, allocating a new buffer if the pool is empty
*
* @return Buffer instance
*/
static ZT_ALWAYS_INLINE SharedPtr< Buf<U> > get()
{
void *const b = _Buf_get();
if (b)
return SharedPtr<Buf>((Buf *)b);
throw std::bad_alloc();
}
static ZT_ALWAYS_INLINE SharedPtr< Buf<U> > get() { return SharedPtr<Buf>((Buf *)_Buf_get()); }
/**
* Check for overflow beyond the size of the buffer
@ -148,13 +173,6 @@ public:
*/
static ZT_ALWAYS_INLINE bool readOverflow(const int &ii,const unsigned int size) { return ((ii - (int)size) > 0); }
template<typename X>
ZT_ALWAYS_INLINE Buf &operator=(const Buf<X> &b) const
{
memcpy(data.bytes,b.data.bytes,ZT_BUF_MEM_SIZE);
return *this;
}
/**
* Shortcut to cast between buffers whose data can be viewed through a different struct type
*
@ -342,12 +360,11 @@ public:
* @param len Length of buffer
* @return Pointer to data or NULL on overflow or error
*/
ZT_ALWAYS_INLINE void *rB(int &ii,void *bytes,unsigned int len) const
ZT_ALWAYS_INLINE uint8_t *rB(int &ii,void *bytes,unsigned int len) const
{
const void *const b = (const void *)(data.bytes + ii);
if ((ii += (int)len) <= ZT_BUF_MEM_SIZE) {
memcpy(bytes,b,len);
return bytes;
memcpy(bytes,data.bytes + ii,len);
return reinterpret_cast<uint8_t *>(bytes);
}
return nullptr;
}
@ -365,9 +382,9 @@ public:
* @param len Length of data field to obtain a pointer to
* @return Pointer to field or NULL on overflow
*/
ZT_ALWAYS_INLINE const void *rBnc(int &ii,unsigned int len) const
ZT_ALWAYS_INLINE const uint8_t *rBnc(int &ii,unsigned int len) const
{
const void *const b = (const void *)(data.bytes + ii);
const uint8_t *const b = data.bytes + ii;
return ((ii += (int)len) <= ZT_BUF_MEM_SIZE) ? b : nullptr;
}
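
The cursor-based read pattern these methods enable looks roughly like this ('pkt' is a SharedPtr< Buf<> > and 'pktSize' its valid byte count; both illustrative):

int ptr = 0;
const unsigned int len = pkt->rI16(ptr);   // 16-bit length field
const uint8_t *data = pkt->rBnc(ptr,len);  // zero-copy pointer to those bytes
if (Buf<>::readOverflow(ptr,pktSize)||(!data)) {
    // truncated or malformed input; note: only one branch after all reads
}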
@ -498,6 +515,26 @@ public:
memcpy(data.bytes + s,bytes,len);
}
template<typename X>
ZT_ALWAYS_INLINE Buf &operator=(const Buf<X> &b) const
{
memcpy(data.bytes,b.data.bytes,ZT_BUF_MEM_SIZE);
return *this;
}
template<typename X>
ZT_ALWAYS_INLINE bool operator==(const Buf<X> &b) const { return (memcmp(data.bytes,b.data.bytes,ZT_BUF_MEM_SIZE) == 0); }
template<typename X>
ZT_ALWAYS_INLINE bool operator!=(const Buf<X> &b) const { return (memcmp(data.bytes,b.data.bytes,ZT_BUF_MEM_SIZE) != 0); }
template<typename X>
ZT_ALWAYS_INLINE bool operator<(const Buf<X> &b) const { return (memcmp(data.bytes,b.data.bytes,ZT_BUF_MEM_SIZE) < 0); }
template<typename X>
ZT_ALWAYS_INLINE bool operator<=(const Buf<X> &b) const { return (memcmp(data.bytes,b.data.bytes,ZT_BUF_MEM_SIZE) <= 0); }
template<typename X>
ZT_ALWAYS_INLINE bool operator>(const Buf<X> &b) const { return (memcmp(data.bytes,b.data.bytes,ZT_BUF_MEM_SIZE) > 0); }
template<typename X>
ZT_ALWAYS_INLINE bool operator>=(const Buf<X> &b) const { return (memcmp(data.bytes,b.data.bytes,ZT_BUF_MEM_SIZE) >= 0); }
/**
* Raw data and fields (if U template parameter is set)
*
@ -511,7 +548,7 @@ public:
private:
volatile uintptr_t __nextInPool; // next item in free pool if this Buf is in Buf_pool
AtomicCounter __refCount;
AtomicCounter<int> __refCount;
};
} // namespace ZeroTier

node/CMakeLists.txt

@ -12,6 +12,7 @@ set(core_headers
CertificateOfOwnership.hpp
Constants.hpp
Credential.hpp
Defragmenter.hpp
Dictionary.hpp
ECC384.hpp
Hashtable.hpp
@ -42,6 +43,7 @@ set(core_headers
Tag.hpp
Topology.hpp
Trace.hpp
TriviallyCopyable.hpp
Utils.hpp
)

node/Capability.hpp

@ -64,16 +64,7 @@ class Capability : public Credential
public:
static ZT_ALWAYS_INLINE ZT_CredentialType credentialType() { return ZT_CREDENTIAL_TYPE_CAPABILITY; }
ZT_ALWAYS_INLINE Capability() :
_nwid(0),
_ts(0),
_id(0),
_maxCustodyChainLength(0),
_ruleCount(0)
{
memset(_rules,0,sizeof(_rules));
memset(_custody,0,sizeof(_custody));
}
ZT_ALWAYS_INLINE Capability() { memoryZero(this); }
/**
* @param id Capability ID

node/CertificateOfMembership.hpp

@ -102,9 +102,7 @@ public:
/**
* Create an empty certificate of membership
*/
ZT_ALWAYS_INLINE CertificateOfMembership() :
_qualifierCount(0),
_signatureLength(0) {}
ZT_ALWAYS_INLINE CertificateOfMembership() { memoryZero(this); }
/**
* Create from required fields common to all networks

node/CertificateOfOwnership.hpp

@ -57,10 +57,7 @@ public:
THING_IPV6_ADDRESS = 3
};
ZT_ALWAYS_INLINE CertificateOfOwnership()
{
memset(reinterpret_cast<void *>(this),0,sizeof(CertificateOfOwnership));
}
ZT_ALWAYS_INLINE CertificateOfOwnership() { memoryZero(this); }
ZT_ALWAYS_INLINE CertificateOfOwnership(const uint64_t nwid,const int64_t ts,const Address &issuedTo,const uint32_t id)
{
@ -95,7 +92,7 @@ public:
ZT_ALWAYS_INLINE bool owns(const MAC &mac) const
{
uint8_t tmp[6];
mac.copyTo(tmp,6);
mac.copyTo(tmp);
return this->_owns(THING_MAC_ADDRESS,tmp,6);
}

node/Constants.hpp

@ -56,6 +56,11 @@
*/
#define ZT_MAX_PACKET_FRAGMENTS 11
/**
* Sanity limit on the maximum size of a network config object
*/
#define ZT_MAX_NETWORK_CONFIG_BYTES 131072
/**
* Size of RX queue in packets
*/

node/Credential.cpp

@ -21,28 +21,50 @@
#include "Revocation.hpp"
#include "Switch.hpp"
#include "Network.hpp"
#include "ScopedPtr.hpp"
// These are compile-time asserts to make sure temporary marshal buffers here and
// also in NetworkConfig.cpp are always large enough to marshal all credential types.
#if ZT_TAG_MARSHAL_SIZE_MAX > ZT_BUF_MEM_SIZE
#error ZT_TAG_MARSHAL_SIZE_MAX exceeds maximum buffer size
#endif
#if ZT_CAPABILITY_MARSHAL_SIZE_MAX > ZT_BUF_MEM_SIZE
#error ZT_CAPABILITY_MARSHAL_SIZE_MAX exceeds maximum buffer size
#endif
#if ZT_REVOCATION_MARSHAL_SIZE_MAX > ZT_BUF_MEM_SIZE
#error ZT_REVOCATION_MARSHAL_SIZE_MAX exceeds maximum buffer size
#endif
#if ZT_CERTIFICATEOFOWNERSHIP_MARSHAL_SIZE_MAX > ZT_BUF_MEM_SIZE
#error ZT_CERTIFICATEOFOWNERSHIP_MARSHAL_SIZE_MAX exceeds maximum buffer size
#endif
#if ZT_CERTIFICATEOFMEMBERSHIP_MARSHAL_SIZE_MAX > ZT_BUF_MEM_SIZE
#error ZT_CERTIFICATEOFMEMBERSHIP_MARSHAL_SIZE_MAX exceeds maximum buffer size
#endif
namespace ZeroTier {
template<typename CRED>
static inline Credential::VerifyResult _credVerify(const RuntimeEnvironment *const RR,void *tPtr,CRED credential)
static ZT_ALWAYS_INLINE Credential::VerifyResult _credVerify(const RuntimeEnvironment *RR,void *tPtr,CRED credential)
{
uint8_t tmp[ZT_BUF_MEM_SIZE + 16];
const Address signedBy(credential.signer());
const uint64_t networkId = credential.networkId();
if ((!signedBy)||(signedBy != Network::controllerFor(networkId)))
return Credential::VERIFY_BAD_SIGNATURE;
const SharedPtr<Peer> peer(RR->topology->get(tPtr,signedBy));
if (!peer) {
RR->sw->requestWhois(tPtr,RR->node->now(),signedBy);
return Credential::VERIFY_NEED_IDENTITY;
}
try {
ScopedPtr< Buffer<(sizeof(CRED) + 64)> > tmp(new Buffer<(sizeof(CRED) + 64)>());
credential.serialize(*tmp,true);
const Credential::VerifyResult result = (peer->identity().verify(tmp->data(),tmp->size(),credential.signature(),credential.signatureLength()) ? Credential::VERIFY_OK : Credential::VERIFY_BAD_SIGNATURE);
return result;
int l = credential.marshal(tmp,true);
if (l <= 0)
return Credential::VERIFY_BAD_SIGNATURE;
return (peer->identity().verify(tmp,(unsigned int)l,credential.signature(),credential.signatureLength()) ? Credential::VERIFY_OK : Credential::VERIFY_BAD_SIGNATURE);
} catch ( ... ) {}
return Credential::VERIFY_BAD_SIGNATURE;
}
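
Schematically, the sign/verify convention for every credential type is now (the 'true' flag selects each type's for-signing marshal form):

// Schematic only; mirrors _credVerify above for any credential type.
uint8_t tmp[ZT_BUF_MEM_SIZE + 16];
const int l = credential.marshal(tmp,true);
const bool ok = (l > 0)&&(signer.verify(tmp,(unsigned int)l,credential.signature(),credential.signatureLength()));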
@ -74,14 +96,17 @@ Credential::VerifyResult Credential::_verify(const RuntimeEnvironment *const RR,
Credential::VerifyResult Credential::_verify(const RuntimeEnvironment *RR,void *tPtr,const Capability &credential) const
{
uint8_t tmp[ZT_CAPABILITY_MARSHAL_SIZE_MAX + 16];
try {
// There must be at least one entry, and sanity check for bad chain max length
if ((credential._maxCustodyChainLength < 1)||(credential._maxCustodyChainLength > ZT_MAX_CAPABILITY_CUSTODY_CHAIN_LENGTH))
return Credential::VERIFY_BAD_SIGNATURE;
int l = credential.marshal(tmp,true);
if (l <= 0)
return Credential::VERIFY_BAD_SIGNATURE;
// Validate all entries in chain of custody
Buffer<(sizeof(Capability) * 2)> tmp;
credential.serialize(tmp,true);
for(unsigned int c=0;c<credential._maxCustodyChainLength;++c) {
if (c == 0) {
if ((!credential._custody[c].to)||(!credential._custody[c].from)||(credential._custody[c].from != Network::controllerFor(credential._nwid)))
@ -95,7 +120,7 @@ Credential::VerifyResult Credential::_verify(const RuntimeEnvironment *RR,void *
const SharedPtr<Peer> peer(RR->topology->get(tPtr,credential._custody[c].from));
if (peer) {
if (!peer->identity().verify(tmp.data(),tmp.size(),credential._custody[c].signature,credential._custody[c].signatureLength))
if (!peer->identity().verify(tmp,(unsigned int)l,credential._custody[c].signature,credential._custody[c].signatureLength))
return Credential::VERIFY_BAD_SIGNATURE;
} else {
RR->sw->requestWhois(tPtr,RR->node->now(),credential._custody[c].from);

node/Credential.hpp

@ -24,6 +24,7 @@
#include <cstring>
#include "Constants.hpp"
#include "TriviallyCopyable.hpp"
namespace ZeroTier {
@ -37,7 +38,7 @@ class RuntimeEnvironment;
/**
* Base class for credentials
*/
class Credential
class Credential : public TriviallyCopyable
{
public:
/**

node/Defragmenter.hpp (new file, 383 lines)

@ -0,0 +1,383 @@
/*
* Copyright (c)2013-2020 ZeroTier, Inc.
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2024-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
*/
/****/
#ifndef ZT_DEFRAGMENTER_HPP
#define ZT_DEFRAGMENTER_HPP
#include "Constants.hpp"
#include "Buf.hpp"
#include "AtomicCounter.hpp"
#include "SharedPtr.hpp"
#include "Hashtable.hpp"
#include "Mutex.hpp"
#include "Path.hpp"
#include <cstring>
#include <cstdlib>
#include <vector>
namespace ZeroTier {
/**
* Generalized putter back together-er for fragmented messages
*
* This is used both for packet fragment assembly and multi-chunk network config
* assembly. This is abstracted out of the code that uses it because it's a bit of
* a hairy and difficult thing to get both correct and fast, and because its
* hairiness makes it very desirable to be able to test and fuzz this code
* independently.
*
* Here be dragons!
*
* @tparam MF Maximum number of fragments that each message can possess
*/
template<unsigned int MF>
class Defragmenter
{
public:
/**
* Error codes for assemble()
*/
enum ErrorCode
{
/**
* No error occurred
*/
ERR_NONE,
/**
* This fragment duplicates another with the same fragment number for this message
*/
ERR_DUPLICATE_FRAGMENT,
/**
* The fragment is invalid, e.g. its fragment number is beyond the expected count.
*/
ERR_INVALID_FRAGMENT,
/**
* Too many fragments are in flight for this path
*
* The message will be marked as if it's done (all fragments received) but will
* be abandoned. Subsequent fragments will generate a DUPLICATE_FRAGMENT error.
*
* This is an anti-denial-of-service feature to limit the number of inbound
* fragments that can be in flight over a given physical network path.
*/
ERR_TOO_MANY_FRAGMENTS_FOR_PATH,
/**
* Memory (or some other limit) exhausted
*/
ERR_OUT_OF_MEMORY
};
/**
* Return tuple for assemble()
*/
struct Result
{
ZT_ALWAYS_INLINE Result() : message(),messageFragmentCount(0),error(Defragmenter::ERR_NONE) {}
/**
* Fully assembled message as a series of slices of fragments
*/
Buf<>::Slice message[MF];
/**
* Fully assembled message fragment count (number of slices)
*
* This will be nonzero if the message is fully assembled.
*/
unsigned int messageFragmentCount;
/**
* Error code or ERR_NONE if none
*/
Defragmenter::ErrorCode error;
};
/**
* Process a fragment of a multi-part message
*
* The message ID is arbitrary but must be something that can uniquely
* group fragments for a given final message. The total fragments expected
* value is expected to be the same for all fragments in a message. Results
* are undefined and probably wrong if this value changes across a message.
* Fragment numbers must be sequential, starting with 0 and going up to
* total fragments expected minus one (i.e. the range [0,totalFragmentsExpected)).
*
* Fragments can arrive in any order. Duplicates are dropped and ignored.
*
* It's the responsibility of the caller to do whatever validation needs to
* be done before considering a fragment valid and to make sure the fragment
* data index and size parameters are valid.
*
* The fragment supplied to this function is kept and held under the supplied
* message ID until or unless (1) the message is fully assembled, (2) the
* message is orphaned and its entry is taken by a new message, or (3) the
* clear() function is called to forget all incoming messages. The pointer
* at the 'fragment' reference will be zeroed since this pointer is handed
* off, so the SharedPtr<> passed in as 'fragment' will be NULL after this
* function is called.
*
* The result returned by this function is a structure containing a series
* of assembled and complete fragments, a fragment count, and an error.
* If the message fragment count is non-zero then the message has been
* successfully assembled. If the fragment count is zero then an error may
* have occurred or the message may simply not yet be complete.
*
* The calling code must decide what to do with the assembled and ordered
* fragments, such as memcpy'ing them into a contiguous buffer or handling
* them as a vector of fragments.
*
* The 'via' parameter causes this fragment to be registered with a path and
* unregistered when done or abandoned. It's only used the first time it's
* supplied (the first non-NULL) for a given message ID. This is a mitigation
* against memory-exhaustion DoS attacks.
*
* Lastly the message queue size target and GC trigger parameters control
* garbage collection of defragmenter message queue entries. If the size
* target parameter is non-zero then the message queue is cleaned when its
* size reaches the GC trigger parameter, which MUST be larger than the size
* target. Cleaning is done by sorting all entries by their last modified
* timestamp and removing the oldest N entries so as to bring the size down
* to under the size target. The use of a trigger size that is larger than
* the size target reduces CPU-wasting thrashing. A good value for the trigger
* is 2X the size target, causing cleanups to happen only occasionally.
*
* If the GC parameters are set to zero then clear() must be called from time
* to time or memory use will grow without bound.
*
* @tparam X Template parameter type for Buf<> containing fragment (inferred)
* @param messageId Message ID (a unique ID identifying this message)
* @param fragment Buffer containing fragment that will be filed under this message's ID
* @param fragmentDataIndex Index of data in fragment's data.bytes (fragment's data.fields type is ignored)
* @param fragmentDataSize Length of data in fragment's data.bytes (fragment's data.fields type is ignored)
* @param fragmentNo Number of fragment (0..totalFragmentsExpected, non-inclusive)
* @param totalFragmentsExpected Total number of expected fragments in this message
* @param now Current time
* @param via If non-NULL this is the path on which this message fragment was received
* @param maxIncomingFragmentsPerPath If via is non-NULL this is a cutoff for maximum fragments in flight via this path
* @param messageQueueSizeTarget If non-zero periodically clean the message queue to bring it under this size
* @param messageQueueSizeGCTrigger A value larger than messageQueueSizeTarget that is when cleaning is performed
* @return Result structure containing the assembled message if complete, plus any error code
*/
ZT_ALWAYS_INLINE Result assemble(
const uint64_t messageId,
SharedPtr< Buf<> > &fragment,
const unsigned int fragmentDataIndex,
const unsigned int fragmentDataSize,
const unsigned int fragmentNo,
const unsigned int totalFragmentsExpected,
const int64_t now,
const SharedPtr< Path > &via,
const unsigned int maxIncomingFragmentsPerPath,
const unsigned long messageQueueSizeTarget,
const unsigned long messageQueueSizeGCTrigger)
{
Result r;
// Sanity checks for malformed fragments or invalid input parameters.
if ((fragmentNo >= totalFragmentsExpected)||(totalFragmentsExpected > MF)||(totalFragmentsExpected == 0)) {
r.error = ERR_INVALID_FRAGMENT;
return r;
}
// If there is only one fragment just return that fragment and we are done.
if (totalFragmentsExpected < 2) {
if (fragmentNo == 0) {
r.message[0].b.move(fragment);
r.message[0].s = fragmentDataIndex;
r.message[0].e = fragmentDataSize;
r.messageFragmentCount = 1;
return r;
} else {
r.error = ERR_INVALID_FRAGMENT;
return r;
}
}
// Lock messages for read and look up current entry. Also check the
// GC trigger and if we've exceeded that threshold then older message
// entries are garbage collected.
_messages_l.rlock();
if (messageQueueSizeTarget > 0) {
if (_messages.size() >= messageQueueSizeGCTrigger) {
try {
// Scan messages with read lock still locked first and make a sorted list of
// message entries by last modified time. Then lock for writing and delete
// the oldest entries to bring the size of the messages hash table down to
// under the target size. This tries to minimize the amount of time the write
// lock is held since many threads can hold the read lock but all threads must
// wait if someone holds the write lock.
std::vector< std::pair<int64_t,uint64_t> > messagesByLastUsedTime;
messagesByLastUsedTime.reserve(_messages.size());
typename Hashtable<uint64_t,_E>::Iterator i(_messages);
uint64_t *mk = nullptr;
_E *mv = nullptr;
while (i.next(mk,mv))
messagesByLastUsedTime.push_back(std::pair<int64_t,uint64_t>(mv->lastUsed,*mk));
std::sort(messagesByLastUsedTime.begin(),messagesByLastUsedTime.end());
_messages_l.runlock();
_messages_l.lock();
for (unsigned long x = 0,y = (messagesByLastUsedTime.size() - messageQueueSizeTarget); x <= y; ++x)
_messages.erase(messagesByLastUsedTime[x].second);
_messages_l.unlock();
_messages_l.rlock();
} catch (...) {
// The only way something in that code can throw is if a bad_alloc occurs when
// reserve() is called in the vector. In this case we flush the entire queue
// and error out. This is very rare and on some platforms impossible.
_messages_l.runlock();
_messages_l.lock();
_messages.clear();
_messages_l.unlock();
r.error = ERR_OUT_OF_MEMORY;
return r;
}
}
}
_E *e = _messages.get(messageId);
_messages_l.runlock();
// If no entry exists we must briefly lock messages for write and create a new one.
if (!e) {
try {
RWMutex::Lock ml(_messages_l);
e = &(_messages[messageId]);
} catch ( ... ) {
r.error = ERR_OUT_OF_MEMORY;
return r;
}
e->id = messageId;
}
// Now handle this fragment within this individual message entry.
Mutex::Lock el(e->lock);
// Note: it's important that _messages_l is not locked while the entry
// is locked or a deadlock could occur due to GC or clear() being called
// in another thread.
// If there is a path associated with this fragment make sure we've registered
// ourselves as in flight, check the limit, and abort if exceeded.
if ((via)&&(!e->via)) {
e->via = via;
bool tooManyPerPath = false;
via->_inboundFragmentedMessages_l.lock();
try {
if (via->_inboundFragmentedMessages.size() < maxIncomingFragmentsPerPath) {
via->_inboundFragmentedMessages.insert(messageId);
} else {
tooManyPerPath = true;
}
} catch ( ... ) {
// This would indicate something like bad_alloc thrown by the set. Treat
// it as limit exceeded.
tooManyPerPath = true;
}
via->_inboundFragmentedMessages_l.unlock();
if (tooManyPerPath) {
r.error = ERR_TOO_MANY_FRAGMENTS_FOR_PATH;
return r;
}
}
// Update last-activity timestamp for this entry.
e->lastUsed = now;
// If we already have fragment number X, abort. Note that we do not
// actually compare data here. Two same-numbered fragments with different
// data would just mean the transfer is corrupt and would be detected
// later e.g. by packet MAC check. Other use cases of this code like
// network configs check each fragment so this basically can't happen.
Buf<>::Slice &s = e->fragment[fragmentNo];
if (s.b) {
r.error = ERR_DUPLICATE_FRAGMENT;
return r;
}
// Take ownership of fragment, setting 'fragment' pointer to NULL. The simple
// transfer of the pointer avoids a synchronized increment/decrement of the object's
// reference count.
s.b.move(fragment);
s.s = fragmentDataIndex;
s.e = fragmentDataIndex + fragmentDataSize;
// If we now have all fragments then assemble them.
if (++e->fragmentCount >= totalFragmentsExpected) {
// This message is done so de-register it with its path if one is associated.
if (e->via) {
e->via->_inboundFragmentedMessages_l.lock();
e->via->_inboundFragmentedMessages.erase(messageId);
e->via->_inboundFragmentedMessages_l.unlock();
e->via.zero();
}
// PERFORMANCE HACK: SharedPtr<> is introspective and only holds a pointer, so we
// can 'move' the pointers it holds very quickly by bulk copying the source
// slices and then zeroing the originals. This is only okay if the destination
// currently holds no pointers, which should always be the case. Don't try this
// at home kids.
unsigned int msize = e->fragmentCount * sizeof(Buf<>::Slice);
memcpy(reinterpret_cast<void *>(r.message),reinterpret_cast<const void *>(e->fragment),msize);
memset(reinterpret_cast<void *>(e->fragment),0,msize);
r.messageFragmentCount = e->fragmentCount;
}
return r;
}
/**
* Erase all message entries in the internal queue
*/
ZT_ALWAYS_INLINE void clear()
{
RWMutex::Lock ml(_messages_l);
_messages.clear();
}
private:
struct _E
{
ZT_ALWAYS_INLINE _E() : id(0),lastUsed(0),via(),fragmentCount(0) {}
ZT_ALWAYS_INLINE ~_E()
{
// Ensure that this entry is not in use while it is being deleted!
lock.lock();
if (via) {
via->_inboundFragmentedMessages_l.lock();
via->_inboundFragmentedMessages.erase(id);
via->_inboundFragmentedMessages_l.unlock();
}
lock.unlock();
}
uint64_t id;
volatile int64_t lastUsed;
SharedPtr<Path> via;
Buf<>::Slice fragment[MF];
unsigned int fragmentCount;
Mutex lock;
};
Hashtable< uint64_t,_E > _messages;
RWMutex _messages_l;
};
} // namespace ZeroTier
#endif
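
A hedged usage sketch (variable names and limit values are illustrative, not from this commit):

// Illustrative only: feed one received fragment into a defragmenter.
Defragmenter<ZT_MAX_PACKET_FRAGMENTS> defrag;
Defragmenter<ZT_MAX_PACKET_FRAGMENTS>::Result r(defrag.assemble(
    packetId,            // unique message ID
    fragBuf,             // SharedPtr< Buf<> >; zeroed on return (pointer is handed off)
    dataIndex,dataSize,  // where this fragment's payload lives in fragBuf
    fragNo,totalFrags,   // fragment number and expected total
    now,path,            // current time and receive path
    64,                  // per-path in-flight fragment cap (illustrative)
    128,256));           // GC size target and trigger (trigger ~2x target)
if (r.messageFragmentCount > 0) {
    // complete: r.message[0..messageFragmentCount-1] are the ordered slices
} else if (r.error != Defragmenter<ZT_MAX_PACKET_FRAGMENTS>::ERR_NONE) {
    // invalid, duplicate, over-limit, or out-of-memory; drop accordingly
}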

node/Endpoint.hpp

@ -23,6 +23,7 @@
#include "InetAddress.hpp"
#include "Address.hpp"
#include "Utils.hpp"
#include "TriviallyCopyable.hpp"
// max name size + type byte + port (for DNS name/port) + 3x 16-bit coordinate for location
#define ZT_ENDPOINT_MARSHAL_SIZE_MAX (ZT_ENDPOINT_MAX_NAME_SIZE+1+2+2+2+2)
@ -35,7 +36,7 @@ namespace ZeroTier {
* This data structure supports a number of types that are not yet actually used:
* DNSNAME, URL, and ETHERNET. These are present to reserve them for future use.
*/
class Endpoint
class Endpoint : public TriviallyCopyable
{
public:
enum Type
@ -50,15 +51,7 @@ public:
UNRECOGNIZED = 255 // Unrecognized endpoint type encountered in stream
};
ZT_ALWAYS_INLINE Endpoint()
{
memset(reinterpret_cast<void *>(this),0,sizeof(Endpoint));
}
ZT_ALWAYS_INLINE Endpoint(const Endpoint &ep)
{
memcpy(reinterpret_cast<void *>(this),reinterpret_cast<const void *>(&ep),sizeof(Endpoint));
}
ZT_ALWAYS_INLINE Endpoint() { memoryZero(this); }
explicit ZT_ALWAYS_INLINE Endpoint(const InetAddress &sa)
{
@ -85,12 +78,6 @@ public:
Utils::scopy(_v.url,sizeof(_v.url),url);
}
ZT_ALWAYS_INLINE Endpoint &operator=(const Endpoint &ep)
{
memcpy(reinterpret_cast<void *>(this),&ep,sizeof(Endpoint));
return *this;
}
ZT_ALWAYS_INLINE Endpoint &operator=(const InetAddress &sa)
{
switch(sa.ss_family) {
@ -159,7 +146,7 @@ public:
static ZT_ALWAYS_INLINE int marshalSizeMax() { return ZT_ENDPOINT_MARSHAL_SIZE_MAX; }
int marshal(uint8_t data[ZT_ENDPOINT_MARSHAL_SIZE_MAX]) const;
int unmarshal(const uint8_t *restrict data,const int len);
int unmarshal(const uint8_t *restrict data,int len);
private:
Type _t;

node/Identity.hpp

@ -23,6 +23,7 @@
#include "C25519.hpp"
#include "SHA512.hpp"
#include "ECC384.hpp"
#include "TriviallyCopyable.hpp"
#define ZT_IDENTITY_STRING_BUFFER_LENGTH 1024
@ -43,7 +44,7 @@ namespace ZeroTier {
* search for a different public key that duplicates an existing address. (See
* code for deriveAddress() for this algorithm.)
*/
class Identity
class Identity : public TriviallyCopyable
{
public:
/**
@ -55,7 +56,7 @@ public:
P384 = ZT_CRYPTO_ALG_P384 // Type 1 -- NIST P-384 with linked Curve25519/Ed25519 secondaries (2.x+)
};
ZT_ALWAYS_INLINE Identity() { memset(reinterpret_cast<void *>(this),0,sizeof(Identity)); }
ZT_ALWAYS_INLINE Identity() { memoryZero(this); }
ZT_ALWAYS_INLINE ~Identity() { Utils::burn(reinterpret_cast<void *>(&this->_priv),sizeof(this->_priv)); }
/**
@ -71,7 +72,7 @@ public:
/**
* Set identity to NIL value (all zero)
*/
ZT_ALWAYS_INLINE void zero() { memset(reinterpret_cast<void *>(this),0,sizeof(Identity)); }
ZT_ALWAYS_INLINE void zero() { memoryZero(this); }
/**
* @return Identity type (undefined if identity is null or invalid)

node/IncomingPacket.cpp (file diff suppressed because it is too large)

node/IncomingPacket.hpp

@ -14,11 +14,12 @@
#ifndef ZT_INCOMINGPACKET_HPP
#define ZT_INCOMINGPACKET_HPP
#include "Packet.hpp"
#include "Path.hpp"
#include "Utils.hpp"
#include "MulticastGroup.hpp"
#include "Peer.hpp"
#include "Buf.hpp"
#include "Protocol.hpp"
/*
* The big picture:
@ -41,41 +42,20 @@ namespace ZeroTier {
class RuntimeEnvironment;
class Network;
class IncomingPacket : public Packet
class IncomingPacket
{
public:
ZT_ALWAYS_INLINE IncomingPacket() : Packet(),_receiveTime(0),_path() {}
ZT_ALWAYS_INLINE IncomingPacket() {}
/**
* Create a new packet-in-decode
*
* @param data Packet data
* @param len Packet length
* @param path Path over which packet arrived
* @param now Current time
* @throws std::out_of_range Range error processing packet
*/
ZT_ALWAYS_INLINE IncomingPacket(const void *data,unsigned int len,const SharedPtr<Path> &path,int64_t now) :
Packet(data,len),
_receiveTime(now),
_path(path)
template<typename X>
ZT_ALWAYS_INLINE void set(const SharedPtr< Buf<X> > &pkt_,const unsigned int pktSize_,const SharedPtr<Path> &path_,const int64_t now_)
{
}
/**
* Init packet-in-decode in place
*
* @param data Packet data
* @param len Packet length
* @param path Path over which packet arrived
* @param now Current time
* @throws std::out_of_range Range error processing packet
*/
ZT_ALWAYS_INLINE void init(const void *data,unsigned int len,const SharedPtr<Path> &path,int64_t now)
{
copyFrom(data,len);
_receiveTime = now;
_path = path;
idBE = 0; // initially zero, set when decryption/auth occurs
receiveTime = now_;
path = path_;
pkt = reinterpret_cast< SharedPtr< Buf< Protocol::Header > > >(pkt_);
size = pktSize_;
hops = Protocol::packetHops(pkt->data.fields);
}
/**
@ -94,13 +74,34 @@ public:
bool tryDecode(const RuntimeEnvironment *RR,void *tPtr);
/**
* @return Time of packet receipt / start of decode
* Packet ID in big-endian byte order or 0 if not decrypted/dearmored yet
*/
ZT_ALWAYS_INLINE uint64_t receiveTime() const { return _receiveTime; }
uint64_t idBE;
private:
uint64_t _receiveTime;
SharedPtr<Path> _path;
/**
* Time packet was received
*/
int64_t receiveTime;
/**
* Path over which packet was received
*/
SharedPtr< Path > path;
/**
* Packet itself
*/
SharedPtr< Buf< Protocol::Header > > pkt;
/**
* Size of packet in bytes
*/
unsigned int size;
/**
* Hop count for received packet
*/
uint8_t hops;
};
} // namespace ZeroTier

node/InetAddress.hpp

@ -21,14 +21,10 @@
#include "Constants.hpp"
#include "Utils.hpp"
#include "MAC.hpp"
#include "TriviallyCopyable.hpp"
namespace ZeroTier {
/**
* Maximum integer value of enum IpScope
*/
#define ZT_INETADDRESS_MAX_SCOPE 7
#define ZT_INETADDRESS_MARSHAL_SIZE_MAX 19
/**
@ -39,15 +35,16 @@ namespace ZeroTier {
* sockaddr_storage and used interchangeably. DO NOT change this by e.g.
* adding non-static fields, since much code depends on this identity.
*/
struct InetAddress : public sockaddr_storage
struct InetAddress : public sockaddr_storage,public TriviallyCopyable
{
private:
// Internal function to copy any sockaddr_X structure to this one even if it's smaller and unpadded.
template<typename SA>
ZT_ALWAYS_INLINE void copySockaddrToThis(const SA *sa)
{
memcpy(reinterpret_cast<void *>(this),sa,sizeof(SA));
if (sizeof(SA) < sizeof(InetAddress))
memset(reinterpret_cast<char *>(this) + sizeof(SA),0,sizeof(InetAddress) - sizeof(SA));
memset(reinterpret_cast<uint8_t *>(this) + sizeof(SA),0,sizeof(InetAddress) - sizeof(SA));
}
public:
@ -88,7 +85,8 @@ public:
// Hasher for unordered sets and maps in C++11
struct Hasher { ZT_ALWAYS_INLINE std::size_t operator()(const InetAddress &a) const { return (std::size_t)a.hashCode(); } };
ZT_ALWAYS_INLINE InetAddress() { memset(reinterpret_cast<void *>(this),0,sizeof(InetAddress)); }
ZT_ALWAYS_INLINE InetAddress() { memoryZero(this); }
ZT_ALWAYS_INLINE InetAddress(const InetAddress &a) { memoryCopy(this,&a); }
explicit ZT_ALWAYS_INLINE InetAddress(const struct sockaddr_storage &ss) { *this = ss; }
explicit ZT_ALWAYS_INLINE InetAddress(const struct sockaddr_storage *ss) { *this = ss; }
explicit ZT_ALWAYS_INLINE InetAddress(const struct sockaddr &sa) { *this = sa; }
@ -101,19 +99,19 @@ public:
ZT_ALWAYS_INLINE InetAddress(const uint32_t ipv4,unsigned int port) { this->set(&ipv4,4,port); }
explicit ZT_ALWAYS_INLINE InetAddress(const char *ipSlashPort) { this->fromString(ipSlashPort); }
ZT_ALWAYS_INLINE void clear() { memset(reinterpret_cast<void *>(this),0,sizeof(InetAddress)); }
ZT_ALWAYS_INLINE void clear() { memoryZero(this); }
ZT_ALWAYS_INLINE InetAddress &operator=(const struct sockaddr_storage &ss)
{
memcpy(reinterpret_cast<void *>(this),&ss,sizeof(InetAddress));
memoryCopyUnsafe(this,&ss);
return *this;
}
ZT_ALWAYS_INLINE InetAddress &operator=(const struct sockaddr_storage *ss)
{
if (ss)
memcpy(reinterpret_cast<void *>(this),ss,sizeof(InetAddress));
else memset(reinterpret_cast<void *>(this),0,sizeof(InetAddress));
memoryCopyUnsafe(this,ss);
else memoryZero(this);
return *this;
}
@ -162,9 +160,10 @@ public:
copySockaddrToThis(reinterpret_cast<const sockaddr_in *>(sa));
else if (sa->sa_family == AF_INET6)
copySockaddrToThis(reinterpret_cast<const sockaddr_in6 *>(sa));
return *this;
else memoryZero(this);
} else {
memoryZero(this);
}
memset(reinterpret_cast<void *>(this),0,sizeof(InetAddress));
return *this;
}
@ -338,7 +337,7 @@ public:
switch(ss_family) {
case AF_INET: return (const void *)&(reinterpret_cast<const struct sockaddr_in *>(this)->sin_addr.s_addr);
case AF_INET6: return (const void *)(reinterpret_cast<const struct sockaddr_in6 *>(this)->sin6_addr.s6_addr);
default: return 0;
default: return nullptr;
}
}
@ -449,11 +448,6 @@ public:
}
}
/**
* Set to null/zero
*/
ZT_ALWAYS_INLINE void zero() { memset(this,0,sizeof(InetAddress)); }
/**
* Check whether this is a network/route rather than an IP assignment
*
@ -495,7 +489,7 @@ public:
static ZT_ALWAYS_INLINE int marshalSizeMax() { return ZT_INETADDRESS_MARSHAL_SIZE_MAX; }
int marshal(uint8_t data[ZT_INETADDRESS_MARSHAL_SIZE_MAX]) const;
int unmarshal(const uint8_t *restrict data,const int len);
int unmarshal(const uint8_t *restrict data,int len);
bool operator==(const InetAddress &a) const;
bool operator<(const InetAddress &a) const;
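
The unmarshal() signature change (dropping const from the by-value length) is cosmetic; usage of the marshal/unmarshal pair stays roughly as follows (return values are byte counts, presumably negative on error):

// Illustrative round trip:
InetAddress a("10.1.2.3/9993");                // parse "IP/port" text form
uint8_t wire[ZT_INETADDRESS_MARSHAL_SIZE_MAX];
const int wlen = a.marshal(wire);
InetAddress b;
const int rlen = b.unmarshal(wire,wlen);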

node/LZ4.hpp

@ -18,7 +18,7 @@
namespace ZeroTier {
int LZ4_compress_fast(const char *source,char *dest,int inputSize,int maxOutputSize,int acceleration);
int LZ4_compress_fast(const char *source,char *dest,int inputSize,int maxOutputSize,int acceleration = 1);
int LZ4_decompress_safe(const char *source,char *dest,int compressedSize,int maxDecompressedSize);
} // namespace ZeroTier

node/Locator.hpp

@ -21,6 +21,7 @@
#include "Constants.hpp"
#include "Endpoint.hpp"
#include "Identity.hpp"
#include "TriviallyCopyable.hpp"
#define ZT_LOCATOR_MAX_ENDPOINTS 8
#define ZT_LOCATOR_MARSHAL_SIZE_MAX (8 + 2 + (ZT_ENDPOINT_MARSHAL_SIZE_MAX * ZT_LOCATOR_MAX_ENDPOINTS) + 2 + 2 + ZT_SIGNATURE_BUFFER_SIZE)
@ -33,7 +34,7 @@ namespace ZeroTier {
* A locator contains long-lived endpoints for a node such as IP/port pairs,
* URLs, or other nodes, and is signed by the node it describes.
*/
class Locator
class Locator : public TriviallyCopyable
{
public:
ZT_ALWAYS_INLINE Locator() { this->clear(); }

node/MAC.hpp

@ -21,26 +21,22 @@
#include "Constants.hpp"
#include "Utils.hpp"
#include "Address.hpp"
#include "TriviallyCopyable.hpp"
namespace ZeroTier {
/**
* 48-byte Ethernet MAC address
*/
class MAC
class MAC : public TriviallyCopyable
{
public:
ZT_ALWAYS_INLINE MAC() : _m(0ULL) {}
ZT_ALWAYS_INLINE MAC(const unsigned char a,const unsigned char b,const unsigned char c,const unsigned char d,const unsigned char e,const unsigned char f) :
_m( ((((uint64_t)a) & 0xffULL) << 40U) |
((((uint64_t)b) & 0xffULL) << 32U) |
((((uint64_t)c) & 0xffULL) << 24U) |
((((uint64_t)d) & 0xffULL) << 16U) |
((((uint64_t)e) & 0xffULL) << 8U) |
(((uint64_t)f) & 0xffULL) ) {}
ZT_ALWAYS_INLINE MAC(const uint8_t a,const uint8_t b,const uint8_t c,const uint8_t d,const uint8_t e,const uint8_t f) :
_m( (((uint64_t)a) << 40U) | (((uint64_t)b) << 32U) | (((uint64_t)c) << 24U) | (((uint64_t)d) << 16U) | (((uint64_t)e) << 8U) | ((uint64_t)f) ) {}
explicit ZT_ALWAYS_INLINE MAC(const uint64_t m) : _m(m & 0xffffffffffffULL) {}
explicit ZT_ALWAYS_INLINE MAC(const uint8_t b[6]) { setTo(b); }
ZT_ALWAYS_INLINE MAC(const Address &ztaddr,uint64_t nwid) { fromAddress(ztaddr,nwid); }
explicit ZT_ALWAYS_INLINE MAC(const uint64_t m) : _m(m & 0xffffffffffffULL) {}
/**
* @return MAC in 64-bit integer

node/Meter.hpp

@ -43,7 +43,7 @@ public:
const int64_t since = now - _ts;
if (since >= ZT_METER_HISTORY_TICK_DURATION) {
_ts = now;
_history[(unsigned int)(++_hptr) % ZT_METER_HISTORY_LENGTH] = (double)_count / ((double)since / 1000.0);
_history[++_hptr % ZT_METER_HISTORY_LENGTH] = (double)_count / ((double)since / 1000.0);
_count = (uint64_t)count;
} else {
_count += (uint64_t)count;
@ -69,7 +69,7 @@ private:
volatile double _history[ZT_METER_HISTORY_LENGTH];
volatile int64_t _ts;
volatile uint64_t _count;
AtomicCounter _hptr;
AtomicCounter<unsigned int> _hptr;
};
} // namespace ZeroTier
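
For intuition about the history samples above: if 1500 units were counted and 1000 ms elapsed since the last tick, the stored sample is 1500.0 / (1000.0 / 1000.0) = 1500.0 units per second.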

node/MulticastGroup.hpp

@ -20,6 +20,7 @@
#include "MAC.hpp"
#include "InetAddress.hpp"
#include "Utils.hpp"
#include "TriviallyCopyable.hpp"
namespace ZeroTier {
@ -38,7 +39,7 @@ namespace ZeroTier {
*
* MulticastGroup behaves as an immutable value object.
*/
class MulticastGroup
class MulticastGroup : public TriviallyCopyable
{
public:
ZT_ALWAYS_INLINE MulticastGroup() : _mac(),_adi(0) {}

node/Network.cpp

@ -21,12 +21,11 @@
#include "Address.hpp"
#include "InetAddress.hpp"
#include "Switch.hpp"
#include "Buffer.hpp"
#include "Packet.hpp"
#include "NetworkController.hpp"
#include "Peer.hpp"
#include "Trace.hpp"
#include "ScopedPtr.hpp"
#include "Buf.hpp"
#include <set>
@ -202,10 +201,10 @@ _doZtFilterResult _doZtFilter(
thisRuleMatches = (uint8_t)(rules[rn].v.vlanDei == 0);
break;
case ZT_NETWORK_RULE_MATCH_MAC_SOURCE:
thisRuleMatches = (uint8_t)(MAC(rules[rn].v.mac,6) == macSource);
thisRuleMatches = (uint8_t)(MAC(rules[rn].v.mac) == macSource);
break;
case ZT_NETWORK_RULE_MATCH_MAC_DEST:
thisRuleMatches = (uint8_t)(MAC(rules[rn].v.mac,6) == macDest);
thisRuleMatches = (uint8_t)(MAC(rules[rn].v.mac) == macDest);
break;
case ZT_NETWORK_RULE_MATCH_IPV4_SOURCE:
if ((etherType == ZT_ETHERTYPE_IPV4)&&(frameLen >= 20)) {
@ -545,9 +544,6 @@ Network::Network(const RuntimeEnvironment *renv,void *tPtr,uint64_t nwid,void *u
_destroyed(false),
_netconfFailure(NETCONF_FAILURE_NONE)
{
for(int i=0;i<ZT_NETWORK_MAX_INCOMING_UPDATES;++i)
_incomingConfigChunks[i].ts = 0;
if (nconf) {
this->setConfiguration(tPtr,*nconf,false);
_lastConfigUpdate = 0; // still want to re-request since it's likely outdated
@ -556,15 +552,15 @@ Network::Network(const RuntimeEnvironment *renv,void *tPtr,uint64_t nwid,void *u
tmp[0] = nwid; tmp[1] = 0;
bool got = false;
ScopedPtr< Dictionary<ZT_NETWORKCONFIG_DICT_CAPACITY> > dict(new Dictionary<ZT_NETWORKCONFIG_DICT_CAPACITY>());
try {
Dictionary dict;
std::vector<uint8_t> nconfData(RR->node->stateObjectGet(tPtr,ZT_STATE_OBJECT_NETWORK_CONFIG,tmp));
if (nconfData.size() > 2) {
nconfData.push_back(0);
if (dict->load((const char *)nconfData.data())) {
if (dict.decode(nconfData.data(),(unsigned int)nconfData.size())) {
try {
ScopedPtr<NetworkConfig> nconf2(new NetworkConfig());
if (nconf2->fromDictionary(*dict)) {
if (nconf2->fromDictionary(dict)) {
this->setConfiguration(tPtr,*nconf2,false);
_lastConfigUpdate = 0; // still want to re-request an update since it's likely outdated
got = true;
@ -853,122 +849,134 @@ void Network::multicastUnsubscribe(const MulticastGroup &mg)
_myMulticastGroups.erase(i);
}
uint64_t Network::handleConfigChunk(void *tPtr,const uint64_t packetId,const Address &source,const Buffer<ZT_PROTO_MAX_PACKET_LENGTH> &chunk,unsigned int ptr)
uint64_t Network::handleConfigChunk(void *tPtr,uint64_t packetId,const Address &source,const Buf<> &chunk,int ptr,int size)
{
if (_destroyed)
return 0;
const unsigned int start = ptr;
const unsigned int chunkPayloadStart = ptr;
ptr += 8; // skip network ID, which is already obviously known
const unsigned int chunkLen = chunk.at<uint16_t>(ptr); ptr += 2;
const void *chunkData = chunk.field(ptr,chunkLen); ptr += chunkLen;
const unsigned int chunkLen = chunk.rI16(ptr);
const uint8_t *chunkData = chunk.rBnc(ptr,chunkLen);
if (Buf<>::readOverflow(ptr,size))
return 0;
Mutex::Lock l1(_config_l);
_IncomingConfigChunk *c = nullptr;
uint64_t configUpdateId;
{
Mutex::Lock l1(_config_l);
int totalLength = 0,chunkIndex = 0;
if (ptr < size) {
// If there is more data after the chunk / dictionary, it means this is a new controller
// that sends signed chunks. We still support really old controllers, but probably not forever.
const bool fastPropagate = ((chunk.rI8(ptr) & Protocol::NETWORK_CONFIG_FLAG_FAST_PROPAGATE) != 0);
configUpdateId = chunk.rI64(ptr);
totalLength = chunk.rI32(ptr);
chunkIndex = chunk.rI32(ptr);
++ptr; // skip unused signature type field
const unsigned int signatureSize = chunk.rI16(ptr);
const uint8_t *signature = chunk.rBnc(ptr,signatureSize);
if ((Buf<>::readOverflow(ptr,size))||((chunkIndex + chunkLen) > totalLength)||(totalLength >= ZT_MAX_NETWORK_CONFIG_BYTES)||(signatureSize > ZT_SIGNATURE_BUFFER_SIZE)||(!signature))
return 0;
const unsigned int chunkPayloadSize = (unsigned int)ptr - chunkPayloadStart;
_IncomingConfigChunk *c = nullptr;
uint64_t chunkId = 0;
unsigned long totalLength,chunkIndex;
if (ptr < chunk.size()) {
const bool fastPropagate = ((chunk[ptr++] & 0x01U) != 0);
configUpdateId = chunk.at<uint64_t>(ptr); ptr += 8;
totalLength = chunk.at<uint32_t>(ptr); ptr += 4;
chunkIndex = chunk.at<uint32_t>(ptr); ptr += 4;
// Find existing or new slot for this update and its chunk(s).
for(int i=0;i<ZT_NETWORK_MAX_INCOMING_UPDATES;++i) {
if (_incomingConfigChunks[i].updateId == configUpdateId) {
c = &(_incomingConfigChunks[i]);
if (c->chunks.find(chunkIndex) != c->chunks.end())
return 0; // we already have this chunk!
break;
} else if ((!c)||(_incomingConfigChunks[i].touchCtr < c->touchCtr)) {
c = &(_incomingConfigChunks[i]);
}
}
if (!c) // sanity check; should not be possible
return 0;
if (((chunkIndex + chunkLen) > totalLength)||(totalLength >= ZT_NETWORKCONFIG_DICT_CAPACITY)) // >= since we need room for a null at the end
return 0;
if ((chunk[ptr] != 1)||(chunk.at<uint16_t>(ptr + 1) != ZT_C25519_SIGNATURE_LEN))
return 0;
const uint8_t *sig = reinterpret_cast<const uint8_t *>(chunk.field(ptr + 3,ZT_C25519_SIGNATURE_LEN));
// Verify this chunk's signature
const SharedPtr<Peer> controllerPeer(RR->topology->get(tPtr,controller()));
if ((!controllerPeer)||(!controllerPeer->identity().verify(chunk.data.bytes + chunkPayloadStart,chunkPayloadSize,signature,signatureSize)))
return 0;
// We can use the signature, which is unique per chunk, to get a per-chunk ID for local deduplication use
for(unsigned int i=0;i<16;++i)
reinterpret_cast<uint8_t *>(&chunkId)[i & 7] ^= sig[i];
// New properly verified chunks can be flooded "virally" through the network via an aggressive
// exponential rumor mill algorithm.
if (fastPropagate) {
Mutex::Lock l2(_memberships_l);
Address *a = nullptr;
Membership *m = nullptr;
Hashtable<Address,Membership>::Iterator i(_memberships);
while (i.next(a,m)) {
if ((*a != source)&&(*a != controller())) {
ZT_GET_NEW_BUF(outp,Protocol::Header);
// Find existing or new slot for this update and check if this is a duplicate chunk
for(int i=0;i<ZT_NETWORK_MAX_INCOMING_UPDATES;++i) {
if (_incomingConfigChunks[i].updateId == configUpdateId) {
c = &(_incomingConfigChunks[i]);
outp->data.fields.packetId = Protocol::getPacketId();
a->copyTo(outp->data.fields.destination);
RR->identity.address().copyTo(outp->data.fields.source);
outp->data.fields.flags = 0;
outp->data.fields.verb = Protocol::VERB_NETWORK_CONFIG;
for(unsigned long j=0;j<c->haveChunks;++j) {
if (c->haveChunkIds[j] == chunkId)
return 0;
}
int outl = sizeof(Protocol::Header);
outp->wB(outl,chunk.data.bytes + chunkPayloadStart,chunkPayloadSize);
break;
} else if ((!c)||(_incomingConfigChunks[i].ts < c->ts)) {
c = &(_incomingConfigChunks[i]);
if (Buf<>::writeOverflow(outl)) // sanity check... it fit before!
break;
RR->sw->send(tPtr,outp,true);
}
}
}
} else if ((source == controller())||(!source)) {
// Legacy support for OK(NETWORK_CONFIG_REQUEST) from older controllers that don't sign chunks and don't
// support multiple chunks. Since old controllers don't sign chunks we only accept the message if it comes
// directly from the controller.
configUpdateId = packetId;
totalLength = (int)chunkLen;
if (totalLength > ZT_MAX_NETWORK_CONFIG_BYTES)
return 0;
// If it's not a duplicate, check chunk signature
const SharedPtr<Peer> controllerPeer(RR->topology->get(tPtr,controller()));
if (!controllerPeer)
return 0;
if (!controllerPeer->identity().verify(chunk.field(start,ptr - start),ptr - start,sig,ZT_C25519_SIGNATURE_LEN))
return 0;
for(int i=0;i<ZT_NETWORK_MAX_INCOMING_UPDATES;++i) {
if ((!c)||(_incomingConfigChunks[i].touchCtr < c->touchCtr))
c = &(_incomingConfigChunks[i]);
}
} else {
// Not signed, not from controller -> reject.
return 0;
}
// New properly verified chunks can be flooded "virally" through the network
if (fastPropagate) {
Mutex::Lock l2(_memberships_l);
Address *a = nullptr;
Membership *m = nullptr;
Hashtable<Address,Membership>::Iterator i(_memberships);
while (i.next(a,m)) {
if ((*a != source)&&(*a != controller())) {
Packet outp(*a,RR->identity.address(),Packet::VERB_NETWORK_CONFIG);
outp.append(reinterpret_cast<const uint8_t *>(chunk.data()) + start,chunk.size() - start);
RR->sw->send(tPtr,outp,true);
}
}
}
} else if ((source == controller())||(!source)) { // since old chunks aren't signed, only accept from controller itself (or via cluster backplane)
// Legacy support for OK(NETWORK_CONFIG_REQUEST) from older controllers
chunkId = packetId;
configUpdateId = chunkId;
totalLength = chunkLen;
chunkIndex = 0;
try {
++c->touchCtr;
if (c->updateId != configUpdateId) {
c->updateId = configUpdateId;
c->chunks.clear();
}
c->chunks[chunkIndex].assign(chunkData,chunkData + chunkLen);
if (totalLength >= ZT_NETWORKCONFIG_DICT_CAPACITY)
return 0;
for(int i=0;i<ZT_NETWORK_MAX_INCOMING_UPDATES;++i) {
if ((!c)||(_incomingConfigChunks[i].ts < c->ts))
c = &(_incomingConfigChunks[i]);
}
} else {
// Single-chunk unsigned legacy configs are only allowed from the controller itself
int haveLength = 0;
for(std::map< int,std::vector<uint8_t> >::const_iterator ch(c->chunks.begin());ch!=c->chunks.end();++ch)
haveLength += (int)ch->second.size();
if (haveLength > ZT_MAX_NETWORK_CONFIG_BYTES) {
c->touchCtr = 0;
c->updateId = 0;
c->chunks.clear();
return 0;
}
++c->ts; // newer is higher, that's all we need
if (haveLength == totalLength) {
std::vector<uint8_t> assembledConfig;
for(std::map< int,std::vector<uint8_t> >::const_iterator ch(c->chunks.begin());ch!=c->chunks.end();++ch)
assembledConfig.insert(assembledConfig.end(),ch->second.begin(),ch->second.end());
if (c->updateId != configUpdateId) {
c->updateId = configUpdateId;
c->haveChunks = 0;
c->haveBytes = 0;
}
if (c->haveChunks >= ZT_NETWORK_MAX_UPDATE_CHUNKS)
return false;
c->haveChunkIds[c->haveChunks++] = chunkId;
memcpy(c->data.unsafeData() + chunkIndex,chunkData,chunkLen);
c->haveBytes += chunkLen;
if (c->haveBytes == totalLength) {
c->data.unsafeData()[c->haveBytes] = (char)0; // ensure null terminated
ScopedPtr<NetworkConfig> nc(new NetworkConfig());
try {
if (nc->fromDictionary(c->data)) {
Dictionary dict;
if (dict.decode(assembledConfig.data(),(unsigned int)assembledConfig.size())) {
ScopedPtr<NetworkConfig> nc(new NetworkConfig());
if (nc->fromDictionary(dict)) {
this->setConfiguration(tPtr,*nc,true);
return configUpdateId;
}
} catch ( ... ) {}
}
}
}
} catch (...) {}
return 0;
}
@ -1004,11 +1012,13 @@ int Network::setConfiguration(void *tPtr,const NetworkConfig &nconf,bool saveToD
if (saveToDisk) {
try {
ScopedPtr< Dictionary<ZT_NETWORKCONFIG_DICT_CAPACITY> > d(new Dictionary<ZT_NETWORKCONFIG_DICT_CAPACITY>());
if (nconf.toDictionary(*d,false)) {
Dictionary d;
if (nconf.toDictionary(d,false)) {
uint64_t tmp[2];
tmp[0] = _id; tmp[1] = 0;
RR->node->stateObjectPut(tPtr,ZT_STATE_OBJECT_NETWORK_CONFIG,tmp,d->data(),d->sizeBytes());
std::vector<uint8_t> d2;
d.encode(d2);
RR->node->stateObjectPut(tPtr,ZT_STATE_OBJECT_NETWORK_CONFIG,tmp,d2.data(),(unsigned int)d2.size());
}
} catch ( ... ) {}
}
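
The new Dictionary API used above round-trips through a plain byte vector instead of a fixed-capacity template; roughly (using only calls that appear in this commit):

Dictionary d;
d.add(ZT_NETWORKCONFIG_REQUEST_METADATA_KEY_PROTOCOL_VERSION,(uint64_t)ZT_PROTO_VERSION);
std::vector<uint8_t> bytes;
d.encode(bytes);
Dictionary d2;
if (d2.decode(bytes.data(),(unsigned int)bytes.size())) {
    // d2 now holds the same key/value pairs as d
}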
@ -1320,7 +1330,7 @@ void Network::_requestConfiguration(void *tPtr)
nconf->name[nn++] = '0';
nconf->name[nn++] = '.';
nconf->name[nn++] = '0';
nconf->name[nn++] = (char)0;
nconf->name[nn] = (char)0;
this->setConfiguration(tPtr,*nconf,false);
}
@ -1329,24 +1339,24 @@ void Network::_requestConfiguration(void *tPtr)
const Address ctrl(controller());
ScopedPtr< Dictionary<ZT_NETWORKCONFIG_METADATA_DICT_CAPACITY> > rmd(new Dictionary<ZT_NETWORKCONFIG_METADATA_DICT_CAPACITY>());
rmd->add(ZT_NETWORKCONFIG_REQUEST_METADATA_KEY_NODE_VENDOR,(uint64_t)1); // 1 == ZeroTier, no other vendors at the moment
rmd->add(ZT_NETWORKCONFIG_REQUEST_METADATA_KEY_PROTOCOL_VERSION,(uint64_t)ZT_PROTO_VERSION);
rmd->add(ZT_NETWORKCONFIG_REQUEST_METADATA_KEY_NODE_MAJOR_VERSION,(uint64_t)ZEROTIER_ONE_VERSION_MAJOR);
rmd->add(ZT_NETWORKCONFIG_REQUEST_METADATA_KEY_NODE_MINOR_VERSION,(uint64_t)ZEROTIER_ONE_VERSION_MINOR);
rmd->add(ZT_NETWORKCONFIG_REQUEST_METADATA_KEY_NODE_REVISION,(uint64_t)ZEROTIER_ONE_VERSION_REVISION);
rmd->add(ZT_NETWORKCONFIG_REQUEST_METADATA_KEY_MAX_NETWORK_RULES,(uint64_t)ZT_MAX_NETWORK_RULES);
rmd->add(ZT_NETWORKCONFIG_REQUEST_METADATA_KEY_MAX_NETWORK_CAPABILITIES,(uint64_t)ZT_MAX_NETWORK_CAPABILITIES);
rmd->add(ZT_NETWORKCONFIG_REQUEST_METADATA_KEY_MAX_CAPABILITY_RULES,(uint64_t)ZT_MAX_CAPABILITY_RULES);
rmd->add(ZT_NETWORKCONFIG_REQUEST_METADATA_KEY_MAX_NETWORK_TAGS,(uint64_t)ZT_MAX_NETWORK_TAGS);
rmd->add(ZT_NETWORKCONFIG_REQUEST_METADATA_KEY_FLAGS,(uint64_t)0);
rmd->add(ZT_NETWORKCONFIG_REQUEST_METADATA_KEY_RULES_ENGINE_REV,(uint64_t)ZT_RULES_ENGINE_REVISION);
Dictionary rmd;
rmd.add(ZT_NETWORKCONFIG_REQUEST_METADATA_KEY_NODE_VENDOR,(uint64_t)1); // 1 == ZeroTier, no other vendors at the moment
rmd.add(ZT_NETWORKCONFIG_REQUEST_METADATA_KEY_PROTOCOL_VERSION,(uint64_t)ZT_PROTO_VERSION);
rmd.add(ZT_NETWORKCONFIG_REQUEST_METADATA_KEY_NODE_MAJOR_VERSION,(uint64_t)ZEROTIER_ONE_VERSION_MAJOR);
rmd.add(ZT_NETWORKCONFIG_REQUEST_METADATA_KEY_NODE_MINOR_VERSION,(uint64_t)ZEROTIER_ONE_VERSION_MINOR);
rmd.add(ZT_NETWORKCONFIG_REQUEST_METADATA_KEY_NODE_REVISION,(uint64_t)ZEROTIER_ONE_VERSION_REVISION);
rmd.add(ZT_NETWORKCONFIG_REQUEST_METADATA_KEY_MAX_NETWORK_RULES,(uint64_t)ZT_MAX_NETWORK_RULES);
rmd.add(ZT_NETWORKCONFIG_REQUEST_METADATA_KEY_MAX_NETWORK_CAPABILITIES,(uint64_t)ZT_MAX_NETWORK_CAPABILITIES);
rmd.add(ZT_NETWORKCONFIG_REQUEST_METADATA_KEY_MAX_CAPABILITY_RULES,(uint64_t)ZT_MAX_CAPABILITY_RULES);
rmd.add(ZT_NETWORKCONFIG_REQUEST_METADATA_KEY_MAX_NETWORK_TAGS,(uint64_t)ZT_MAX_NETWORK_TAGS);
rmd.add(ZT_NETWORKCONFIG_REQUEST_METADATA_KEY_FLAGS,(uint64_t)0);
rmd.add(ZT_NETWORKCONFIG_REQUEST_METADATA_KEY_RULES_ENGINE_REV,(uint64_t)ZT_RULES_ENGINE_REVISION);
RR->t->networkConfigRequestSent(tPtr,_id);
if (ctrl == RR->identity.address()) {
if (RR->localNetworkController) {
RR->localNetworkController->request(_id,InetAddress(),0xffffffffffffffffULL,RR->identity,*rmd);
RR->localNetworkController->request(_id,InetAddress(),0xffffffffffffffffULL,RR->identity,rmd);
} else {
this->setNotFound();
}

node/Network.hpp

@ -29,13 +29,13 @@
#include "AtomicCounter.hpp"
#include "MulticastGroup.hpp"
#include "MAC.hpp"
#include "Buf.hpp"
#include "Dictionary.hpp"
#include "Membership.hpp"
#include "NetworkConfig.hpp"
#include "CertificateOfMembership.hpp"
#define ZT_NETWORK_MAX_INCOMING_UPDATES 3
#define ZT_NETWORK_MAX_UPDATE_CHUNKS ((ZT_NETWORKCONFIG_DICT_CAPACITY / 1024) + 1)
namespace ZeroTier {
@ -181,20 +181,22 @@ public:
void multicastUnsubscribe(const MulticastGroup &mg);
/**
* Handle an inbound network config chunk
* Parse, verify, and handle an inbound network config chunk
*
* This is called from IncomingPacket to handle incoming network config
* chunks via OK(NETWORK_CONFIG_REQUEST) or NETWORK_CONFIG. It verifies
* each chunk and once assembled applies the configuration.
* chunks via OK(NETWORK_CONFIG_REQUEST) or NETWORK_CONFIG. It's a common
* bit of packet parsing code that also verifies chunks and replicates
* them (via rumor mill flooding) if their fast propagate flag is set.
*
* @param tPtr Thread pointer to be handed through to any callbacks called as a result of this call
* @param packetId Packet ID or 0 if none (e.g. via cluster path)
* @param source Address of sender of chunk or NULL if none (e.g. via cluster path)
* @param chunk Buffer containing chunk
* @param ptr Index of chunk and related fields in packet
* @param ptr Index of chunk and related fields in packet (starting with network ID)
* @param size Size of data in chunk buffer (total, not relative to ptr)
* @return Update ID if update was fully assembled and accepted or 0 otherwise
*/
uint64_t handleConfigChunk(void *tPtr,uint64_t packetId,const Address &source,const Buffer<ZT_PROTO_MAX_PACKET_LENGTH> &chunk,unsigned int ptr);
uint64_t handleConfigChunk(void *tPtr,uint64_t packetId,const Address &source,const Buf<> &chunk,int ptr,int size);
/**
* Set network configuration
@ -374,17 +376,14 @@ private:
Hashtable< MAC,Address > _remoteBridgeRoutes; // remote addresses where given MACs are reachable (for tracking devices behind remote bridges)
NetworkConfig _config;
volatile uint64_t _lastConfigUpdate;
volatile int64_t _lastConfigUpdate;
struct _IncomingConfigChunk
{
ZT_ALWAYS_INLINE _IncomingConfigChunk() : ts(0),updateId(0),haveChunks(0),haveBytes(0),data() {}
uint64_t ts;
ZT_ALWAYS_INLINE _IncomingConfigChunk() : touchCtr(0),updateId(0) {}
uint64_t touchCtr;
uint64_t updateId;
uint64_t haveChunkIds[ZT_NETWORK_MAX_UPDATE_CHUNKS];
unsigned long haveChunks;
unsigned long haveBytes;
Dictionary<ZT_NETWORKCONFIG_DICT_CAPACITY> data;
std::map< int,std::vector<uint8_t> > chunks;
};
_IncomingConfigChunk _incomingConfigChunks[ZT_NETWORK_MAX_INCOMING_UPDATES];


@ -17,33 +17,13 @@
#include "NetworkConfig.hpp"
#include "ScopedPtr.hpp"
#include "Buf.hpp"
namespace ZeroTier {
NetworkConfig::NetworkConfig() :
networkId(0),
timestamp(0),
credentialTimeMaxDelta(0),
revision(0),
issuedTo(),
flags(0),
mtu(0),
multicastLimit(0),
specialistCount(0),
routeCount(0),
staticIpCount(0),
ruleCount(0),
capabilityCount(0),
tagCount(0),
certificateOfOwnershipCount(0),
type(ZT_NETWORK_TYPE_PRIVATE)
{
name[0] = 0;
}
bool NetworkConfig::toDictionary(Dictionary &d,bool includeLegacy) const
{
uint8_t tmp[16384];
uint8_t tmp[ZT_BUF_MEM_SIZE];
try {
d.clear();


@ -34,6 +34,7 @@
#include "Identity.hpp"
#include "Utils.hpp"
#include "Trace.hpp"
#include "TriviallyCopyable.hpp"
namespace ZeroTier {
@ -79,16 +80,10 @@ namespace ZeroTier {
#define ZT_NETWORKCONFIG_SPECIALIST_TYPE_MULTICAST_REPLICATOR 0x0000040000000000ULL
/**
* Device that is allowed to remotely debug connectivity on this network
* Device that is allowed to remotely debug this network and query other peers for e.g. remote trace data
*/
#define ZT_NETWORKCONFIG_SPECIALIST_TYPE_DIAGNOSTICIAN 0x0000080000000000ULL
// Dictionary capacity needed for max size network config
#define ZT_NETWORKCONFIG_DICT_CAPACITY (1024 + (sizeof(ZT_VirtualNetworkRule) * ZT_MAX_NETWORK_RULES) + (sizeof(Capability) * ZT_MAX_NETWORK_CAPABILITIES) + (sizeof(Tag) * ZT_MAX_NETWORK_TAGS) + (sizeof(CertificateOfOwnership) * ZT_MAX_CERTIFICATES_OF_OWNERSHIP))
// Dictionary capacity needed for max size network meta-data
#define ZT_NETWORKCONFIG_METADATA_DICT_CAPACITY 8192
// Fields for meta-data sent with network config requests
// Protocol version (see Packet.hpp)
@ -164,9 +159,9 @@ namespace ZeroTier {
* This is a memcpy()'able structure and is safe (in a crash sense) to modify
* without locks.
*/
struct NetworkConfig
struct NetworkConfig : TriviallyCopyable
{
NetworkConfig();
ZT_ALWAYS_INLINE NetworkConfig() { memoryZero(this); }
/**
* Write this network config to a dictionary for transport
@ -232,7 +227,7 @@ struct NetworkConfig
* @param f Flags (OR of specialist role/type flags)
* @return True if successfully masked or added
*/
bool addSpecialist(const Address &a,const uint64_t f);
bool addSpecialist(const Address &a,uint64_t f);
ZT_ALWAYS_INLINE const Capability *capability(const uint32_t id) const
{
@ -277,6 +272,13 @@ struct NetworkConfig
*/
Address issuedTo;
/**
* Hash of identity public key(s) of node to whom this is issued
*
* TODO
*/
uint8_t issuedToIdentityHash[ZT_IDENTITY_HASH_SIZE];
/**
* Flags (64-bit)
*/


@ -103,7 +103,7 @@ public:
const InetAddress &fromAddr,
uint64_t requestPacketId,
const Identity &identity,
const Dictionary<ZT_NETWORKCONFIG_METADATA_DICT_CAPACITY> &metaData) = 0;
const Dictionary &metaData) = 0;
};
} // namespace ZeroTier


@ -17,9 +17,9 @@
#include <cstdint>
#include <cstring>
#include <cstdlib>
#include <stdexcept>
#include <algorithm>
#include <set>
#include "Constants.hpp"
#include "InetAddress.hpp"
@ -32,6 +32,9 @@ namespace ZeroTier {
class RuntimeEnvironment;
template<unsigned int MF>
class Defragmenter;
/**
* A path across the physical network
*/
@ -39,6 +42,10 @@ class Path
{
friend class SharedPtr<Path>;
// Allow defragmenter to access fragment in flight info stored in Path for performance reasons.
template<unsigned int MF>
friend class Defragmenter;
public:
/**
* Efficient unique key for paths in a Hashtable
@ -75,8 +82,7 @@ public:
_localSocket(l),
_lastIn(0),
_lastOut(0),
_addr(r),
__refCount()
_addr(r)
{
}
@ -156,7 +162,14 @@ private:
int64_t _lastIn;
int64_t _lastOut;
InetAddress _addr;
AtomicCounter __refCount;
// These fields belong to Defragmenter but are kept in Path for performance
// as it's much faster this way than having Defragmenter maintain another
// mapping from paths to inbound message IDs.
std::set<uint64_t> _inboundFragmentedMessages;
Mutex _inboundFragmentedMessages_l;
AtomicCounter<int> __refCount;
};
} // namespace ZeroTier


@ -22,7 +22,6 @@
#include "Utils.hpp"
#include "Identity.hpp"
#include "InetAddress.hpp"
#include "Packet.hpp"
#include "SharedPtr.hpp"
#include "AtomicCounter.hpp"
#include "Hashtable.hpp"
@ -83,6 +82,15 @@ public:
*/
ZT_ALWAYS_INLINE const Identity &identity() const { return _id; }
/**
* @return Copy of current locator
*/
ZT_ALWAYS_INLINE Locator locator() const
{
RWMutex::RLock l(_lock);
return _locator;
}
/**
* Log receipt of an authenticated packet
*
@ -314,7 +322,7 @@ private:
volatile int64_t _lastPrioritizedPaths;
volatile unsigned int _latency;
AtomicCounter __refCount;
AtomicCounter<int> __refCount;
RWMutex _lock; // locks _alivePathCount, _paths, _locator, and _bootstrap.


@ -40,7 +40,7 @@ const uint64_t ZEROES32[4] = { 0,0,0,0 };
* @param in Input key (32 bytes)
* @param out Output buffer (32 bytes)
*/
ZT_ALWAYS_INLINE void _salsa20MangleKey(const uint8_t *const in,uint8_t *const out,const Buf< Header<> > &packet,const unsigned int packetSize)
ZT_ALWAYS_INLINE void _salsa20MangleKey(const uint8_t *const in,uint8_t *const out,const Buf< Header > &packet,const unsigned int packetSize)
{
// IV and source/destination addresses. Using the addresses divides the
// key space into two halves-- A->B and B->A (since order will change).
@ -78,7 +78,7 @@ static std::atomic<unsigned long long> _packetIdCtr(_initPacketID());
} // anonymous namespace
void armor(Buf< Header<> > &packet,const unsigned int packetSize,const uint8_t key[ZT_PEER_SECRET_KEY_LENGTH],const uint8_t cipherSuite)
void _armor(Buf< Header > &packet,const unsigned int packetSize,const uint8_t key[ZT_PEER_SECRET_KEY_LENGTH],const uint8_t cipherSuite)
{
packet.data.fields.flags = (packet.data.fields.flags & 0xc7U) | ((cipherSuite << 3U) & 0x38U); // FFCCCHHH
if (cipherSuite == ZT_PROTO_CIPHER_SUITE__AES_GCM) {
@ -102,7 +102,7 @@ void armor(Buf< Header<> > &packet,const unsigned int packetSize,const uint8_t k
}
}
int dearmor(Buf< Header<> > &packet,const unsigned int packetSize,const uint8_t key[ZT_PEER_SECRET_KEY_LENGTH])
int _dearmor(Buf< Header > &packet,const unsigned int packetSize,const uint8_t key[ZT_PEER_SECRET_KEY_LENGTH])
{
const int cipherSuite = (int)(packet.data.fields.flags & 0x38U);
if (cipherSuite == ZT_PROTO_CIPHER_SUITE__AES_GCM) {
@ -128,7 +128,7 @@ int dearmor(Buf< Header<> > &packet,const unsigned int packetSize,const uint8_t
return cipherSuite;
}
unsigned int compress(Buf< Header<> > &packet,const unsigned int packetSize)
unsigned int _compress(Buf< Header > &packet,const unsigned int packetSize)
{
uint8_t tmp[ZT_BUF_MEM_SIZE + 32];
@ -140,8 +140,7 @@ unsigned int compress(Buf< Header<> > &packet,const unsigned int packetSize)
reinterpret_cast<const char *>(packet.data.bytes + ZT_PROTO_PACKET_PAYLOAD_START),
reinterpret_cast<char *>(tmp),
(int)uncompressedLen,
sizeof(tmp) - ZT_PROTO_PACKET_PAYLOAD_START,
2);
sizeof(tmp) - ZT_PROTO_PACKET_PAYLOAD_START);
if ((compressedLen > 0)&&(compressedLen < uncompressedLen)) {
packet.data.fields.verb |= ZT_PROTO_VERB_FLAG_COMPRESSED;
memcpy(packet.data.bytes + ZT_PROTO_PACKET_PAYLOAD_START,tmp,compressedLen);
@ -151,7 +150,7 @@ unsigned int compress(Buf< Header<> > &packet,const unsigned int packetSize)
return packetSize;
}
int uncompress(Buf< Header<> > &packet,const unsigned int packetSize)
int _uncompress(Buf< Header > &packet,const unsigned int packetSize)
{
uint8_t tmp[ZT_BUF_MEM_SIZE];


@ -47,8 +47,7 @@
* + Tags and Capabilities
* + inline push of CertificateOfMembership deprecated
* 9 - 1.2.0 ... 1.2.14
* 10 - 1.4.0 ... 1.6.0
* + Multipath capability and load balancing
* 10 - 1.4.0 ... 1.4.6
* 11 - 2.0.0 ... CURRENT
* + Peer-to-peer multicast replication
* + Old planet/moon stuff is DEAD!
@ -58,13 +57,21 @@
*/
#define ZT_PROTO_VERSION 11
/**
* Minimum supported protocol version
*
* As of v2 we don't "officially" support anything older than 1.2.14, but this
* is the hard cutoff before which peers will be flat out rejected.
*/
#define ZT_PROTO_VERSION_MIN 6
/**
* Packet buffer size (can be changed)
*/
#define ZT_PROTO_MAX_PACKET_LENGTH (ZT_MAX_PACKET_FRAGMENTS * ZT_DEFAULT_PHYSMTU)
/**
* Minimum viable packet length (a.k.a. header length)
* Minimum viable packet length (outer header + verb)
*/
#define ZT_PROTO_MIN_PACKET_LENGTH 28
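The 28 is just the packed outer header plus the verb byte; a quick sanity check against the Header struct defined later in this file (sketch only, since static_assert assumes C++11 is available):
// 8 (packet ID) + 5 (destination) + 5 (source) + 1 (flags) + 8 (MAC) + 1 (verb) = 28
static_assert(sizeof(Protocol::Header) == ZT_PROTO_MIN_PACKET_LENGTH,"unexpected Header size");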
@ -73,7 +80,6 @@
*/
#define ZT_PROTO_PACKET_ENCRYPTED_SECTION_START 27
/**
* Index at which packet payload begins (after verb)
*/
@ -106,9 +112,9 @@
#define ZT_PROTO_CIPHER_SUITE__NONE 2
/**
* AES-GCM with AES-256
* AES-GCM-NRH (AES-GCM with nonce reuse hardening) w/AES-256
*/
#define ZT_PROTO_CIPHER_SUITE__AES_GCM 3
#define ZT_PROTO_CIPHER_SUITE__AES_GCM_NRH 3
/**
* Magic number indicating a fragment
@ -116,7 +122,7 @@
#define ZT_PACKET_FRAGMENT_INDICATOR 0xff
/**
* Minimum viable fragment length
* Minimum viable length for a fragment
*/
#define ZT_PROTO_MIN_FRAGMENT_LENGTH 16
@ -136,37 +142,37 @@
#define ZT_PROTO_VERB_FLAG_COMPRESSED 0x80
/**
* Signed locator for this node
* HELLO exchange meta-data: signed locator for this node
*/
#define ZT_PROTO_HELLO_NODE_META_LOCATOR "l"
/**
* Ephemeral C25519 public key
* HELLO exchange meta-data: ephemeral C25519 public key
*/
#define ZT_PROTO_HELLO_NODE_META_EPHEMERAL_KEY_C25519 "e0"
/**
* Ephemeral NIST P-384 public key
* HELLO exchange meta-data: ephemeral NIST P-384 public key
*/
#define ZT_PROTO_HELLO_NODE_META_EPHEMERAL_KEY_P384 "e1"
/**
* Addresses of ZeroTier nodes to whom this node will relay or one entry for 0000000000 if promiscuous.
* HELLO exchange meta-data: address(es) of nodes to whom this node will relay
*/
#define ZT_PROTO_HELLO_NODE_META_WILL_RELAY_TO "r"
#define ZT_PROTO_HELLO_NODE_META_WILL_RELAY_TO "wr"
/**
* X coordinate of your node (sent in OK(HELLO))
* HELLO exchange meta-data: X coordinate of your node (sent in OK(HELLO))
*/
#define ZT_PROTO_HELLO_NODE_META_LOCATION_X "gX"
/**
* Y coordinate of your node (sent in OK(HELLO))
* HELLO exchange meta-data: Y coordinate of your node (sent in OK(HELLO))
*/
#define ZT_PROTO_HELLO_NODE_META_LOCATION_Y "gY"
/**
* Z coordinate of your node (sent in OK(HELLO))
* HELLO exchange meta-data: Z coordinate of your node (sent in OK(HELLO))
*/
#define ZT_PROTO_HELLO_NODE_META_LOCATION_Z "gZ"
@ -206,11 +212,11 @@
* first fragment it receives.
*
* Fragments are sent with the following format:
* <[8] packet ID of packet whose fragment this belongs to>
* <[8] packet ID of packet to which this fragment belongs>
* <[5] destination ZT address>
* <[1] 0xff, a reserved address, signals that this isn't a normal packet>
* <[1] 0xff here signals that this is a fragment>
* <[1] total fragments (most significant 4 bits), fragment no (LS 4 bits)>
* <[1] ZT hop count (top 5 bits unused and must be zero)>
* <[1] ZT hop count (least significant 3 bits; others are reserved)>
* <[...] fragment data>
*
* The protocol supports a maximum of 16 fragments. If a fragment is received
@ -452,38 +458,12 @@ enum Verb
* <[8] 64-bit timestamp of netconf we currently have>
*
* This message requests network configuration from a node capable of
* providing it.
*
* Responses to this are always whole configs intended for the recipient.
* For patches and other updates a NETWORK_CONFIG is sent instead.
*
* It would be valid and correct as of 1.2.0 to use NETWORK_CONFIG always,
* but OK(NETWORK_CONFIG_REQUEST) should be sent for compatibility.
* providing it. Responses can be sent as OK(NETWORK_CONFIG_REQUEST)
* or NETWORK_CONFIG messages. NETWORK_CONFIG can also be sent by
* network controllers or other nodes unsolicited.
*
* OK response payload:
* <[8] 64-bit network ID>
* <[2] 16-bit length of network configuration dictionary chunk>
* <[...] network configuration dictionary (may be incomplete)>
* [ ... end of legacy single chunk response ... ]
* <[1] 8-bit flags>
* <[8] 64-bit config update ID (should never be 0)>
* <[4] 32-bit total length of assembled dictionary>
* <[4] 32-bit index of chunk>
* [ ... end signed portion ... ]
* <[1] 8-bit chunk signature type>
* <[2] 16-bit length of chunk signature>
* <[...] chunk signature>
*
* The chunk signature signs the entire payload of the OK response.
* Currently only one signature type is supported: ed25519 (1).
*
* Each config chunk is signed to prevent memory exhaustion or
* traffic crowding DOS attacks against config fragment assembly.
*
* If the packet is from the network controller it is permitted to end
* before the config update ID or other chunking related or signature
* fields. This is to support older controllers that don't include
* these fields and may be removed in the future.
* (same as VERB_NETWORK_CONFIG payload)
*
* ERROR response payload:
* <[8] 64-bit network ID>
@ -500,19 +480,19 @@ enum Verb
* <[4] 32-bit total length of assembled dictionary>
* <[4] 32-bit index of chunk>
* [ ... end signed portion ... ]
* <[1] 8-bit chunk signature type>
* <[1] 8-bit reserved field (legacy)>
* <[2] 16-bit length of chunk signature>
* <[...] chunk signature>
*
* This is a direct push variant for network config updates. It otherwise
* carries the same payload as OK(NETWORK_CONFIG_REQUEST) and has the same
* semantics.
*
* The legacy mode missing the additional chunking fields is not supported
* here.
* Network configurations can come from network controllers or theoretically
* any other node, but each chunk must be signed by the network controller
* that generated it originally. The config update ID is arbitrary and is merely
* used by the receiver to group chunks. Chunk indexes must be sequential and
 * the assembled chunks must yield a network config whose size equals the
 * specified total length.
*
* Flags:
* 0x01 - Use fast propagation
* 0x01 - Use fast propagation -- rumor mill flood this chunk to other members
*
* An OK should be sent if the config is successfully received and
* accepted.
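A hedged sketch of walking this payload as laid out above; bounds checks are omitted and the loadBigEndian-style helpers are assumptions:
// Illustrative chunk walk; p points at the first payload byte (the network ID).
const uint8_t *p = payload;
const uint64_t networkId   = Utils::loadBigEndian<uint64_t>(p); p += 8;
const unsigned int dictLen = Utils::loadBigEndian<uint16_t>(p); p += 2;
const uint8_t *dictChunk   = p; p += dictLen;
const uint8_t flags        = *(p++);
const uint64_t updateId    = Utils::loadBigEndian<uint64_t>(p); p += 8;
const uint32_t totalLen    = Utils::loadBigEndian<uint32_t>(p); p += 4;
const uint32_t chunkIndex  = Utils::loadBigEndian<uint32_t>(p); p += 4;
if ((flags & NETWORK_CONFIG_FLAG_FAST_PROPAGATE) != 0) {
	// rumor mill: replicate this chunk verbatim to other known network members
}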
@ -688,12 +668,15 @@ enum ErrorCode
/* Tried to join network, but you're not a member */
ERROR_NETWORK_ACCESS_DENIED_ = 0x07, /* extra _ at end to avoid Windows name conflict */
/* Cannot deliver a forwarded ZeroTier packet (e.g. hops exceeded, no routes) */
/* Cannot deliver a forwarded ZeroTier packet (for any reason) */
ERROR_CANNOT_DELIVER = 0x09
};
/**
* EXT_FRAME subtypes, which are packed into three bits in the flags field.
*
* This allows the node to know whether this is a normal frame or one generated
* by a special tee or redirect type flow rule.
*/
enum ExtFrameSubtype
{
@ -711,11 +694,30 @@ enum ExtFrameSubtype
*/
enum ExtFrameFlag
{
/**
 * A certificate of membership was included (no longer used but still accepted)
*/
EXT_FRAME_FLAG_COM_ATTACHED_deprecated = 0x01,
// bits 0x02, 0x04, and 0x08 are occupied by the ExtFrameSubtype
// bits 0x02, 0x04, and 0x08 are occupied by the 3-bit ExtFrameSubtype value.
/**
* An OK(EXT_FRAME) acknowledgement was requested by the sender.
*/
EXT_FRAME_FLAG_ACK_REQUESTED = 0x10
};
/**
* NETWORK_CONFIG (or OK(NETWORK_CONFIG_REQUEST)) flags
*/
enum NetworkConfigFlag
{
/**
* Indicates that this network config chunk should be fast propagated via rumor mill flooding.
*/
NETWORK_CONFIG_FLAG_FAST_PROPAGATE = 0x01
};
/****************************************************************************/
/*
@ -727,50 +729,114 @@ enum ExtFrameFlag
* All fields larger than one byte are in big-endian byte order on the wire.
*/
ZT_PACKED_STRUCT(struct HELLO {
/**
* Normal packet header
*
*/
ZT_PACKED_STRUCT(struct Header
{
uint64_t packetId;
uint8_t destination[5];
uint8_t source[5];
uint8_t flags;
uint64_t mac;
// --- begin encrypted envelope ---
uint8_t verb;
});
/**
* Packet fragment header
*/
ZT_PACKED_STRUCT(struct FragmentHeader
{
uint64_t packetId;
uint8_t destination[5];
uint8_t fragmentIndicator; // always 0xff for fragments
uint8_t counts; // total: most significant four bits, number: least significant four bits
uint8_t hops; // top 5 bits unused and must be zero
uint8_t p[];
});
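A small hedged sketch of unpacking the counts byte (helper names invented for illustration):
// Total fragment count is the high nibble; this fragment's index is the low nibble.
ZT_ALWAYS_INLINE unsigned int fragmentTotal(const FragmentHeader &fh) { return ((unsigned int)(fh.counts >> 4U) & 0x0fU); }
ZT_ALWAYS_INLINE unsigned int fragmentNumber(const FragmentHeader &fh) { return ((unsigned int)fh.counts & 0x0fU); }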
ZT_PACKED_STRUCT(struct HELLO
{
Header h;
uint8_t versionProtocol;
uint8_t versionMajor;
uint8_t versionMinor;
uint16_t versionRev;
uint64_t timestamp;
uint8_t p[];
});
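A hedged sketch of filling the fixed HELLO fields; the version numbers are placeholders and framing/armoring happens elsewhere:
// Illustrative only; multi-byte fields are big-endian on the wire.
Protocol::HELLO hello;
hello.versionProtocol = ZT_PROTO_VERSION;
hello.versionMajor = 2;   // placeholder
hello.versionMinor = 0;   // placeholder
hello.versionRev = Utils::hton((uint16_t)0);
hello.timestamp = Utils::hton((uint64_t)now); // 'now' assumed supplied by the caller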
ZT_PACKED_STRUCT(struct RENDEZVOUS {
ZT_PACKED_STRUCT(struct RENDEZVOUS
{
Header h;
uint8_t flags;
uint8_t peerAddress[5];
uint16_t port;
uint8_t addressLength;
uint8_t address[16];
uint8_t address[];
});
ZT_PACKED_STRUCT(struct FRAME {
ZT_PACKED_STRUCT(struct FRAME
{
Header h;
uint64_t networkId;
uint16_t etherType;
uint8_t data[];
});
ZT_PACKED_STRUCT(struct EXT_FRAME {
ZT_PACKED_STRUCT(struct EXT_FRAME
{
Header h;
uint64_t networkId;
uint8_t flags;
uint8_t destMac[6];
uint8_t sourceMac[6];
uint16_t etherType;
uint8_t data[];
uint8_t p[];
});
ZT_PACKED_STRUCT(struct MULTICAST_LIKE_Entry {
uint64_t networkId;
uint8_t mac[6];
uint32_t adi;
});
ZT_PACKED_STRUCT(struct MULTICAST_LIKE
{
ZT_PACKED_STRUCT(struct Entry
{
uint64_t networkId;
uint8_t mac[6];
uint32_t adi;
});
ZT_PACKED_STRUCT(struct MULTICAST_LIKE {
MULTICAST_LIKE_Entry groups[];
Header h;
Entry groups[];
});
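Because Entry is packed and groups[] runs to the end of the packet, the entry count falls out of the payload size; a hedged sketch assuming packetSize includes the outer header:
// Illustrative: number of (networkId, MAC, ADI) entries in a MULTICAST_LIKE.
const unsigned int entryCount =
	(packetSize - sizeof(Protocol::Header)) / sizeof(Protocol::MULTICAST_LIKE::Entry);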
namespace OK {
ZT_PACKED_STRUCT(struct HELLO {
/**
* OK response header
*
*/
ZT_PACKED_STRUCT(struct Header
{
uint8_t inReVerb;
uint64_t inRePacketId;
});
ZT_PACKED_STRUCT(struct WHOIS
{
Protocol::Header h;
OK::Header oh;
});
ZT_PACKED_STRUCT(struct ECHO
{
Protocol::Header h;
OK::Header oh;
});
ZT_PACKED_STRUCT(struct HELLO
{
Protocol::Header h;
OK::Header oh;
uint64_t timestampEcho;
uint8_t versionProtocol;
uint8_t versionMajor;
@ -778,7 +844,10 @@ ZT_PACKED_STRUCT(struct HELLO {
uint16_t versionRev;
});
ZT_PACKED_STRUCT(struct EXT_FRAME {
ZT_PACKED_STRUCT(struct EXT_FRAME
{
Protocol::Header h;
OK::Header oh;
uint64_t networkId;
uint8_t flags;
uint8_t destMac[6];
@ -786,20 +855,6 @@ ZT_PACKED_STRUCT(struct EXT_FRAME {
uint16_t etherType;
});
/**
* OK response header
*
* The OK header comes after the packet header but before type-specific payloads.
*
* @tparam PT OK payload type (default: uint8_t[])
*/
template<typename PT = uint8_t[]>
ZT_PACKED_STRUCT(struct Header {
uint8_t inReVerb;
uint64_t inRePacketId;
PT p;
});
} // namespace OK
namespace ERROR {
@ -811,68 +866,58 @@ namespace ERROR {
*
*/
template<typename PT = uint8_t[]>
ZT_PACKED_STRUCT(struct Header {
uint8_t inReVerb;
ZT_PACKED_STRUCT(struct Header
{
int8_t inReVerb;
uint64_t inRePacketId;
uint8_t error;
PT p;
});
ZT_PACKED_STRUCT(struct NEED_MEMBERSHIP_CERTIFICATE
{
Protocol::Header h;
ERROR::Header eh;
uint64_t networkId;
});
ZT_PACKED_STRUCT(struct UNSUPPORTED_OPERATION__NETWORK_CONFIG_REQUEST
{
Protocol::Header h;
ERROR::Header eh;
uint64_t networkId;
});
} // namespace ERROR
/**
* Normal packet header
*
* @tparam PT Packet payload type (default: uint8_t[])
*/
template<typename PT = uint8_t[]>
ZT_PACKED_STRUCT(struct Header {
uint64_t packetId;
uint8_t destination[5];
uint8_t source[5];
uint8_t flags;
uint64_t mac;
// --- begin encrypted envelope ---
uint8_t verb;
PT p;
});
/**
* Packet fragment header
*/
ZT_PACKED_STRUCT(struct FragmentHeader {
uint64_t packetId;
uint8_t destination[5];
uint8_t fragmentIndicator; // always 0xff for fragments
uint8_t counts; // total: most significant four bits, number: least significant four bits
uint8_t hops; // top 5 bits unused and must be zero
uint8_t p[];
});
/****************************************************************************/
/**
* Increment the 3-bit hops field embedded in the packet flags field
*
* @return New hop count (can be greater than allowed if there is an overflow)
*/
template<typename X>
ZT_ALWAYS_INLINE unsigned int incrementPacketHops(Buf< Header<X> > &packet)
ZT_ALWAYS_INLINE unsigned int incrementPacketHops(Header &h)
{
uint8_t f = packet.data.fields.flags;
uint8_t h = f;
f &= 0xf8U;
++h;
packet.data.fields.flags = f | (h & 0x07U);
return h;
uint8_t flags = h.flags;
uint8_t hops = flags;
flags &= 0xf8U;
++hops;
h.flags = flags | (hops & 0x07U);
return (unsigned int)hops;
}
/**
* @return 3-bit hops field embedded in packet flags field
*/
template<typename X>
ZT_ALWAYS_INLINE unsigned int packetHops(Buf< Header<X> > &packet) const { return (packet.data.fields.flags & 0x07U); }
ZT_ALWAYS_INLINE uint8_t packetHops(const Header &h) { return (h.flags & 0x07U); }
/**
* @return 3-bit cipher field embedded in packet flags field
*/
ZT_ALWAYS_INLINE uint8_t packetCipher(const Header &h) { return ((h.flags >> 3U) & 0x07U); }
void _armor(Buf< Header > &packet,unsigned int packetSize,const uint8_t key[ZT_PEER_SECRET_KEY_LENGTH],uint8_t cipherSuite);
int _dearmor(Buf< Header > &packet,unsigned int packetSize,const uint8_t key[ZT_PEER_SECRET_KEY_LENGTH]);
unsigned int _compress(Buf< Header > &packet,unsigned int packetSize);
int _uncompress(Buf< Header > &packet,unsigned int packetSize);
/**
* Armor a packet for transport
@ -882,7 +927,9 @@ ZT_ALWAYS_INLINE unsigned int packetHops(Buf< Header<X> > &packet) const { retur
* @param key 256-bit symmetric key
* @param cipherSuite Cipher suite to apply
*/
void armor(Buf< Header<> > &packet,unsigned int packetSize,const uint8_t key[ZT_PEER_SECRET_KEY_LENGTH],uint8_t cipherSuite);
template<typename X>
static ZT_ALWAYS_INLINE void armor(Buf< X > &packet,unsigned int packetSize,const uint8_t key[ZT_PEER_SECRET_KEY_LENGTH],uint8_t cipherSuite)
{ _armor(reinterpret_cast< Buf< Header > & >(packet),packetSize,key,cipherSuite); }
/**
* Dearmor a packet and check message authentication code
@ -895,7 +942,9 @@ void armor(Buf< Header<> > &packet,unsigned int packetSize,const uint8_t key[ZT_
* @param key 256-bit symmetric key
* @return Cipher suite or -1 if MAC validation failed
*/
int dearmor(Buf< Header<> > &packet,unsigned int packetSize,const uint8_t key[ZT_PEER_SECRET_KEY_LENGTH]);
template<typename X>
static ZT_ALWAYS_INLINE int dearmor(Buf< X > &packet,unsigned int packetSize,const uint8_t key[ZT_PEER_SECRET_KEY_LENGTH])
{ return _dearmor(reinterpret_cast< Buf < Header > & >(packet),packetSize,key); }
/**
* Compress packet payload
@ -904,7 +953,9 @@ int dearmor(Buf< Header<> > &packet,unsigned int packetSize,const uint8_t key[ZT
* @param packetSize Original packet size
 * @return New packet size (returns original size if compression didn't help, in which case the packet is unmodified)
*/
unsigned int compress(Buf< Header<> > &packet,unsigned int packetSize);
template<typename X>
static ZT_ALWAYS_INLINE unsigned int compress(Buf< X > &packet,unsigned int packetSize)
{ return _compress(reinterpret_cast< Buf< Header > & >(packet),packetSize); }
/**
* Uncompress packet payload (if compressed)
@ -913,7 +964,9 @@ unsigned int compress(Buf< Header<> > &packet,unsigned int packetSize);
* @param packetSize Original packet size
* @return New packet size or -1 on decompression error (returns original packet size if packet wasn't compressed)
*/
int uncompress(Buf< Header<> > &packet,unsigned int packetSize);
template<typename X>
static ZT_ALWAYS_INLINE int uncompress(Buf< X > &packet,unsigned int packetSize)
{ return _uncompress(reinterpret_cast< Buf< Header > & >(packet),packetSize); }
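A hedged end-to-end sketch of the send-side use of these typed wrappers; the key, suite choice, and packet construction are placeholders:
// Illustrative send path: compress, then armor in place.
Buf<> pkt; // raw buffer; the wrappers reinterpret it as Buf<Header> internally
unsigned int size = /* ...build header and payload... */ ZT_PROTO_MIN_PACKET_LENGTH;
size = Protocol::compress(pkt,size); // returns original size if compression doesn't help
Protocol::armor(pkt,size,key,ZT_PROTO_CIPHER_SUITE__AES_GCM_NRH);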
/**
* Get a sequential non-repeating packet ID for the next packet (thread-safe)


@ -47,18 +47,7 @@ class Revocation : public Credential
public:
static ZT_ALWAYS_INLINE ZT_CredentialType credentialType() { return ZT_CREDENTIAL_TYPE_REVOCATION; }
ZT_ALWAYS_INLINE Revocation() :
_id(0),
_credentialId(0),
_networkId(0),
_threshold(0),
_flags(0),
_target(),
_signedBy(),
_type(ZT_CREDENTIAL_TYPE_NULL),
_signatureLength(0)
{
}
ZT_ALWAYS_INLINE Revocation() { memoryZero(this); }
/**
* @param i ID (arbitrary for revocations, currently random)
@ -109,7 +98,7 @@ public:
static ZT_ALWAYS_INLINE int marshalSizeMax() { return ZT_REVOCATION_MARSHAL_SIZE_MAX; }
int marshal(uint8_t data[ZT_REVOCATION_MARSHAL_SIZE_MAX],bool forSign = false) const;
int unmarshal(const uint8_t *restrict data,const int len);
int unmarshal(const uint8_t *restrict data,int len);
private:
uint32_t _id;


@ -20,6 +20,7 @@
#include "Constants.hpp"
#include "Utils.hpp"
#include "TriviallyCopyable.hpp"
#if (defined(__amd64) || defined(__amd64__) || defined(__x86_64) || defined(__x86_64__) || defined(__AMD64) || defined(__AMD64__) || defined(_M_X64))
#include <xmmintrin.h>
@ -32,7 +33,7 @@ namespace ZeroTier {
/**
* Salsa20 stream cipher
*/
class Salsa20
class Salsa20 : public TriviallyCopyable
{
public:
ZT_ALWAYS_INLINE Salsa20() {}


@ -15,6 +15,7 @@
#define ZT_SCOPEDPTR_HPP
#include "Constants.hpp"
#include "TriviallyCopyable.hpp"
namespace ZeroTier {
@ -24,7 +25,7 @@ namespace ZeroTier {
* This is used in the core to avoid requiring C++11 and because auto_ptr is weird.
*/
template<typename T>
class ScopedPtr
class ScopedPtr : public TriviallyCopyable
{
public:
explicit ZT_ALWAYS_INLINE ScopedPtr(T *const p) : _p(p) {}
@ -51,6 +52,7 @@ private:
ZT_ALWAYS_INLINE ScopedPtr() {}
ZT_ALWAYS_INLINE ScopedPtr(const ScopedPtr &p) : _p(nullptr) {}
ZT_ALWAYS_INLINE ScopedPtr &operator=(const ScopedPtr &p) { return *this; }
T *const _p;
};


@ -30,7 +30,7 @@ class SharedPtr
{
public:
ZT_ALWAYS_INLINE SharedPtr() : _ptr((T *)0) {}
ZT_ALWAYS_INLINE SharedPtr(T *obj) : _ptr(obj) { ++obj->__refCount; }
explicit ZT_ALWAYS_INLINE SharedPtr(T *obj) : _ptr(obj) { ++obj->__refCount; }
ZT_ALWAYS_INLINE SharedPtr(const SharedPtr &sp) : _ptr(sp._getAndInc()) {}
ZT_ALWAYS_INLINE ~SharedPtr()
@ -81,7 +81,22 @@ public:
with._ptr = tmp;
}
ZT_ALWAYS_INLINE operator bool() const { return (_ptr != (T *)0); }
/**
 * Take the object from another pointer, nulling the source (avoids reference count changes)
*
* @param from Origin pointer; will be zeroed
*/
ZT_ALWAYS_INLINE void move(SharedPtr &from)
{
if (_ptr) {
if (--_ptr->__refCount <= 0)
delete _ptr;
}
_ptr = from._ptr;
from._ptr = nullptr;
}
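A hedged usage sketch; the Peer construction is hypothetical:
SharedPtr<Peer> a(new Peer(/* ... */)); // hypothetical construction
SharedPtr<Peer> b;
b.move(a); // b now holds the reference, a is null, and __refCount never changed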
ZT_ALWAYS_INLINE operator bool() const { return (_ptr != nullptr); }
ZT_ALWAYS_INLINE T &operator*() const { return *_ptr; }
ZT_ALWAYS_INLINE T *operator->() const { return _ptr; }


@ -147,7 +147,7 @@ private:
Mutex lock;
};
RXQueueEntry _rxQueue[ZT_RX_QUEUE_SIZE];
AtomicCounter _rxQueuePtr;
AtomicCounter<unsigned int> _rxQueuePtr;
// Returns matching or next available RX queue entry
ZT_ALWAYS_INLINE RXQueueEntry *_findRXQueueEntry(uint64_t packetId)


@ -55,14 +55,7 @@ class Tag : public Credential
public:
static ZT_ALWAYS_INLINE ZT_CredentialType credentialType() { return ZT_CREDENTIAL_TYPE_TAG; }
ZT_ALWAYS_INLINE Tag() :
_id(0),
_value(0),
_networkId(0),
_ts(0),
_signatureLength(0)
{
}
ZT_ALWAYS_INLINE Tag() { memoryZero(this); }
/**
* @param nwid Network ID


@ -23,6 +23,8 @@
#define CONST_TO_BE_UINT16(x) ((uint16_t)(x))
#endif
// NOTE: packet IDs are always handled in network byte order, so no need to convert them.
namespace ZeroTier {
Trace::Trace(const RuntimeEnvironment *renv) :
@ -72,7 +74,7 @@ void Trace::_tryingNewPath(
memcpy(ev.identityHash,trying.hash(),48);
physicalAddress.forTrace(ev.physicalAddress);
triggerAddress.forTrace(ev.triggerAddress);
ev.triggeringPacketId = Utils::hton(triggeringPacketId);
ev.triggeringPacketId = triggeringPacketId;
ev.triggeringPacketVerb = triggeringPacketVerb;
ev.triggeredByAddress = Utils::hton(triggeredByAddress);
if (triggeredByIdentityHash)
@ -93,7 +95,7 @@ void Trace::_learnedNewPath(
ZT_TraceEvent_VL1_LEARNED_NEW_PATH ev;
ev.evSize = CONST_TO_BE_UINT16(sizeof(ev));
ev.evType = CONST_TO_BE_UINT16(ZT_TRACE_VL1_LEARNED_NEW_PATH);
ev.packetId = Utils::hton(packetId);
ev.packetId = packetId;
ev.address = Utils::hton(peerIdentity.address().toInt());
memcpy(ev.identityHash,peerIdentity.hash(),48);
physicalAddress.forTrace(ev.physicalAddress);
@ -115,10 +117,15 @@ void Trace::_incomingPacketDropped(
ZT_TraceEvent_VL1_INCOMING_PACKET_DROPPED ev;
ev.evSize = CONST_TO_BE_UINT16(sizeof(ev));
ev.evType = CONST_TO_BE_UINT16(ZT_TRACE_VL1_INCOMING_PACKET_DROPPED);
ev.packetId = Utils::hton(packetId);
ev.packetId = packetId;
ev.networkId = Utils::hton(networkId);
ev.address = Utils::hton(peerIdentity.address().toInt());
memcpy(ev.identityHash,peerIdentity.hash(),48);
if (peerIdentity) {
ev.address = Utils::hton(peerIdentity.address().toInt());
memcpy(ev.identityHash,peerIdentity.hash(),48);
} else {
ev.address = 0;
memset(ev.identityHash,0,48);
}
physicalAddress.forTrace(ev.physicalAddress);
ev.hops = hops;
ev.verb = verb;


@ -22,7 +22,6 @@
#include "Constants.hpp"
#include "SharedPtr.hpp"
#include "Mutex.hpp"
#include "Packet.hpp"
#include "InetAddress.hpp"
#include "Address.hpp"
#include "MAC.hpp"

node/TriviallyCopyable.hpp Normal file

@ -0,0 +1,174 @@
/*
* Copyright (c)2013-2020 ZeroTier, Inc.
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2024-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
*/
/****/
#ifndef ZT_TRIVIALLYCOPYABLE_HPP
#define ZT_TRIVIALLYCOPYABLE_HPP
#include "Constants.hpp"
#include "Utils.hpp"
#include <cstring>
#include <cstdlib>
namespace ZeroTier {
/**
* This is a class that others can inherit from to tag themselves as safe to abuse in C-like ways with memcpy, etc.
*
* Later versions of C++ have a built-in auto-detected notion like this, but
* this is more explicit and its use will make audits for memory safety
* a lot easier.
*/
class TriviallyCopyable
{
public:
/**
* Be absolutely sure a TriviallyCopyable object is zeroed using Utils::burn()
*
* @tparam T Automatically inferred type of object
* @param obj Any TriviallyCopyable object
*/
template<typename T>
static ZT_ALWAYS_INLINE void memoryBurn(T *obj)
{
TriviallyCopyable *const tmp = obj;
Utils::burn(tmp,sizeof(T));
}
/**
* Be absolutely sure a TriviallyCopyable object is zeroed using Utils::burn()
*
* @tparam T Automatically inferred type of object
* @param obj Any TriviallyCopyable object
*/
template<typename T>
static ZT_ALWAYS_INLINE void memoryBurn(T &obj)
{
TriviallyCopyable *const tmp = &obj;
Utils::burn(tmp,sizeof(T));
}
/**
* Zero a TriviallyCopyable object
*
* @tparam T Automatically inferred type of object
* @param obj Any TriviallyCopyable object
*/
template<typename T>
static ZT_ALWAYS_INLINE void memoryZero(T *obj)
{
TriviallyCopyable *const tmp = obj;
memset(tmp,0,sizeof(T));
}
/**
* Zero a TriviallyCopyable object
*
* @tparam T Automatically inferred type of object
* @param obj Any TriviallyCopyable object
*/
template<typename T>
static ZT_ALWAYS_INLINE void memoryZero(T &obj)
{
TriviallyCopyable *const tmp = &obj;
memset(tmp,0,sizeof(T));
}
/**
* Copy any memory over a TriviallyCopyable object
*
* @tparam T Automatically inferred type of destination
* @param dest Any TriviallyCopyable object
 * @param src Source memory of at least sizeof(T) bytes
*/
template<typename T>
static ZT_ALWAYS_INLINE void memoryCopyUnsafe(T *dest,const void *src)
{
TriviallyCopyable *const tmp = dest;
memcpy(tmp,src,sizeof(T));
}
/**
* Copy any memory over a TriviallyCopyable object
*
* @tparam T Automatically inferred type of destination
* @param dest Any TriviallyCopyable object
 * @param src Source memory of at least sizeof(T) bytes
*/
template<typename T>
static ZT_ALWAYS_INLINE void memoryCopyUnsafe(T &dest,const void *src)
{
TriviallyCopyable *const tmp = &dest;
memcpy(tmp,src,sizeof(T));
}
/**
* Copy a TriviallyCopyable object
*
* @tparam T Automatically inferred type of destination
* @param dest Destination TriviallyCopyable object
* @param src Source TriviallyCopyable object
*/
template<typename T>
static ZT_ALWAYS_INLINE void memoryCopy(T *dest,const T *src)
{
TriviallyCopyable *const tmp = dest;
memcpy(tmp,src,sizeof(T));
}
/**
* Copy a TriviallyCopyable object
*
* @tparam T Automatically inferred type of destination
* @param dest Destination TriviallyCopyable object
* @param src Source TriviallyCopyable object
*/
template<typename T>
static ZT_ALWAYS_INLINE void memoryCopy(T *dest,const T &src)
{
TriviallyCopyable *const tmp = dest;
memcpy(tmp,&src,sizeof(T));
}
/**
* Copy a TriviallyCopyable object
*
* @tparam T Automatically inferred type of destination
* @param dest Destination TriviallyCopyable object
* @param src Source TriviallyCopyable object
*/
template<typename T>
static ZT_ALWAYS_INLINE void memoryCopy(T &dest,const T *src)
{
TriviallyCopyable *const tmp = &dest;
memcpy(tmp,src,sizeof(T));
}
/**
* Copy a TriviallyCopyable object
*
* @tparam T Automatically inferred type of destination
* @param dest Destination TriviallyCopyable object
* @param src Source TriviallyCopyable object
*/
template<typename T>
static ZT_ALWAYS_INLINE void memoryCopy(T &dest,const T &src)
{
TriviallyCopyable *const tmp = &dest;
memcpy(tmp,&src,sizeof(T));
}
};
} // namespace ZeroTier
#endif
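A hedged usage sketch (the record type is invented for illustration):
// Hypothetical record opting in to raw-memory handling.
struct ExampleRecord : public TriviallyCopyable
{
	uint64_t id;
	uint8_t hash[48];
};

ExampleRecord a,b;
TriviallyCopyable::memoryZero(a);   // a memset(0) that only compiles for tagged types
a.id = 1;
TriviallyCopyable::memoryCopy(b,a); // memcpy() with the same compile-time guarantee
TriviallyCopyable::memoryBurn(a);   // Utils::burn() for key material and other secrets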


@ -195,24 +195,6 @@ static ZT_ALWAYS_INLINE char *stok(char *str,const char *delim,char **saveptr)
#endif
}
#if 0
static ZT_ALWAYS_INLINE int strToInt(const char *s) { return (int)strtol(s,(char **)0,10); }
static ZT_ALWAYS_INLINE unsigned long strToULong(const char *s) { return strtoul(s,(char **)0,10); }
static ZT_ALWAYS_INLINE long strToLong(const char *s) { return strtol(s,(char **)0,10); }
static ZT_ALWAYS_INLINE long long strTo64(const char *s)
{
#ifdef __WINDOWS__
return (long long)_strtoi64(s,(char **)0,10);
#else
return strtoll(s,(char **)0,10);
#endif
}
static ZT_ALWAYS_INLINE unsigned int hexStrToUInt(const char *s) { return (unsigned int)strtoul(s,(char **)0,16); }
static ZT_ALWAYS_INLINE int hexStrToInt(const char *s) { return (int)strtol(s,(char **)0,16); }
static ZT_ALWAYS_INLINE unsigned long hexStrToULong(const char *s) { return strtoul(s,(char **)0,16); }
static ZT_ALWAYS_INLINE long hexStrToLong(const char *s) { return strtol(s,(char **)0,16); }
#endif
static ZT_ALWAYS_INLINE unsigned int strToUInt(const char *s) { return (unsigned int)strtoul(s,nullptr,10); }
static ZT_ALWAYS_INLINE unsigned long long strToU64(const char *s)
@ -224,15 +206,6 @@ static ZT_ALWAYS_INLINE unsigned long long strToU64(const char *s)
#endif
}
static ZT_ALWAYS_INLINE long long hexStrTo64(const char *s)
{
#ifdef __WINDOWS__
return (long long)_strtoi64(s,(char **)0,16);
#else
return strtoll(s,nullptr,16);
#endif
}
static ZT_ALWAYS_INLINE unsigned long long hexStrToU64(const char *s)
{
#ifdef __WINDOWS__
@ -426,37 +399,6 @@ static ZT_ALWAYS_INLINE void storeBigEndian(void *const p,const I i)
#endif
}
#if 0
template<typename T>
static ZT_ALWAYS_INLINE bool isPrimitiveType() { return false; }
template<>
ZT_ALWAYS_INLINE bool isPrimitiveType<void *>() { return true; }
template<>
ZT_ALWAYS_INLINE bool isPrimitiveType<const void *>() { return true; }
template<>
ZT_ALWAYS_INLINE bool isPrimitiveType<bool>() { return true; }
template<>
ZT_ALWAYS_INLINE bool isPrimitiveType<float>() { return true; }
template<>
ZT_ALWAYS_INLINE bool isPrimitiveType<double>() { return true; }
template<>
ZT_ALWAYS_INLINE bool isPrimitiveType<int8_t>() { return true; }
template<>
ZT_ALWAYS_INLINE bool isPrimitiveType<int16_t>() { return true; }
template<>
ZT_ALWAYS_INLINE bool isPrimitiveType<int32_t>() { return true; }
template<>
ZT_ALWAYS_INLINE bool isPrimitiveType<int64_t>() { return true; }
template<>
ZT_ALWAYS_INLINE bool isPrimitiveType<uint8_t>() { return true; }
template<>
ZT_ALWAYS_INLINE bool isPrimitiveType<uint16_t>() { return true; }
template<>
ZT_ALWAYS_INLINE bool isPrimitiveType<uint32_t>() { return true; }
template<>
ZT_ALWAYS_INLINE bool isPrimitiveType<uint64_t>() { return true; }
#endif
} // namespace Utils
} // namespace ZeroTier