Mirror of https://github.com/zerotier/ZeroTierOne.git (synced 2025-04-19 13:36:54 +02:00)

commit dc1ef0c49e (parent 1003455160)

    Code formatting and other boring stuff.

22 changed files with 503 additions and 349 deletions
@@ -53,6 +53,14 @@ Commands:
blacklist cidr <IP/bits> <boolean> Toggle physical path blacklisting
blacklist if <prefix> <boolean> Toggle interface prefix blacklisting
portmap <boolean> Toggle use of uPnP or NAT-PMP
controller <command> [option] Local controller management commands
networks List networks run by local controller
new Create a new network
set <network> [setting] [value] Show or modify network settings
members <network> List members of a network
member <network> [setting] [value] Show or modify member level settings
auth <address|fingerprint> Authorize a peer
deauth <address|fingerprint> Deauthorize a peer
identity <command> [args] Identity management commands
new [c25519|p384] Create identity pair (default: c25519)
getpublic <identity> Extract only public part of identity

@@ -64,17 +72,8 @@ Commands:
show <locator> [identity] Show locator information
root [command] Root management commands
list List root peers (same as no command)
add <identity> <locator> Add or manually update a root
add <url> Add or update root(s) from a URL
add <identity> <endpoint|locator> Designate a peer as a root
remove <address> Un-designate a peer as a root
controller <command> [option] Local controller management commands
networks List networks run by local controller
new Create a new network
set <network> [setting] [value] Show or modify network settings
members <network> List members of a network
member <network> [setting] [value] Show or modify member level settings
auth <address|fingerprint> Authorize a peer
deauth <address|fingerprint> Deauthorize a peer

The 'service' command does not exit until the service receives a signal.
This is typically run from launchd (Mac), systemd or init (Linux), a Windows
@@ -10,7 +10,6 @@ import "C"

import (
	"encoding/json"
	"strings"
	"unsafe"
)

@@ -39,20 +38,13 @@ func NewEndpointFromString(s string) (*Endpoint, error) {
ep.cep._type = C.ZT_ENDPOINT_TYPE_NIL
return &ep, nil
}
if strings.IndexRune(s, '-') > 0 || (strings.IndexRune(s, ':') < 0 && strings.IndexRune(s, '.') < 0) {
var ep Endpoint
cs := C.CString(s)
defer C.free(unsafe.Pointer(cs))
if C.ZT_Endpoint_fromString(&ep.cep, cs) != 0 {
return nil, ErrInvalidParameter
}
return &ep, nil
}
inaddr := NewInetAddressFromString(s)
if inaddr == nil {
var ep Endpoint
cs := C.CString(s)
defer C.free(unsafe.Pointer(cs))
if C.ZT_Endpoint_fromString(&ep.cep, cs) != 0 {
return nil, ErrInvalidParameter
}
return NewEndpointFromInetAddress(inaddr)
return &ep, nil
}

func NewEndpointFromInetAddress(addr *InetAddress) (*Endpoint, error) {
@@ -2123,11 +2123,30 @@ ZT_SDK_API void ZT_Identity_delete(ZT_Identity *id);

/* ---------------------------------------------------------------------------------------------------------------- */

/**
 * Convert an endpoint to a string
 *
 * @param ep Endpoint structure
 * @param buf Buffer to store string (recommended size: 256)
 * @param capacity Capacity of buffer
 * @return String or NULL on error
 */
ZT_SDK_API char *ZT_Endpoint_toString(
	const ZT_Endpoint *ep,
	char *buf,
	int capacity);

/**
 * Parse an endpoint as a string
 *
 * This will automatically detect IP addresses in IP/port format. If one
 * of these is specified rather than a fully specified endpoint it will be
 * parsed as an IP/UDP endpoint.
 *
 * @param ep Endpoint structure to populate
 * @param str String representation of endpoint
 * @return OK (0) or error code
 */
ZT_SDK_API int ZT_Endpoint_fromString(
	ZT_Endpoint *ep,
	const char *str);
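Aside (not part of the diff): a minimal round-trip sketch of the two declarations above, assuming ZeroTierCore.h is on the include path and that an IP/port string such as "10.0.0.1/9993" is accepted, per the fromString comment.

/* Illustrative round trip through the two API calls declared above.
 * Assumptions (not from the diff): the header is includable as shown and
 * "10.0.0.1/9993" is a valid IP/port form endpoint string. */
#include "ZeroTierCore.h"
#include <cstdio>

int main()
{
	ZT_Endpoint ep;
	if (ZT_Endpoint_fromString(&ep, "10.0.0.1/9993") != 0) /* 0 == OK per the doc comment */
		return 1;
	char buf[256]; /* recommended size per ZT_Endpoint_toString's comment */
	const char *s = ZT_Endpoint_toString(&ep, buf, (int)sizeof(buf));
	if (s)
		printf("endpoint: %s\n", s);
	return 0;
}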
node/Address.hpp (125)

@@ -31,11 +31,23 @@ namespace ZeroTier {
|
|||
class Address : public TriviallyCopyable
|
||||
{
|
||||
public:
|
||||
ZT_INLINE Address() noexcept : _a(0) {}
|
||||
ZT_INLINE Address(const uint64_t a) noexcept : _a(a) {}
|
||||
explicit ZT_INLINE Address(const uint8_t b[5]) noexcept : _a(((uint64_t)b[0] << 32U) | ((uint64_t)b[1] << 24U) | ((uint64_t)b[2] << 16U) | ((uint64_t)b[3] << 8U) | (uint64_t)b[4]) {}
|
||||
ZT_INLINE Address() noexcept:
|
||||
_a(0)
|
||||
{}
|
||||
|
||||
ZT_INLINE Address &operator=(const uint64_t a) noexcept { _a = a; return *this; }
|
||||
ZT_INLINE Address(const uint64_t a) noexcept:
|
||||
_a(a)
|
||||
{}
|
||||
|
||||
explicit ZT_INLINE Address(const uint8_t b[5]) noexcept:
|
||||
_a(((uint64_t) b[0] << 32U) | ((uint64_t) b[1] << 24U) | ((uint64_t) b[2] << 16U) | ((uint64_t) b[3] << 8U) | (uint64_t) b[4])
|
||||
{}
|
||||
|
||||
ZT_INLINE Address &operator=(const uint64_t a) noexcept
|
||||
{
|
||||
_a = a;
|
||||
return *this;
|
||||
}
|
||||
|
||||
/**
|
||||
* @param bits Raw address -- 5 bytes, big-endian byte order
|
||||
|
@ -43,7 +55,7 @@ public:
|
|||
*/
|
||||
ZT_INLINE void setTo(const uint8_t b[5]) noexcept
|
||||
{
|
||||
_a = ((uint64_t)b[0] << 32U) | ((uint64_t)b[1] << 24U) | ((uint64_t)b[2] << 16U) | ((uint64_t)b[3] << 8U) | (uint64_t)b[4];
|
||||
_a = ((uint64_t) b[0] << 32U) | ((uint64_t) b[1] << 24U) | ((uint64_t) b[2] << 16U) | ((uint64_t) b[3] << 8U) | (uint64_t) b[4];
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -53,22 +65,24 @@ public:
|
|||
ZT_INLINE void copyTo(uint8_t b[5]) const noexcept
|
||||
{
|
||||
const uint64_t a = _a;
|
||||
b[0] = (uint8_t)(a >> 32U);
|
||||
b[1] = (uint8_t)(a >> 24U);
|
||||
b[2] = (uint8_t)(a >> 16U);
|
||||
b[3] = (uint8_t)(a >> 8U);
|
||||
b[4] = (uint8_t)a;
|
||||
b[0] = (uint8_t) (a >> 32U);
|
||||
b[1] = (uint8_t) (a >> 24U);
|
||||
b[2] = (uint8_t) (a >> 16U);
|
||||
b[3] = (uint8_t) (a >> 8U);
|
||||
b[4] = (uint8_t) a;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return Integer containing address (0 to 2^40)
|
||||
*/
|
||||
ZT_INLINE uint64_t toInt() const noexcept { return _a; }
|
||||
ZT_INLINE uint64_t toInt() const noexcept
|
||||
{ return _a; }
|
||||
|
||||
/**
|
||||
* Set address to zero/NIL
|
||||
*/
|
||||
ZT_INLINE void zero() noexcept { _a = 0; }
|
||||
ZT_INLINE void zero() noexcept
|
||||
{ _a = 0; }
|
||||
|
||||
/**
|
||||
* @param s String with at least 11 characters of space available (10 + terminating NULL)
|
||||
|
@ -78,20 +92,26 @@ public:
|
|||
{
|
||||
const uint64_t a = _a;
|
||||
const unsigned int m = 0xf;
|
||||
s[0] = Utils::HEXCHARS[(unsigned int)(a >> 36U) & m];
|
||||
s[1] = Utils::HEXCHARS[(unsigned int)(a >> 32U) & m];
|
||||
s[2] = Utils::HEXCHARS[(unsigned int)(a >> 28U) & m];
|
||||
s[3] = Utils::HEXCHARS[(unsigned int)(a >> 24U) & m];
|
||||
s[4] = Utils::HEXCHARS[(unsigned int)(a >> 20U) & m];
|
||||
s[5] = Utils::HEXCHARS[(unsigned int)(a >> 16U) & m];
|
||||
s[6] = Utils::HEXCHARS[(unsigned int)(a >> 12U) & m];
|
||||
s[7] = Utils::HEXCHARS[(unsigned int)(a >> 8U) & m];
|
||||
s[8] = Utils::HEXCHARS[(unsigned int)(a >> 4U) & m];
|
||||
s[9] = Utils::HEXCHARS[(unsigned int)a & m];
|
||||
s[0] = Utils::HEXCHARS[(unsigned int) (a >> 36U) & m];
|
||||
s[1] = Utils::HEXCHARS[(unsigned int) (a >> 32U) & m];
|
||||
s[2] = Utils::HEXCHARS[(unsigned int) (a >> 28U) & m];
|
||||
s[3] = Utils::HEXCHARS[(unsigned int) (a >> 24U) & m];
|
||||
s[4] = Utils::HEXCHARS[(unsigned int) (a >> 20U) & m];
|
||||
s[5] = Utils::HEXCHARS[(unsigned int) (a >> 16U) & m];
|
||||
s[6] = Utils::HEXCHARS[(unsigned int) (a >> 12U) & m];
|
||||
s[7] = Utils::HEXCHARS[(unsigned int) (a >> 8U) & m];
|
||||
s[8] = Utils::HEXCHARS[(unsigned int) (a >> 4U) & m];
|
||||
s[9] = Utils::HEXCHARS[(unsigned int) a & m];
|
||||
s[10] = 0;
|
||||
return s;
|
||||
}
|
||||
ZT_INLINE String toString() const { char s[ZT_ADDRESS_STRING_SIZE_MAX]; toString(s); return String(s); }
|
||||
|
||||
ZT_INLINE String toString() const
|
||||
{
|
||||
char s[ZT_ADDRESS_STRING_SIZE_MAX];
|
||||
toString(s);
|
||||
return String(s);
|
||||
}
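Aside (not from the diff): the reformatted toString() above simply renders the 40-bit address as ten hex characters plus a NUL. A standalone sketch of the same conversion, with a plain lowercase hex table standing in for Utils::HEXCHARS and an 11-byte buffer standing in for ZT_ADDRESS_STRING_SIZE_MAX.

// Standalone sketch of the 40-bit-address-to-hex conversion performed above.
#include <cstdint>
#include <cstdio>

static char *addressToString(const uint64_t a, char s[11])
{
	static const char HEX[17] = "0123456789abcdef";
	for (int i = 0; i < 10; ++i)
		s[i] = HEX[(unsigned int)(a >> (36U - (i * 4U))) & 0xfU]; // most significant nibble first
	s[10] = 0;
	return s;
}

int main()
{
	char buf[11];
	printf("%s\n", addressToString(0x89e92ceee5ULL, buf)); // example address, prints "89e92ceee5"
}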
|
||||
|
||||
/**
|
||||
* Check if this address is reserved
|
||||
|
@ -102,26 +122,53 @@ public:
|
|||
*
|
||||
* @return True if address is reserved and may not be used
|
||||
*/
|
||||
ZT_INLINE bool isReserved() const noexcept { return ((!_a) || ((_a >> 32U) == ZT_ADDRESS_RESERVED_PREFIX)); }
|
||||
ZT_INLINE bool isReserved() const noexcept
|
||||
{ return ((!_a) || ((_a >> 32U) == ZT_ADDRESS_RESERVED_PREFIX)); }
|
||||
|
||||
ZT_INLINE unsigned long hashCode() const noexcept { return (unsigned long)_a; }
|
||||
ZT_INLINE unsigned long hashCode() const noexcept
|
||||
{ return (unsigned long) _a; }
|
||||
|
||||
ZT_INLINE operator bool() const noexcept { return (_a != 0); }
|
||||
ZT_INLINE operator uint64_t() const noexcept { return _a; }
|
||||
ZT_INLINE operator bool() const noexcept
|
||||
{ return (_a != 0); }
|
||||
|
||||
ZT_INLINE bool operator==(const Address &a) const noexcept { return _a == a._a; }
|
||||
ZT_INLINE bool operator!=(const Address &a) const noexcept { return _a != a._a; }
|
||||
ZT_INLINE bool operator>(const Address &a) const noexcept { return _a > a._a; }
|
||||
ZT_INLINE bool operator<(const Address &a) const noexcept { return _a < a._a; }
|
||||
ZT_INLINE bool operator>=(const Address &a) const noexcept { return _a >= a._a; }
|
||||
ZT_INLINE bool operator<=(const Address &a) const noexcept { return _a <= a._a; }
|
||||
ZT_INLINE operator uint64_t() const noexcept
|
||||
{ return _a; }
|
||||
|
||||
ZT_INLINE bool operator==(const uint64_t a) const noexcept { return _a == a; }
|
||||
ZT_INLINE bool operator!=(const uint64_t a) const noexcept { return _a != a; }
|
||||
ZT_INLINE bool operator>(const uint64_t a) const noexcept { return _a > a; }
|
||||
ZT_INLINE bool operator<(const uint64_t a) const noexcept { return _a < a; }
|
||||
ZT_INLINE bool operator>=(const uint64_t a) const noexcept { return _a >= a; }
|
||||
ZT_INLINE bool operator<=(const uint64_t a) const noexcept { return _a <= a; }
|
||||
ZT_INLINE bool operator==(const Address &a) const noexcept
|
||||
{ return _a == a._a; }
|
||||
|
||||
ZT_INLINE bool operator!=(const Address &a) const noexcept
|
||||
{ return _a != a._a; }
|
||||
|
||||
ZT_INLINE bool operator>(const Address &a) const noexcept
|
||||
{ return _a > a._a; }
|
||||
|
||||
ZT_INLINE bool operator<(const Address &a) const noexcept
|
||||
{ return _a < a._a; }
|
||||
|
||||
ZT_INLINE bool operator>=(const Address &a) const noexcept
|
||||
{ return _a >= a._a; }
|
||||
|
||||
ZT_INLINE bool operator<=(const Address &a) const noexcept
|
||||
{ return _a <= a._a; }
|
||||
|
||||
ZT_INLINE bool operator==(const uint64_t a) const noexcept
|
||||
{ return _a == a; }
|
||||
|
||||
ZT_INLINE bool operator!=(const uint64_t a) const noexcept
|
||||
{ return _a != a; }
|
||||
|
||||
ZT_INLINE bool operator>(const uint64_t a) const noexcept
|
||||
{ return _a > a; }
|
||||
|
||||
ZT_INLINE bool operator<(const uint64_t a) const noexcept
|
||||
{ return _a < a; }
|
||||
|
||||
ZT_INLINE bool operator>=(const uint64_t a) const noexcept
|
||||
{ return _a >= a; }
|
||||
|
||||
ZT_INLINE bool operator<=(const uint64_t a) const noexcept
|
||||
{ return _a <= a; }
|
||||
|
||||
private:
|
||||
uint64_t _a;
|
||||
|
|
node/Buf.cpp (16)

@@ -37,18 +37,18 @@ void *Buf::operator new(std::size_t sz)
|
|||
|
||||
Buf *b;
|
||||
if (bb) {
|
||||
s_pool.store(((Buf *)bb)->__nextInPool);
|
||||
b = (Buf *)bb;
|
||||
s_pool.store(((Buf *) bb)->__nextInPool);
|
||||
b = (Buf *) bb;
|
||||
} else {
|
||||
s_pool.store(0);
|
||||
b = (Buf *)malloc(sz);
|
||||
b = (Buf *) malloc(sz);
|
||||
if (!b)
|
||||
throw std::bad_alloc();
|
||||
++s_allocated;
|
||||
}
|
||||
|
||||
b->__refCount.store(0);
|
||||
return (void *)b;
|
||||
return (void *) b;
|
||||
}
|
||||
|
||||
void Buf::operator delete(void *ptr)
|
||||
|
@ -66,8 +66,8 @@ void Buf::operator delete(void *ptr)
|
|||
sched_yield();
|
||||
}
|
||||
|
||||
((Buf *)ptr)->__nextInPool.store(bb);
|
||||
s_pool.store((uintptr_t)ptr);
|
||||
((Buf *) ptr)->__nextInPool.store(bb);
|
||||
s_pool.store((uintptr_t) ptr);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -84,9 +84,9 @@ void Buf::freePool() noexcept
|
|||
s_pool.store(0);
|
||||
|
||||
while (bb != 0) {
|
||||
const uintptr_t next = ((Buf *)bb)->__nextInPool;
|
||||
const uintptr_t next = ((Buf *) bb)->__nextInPool;
|
||||
--s_allocated;
|
||||
free((void *)bb);
|
||||
free((void *) bb);
|
||||
bb = next;
|
||||
}
|
||||
}
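Aside: the hunks above only respace casts, but they sit inside Buf's pooled operator new/delete. A rough sketch of the pooling idea only, using a mutex-guarded free list instead of the lock-free atomic s_pool pointer; names and locking here are illustrative, not ZeroTier's.

// Simplified illustration of a pooled operator new/delete: freed objects are
// pushed onto a free list and reused before falling back to malloc().
#include <cstdlib>
#include <mutex>
#include <new>

class Pooled
{
public:
	static void *operator new(std::size_t sz)
	{
		std::lock_guard<std::mutex> l(s_lock);
		if (s_pool) {
			Pooled *const b = s_pool; // reuse a previously freed block
			s_pool = b->m_nextInPool;
			return b;
		}
		void *const b = std::malloc(sz);
		if (!b)
			throw std::bad_alloc();
		return b;
	}

	static void operator delete(void *ptr)
	{
		if (!ptr)
			return;
		std::lock_guard<std::mutex> l(s_lock);
		((Pooled *)ptr)->m_nextInPool = s_pool; // link the freed block into the pool
		s_pool = (Pooled *)ptr;
	}

private:
	Pooled *m_nextInPool;
	static std::mutex s_lock;
	static Pooled *s_pool;
};

std::mutex Pooled::s_lock;
Pooled *Pooled::s_pool = nullptr;

int main()
{
	Pooled *a = new Pooled(); // falls back to malloc
	delete a;                 // goes onto the free list
	Pooled *b = new Pooled(); // reuses the pooled block
	delete b;
	return 0;
}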
|
||||
|
|
node/Buf.hpp (175)

@@ -84,6 +84,7 @@ class Buf
|
|||
public:
|
||||
// New and delete operators that allocate Buf instances from a shared lock-free memory pool.
|
||||
static void *operator new(std::size_t sz);
|
||||
|
||||
static void operator delete(void *ptr);
|
||||
|
||||
/**
|
||||
|
@ -114,12 +115,24 @@ public:
|
|||
*/
|
||||
struct Slice : TriviallyCopyable
|
||||
{
|
||||
ZT_INLINE Slice(const SharedPtr<Buf> &b_,const unsigned int s_,const unsigned int e_) noexcept : b(b_),s(s_),e(e_) {}
|
||||
ZT_INLINE Slice() noexcept : b(),s(0),e(0) {}
|
||||
ZT_INLINE Slice(const SharedPtr<Buf> &b_, const unsigned int s_, const unsigned int e_) noexcept: b(b_), s(s_), e(e_)
|
||||
{}
|
||||
|
||||
ZT_INLINE operator bool() const noexcept { return (b); } // NOLINT(google-explicit-constructor,hicpp-explicit-conversions)
|
||||
ZT_INLINE unsigned int size() const noexcept { return (e - s); }
|
||||
ZT_INLINE void zero() noexcept { b.zero(); s = 0; e = 0; }
|
||||
ZT_INLINE Slice() noexcept: b(), s(0), e(0)
|
||||
{}
|
||||
|
||||
ZT_INLINE operator bool() const noexcept
|
||||
{ return (b); }
|
||||
|
||||
ZT_INLINE unsigned int size() const noexcept
|
||||
{ return (e - s); }
|
||||
|
||||
ZT_INLINE void zero() noexcept
|
||||
{
|
||||
b.zero();
|
||||
s = 0;
|
||||
e = 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Buffer holding slice data
|
||||
|
@ -140,15 +153,16 @@ public:
|
|||
/**
|
||||
* A vector of slices making up a packet that might span more than one buffer.
|
||||
*/
|
||||
class PacketVector : public ZeroTier::FCV<Slice,ZT_MAX_PACKET_FRAGMENTS>
|
||||
class PacketVector : public ZeroTier::FCV<Slice, ZT_MAX_PACKET_FRAGMENTS>
|
||||
{
|
||||
public:
|
||||
ZT_INLINE PacketVector() : ZeroTier::FCV<Slice,ZT_MAX_PACKET_FRAGMENTS>() {}
|
||||
ZT_INLINE PacketVector() : ZeroTier::FCV<Slice, ZT_MAX_PACKET_FRAGMENTS>()
|
||||
{}
|
||||
|
||||
ZT_INLINE unsigned int totalSize() const noexcept
|
||||
{
|
||||
unsigned int size = 0;
|
||||
for(PacketVector::const_iterator s(begin());s!=end();++s)
|
||||
for (PacketVector::const_iterator s(begin());s != end();++s)
|
||||
size += s->e - s->s;
|
||||
return size;
|
||||
}
|
||||
|
@ -162,17 +176,17 @@ public:
|
|||
ZT_INLINE int mergeCopy(Buf &b) const noexcept
|
||||
{
|
||||
unsigned int size = 0;
|
||||
for(PacketVector::const_iterator s(begin());s!=end();++s) {
|
||||
for (PacketVector::const_iterator s(begin());s != end();++s) {
|
||||
const unsigned int start = s->s;
|
||||
const unsigned int rem = s->e - start;
|
||||
if (likely((size + rem) <= ZT_BUF_MEM_SIZE)) {
|
||||
Utils::copy(b.unsafeData + size,s->b->unsafeData + start,rem);
|
||||
Utils::copy(b.unsafeData + size, s->b->unsafeData + start, rem);
|
||||
size += rem;
|
||||
} else {
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
return (int)size;
|
||||
return (int) size;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -187,10 +201,10 @@ public:
|
|||
* @return Size of data in destination or -1 on error
|
||||
*/
|
||||
template<typename F>
|
||||
ZT_INLINE int mergeMap(Buf &b,const unsigned int simpleCopyBefore,F copyFunction) const noexcept
|
||||
ZT_INLINE int mergeMap(Buf &b, const unsigned int simpleCopyBefore, F copyFunction) const noexcept
|
||||
{
|
||||
unsigned int size = 0;
|
||||
for(PacketVector::const_iterator s(begin());s!=end();++s) {
|
||||
for (PacketVector::const_iterator s(begin());s != end();++s) {
|
||||
unsigned int start = s->s;
|
||||
unsigned int rem = s->e - start;
|
||||
if (likely((size + rem) <= ZT_BUF_MEM_SIZE)) {
|
||||
|
@ -198,49 +212,49 @@ public:
|
|||
unsigned int sc = simpleCopyBefore - size;
|
||||
if (unlikely(sc > rem))
|
||||
sc = rem;
|
||||
Utils::copy(b.unsafeData + size,s->b->unsafeData + start,sc);
|
||||
Utils::copy(b.unsafeData + size, s->b->unsafeData + start, sc);
|
||||
start += sc;
|
||||
rem -= sc;
|
||||
}
|
||||
|
||||
if (likely(rem > 0)) {
|
||||
copyFunction(b.unsafeData + size,s->b->unsafeData + start,rem);
|
||||
copyFunction(b.unsafeData + size, s->b->unsafeData + start, rem);
|
||||
size += rem;
|
||||
}
|
||||
} else {
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
return (int)size;
|
||||
return (int) size;
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* Create a new uninitialized buffer with undefined contents (use clear() to zero if needed)
|
||||
*/
|
||||
ZT_INLINE Buf() noexcept : __nextInPool(0),__refCount(0) {} // NOLINT(cppcoreguidelines-pro-type-member-init,hicpp-member-init)
|
||||
ZT_INLINE Buf() noexcept: __nextInPool(0), __refCount(0)
|
||||
{}
|
||||
|
||||
/**
|
||||
* Create a new buffer and copy data into it
|
||||
*/
|
||||
ZT_INLINE Buf(const void *const data,const unsigned int len) noexcept : // NOLINT(cppcoreguidelines-pro-type-member-init,hicpp-member-init)
|
||||
__nextInPool(0),
|
||||
ZT_INLINE Buf(const void *const data, const unsigned int len) noexcept:
|
||||
__refCount(0)
|
||||
{
|
||||
Utils::copy(unsafeData,data,len);
|
||||
Utils::copy(unsafeData, data, len);
|
||||
}
|
||||
|
||||
ZT_INLINE Buf(const Buf &b2) noexcept : // NOLINT(cppcoreguidelines-pro-type-member-init,hicpp-member-init)
|
||||
ZT_INLINE Buf(const Buf &b2) noexcept:
|
||||
__nextInPool(0),
|
||||
__refCount(0)
|
||||
{
|
||||
Utils::copy<ZT_BUF_MEM_SIZE>(unsafeData,b2.unsafeData);
|
||||
Utils::copy<ZT_BUF_MEM_SIZE>(unsafeData, b2.unsafeData);
|
||||
}
|
||||
|
||||
ZT_INLINE Buf &operator=(const Buf &b2) noexcept
|
||||
{
|
||||
if (this != &b2)
|
||||
Utils::copy<ZT_BUF_MEM_SIZE>(unsafeData,b2.unsafeData);
|
||||
Utils::copy<ZT_BUF_MEM_SIZE>(unsafeData, b2.unsafeData);
|
||||
return *this;
|
||||
}
|
||||
|
||||
|
@ -253,7 +267,8 @@ public:
|
|||
* @param ii Iterator to check
|
||||
* @return True if iterator has read past the size of the buffer
|
||||
*/
|
||||
static ZT_INLINE bool writeOverflow(const int &ii) noexcept { return ((ii - ZT_BUF_MEM_SIZE) > 0); }
|
||||
static ZT_INLINE bool writeOverflow(const int &ii) noexcept
|
||||
{ return ((ii - ZT_BUF_MEM_SIZE) > 0); }
|
||||
|
||||
/**
|
||||
* Check for overflow beyond the size of the data that should be in the buffer
|
||||
|
@ -265,7 +280,8 @@ public:
|
|||
* @param size Size of data that should be in buffer
|
||||
* @return True if iterator has read past the size of the data
|
||||
*/
|
||||
static ZT_INLINE bool readOverflow(const int &ii,const unsigned int size) noexcept { return ((ii - (int)size) > 0); }
|
||||
static ZT_INLINE bool readOverflow(const int &ii, const unsigned int size) noexcept
|
||||
{ return ((ii - (int) size) > 0); }
|
||||
|
||||
/**
|
||||
* Set all memory to zero
|
||||
|
@ -284,7 +300,7 @@ public:
|
|||
ZT_INLINE uint8_t rI8(int &ii) const noexcept
|
||||
{
|
||||
const int s = ii++;
|
||||
return unsafeData[(unsigned int)s & ZT_BUF_MEM_MASK];
|
||||
return unsafeData[(unsigned int) s & ZT_BUF_MEM_MASK];
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -295,7 +311,7 @@ public:
|
|||
*/
|
||||
ZT_INLINE uint16_t rI16(int &ii) const noexcept
|
||||
{
|
||||
const unsigned int s = (unsigned int)ii & ZT_BUF_MEM_MASK;
|
||||
const unsigned int s = (unsigned int) ii & ZT_BUF_MEM_MASK;
|
||||
ii += 2;
|
||||
#ifdef ZT_NO_UNALIGNED_ACCESS
|
||||
return (
|
||||
|
@ -314,7 +330,7 @@ public:
|
|||
*/
|
||||
ZT_INLINE uint32_t rI32(int &ii) const noexcept
|
||||
{
|
||||
const unsigned int s = (unsigned int)ii & ZT_BUF_MEM_MASK;
|
||||
const unsigned int s = (unsigned int) ii & ZT_BUF_MEM_MASK;
|
||||
ii += 4;
|
||||
#ifdef ZT_NO_UNALIGNED_ACCESS
|
||||
return (
|
||||
|
@ -335,7 +351,7 @@ public:
|
|||
*/
|
||||
ZT_INLINE uint64_t rI64(int &ii) const noexcept
|
||||
{
|
||||
const unsigned int s = (unsigned int)ii & ZT_BUF_MEM_MASK;
|
||||
const unsigned int s = (unsigned int) ii & ZT_BUF_MEM_MASK;
|
||||
ii += 8;
|
||||
#ifdef ZT_NO_UNALIGNED_ACCESS
|
||||
return (
|
||||
|
@ -368,10 +384,10 @@ public:
|
|||
* @return Bytes read or a negative value on unmarshal error (passed from object) or overflow
|
||||
*/
|
||||
template<typename T>
|
||||
ZT_INLINE int rO(int &ii,T &obj) const noexcept
|
||||
ZT_INLINE int rO(int &ii, T &obj) const noexcept
|
||||
{
|
||||
if (likely(ii < ZT_BUF_MEM_SIZE)) {
|
||||
int ms = obj.unmarshal(unsafeData + ii,ZT_BUF_MEM_SIZE - ii);
|
||||
int ms = obj.unmarshal(unsafeData + ii, ZT_BUF_MEM_SIZE - ii);
|
||||
if (ms > 0)
|
||||
ii += ms;
|
||||
return ms;
|
||||
|
@ -390,16 +406,16 @@ public:
|
|||
* @param bufSize Capacity of buffer in bytes
|
||||
* @return Pointer to buf or NULL on overflow or error
|
||||
*/
|
||||
ZT_INLINE char *rS(int &ii,char *const buf,const unsigned int bufSize) const noexcept
|
||||
ZT_INLINE char *rS(int &ii, char *const buf, const unsigned int bufSize) const noexcept
|
||||
{
|
||||
const char *const s = (const char *)(unsafeData + ii);
|
||||
const char *const s = (const char *) (unsafeData + ii);
|
||||
const int sii = ii;
|
||||
while (ii < ZT_BUF_MEM_SIZE) {
|
||||
if (unsafeData[ii++] == 0) {
|
||||
const int l = ii - sii;
|
||||
if (unlikely((unsigned int)l > bufSize))
|
||||
if (unlikely((unsigned int) l > bufSize))
|
||||
return nullptr;
|
||||
Utils::copy(buf,s,l);
|
||||
Utils::copy(buf, s, l);
|
||||
return buf;
|
||||
}
|
||||
}
|
||||
|
@ -421,7 +437,7 @@ public:
|
|||
*/
|
||||
ZT_INLINE const char *rSnc(int &ii) const noexcept
|
||||
{
|
||||
const char *const s = (const char *)(unsafeData + ii);
|
||||
const char *const s = (const char *) (unsafeData + ii);
|
||||
while (ii < ZT_BUF_MEM_SIZE) {
|
||||
if (unsafeData[ii++] == 0)
|
||||
return s;
|
||||
|
@ -440,10 +456,10 @@ public:
|
|||
* @param len Length of buffer
|
||||
* @return Pointer to data or NULL on overflow or error
|
||||
*/
|
||||
ZT_INLINE uint8_t *rB(int &ii,void *const bytes,const unsigned int len) const noexcept
|
||||
ZT_INLINE uint8_t *rB(int &ii, void *const bytes, const unsigned int len) const noexcept
|
||||
{
|
||||
if (likely(((ii += (int)len) <= ZT_BUF_MEM_SIZE))) {
|
||||
Utils::copy(bytes,unsafeData + ii,len);
|
||||
if (likely(((ii += (int) len) <= ZT_BUF_MEM_SIZE))) {
|
||||
Utils::copy(bytes, unsafeData + ii, len);
|
||||
return reinterpret_cast<uint8_t *>(bytes);
|
||||
}
|
||||
return nullptr;
|
||||
|
@ -462,10 +478,10 @@ public:
|
|||
* @param len Length of data field to obtain a pointer to
|
||||
* @return Pointer to field or NULL on overflow
|
||||
*/
|
||||
ZT_INLINE const uint8_t *rBnc(int &ii,unsigned int len) const noexcept
|
||||
ZT_INLINE const uint8_t *rBnc(int &ii, unsigned int len) const noexcept
|
||||
{
|
||||
const uint8_t *const b = unsafeData + ii;
|
||||
return ((ii += (int)len) <= ZT_BUF_MEM_SIZE) ? b : nullptr;
|
||||
return ((ii += (int) len) <= ZT_BUF_MEM_SIZE) ? b : nullptr;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -477,7 +493,7 @@ public:
|
|||
template<unsigned int I>
|
||||
ZT_INLINE uint8_t lI8() const noexcept
|
||||
{
|
||||
static_assert(I < ZT_BUF_MEM_SIZE,"overflow");
|
||||
static_assert(I < ZT_BUF_MEM_SIZE, "overflow");
|
||||
return unsafeData[I];
|
||||
}
|
||||
|
||||
|
@ -490,7 +506,7 @@ public:
|
|||
template<unsigned int I>
|
||||
ZT_INLINE uint8_t lI16() const noexcept
|
||||
{
|
||||
static_assert((I + 1) < ZT_BUF_MEM_SIZE,"overflow");
|
||||
static_assert((I + 1) < ZT_BUF_MEM_SIZE, "overflow");
|
||||
#ifdef ZT_NO_UNALIGNED_ACCESS
|
||||
return (
|
||||
((uint16_t)unsafeData[I] << 8U) |
|
||||
|
@ -509,7 +525,7 @@ public:
|
|||
template<unsigned int I>
|
||||
ZT_INLINE uint8_t lI32() const noexcept
|
||||
{
|
||||
static_assert((I + 3) < ZT_BUF_MEM_SIZE,"overflow");
|
||||
static_assert((I + 3) < ZT_BUF_MEM_SIZE, "overflow");
|
||||
#ifdef ZT_NO_UNALIGNED_ACCESS
|
||||
return (
|
||||
((uint32_t)unsafeData[I] << 24U) |
|
||||
|
@ -530,7 +546,7 @@ public:
|
|||
template<unsigned int I>
|
||||
ZT_INLINE uint8_t lI64() const noexcept
|
||||
{
|
||||
static_assert((I + 7) < ZT_BUF_MEM_SIZE,"overflow");
|
||||
static_assert((I + 7) < ZT_BUF_MEM_SIZE, "overflow");
|
||||
#ifdef ZT_NO_UNALIGNED_ACCESS
|
||||
return (
|
||||
((uint64_t)unsafeData[I] << 56U) |
|
||||
|
@ -555,7 +571,7 @@ public:
|
|||
*/
|
||||
ZT_INLINE uint8_t lI8(const int ii) const noexcept
|
||||
{
|
||||
return unsafeData[(unsigned int)ii & ZT_BUF_MEM_MASK];
|
||||
return unsafeData[(unsigned int) ii & ZT_BUF_MEM_MASK];
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -567,7 +583,7 @@ public:
|
|||
*/
|
||||
ZT_INLINE uint16_t lI16(const int ii) const noexcept
|
||||
{
|
||||
const unsigned int s = (unsigned int)ii & ZT_BUF_MEM_MASK;
|
||||
const unsigned int s = (unsigned int) ii & ZT_BUF_MEM_MASK;
|
||||
#ifdef ZT_NO_UNALIGNED_ACCESS
|
||||
return (
|
||||
((uint16_t)unsafeData[s] << 8U) |
|
||||
|
@ -586,7 +602,7 @@ public:
|
|||
*/
|
||||
ZT_INLINE uint32_t lI32(const int ii) const noexcept
|
||||
{
|
||||
const unsigned int s = (unsigned int)ii & ZT_BUF_MEM_MASK;
|
||||
const unsigned int s = (unsigned int) ii & ZT_BUF_MEM_MASK;
|
||||
#ifdef ZT_NO_UNALIGNED_ACCESS
|
||||
return (
|
||||
((uint32_t)unsafeData[s] << 24U) |
|
||||
|
@ -607,7 +623,7 @@ public:
|
|||
*/
|
||||
ZT_INLINE uint8_t lI64(const int ii) const noexcept
|
||||
{
|
||||
const unsigned int s = (unsigned int)ii & ZT_BUF_MEM_MASK;
|
||||
const unsigned int s = (unsigned int) ii & ZT_BUF_MEM_MASK;
|
||||
#ifdef ZT_NO_UNALIGNED_ACCESS
|
||||
return (
|
||||
((uint64_t)unsafeData[s] << 56U) |
|
||||
|
@ -629,10 +645,10 @@ public:
|
|||
* @param ii Index value-result parameter (incremented by 1)
|
||||
* @param n Byte
|
||||
*/
|
||||
ZT_INLINE void wI8(int &ii,const uint8_t n) noexcept
|
||||
ZT_INLINE void wI8(int &ii, const uint8_t n) noexcept
|
||||
{
|
||||
const int s = ii++;
|
||||
unsafeData[(unsigned int)s & ZT_BUF_MEM_MASK] = n;
|
||||
unsafeData[(unsigned int) s & ZT_BUF_MEM_MASK] = n;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -641,9 +657,9 @@ public:
|
|||
* @param ii Index value-result parameter (incremented by 2)
|
||||
* @param n Integer
|
||||
*/
|
||||
ZT_INLINE void wI16(int &ii,const uint16_t n) noexcept
|
||||
ZT_INLINE void wI16(int &ii, const uint16_t n) noexcept
|
||||
{
|
||||
const unsigned int s = ((unsigned int)ii) & ZT_BUF_MEM_MASK;
|
||||
const unsigned int s = ((unsigned int) ii) & ZT_BUF_MEM_MASK;
|
||||
ii += 2;
|
||||
#ifdef ZT_NO_UNALIGNED_ACCESS
|
||||
unsafeData[s] = (uint8_t)(n >> 8U);
|
||||
|
@ -659,9 +675,9 @@ public:
|
|||
* @param ii Index value-result parameter (incremented by 4)
|
||||
* @param n Integer
|
||||
*/
|
||||
ZT_INLINE void wI32(int &ii,const uint32_t n) noexcept
|
||||
ZT_INLINE void wI32(int &ii, const uint32_t n) noexcept
|
||||
{
|
||||
const unsigned int s = ((unsigned int)ii) & ZT_BUF_MEM_MASK;
|
||||
const unsigned int s = ((unsigned int) ii) & ZT_BUF_MEM_MASK;
|
||||
ii += 4;
|
||||
#ifdef ZT_NO_UNALIGNED_ACCESS
|
||||
unsafeData[s] = (uint8_t)(n >> 24U);
|
||||
|
@ -679,9 +695,9 @@ public:
|
|||
* @param ii Index value-result parameter (incremented by 8)
|
||||
* @param n Integer
|
||||
*/
|
||||
ZT_INLINE void wI64(int &ii,const uint64_t n) noexcept
|
||||
ZT_INLINE void wI64(int &ii, const uint64_t n) noexcept
|
||||
{
|
||||
const unsigned int s = ((unsigned int)ii) & ZT_BUF_MEM_MASK;
|
||||
const unsigned int s = ((unsigned int) ii) & ZT_BUF_MEM_MASK;
|
||||
ii += 8;
|
||||
#ifdef ZT_NO_UNALIGNED_ACCESS
|
||||
unsafeData[s] = (uint8_t)(n >> 56U);
|
||||
|
@ -705,7 +721,7 @@ public:
|
|||
* @param t Object to write
|
||||
*/
|
||||
template<typename T>
|
||||
ZT_INLINE void wO(int &ii,T &t) noexcept
|
||||
ZT_INLINE void wO(int &ii, T &t) noexcept
|
||||
{
|
||||
const int s = ii;
|
||||
if (likely((s + T::marshalSizeMax()) <= ZT_BUF_MEM_SIZE)) {
|
||||
|
@ -723,16 +739,16 @@ public:
|
|||
* @param ii Index value-result parameter (incremented by length of string)
|
||||
* @param s String to write (writes an empty string if this is NULL)
|
||||
*/
|
||||
ZT_INLINE void wS(int &ii,const char *s) noexcept
|
||||
ZT_INLINE void wS(int &ii, const char *s) noexcept
|
||||
{
|
||||
if (s) {
|
||||
char c;
|
||||
do {
|
||||
c = *(s++);
|
||||
wI8(ii,(uint8_t)c);
|
||||
wI8(ii, (uint8_t) c);
|
||||
} while (c);
|
||||
} else {
|
||||
wI8(ii,0);
|
||||
wI8(ii, 0);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -743,11 +759,11 @@ public:
|
|||
* @param bytes Bytes to write
|
||||
* @param len Size of data in bytes
|
||||
*/
|
||||
ZT_INLINE void wB(int &ii,const void *const bytes,const unsigned int len) noexcept
|
||||
ZT_INLINE void wB(int &ii, const void *const bytes, const unsigned int len) noexcept
|
||||
{
|
||||
const int s = ii;
|
||||
if (likely((ii += (int)len) <= ZT_BUF_MEM_SIZE))
|
||||
Utils::copy(unsafeData + s,bytes,len);
|
||||
if (likely((ii += (int) len) <= ZT_BUF_MEM_SIZE))
|
||||
Utils::copy(unsafeData + s, bytes, len);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -756,11 +772,11 @@ public:
|
|||
* @param ii Index value-result parameter (incremented by len)
|
||||
* @param len Number of zero bytes to write
|
||||
*/
|
||||
ZT_INLINE void wZ(int &ii,const unsigned int len) noexcept
|
||||
ZT_INLINE void wZ(int &ii, const unsigned int len) noexcept
|
||||
{
|
||||
const int s = ii;
|
||||
if (likely((ii += (int)len) <= ZT_BUF_MEM_SIZE))
|
||||
Utils::zero(unsafeData + s,len);
|
||||
if (likely((ii += (int) len) <= ZT_BUF_MEM_SIZE))
|
||||
Utils::zero(unsafeData + s, len);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -769,27 +785,27 @@ public:
|
|||
* @param ii Index value-result parameter (incremented by len)
|
||||
* @param len Number of random bytes to write
|
||||
*/
|
||||
ZT_INLINE void wR(int &ii,const unsigned int len) noexcept
|
||||
ZT_INLINE void wR(int &ii, const unsigned int len) noexcept
|
||||
{
|
||||
const int s = ii;
|
||||
if (likely((ii += (int)len) <= ZT_BUF_MEM_SIZE))
|
||||
Utils::getSecureRandom(unsafeData + s,len);
|
||||
if (likely((ii += (int) len) <= ZT_BUF_MEM_SIZE))
|
||||
Utils::getSecureRandom(unsafeData + s, len);
|
||||
}
|
||||
|
||||
/**
|
||||
* Store a byte without advancing the index
|
||||
*/
|
||||
ZT_INLINE void sI8(const int ii,const uint8_t n) noexcept
|
||||
ZT_INLINE void sI8(const int ii, const uint8_t n) noexcept
|
||||
{
|
||||
unsafeData[(unsigned int)ii & ZT_BUF_MEM_MASK] = n;
|
||||
unsafeData[(unsigned int) ii & ZT_BUF_MEM_MASK] = n;
|
||||
}
|
||||
|
||||
/**
|
||||
* Store an integer without advancing the index
|
||||
*/
|
||||
ZT_INLINE void sI16(const int ii,const uint16_t n) noexcept
|
||||
ZT_INLINE void sI16(const int ii, const uint16_t n) noexcept
|
||||
{
|
||||
const unsigned int s = ((unsigned int)ii) & ZT_BUF_MEM_MASK;
|
||||
const unsigned int s = ((unsigned int) ii) & ZT_BUF_MEM_MASK;
|
||||
#ifdef ZT_NO_UNALIGNED_ACCESS
|
||||
unsafeData[s] = (uint8_t)(n >> 8U);
|
||||
unsafeData[s + 1] = (uint8_t)n;
|
||||
|
@ -801,9 +817,9 @@ public:
|
|||
/**
|
||||
* Store an integer without advancing the index
|
||||
*/
|
||||
ZT_INLINE void sI32(const int ii,const uint32_t n) noexcept
|
||||
ZT_INLINE void sI32(const int ii, const uint32_t n) noexcept
|
||||
{
|
||||
const unsigned int s = ((unsigned int)ii) & ZT_BUF_MEM_MASK;
|
||||
const unsigned int s = ((unsigned int) ii) & ZT_BUF_MEM_MASK;
|
||||
#ifdef ZT_NO_UNALIGNED_ACCESS
|
||||
unsafeData[s] = (uint8_t)(n >> 24U);
|
||||
unsafeData[s + 1] = (uint8_t)(n >> 16U);
|
||||
|
@ -817,9 +833,9 @@ public:
|
|||
/**
|
||||
* Store an integer without advancing the index
|
||||
*/
|
||||
ZT_INLINE void sI64(const int ii,const uint64_t n) noexcept
|
||||
ZT_INLINE void sI64(const int ii, const uint64_t n) noexcept
|
||||
{
|
||||
const unsigned int s = ((unsigned int)ii) & ZT_BUF_MEM_MASK;
|
||||
const unsigned int s = ((unsigned int) ii) & ZT_BUF_MEM_MASK;
|
||||
#ifdef ZT_NO_UNALIGNED_ACCESS
|
||||
unsafeData[s] = (uint8_t)(n >> 56U);
|
||||
unsafeData[s + 1] = (uint8_t)(n >> 48U);
|
||||
|
@ -837,7 +853,8 @@ public:
|
|||
/**
|
||||
* @return Capacity of this buffer (usable size of data.bytes)
|
||||
*/
|
||||
static constexpr unsigned int capacity() noexcept { return ZT_BUF_MEM_SIZE; }
|
||||
static constexpr unsigned int capacity() noexcept
|
||||
{ return ZT_BUF_MEM_SIZE; }
|
||||
|
||||
private:
|
||||
// Next item in free buffer pool linked list if Buf is placed in pool, undefined and unused otherwise
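Aside: the rI*/wI*/sI* methods reformatted above all share one pattern, a value-result cursor that is masked on every access and bounds-checked once afterward via writeOverflow()/readOverflow(). A toy sketch of that pattern with assumed names and a 16-byte buffer in place of ZT_BUF_MEM_SIZE (the second byte is masked here too, purely to keep the sketch in bounds).

// Sketch of the cursor pattern used by Buf: every access masks the index so it
// can never leave the buffer, and the caller checks overflow once at the end.
#include <cstdint>
#include <cstdio>

constexpr unsigned int MEM_SIZE = 16;           // stands in for ZT_BUF_MEM_SIZE
constexpr unsigned int MEM_MASK = MEM_SIZE - 1; // stands in for ZT_BUF_MEM_MASK

struct MiniBuf
{
	uint8_t data[MEM_SIZE];

	void wI16(int &ii, const uint16_t n) noexcept
	{
		const unsigned int s = ((unsigned int)ii) & MEM_MASK;
		ii += 2;
		data[s] = (uint8_t)(n >> 8U); // big-endian, as in Buf
		data[(s + 1) & MEM_MASK] = (uint8_t)n;
	}

	uint16_t rI16(int &ii) const noexcept
	{
		const unsigned int s = (unsigned int)ii & MEM_MASK;
		ii += 2;
		return (uint16_t)(((unsigned int)data[s] << 8U) | (unsigned int)data[(s + 1) & MEM_MASK]);
	}

	static bool writeOverflow(const int &ii) noexcept { return ((ii - (int)MEM_SIZE) > 0); }
};

int main()
{
	MiniBuf b{};
	int w = 0;
	b.wI16(w, 0xabcd);
	if (MiniBuf::writeOverflow(w)) // checked once, after all writes
		return 1;
	int r = 0;
	printf("0x%04x\n", (unsigned int)b.rI16(r)); // prints 0xabcd
}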
|
||||
|
|
|
@ -137,7 +137,7 @@
|
|||
/**
|
||||
* Maximum number of queued endpoints to try per "pulse."
|
||||
*/
|
||||
#define ZT_NAT_T_MAX_QUEUED_ATTEMPTS_PER_PULSE 16
|
||||
#define ZT_NAT_T_PORT_SCAN_MAX 16
|
||||
|
||||
/**
|
||||
* Delay between calls to the pulse() method in Peer for each peer
|
||||
|
|
|
@ -20,10 +20,6 @@
|
|||
#include <string>
|
||||
#include <memory>
|
||||
#include <stdexcept>
|
||||
#include <cstdio>
|
||||
#include <cstdlib>
|
||||
#include <cstdint>
|
||||
#include <cstring>
|
||||
|
||||
namespace ZeroTier {
|
||||
|
||||
|
|
|
@ -97,7 +97,7 @@ public:
|
|||
};
|
||||
|
||||
ZT_INLINE Defragmenter()
|
||||
{} // NOLINT(hicpp-use-equals-default,modernize-use-equals-default)
|
||||
{}
|
||||
|
||||
/**
|
||||
* Process a fragment of a multi-part message
|
||||
|
|
|
@ -60,16 +60,20 @@ bool Endpoint::fromString(const char *s) noexcept
|
|||
return true;
|
||||
|
||||
const char *start = strchr(s, '-');
|
||||
if (start) {
|
||||
if (start++ != nullptr) {
|
||||
// Parse a fully qualified type-address format Endpoint.
|
||||
char tmp[16];
|
||||
for (unsigned int i = 0;i < 15;++i) {
|
||||
if ((tmp[i] = s[i]) == 0)
|
||||
for (unsigned int i=0;i<16;++i) {
|
||||
char ss = s[i];
|
||||
if (ss == '-') {
|
||||
tmp[i] = 0;
|
||||
break;
|
||||
}
|
||||
tmp[i] = ss;
|
||||
}
|
||||
tmp[15] = 0;
|
||||
this->type = (ZT_EndpointType)Utils::strToUInt(tmp);
|
||||
|
||||
++start;
|
||||
Fingerprint tmpfp;
|
||||
MAC tmpmac;
|
||||
switch (this->type) {
|
||||
|
@ -93,10 +97,15 @@ bool Endpoint::fromString(const char *s) noexcept
|
|||
if (!asInetAddress(this->value.ss).fromString(start))
|
||||
return false;
|
||||
default:
|
||||
this->type = ZT_ENDPOINT_TYPE_NIL;
|
||||
return false;
|
||||
}
|
||||
} else if ((strchr(s, ':')) || (strchr(s, '.'))) {
|
||||
// Parse raw IP/port strings as IP_UDP endpoints.
|
||||
this->type = ZT_ENDPOINT_TYPE_IP_UDP;
|
||||
if (!asInetAddress(this->value.ss).fromString(s))
|
||||
return false;
|
||||
} else {
|
||||
// A naked '0' can be a NIL endpoint.
|
||||
if (Utils::strToUInt(s) != (unsigned int)ZT_ENDPOINT_TYPE_NIL)
|
||||
return false;
|
||||
}
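Aside: the new loop above splits a "type-address" endpoint string at the first '-' before converting the prefix into a numeric type code. A standalone sketch of that split; the helper name and example string are illustrative only, and no particular type-code mapping is implied.

// Copy the digits before the first '-' into a small buffer, convert them to a
// type code, and leave `rest` pointing just past the '-'.
#include <cstdio>
#include <cstdlib>
#include <cstring>

static bool splitTypeAddress(const char *s, unsigned int &type, const char *&rest)
{
	const char *const dash = strchr(s, '-');
	if (!dash)
		return false;
	char tmp[16];
	unsigned int i = 0;
	for (; (i < 15) && (s + i < dash); ++i) // bounded copy of the prefix
		tmp[i] = s[i];
	tmp[i] = 0;
	type = (unsigned int)strtoul(tmp, nullptr, 10); // stands in for Utils::strToUInt()
	rest = dash + 1;
	return true;
}

int main()
{
	unsigned int type = 0;
	const char *rest = nullptr;
	if (splitTypeAddress("3-1.2.3.4/9993", type, rest)) // example string only
		printf("type=%u rest=%s\n", type, rest);         // prints "type=3 rest=1.2.3.4/9993"
}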
|
||||
|
|
|
@ -46,7 +46,7 @@ class EphemeralKey
|
|||
public:
|
||||
enum Type
|
||||
{
|
||||
TYPE_NIL = 0,
|
||||
TYPE_NIL = 0,
|
||||
TYPE_C25519_P384 = ZT_CRYPTO_ALG_P384
|
||||
};
|
||||
|
||||
|
@ -61,23 +61,24 @@ public:
|
|||
/**
|
||||
* Create an uninitialized ephemeral key (must call generate())
|
||||
*/
|
||||
ZT_INLINE EphemeralKey() noexcept :
|
||||
ZT_INLINE EphemeralKey() noexcept:
|
||||
pub()
|
||||
{
|
||||
const_cast<uint8_t *>(pub)[0] = (uint8_t)TYPE_NIL;
|
||||
Utils::memoryLock(this,sizeof(EphemeralKey));
|
||||
const_cast<uint8_t *>(pub)[0] = (uint8_t) TYPE_NIL;
|
||||
Utils::memoryLock(this, sizeof(EphemeralKey));
|
||||
}
|
||||
|
||||
ZT_INLINE ~EphemeralKey() noexcept
|
||||
{
|
||||
Utils::burn(m_priv,sizeof(m_priv));
|
||||
Utils::memoryUnlock(this,sizeof(EphemeralKey));
|
||||
Utils::burn(m_priv, sizeof(m_priv));
|
||||
Utils::memoryUnlock(this, sizeof(EphemeralKey));
|
||||
}
|
||||
|
||||
/**
|
||||
* @return True if this ephemeral key has been initialized with generate()
|
||||
*/
|
||||
ZT_INLINE operator bool() const noexcept { return pub[0] != (uint8_t)TYPE_NIL; }
|
||||
ZT_INLINE operator bool() const noexcept
|
||||
{ return pub[0] != (uint8_t) TYPE_NIL; }
|
||||
|
||||
/**
|
||||
* Generate or re-generate key pair.
|
||||
|
@ -85,9 +86,9 @@ public:
|
|||
ZT_INLINE void generate() noexcept
|
||||
{
|
||||
uint8_t *const p = const_cast<uint8_t *>(pub);
|
||||
p[0] = (uint8_t)TYPE_C25519_P384;
|
||||
C25519::generateC25519(p + 1,m_priv);
|
||||
ECC384GenerateKey(p + 1 + ZT_C25519_ECDH_PUBLIC_KEY_SIZE,m_priv + ZT_C25519_ECDH_PRIVATE_KEY_SIZE);
|
||||
p[0] = (uint8_t) TYPE_C25519_P384;
|
||||
C25519::generateC25519(p + 1, m_priv);
|
||||
ECC384GenerateKey(p + 1 + ZT_C25519_ECDH_PUBLIC_KEY_SIZE, m_priv + ZT_C25519_ECDH_PRIVATE_KEY_SIZE);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -101,16 +102,16 @@ public:
|
|||
* @param key Key buffer to fill with symmetric key
|
||||
* @return True on success
|
||||
*/
|
||||
ZT_INLINE bool agree(const uint8_t identityKey[ZT_SYMMETRIC_KEY_SIZE],const uint8_t *otherPub,const unsigned int otherPubLength,uint8_t key[ZT_SYMMETRIC_KEY_SIZE]) const noexcept
|
||||
ZT_INLINE bool agree(const uint8_t identityKey[ZT_SYMMETRIC_KEY_SIZE], const uint8_t *otherPub, const unsigned int otherPubLength, uint8_t key[ZT_SYMMETRIC_KEY_SIZE]) const noexcept
|
||||
{
|
||||
if ((otherPubLength < ZT_EPHEMERALKEY_PUBLIC_SIZE)||(otherPub[0] != (uint8_t)TYPE_C25519_P384))
|
||||
if ((otherPubLength < ZT_EPHEMERALKEY_PUBLIC_SIZE) || (otherPub[0] != (uint8_t) TYPE_C25519_P384))
|
||||
return false;
|
||||
uint8_t tmp[ZT_C25519_ECDH_SHARED_SECRET_SIZE + ZT_ECC384_SHARED_SECRET_SIZE];
|
||||
C25519::agree(m_priv,otherPub + 1,tmp);
|
||||
if (!ECC384ECDH(otherPub + 1 + ZT_C25519_ECDH_PUBLIC_KEY_SIZE,m_priv + ZT_C25519_ECDH_PRIVATE_KEY_SIZE,tmp + ZT_C25519_ECDH_SHARED_SECRET_SIZE))
|
||||
C25519::agree(m_priv, otherPub + 1, tmp);
|
||||
if (!ECC384ECDH(otherPub + 1 + ZT_C25519_ECDH_PUBLIC_KEY_SIZE, m_priv + ZT_C25519_ECDH_PRIVATE_KEY_SIZE, tmp + ZT_C25519_ECDH_SHARED_SECRET_SIZE))
|
||||
return false;
|
||||
SHA384(key,tmp,ZT_C25519_ECDH_SHARED_SECRET_SIZE + ZT_ECC384_SHARED_SECRET_SIZE,identityKey,ZT_SYMMETRIC_KEY_SIZE);
|
||||
Utils::burn(tmp,ZT_C25519_ECDH_SHARED_SECRET_SIZE + ZT_ECC384_SHARED_SECRET_SIZE);
|
||||
SHA384(key, tmp, ZT_C25519_ECDH_SHARED_SECRET_SIZE + ZT_ECC384_SHARED_SECRET_SIZE, identityKey, ZT_SYMMETRIC_KEY_SIZE);
|
||||
Utils::burn(tmp, ZT_C25519_ECDH_SHARED_SECRET_SIZE + ZT_ECC384_SHARED_SECRET_SIZE);
|
||||
return true;
|
||||
}
|
||||
|
||||
|
@ -123,8 +124,8 @@ public:
|
|||
ZT_INLINE bool acknowledged(const uint8_t ackHash[ZT_SHA384_DIGEST_SIZE]) const noexcept
|
||||
{
|
||||
uint8_t h[ZT_SHA384_DIGEST_SIZE];
|
||||
SHA384(h,pub,ZT_EPHEMERALKEY_PUBLIC_SIZE);
|
||||
return Utils::secureEq(ackHash,h,ZT_SHA384_DIGEST_SIZE);
|
||||
SHA384(h, pub, ZT_EPHEMERALKEY_PUBLIC_SIZE);
|
||||
return Utils::secureEq(ackHash, h, ZT_SHA384_DIGEST_SIZE);
|
||||
}
|
||||
|
||||
private:
|
||||
|
|
|
@ -39,7 +39,7 @@ namespace ZeroTier {
|
|||
class Expect
|
||||
{
|
||||
public:
|
||||
ZT_INLINE Expect() {} // NOLINT(cppcoreguidelines-pro-type-member-init,hicpp-member-init,hicpp-use-equals-default,modernize-use-equals-default)
|
||||
ZT_INLINE Expect() {}
|
||||
|
||||
/**
|
||||
* Called by other code when something is sending a packet that could potentially receive an OK response
|
||||
|
|
|
@ -50,14 +50,16 @@ public:
|
|||
*
|
||||
* @param s Base32 string
|
||||
*/
|
||||
ZT_INLINE void toString(char s[ZT_FINGERPRINT_STRING_SIZE_MAX]) const noexcept
|
||||
ZT_INLINE char *toString(char s[ZT_FINGERPRINT_STRING_SIZE_MAX]) const noexcept
|
||||
{
|
||||
Address(this->address).toString(s);
|
||||
if (haveHash()) {
|
||||
s[ZT_ADDRESS_LENGTH_HEX] = '-';
|
||||
Utils::b32e(this->hash, ZT_FINGERPRINT_HASH_SIZE, s + (ZT_ADDRESS_LENGTH_HEX + 1), ZT_FINGERPRINT_STRING_SIZE_MAX - (ZT_ADDRESS_LENGTH_HEX + 1));
|
||||
}
|
||||
return s;
|
||||
}
|
||||
ZT_INLINE String toString() const { char tmp[ZT_FINGERPRINT_STRING_SIZE_MAX]; return String(toString(tmp)); }
|
||||
|
||||
/**
|
||||
* Set this fingerprint to a base32-encoded string
|
||||
|
|
|
@ -72,7 +72,7 @@ char *Locator::toString(char s[ZT_LOCATOR_STRING_SIZE_MAX]) const noexcept
|
|||
static_assert(ZT_LOCATOR_STRING_SIZE_MAX > ((((ZT_LOCATOR_MARSHAL_SIZE_MAX / 5) + 1) * 8) + ZT_ADDRESS_LENGTH_HEX + 1), "overflow");
|
||||
uint8_t bin[ZT_LOCATOR_MARSHAL_SIZE_MAX];
|
||||
Address(m_signer.address).toString(s);
|
||||
s[ZT_ADDRESS_LENGTH_HEX] = '-';
|
||||
s[ZT_ADDRESS_LENGTH_HEX] = '@';
|
||||
Utils::b32e(bin, marshal(bin, false), s + (ZT_ADDRESS_LENGTH_HEX + 1), ZT_LOCATOR_STRING_SIZE_MAX - (ZT_ADDRESS_LENGTH_HEX + 1));
|
||||
return s;
|
||||
}
|
||||
|
|
|
@ -115,7 +115,9 @@ public:
|
|||
* @return Pointer to buffer
|
||||
*/
|
||||
char *toString(char s[ZT_LOCATOR_STRING_SIZE_MAX]) const noexcept;
|
||||
ZT_INLINE String toString() const { char tmp[ZT_LOCATOR_STRING_SIZE_MAX]; return String(toString(tmp)); }
|
||||
|
||||
ZT_INLINE String toString() const
|
||||
{ char tmp[ZT_LOCATOR_STRING_SIZE_MAX]; return String(toString(tmp)); }
|
||||
|
||||
/**
|
||||
* Decode a string format locator
|
||||
|
|
node/MAC.hpp (133)

@@ -28,31 +28,34 @@ namespace ZeroTier {
|
|||
class MAC : public TriviallyCopyable
|
||||
{
|
||||
public:
|
||||
ZT_INLINE MAC() noexcept : m_mac(0ULL) {}
|
||||
|
||||
ZT_INLINE MAC(const uint8_t a,const uint8_t b,const uint8_t c,const uint8_t d,const uint8_t e,const uint8_t f) noexcept :
|
||||
m_mac((((uint64_t)a) << 40U) | (((uint64_t)b) << 32U) | (((uint64_t)c) << 24U) | (((uint64_t)d) << 16U) | (((uint64_t)e) << 8U) | ((uint64_t)f) )
|
||||
ZT_INLINE MAC() noexcept: m_mac(0ULL)
|
||||
{}
|
||||
|
||||
explicit ZT_INLINE MAC(const uint64_t m) noexcept :
|
||||
ZT_INLINE MAC(const uint8_t a, const uint8_t b, const uint8_t c, const uint8_t d, const uint8_t e, const uint8_t f) noexcept:
|
||||
m_mac((((uint64_t) a) << 40U) | (((uint64_t) b) << 32U) | (((uint64_t) c) << 24U) | (((uint64_t) d) << 16U) | (((uint64_t) e) << 8U) | ((uint64_t) f))
|
||||
{}
|
||||
|
||||
explicit ZT_INLINE MAC(const uint64_t m) noexcept:
|
||||
m_mac(m)
|
||||
{}
|
||||
|
||||
explicit ZT_INLINE MAC(const uint8_t b[6]) noexcept
|
||||
{ setTo(b); }
|
||||
|
||||
ZT_INLINE MAC(const Address &ztaddr,const uint64_t nwid) noexcept
|
||||
{ fromAddress(ztaddr,nwid); }
|
||||
ZT_INLINE MAC(const Address &ztaddr, const uint64_t nwid) noexcept
|
||||
{ fromAddress(ztaddr, nwid); }
|
||||
|
||||
/**
|
||||
* @return MAC in 64-bit integer
|
||||
*/
|
||||
ZT_INLINE uint64_t toInt() const noexcept { return m_mac; }
|
||||
ZT_INLINE uint64_t toInt() const noexcept
|
||||
{ return m_mac; }
|
||||
|
||||
/**
|
||||
* Set MAC to zero
|
||||
*/
|
||||
ZT_INLINE void zero() noexcept { m_mac = 0ULL; }
|
||||
ZT_INLINE void zero() noexcept
|
||||
{ m_mac = 0ULL; }
|
||||
|
||||
/**
|
||||
* @param bits Raw MAC in big-endian byte order
|
||||
|
@ -60,7 +63,7 @@ public:
|
|||
*/
|
||||
ZT_INLINE void setTo(const uint8_t b[6]) noexcept
|
||||
{
|
||||
m_mac = ((uint64_t)b[0] << 40U) | ((uint64_t)b[1] << 32U) | ((uint64_t)b[2] << 24U) | ((uint64_t)b[3] << 16U) | ((uint64_t)b[4] << 8U) | (uint64_t)b[5];
|
||||
m_mac = ((uint64_t) b[0] << 40U) | ((uint64_t) b[1] << 32U) | ((uint64_t) b[2] << 24U) | ((uint64_t) b[3] << 16U) | ((uint64_t) b[4] << 8U) | (uint64_t) b[5];
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -69,23 +72,25 @@ public:
|
|||
*/
|
||||
ZT_INLINE void copyTo(uint8_t b[6]) const noexcept
|
||||
{
|
||||
b[0] = (uint8_t)(m_mac >> 40U);
|
||||
b[1] = (uint8_t)(m_mac >> 32U);
|
||||
b[2] = (uint8_t)(m_mac >> 24U);
|
||||
b[3] = (uint8_t)(m_mac >> 16U);
|
||||
b[4] = (uint8_t)(m_mac >> 8U);
|
||||
b[5] = (uint8_t)m_mac;
|
||||
b[0] = (uint8_t) (m_mac >> 40U);
|
||||
b[1] = (uint8_t) (m_mac >> 32U);
|
||||
b[2] = (uint8_t) (m_mac >> 24U);
|
||||
b[3] = (uint8_t) (m_mac >> 16U);
|
||||
b[4] = (uint8_t) (m_mac >> 8U);
|
||||
b[5] = (uint8_t) m_mac;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return True if this is broadcast (all 0xff)
|
||||
*/
|
||||
ZT_INLINE bool isBroadcast() const noexcept { return m_mac; }
|
||||
ZT_INLINE bool isBroadcast() const noexcept
|
||||
{ return m_mac; }
|
||||
|
||||
/**
|
||||
* @return True if this is a multicast MAC
|
||||
*/
|
||||
ZT_INLINE bool isMulticast() const noexcept { return ((m_mac & 0x010000000000ULL) != 0ULL); }
|
||||
ZT_INLINE bool isMulticast() const noexcept
|
||||
{ return ((m_mac & 0x010000000000ULL) != 0ULL); }
|
||||
|
||||
/**
|
||||
* Set this MAC to a MAC derived from an address and a network ID
|
||||
|
@ -93,9 +98,9 @@ public:
|
|||
* @param ztaddr ZeroTier address
|
||||
* @param nwid 64-bit network ID
|
||||
*/
|
||||
ZT_INLINE void fromAddress(const Address &ztaddr,uint64_t nwid) noexcept
|
||||
ZT_INLINE void fromAddress(const Address &ztaddr, uint64_t nwid) noexcept
|
||||
{
|
||||
uint64_t m = ((uint64_t)firstOctetForNetwork(nwid)) << 40U;
|
||||
uint64_t m = ((uint64_t) firstOctetForNetwork(nwid)) << 40U;
|
||||
m |= ztaddr.toInt(); // a is 40 bits
|
||||
m ^= ((nwid >> 8U) & 0xffU) << 32U;
|
||||
m ^= ((nwid >> 16U) & 0xffU) << 24U;
|
||||
|
@ -129,7 +134,7 @@ public:
|
|||
*/
|
||||
static ZT_INLINE unsigned char firstOctetForNetwork(uint64_t nwid) noexcept
|
||||
{
|
||||
const uint8_t a = ((uint8_t)(nwid & 0xfeU) | 0x02U); // locally administered, not multicast, from LSB of network ID
|
||||
const uint8_t a = ((uint8_t) (nwid & 0xfeU) | 0x02U); // locally administered, not multicast, from LSB of network ID
|
||||
return ((a == 0x52) ? 0x32 : a); // blacklist 0x52 since it's used by KVM, libvirt, and other popular virtualization engines... seems de-facto standard on Linux
|
||||
}
|
||||
|
||||
|
@ -137,17 +142,23 @@ public:
|
|||
* @param i Value from 0 to 5 (inclusive)
|
||||
* @return Byte at said position (address interpreted in big-endian order)
|
||||
*/
|
||||
ZT_INLINE uint8_t operator[](unsigned int i) const noexcept { return (uint8_t)(m_mac >> (unsigned int)(40 - (i * 8))); }
|
||||
ZT_INLINE uint8_t operator[](unsigned int i) const noexcept
|
||||
{ return (uint8_t)(m_mac >> (unsigned int) (40 - (i * 8))); }
|
||||
|
||||
/**
|
||||
* @return 6, which is the number of bytes in a MAC, for container compliance
|
||||
*/
|
||||
ZT_INLINE unsigned int size() const noexcept { return 6; }
|
||||
ZT_INLINE unsigned int size() const noexcept
|
||||
{ return 6; }
|
||||
|
||||
ZT_INLINE unsigned long hashCode() const noexcept { return (unsigned long)Utils::hash64(m_mac); }
|
||||
ZT_INLINE unsigned long hashCode() const noexcept
|
||||
{ return (unsigned long) Utils::hash64(m_mac); }
|
||||
|
||||
ZT_INLINE operator bool() const noexcept { return (m_mac != 0ULL); }
|
||||
ZT_INLINE operator uint64_t() const noexcept { return m_mac; }
|
||||
ZT_INLINE operator bool() const noexcept
|
||||
{ return (m_mac != 0ULL); }
|
||||
|
||||
ZT_INLINE operator uint64_t() const noexcept
|
||||
{ return m_mac; }
|
||||
|
||||
/**
|
||||
* Convert this MAC to a standard format colon-separated hex string
|
||||
|
@ -174,10 +185,12 @@ public:
|
|||
buf[14] = ':';
|
||||
buf[15] = Utils::HEXCHARS[(m_mac >> 4U) & 0xfU];
|
||||
buf[16] = Utils::HEXCHARS[m_mac & 0xfU];
|
||||
buf[17] = (char)0;
|
||||
buf[17] = (char) 0;
|
||||
return buf;
|
||||
}
|
||||
ZT_INLINE String toString() const { char tmp[18]; return String(toString(tmp)); }
|
||||
|
||||
ZT_INLINE String toString() const
|
||||
{ char tmp[18]; return String(toString(tmp)); }
|
||||
|
||||
/**
|
||||
* Parse a MAC address in hex format with or without : separators and ignoring non-hex characters.
|
||||
|
@ -191,12 +204,12 @@ public:
|
|||
while (*s) {
|
||||
uint64_t c;
|
||||
const char hc = *s++;
|
||||
if ((hc >= 48)&&(hc <= 57))
|
||||
c = (uint64_t)hc - 48;
|
||||
else if ((hc >= 97)&&(hc <= 102))
|
||||
c = (uint64_t)hc - 87;
|
||||
else if ((hc >= 65)&&(hc <= 70))
|
||||
c = (uint64_t)hc - 55;
|
||||
if ((hc >= 48) && (hc <= 57))
|
||||
c = (uint64_t) hc - 48;
|
||||
else if ((hc >= 97) && (hc <= 102))
|
||||
c = (uint64_t) hc - 87;
|
||||
else if ((hc >= 65) && (hc <= 70))
|
||||
c = (uint64_t) hc - 55;
|
||||
else continue;
|
||||
m_mac = (m_mac << 4U) | c;
|
||||
}
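Aside: the reformatted parser above accumulates one nibble per hex digit and skips every other character, so ':' separators are optional. A standalone sketch of the same tolerant parse; names here are illustrative, not ZeroTier's.

// Accumulate one nibble per hex digit, ignore ':' or any other separator.
#include <cstdint>
#include <cstdio>

static uint64_t parseMac(const char *s)
{
	uint64_t mac = 0;
	while (*s) {
		uint64_t c;
		const char hc = *s++;
		if ((hc >= '0') && (hc <= '9'))
			c = (uint64_t)(hc - '0');
		else if ((hc >= 'a') && (hc <= 'f'))
			c = (uint64_t)(hc - 'a') + 10;
		else if ((hc >= 'A') && (hc <= 'F'))
			c = (uint64_t)(hc - 'A') + 10;
		else continue; // skip separators, as the loop above does
		mac = (mac << 4U) | c;
	}
	return mac;
}

int main()
{
	printf("%012llx\n", (unsigned long long)parseMac("52:54:00:ab:cd:ef")); // prints 525400abcdef
}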
|
||||
|
@ -204,21 +217,47 @@ public:
|
|||
}
|
||||
}
|
||||
|
||||
ZT_INLINE MAC &operator=(const uint64_t m) noexcept { m_mac = m; return *this; }
|
||||
ZT_INLINE MAC &operator=(const uint64_t m) noexcept
|
||||
{
|
||||
m_mac = m;
|
||||
return *this;
|
||||
}
|
||||
|
||||
ZT_INLINE bool operator==(const MAC &m) const noexcept { return (m_mac == m.m_mac); }
|
||||
ZT_INLINE bool operator!=(const MAC &m) const noexcept { return (m_mac != m.m_mac); }
|
||||
ZT_INLINE bool operator<(const MAC &m) const noexcept { return (m_mac < m.m_mac); }
|
||||
ZT_INLINE bool operator<=(const MAC &m) const noexcept { return (m_mac <= m.m_mac); }
|
||||
ZT_INLINE bool operator>(const MAC &m) const noexcept { return (m_mac > m.m_mac); }
|
||||
ZT_INLINE bool operator>=(const MAC &m) const noexcept { return (m_mac >= m.m_mac); }
|
||||
ZT_INLINE bool operator==(const MAC &m) const noexcept
|
||||
{ return (m_mac == m.m_mac); }
|
||||
|
||||
ZT_INLINE bool operator==(const uint64_t m) const noexcept { return (m_mac == m); }
|
||||
ZT_INLINE bool operator!=(const uint64_t m) const noexcept { return (m_mac != m); }
|
||||
ZT_INLINE bool operator<(const uint64_t m) const noexcept { return (m_mac < m); }
|
||||
ZT_INLINE bool operator<=(const uint64_t m) const noexcept { return (m_mac <= m); }
|
||||
ZT_INLINE bool operator>(const uint64_t m) const noexcept { return (m_mac > m); }
|
||||
ZT_INLINE bool operator>=(const uint64_t m) const noexcept { return (m_mac >= m); }
|
||||
ZT_INLINE bool operator!=(const MAC &m) const noexcept
|
||||
{ return (m_mac != m.m_mac); }
|
||||
|
||||
ZT_INLINE bool operator<(const MAC &m) const noexcept
|
||||
{ return (m_mac < m.m_mac); }
|
||||
|
||||
ZT_INLINE bool operator<=(const MAC &m) const noexcept
|
||||
{ return (m_mac <= m.m_mac); }
|
||||
|
||||
ZT_INLINE bool operator>(const MAC &m) const noexcept
|
||||
{ return (m_mac > m.m_mac); }
|
||||
|
||||
ZT_INLINE bool operator>=(const MAC &m) const noexcept
|
||||
{ return (m_mac >= m.m_mac); }
|
||||
|
||||
ZT_INLINE bool operator==(const uint64_t m) const noexcept
|
||||
{ return (m_mac == m); }
|
||||
|
||||
ZT_INLINE bool operator!=(const uint64_t m) const noexcept
|
||||
{ return (m_mac != m); }
|
||||
|
||||
ZT_INLINE bool operator<(const uint64_t m) const noexcept
|
||||
{ return (m_mac < m); }
|
||||
|
||||
ZT_INLINE bool operator<=(const uint64_t m) const noexcept
|
||||
{ return (m_mac <= m); }
|
||||
|
||||
ZT_INLINE bool operator>(const uint64_t m) const noexcept
|
||||
{ return (m_mac > m); }
|
||||
|
||||
ZT_INLINE bool operator>=(const uint64_t m) const noexcept
|
||||
{ return (m_mac >= m); }
|
||||
|
||||
private:
|
||||
uint64_t m_mac;
|
||||
|
|
node/Node.cpp (108)

@@ -79,6 +79,8 @@ Node::Node(void *uPtr, void *tPtr, const struct ZT_Node_Callbacks *callbacks, in
|
|||
m_now(now),
|
||||
m_online(false)
|
||||
{
|
||||
ZT_SPEW("starting up...");
|
||||
|
||||
// Load this node's identity.
|
||||
uint64_t idtmp[2];
|
||||
idtmp[0] = 0;
|
||||
|
@ -87,10 +89,11 @@ Node::Node(void *uPtr, void *tPtr, const struct ZT_Node_Callbacks *callbacks, in
|
|||
bool haveIdentity = false;
|
||||
if (!data.empty()) {
|
||||
data.push_back(0); // zero-terminate string
|
||||
if (RR->identity.fromString((const char *) data.data())) {
|
||||
if (RR->identity.fromString((const char *)data.data())) {
|
||||
RR->identity.toString(false, RR->publicIdentityStr);
|
||||
RR->identity.toString(true, RR->secretIdentityStr);
|
||||
haveIdentity = true;
|
||||
ZT_SPEW("loaded identity %s", RR->identity.toString().c_str());
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -101,14 +104,15 @@ Node::Node(void *uPtr, void *tPtr, const struct ZT_Node_Callbacks *callbacks, in
|
|||
RR->identity.toString(true, RR->secretIdentityStr);
|
||||
idtmp[0] = RR->identity.address();
|
||||
idtmp[1] = 0;
|
||||
stateObjectPut(tPtr, ZT_STATE_OBJECT_IDENTITY_SECRET, idtmp, RR->secretIdentityStr, (unsigned int) strlen(RR->secretIdentityStr));
|
||||
stateObjectPut(tPtr, ZT_STATE_OBJECT_IDENTITY_PUBLIC, idtmp, RR->publicIdentityStr, (unsigned int) strlen(RR->publicIdentityStr));
|
||||
stateObjectPut(tPtr, ZT_STATE_OBJECT_IDENTITY_SECRET, idtmp, RR->secretIdentityStr, (unsigned int)strlen(RR->secretIdentityStr));
|
||||
stateObjectPut(tPtr, ZT_STATE_OBJECT_IDENTITY_PUBLIC, idtmp, RR->publicIdentityStr, (unsigned int)strlen(RR->publicIdentityStr));
|
||||
ZT_SPEW("no pre-existing identity found, created %s", RR->identity.toString().c_str());
|
||||
} else {
|
||||
idtmp[0] = RR->identity.address();
|
||||
idtmp[1] = 0;
|
||||
data = stateObjectGet(tPtr, ZT_STATE_OBJECT_IDENTITY_PUBLIC, idtmp);
|
||||
if ((data.empty()) || (memcmp(data.data(), RR->publicIdentityStr, strlen(RR->publicIdentityStr)) != 0))
|
||||
stateObjectPut(tPtr, ZT_STATE_OBJECT_IDENTITY_PUBLIC, idtmp, RR->publicIdentityStr, (unsigned int) strlen(RR->publicIdentityStr));
|
||||
stateObjectPut(tPtr, ZT_STATE_OBJECT_IDENTITY_PUBLIC, idtmp, RR->publicIdentityStr, (unsigned int)strlen(RR->publicIdentityStr));
|
||||
}
|
||||
|
||||
// 2X hash our identity private key(s) to obtain a symmetric key for encrypting
|
||||
|
@ -121,9 +125,9 @@ Node::Node(void *uPtr, void *tPtr, const struct ZT_Node_Callbacks *callbacks, in
|
|||
Utils::burn(tmph, ZT_SHA384_DIGEST_SIZE);
|
||||
|
||||
// Generate a random sort order for privileged ports for use in NAT-t algorithms.
|
||||
for(unsigned int i=0;i<1023;++i)
|
||||
for (unsigned int i = 0;i < 1023;++i)
|
||||
RR->randomPrivilegedPortOrder[i] = (uint16_t)(i + 1);
|
||||
for(unsigned int i=0;i<512;++i) {
|
||||
for (unsigned int i = 0;i < 512;++i) {
|
||||
uint64_t rn = Utils::random();
|
||||
const unsigned int a = (unsigned int)rn % 1023;
|
||||
const unsigned int b = (unsigned int)(rn >> 32U) % 1023;
|
||||
|
@ -136,13 +140,16 @@ Node::Node(void *uPtr, void *tPtr, const struct ZT_Node_Callbacks *callbacks, in

// This constructs all the components of the ZeroTier core within a single contiguous memory container,
// which reduces memory fragmentation and may improve cache locality.
ZT_SPEW("initializing subsystem objects...");
m_objects = new _NodeObjects(RR, tPtr);
ZT_SPEW("node initialized!");

postEvent(tPtr, ZT_EVENT_UP);
}

Node::~Node()
{
ZT_SPEW("node destructor run");
m_networks_l.lock();
m_networks_l.unlock();
m_networks.clear();

@ -150,7 +157,7 @@ Node::~Node()
m_networks_l.unlock();

if (m_objects)
delete (_NodeObjects *) m_objects;
delete (_NodeObjects *)m_objects;

// Let go of cached Buf objects. If other nodes happen to be running in this
// same process space new Bufs will be allocated as needed, but this is almost

@ -161,6 +168,7 @@ Node::~Node()

void Node::shutdown(void *tPtr)
{
ZT_SPEW("explicit shutdown() called");
postEvent(tPtr, ZT_EVENT_DOWN);
if (RR->topology)
RR->topology->saveAll(tPtr);

@ -202,25 +210,6 @@ ZT_ResultCode Node::processVirtualNetworkFrame(
}
}

struct _processBackgroundTasks_eachPeer
{
const int64_t now;
void *const tPtr;
bool online;

ZT_INLINE _processBackgroundTasks_eachPeer(const int64_t now_, void *const tPtr_) noexcept :
now(now_),
tPtr(tPtr_),
online(false)
{}

ZT_INLINE void operator()(const SharedPtr<Peer> &peer, const bool isRoot) noexcept
{
peer->pulse(tPtr, now, isRoot);
this->online |= (isRoot && peer->directlyConnected(now));
}
};

ZT_ResultCode Node::processBackgroundTasks(void *tPtr, int64_t now, volatile int64_t *nextBackgroundTaskDeadline)
{
m_now = now;

@ -230,14 +219,22 @@ ZT_ResultCode Node::processBackgroundTasks(void *tPtr, int64_t now, volatile int
// Call peer pulse() method of all peers every ZT_PEER_PULSE_INTERVAL.
if ((now - m_lastPeerPulse) >= ZT_PEER_PULSE_INTERVAL) {
m_lastPeerPulse = now;
ZT_SPEW("running pulse() on each peer...");
try {
_processBackgroundTasks_eachPeer pf(now, tPtr);
RR->topology->eachPeerWithRoot<_processBackgroundTasks_eachPeer &>(pf);
Vector< SharedPtr<Peer> > allPeers, rootPeers;
RR->topology->getAllPeers(allPeers, rootPeers);

if (m_online.exchange(pf.online) != pf.online)
postEvent(tPtr, pf.online ? ZT_EVENT_ONLINE : ZT_EVENT_OFFLINE);
bool online = false;
for(Vector< SharedPtr<Peer> >::iterator p(allPeers.begin());p!=allPeers.end();++p) {
const bool isRoot = std::find(rootPeers.begin(), rootPeers.end(), *p) != rootPeers.end();
(*p)->pulse(tPtr, now, isRoot);
online |= ((isRoot || rootPeers.empty()) && (*p)->directlyConnected(now));
}

RR->topology->rankRoots();

if (m_online.exchange(online) != online)
postEvent(tPtr, online ? ZT_EVENT_ONLINE : ZT_EVENT_OFFLINE);
} catch (...) {
return ZT_RESULT_FATAL_ERROR_INTERNAL;
}
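
The rewrite above trades the functor-based eachPeerWithRoot() (which ran caller code while holding the Topology peer-map lock) for a snapshot: getAllPeers() copies the peer and root lists out under the lock, and pulse() then runs on the copies with no topology lock held. A rough sketch of that snapshot-then-iterate pattern in isolation, using simplified stand-in types rather than the real Topology/Peer classes:

    #include <algorithm>
    #include <memory>
    #include <mutex>
    #include <vector>

    struct FakePeer {
        void pulse(long long /*now*/, bool /*isRoot*/) {}
        bool directlyConnected(long long /*now*/) const { return true; }
    };

    class FakeTopology {
    public:
        // Copy consistent snapshots out under the lock; callers iterate lock-free.
        void getAllPeers(std::vector<std::shared_ptr<FakePeer>> &all,
                         std::vector<std::shared_ptr<FakePeer>> &roots) const
        {
            std::lock_guard<std::mutex> l(m_lock);
            all = m_peers;
            roots = m_roots;
        }
    private:
        mutable std::mutex m_lock;
        std::vector<std::shared_ptr<FakePeer>> m_peers, m_roots;
    };

    // Caller side: pulse every peer outside the topology lock and derive "online".
    static bool pulseAll(FakeTopology &topo, long long now)
    {
        std::vector<std::shared_ptr<FakePeer>> all, roots;
        topo.getAllPeers(all, roots);
        bool online = false;
        for (const auto &p : all) {
            const bool isRoot = std::find(roots.begin(), roots.end(), p) != roots.end();
            p->pulse(now, isRoot);
            // Online if directly connected to a root, or to anyone when no roots exist.
            online |= (isRoot || roots.empty()) && p->directlyConnected(now);
        }
        return online;
    }
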
@ -246,6 +243,7 @@ ZT_ResultCode Node::processBackgroundTasks(void *tPtr, int64_t now, volatile int
// Perform network housekeeping and possibly request new certs and configs every ZT_NETWORK_HOUSEKEEPING_PERIOD.
if ((now - m_lastNetworkHousekeepingRun) >= ZT_NETWORK_HOUSEKEEPING_PERIOD) {
m_lastHousekeepingRun = now;
ZT_SPEW("running networking housekeeping...");
RWMutex::RLock l(m_networks_l);
for (Map<uint64_t, SharedPtr<Network> >::const_iterator i(m_networks.begin());i != m_networks.end();++i) {
i->second->doPeriodicTasks(tPtr, now);

@ -255,6 +253,7 @@ ZT_ResultCode Node::processBackgroundTasks(void *tPtr, int64_t now, volatile int
// Clean up other stuff every ZT_HOUSEKEEPING_PERIOD.
if ((now - m_lastHousekeepingRun) >= ZT_HOUSEKEEPING_PERIOD) {
m_lastHousekeepingRun = now;
ZT_SPEW("running housekeeping...");

// Clean up any old local controller auth memoizations. This is an
// optimization for network controllers to know whether to accept

@ -282,8 +281,12 @@ ZT_ResultCode Node::processBackgroundTasks(void *tPtr, int64_t now, volatile int
ZT_ResultCode Node::join(uint64_t nwid, const ZT_Fingerprint *controllerFingerprint, void *uptr, void *tptr)
{
Fingerprint fp;
if (controllerFingerprint)
if (controllerFingerprint) {
fp = *controllerFingerprint;
ZT_SPEW("joining network %.16llx with fingerprint %s",nwid,fp.toString().c_str());
} else {
ZT_SPEW("joining network %.16llx",nwid);
}

RWMutex::Lock l(m_networks_l);
SharedPtr<Network> &nw = m_networks[nwid];

@ -296,6 +299,7 @@ ZT_ResultCode Node::join(uint64_t nwid, const ZT_Fingerprint *controllerFingerpr

ZT_ResultCode Node::leave(uint64_t nwid, void **uptr, void *tptr)
{
ZT_SPEW("leaving network %.16llx",nwid);
ZT_VirtualNetworkConfig ctmp;

m_networks_l.lock();

@ -327,25 +331,27 @@ ZT_ResultCode Node::leave(uint64_t nwid, void **uptr, void *tptr)

ZT_ResultCode Node::multicastSubscribe(void *tPtr, uint64_t nwid, uint64_t multicastGroup, unsigned long multicastAdi)
{
ZT_SPEW("multicast subscribe to %s:%lu",MAC(multicastGroup).toString().c_str(),multicastAdi);
const SharedPtr<Network> nw(this->network(nwid));
if (nw) {
nw->multicastSubscribe(tPtr, MulticastGroup(MAC(multicastGroup), (uint32_t) (multicastAdi & 0xffffffff)));
nw->multicastSubscribe(tPtr, MulticastGroup(MAC(multicastGroup), (uint32_t)(multicastAdi & 0xffffffff)));
return ZT_RESULT_OK;
} else return ZT_RESULT_ERROR_NETWORK_NOT_FOUND;
}

ZT_ResultCode Node::multicastUnsubscribe(uint64_t nwid, uint64_t multicastGroup, unsigned long multicastAdi)
{
ZT_SPEW("multicast unsubscribe from %s:%lu",MAC(multicastGroup).toString().c_str(),multicastAdi);
const SharedPtr<Network> nw(this->network(nwid));
if (nw) {
nw->multicastUnsubscribe(MulticastGroup(MAC(multicastGroup), (uint32_t) (multicastAdi & 0xffffffff)));
nw->multicastUnsubscribe(MulticastGroup(MAC(multicastGroup), (uint32_t)(multicastAdi & 0xffffffff)));
return ZT_RESULT_OK;
} else return ZT_RESULT_ERROR_NETWORK_NOT_FOUND;
}

ZT_ResultCode Node::addRoot(void *tPtr, const ZT_Identity *id, const ZT_Locator *loc)
{
if ((!id)||(!loc))
if ((!id) || (!loc))
return ZT_RESULT_ERROR_BAD_PARAMETER;
const SharedPtr<const Locator> locator(new Locator(*reinterpret_cast<const Locator *>(loc)));
// SECURITY: locator credential validation happens in Topology.cpp in addRoot().

@ -384,7 +390,7 @@ ZT_PeerList *Node::peers() const
((sizeof(ZT_Path) * ZT_MAX_PEER_NETWORK_PATHS) * peers.size()) +
(sizeof(Identity) * peers.size()) +
((sizeof(ZT_Endpoint) * ZT_LOCATOR_MAX_ENDPOINTS) * peers.size());
char *buf = (char *) malloc(bufSize);
char *buf = (char *)malloc(bufSize);
if (!buf)
return nullptr;
Utils::zero(buf, bufSize);

@ -410,9 +416,9 @@ ZT_PeerList *Node::peers() const
p->fingerprint.address = p->address;
Utils::copy<ZT_FINGERPRINT_HASH_SIZE>(p->fingerprint.hash, (*pi)->identity().fingerprint().hash);
if ((*pi)->remoteVersionKnown()) {
p->versionMajor = (int) (*pi)->remoteVersionMajor();
p->versionMinor = (int) (*pi)->remoteVersionMinor();
p->versionRev = (int) (*pi)->remoteVersionRevision();
p->versionMajor = (int)(*pi)->remoteVersionMajor();
p->versionMinor = (int)(*pi)->remoteVersionMinor();
p->versionRev = (int)(*pi)->remoteVersionRevision();
} else {
p->versionMajor = -1;
p->versionMinor = -1;

@ -426,7 +432,7 @@ ZT_PeerList *Node::peers() const

Vector<SharedPtr<Path> > paths;
(*pi)->getAllPaths(paths);
p->pathCount = (unsigned int) paths.size();
p->pathCount = (unsigned int)paths.size();
p->paths = peerPath;
for (Vector<SharedPtr<Path> >::iterator path(paths.begin());path != paths.end();++path) {
ZT_Path *const pp = peerPath++;

@ -443,7 +449,7 @@ ZT_PeerList *Node::peers() const
p->locatorTimestamp = loc->timestamp();
p->locatorEndpointCount = (unsigned int)loc->endpoints().size();
p->locatorEndpoints = locatorEndpoint;
for(Vector<Endpoint>::const_iterator ep(loc->endpoints().begin());ep!=loc->endpoints().end();++ep)
for (Vector<Endpoint>::const_iterator ep(loc->endpoints().begin());ep != loc->endpoints().end();++ep)
*(locatorEndpoint++) = *ep;
}

@ -457,7 +463,7 @@ ZT_VirtualNetworkConfig *Node::networkConfig(uint64_t nwid) const
{
SharedPtr<Network> nw(network(nwid));
if (nw) {
ZT_VirtualNetworkConfig *const nc = (ZT_VirtualNetworkConfig *) ::malloc(sizeof(ZT_VirtualNetworkConfig));
ZT_VirtualNetworkConfig *const nc = (ZT_VirtualNetworkConfig *)::malloc(sizeof(ZT_VirtualNetworkConfig));
nw->externalConfig(nc);
return nc;
}

@ -468,11 +474,11 @@ ZT_VirtualNetworkList *Node::networks() const
{
RWMutex::RLock l(m_networks_l);

char *const buf = (char *) ::malloc(sizeof(ZT_VirtualNetworkList) + (sizeof(ZT_VirtualNetworkConfig) * m_networks.size()));
char *const buf = (char *)::malloc(sizeof(ZT_VirtualNetworkList) + (sizeof(ZT_VirtualNetworkConfig) * m_networks.size()));
if (!buf)
return nullptr;
ZT_VirtualNetworkList *nl = (ZT_VirtualNetworkList *) buf; // NOLINT(modernize-use-auto,hicpp-use-auto)
nl->networks = (ZT_VirtualNetworkConfig *) (buf + sizeof(ZT_VirtualNetworkList));
ZT_VirtualNetworkList *nl = (ZT_VirtualNetworkList *)buf; // NOLINT(modernize-use-auto,hicpp-use-auto)
nl->networks = (ZT_VirtualNetworkConfig *)(buf + sizeof(ZT_VirtualNetworkList));

nl->networkCount = 0;
for (Map<uint64_t, SharedPtr<Network> >::const_iterator i(m_networks.begin());i != m_networks.end();++i) // NOLINT(modernize-use-auto,modernize-loop-convert,hicpp-use-auto)
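
networks() above packs the ZT_VirtualNetworkList header and its array of ZT_VirtualNetworkConfig entries into one malloc'd block, so the caller releases everything with a single free. A generic sketch of that header-plus-array layout using made-up types, not the ZeroTier structs:

    #include <cstdlib>
    #include <cstring>

    struct Item { int value; };
    struct ItemList { unsigned long count; Item *items; };

    // One contiguous allocation: the list header, immediately followed by its
    // array. free(list) releases both at once.
    static ItemList *newItemList(unsigned long n)
    {
        char *buf = (char *)std::malloc(sizeof(ItemList) + (sizeof(Item) * n));
        if (!buf)
            return nullptr;
        std::memset(buf, 0, sizeof(ItemList) + (sizeof(Item) * n));
        ItemList *list = (ItemList *)buf;
        list->items = (Item *)(buf + sizeof(ItemList)); // array starts right after the header
        list->count = n;
        return list;
    }
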
@ -572,7 +578,7 @@ bool Node::shouldUsePathForZeroTierTraffic(void *tPtr, const Identity &id, const
m_uPtr,
tPtr,
id.address().toInt(),
(const ZT_Identity *) &id,
(const ZT_Identity *)&id,
localSocket,
reinterpret_cast<const struct sockaddr_storage *>(&remoteAddress)) != 0);
}

@ -617,7 +623,7 @@ void Node::ncSendConfig(uint64_t nwid, uint64_t requestPacketId, const Address &
SharedPtr<Network> n(network(nwid));
if (!n)
return;
n->setConfiguration((void *) 0, nc, true);
n->setConfiguration((void *)0, nc, true);
} else {
Dictionary dconf;
if (nc.toDictionary(dconf)) {

@ -763,7 +769,7 @@ void ZT_freeQueryResult(void *qr)

enum ZT_ResultCode ZT_Node_new(ZT_Node **node, void *uptr, void *tptr, const struct ZT_Node_Callbacks *callbacks, int64_t now)
{
*node = (ZT_Node *) 0;
*node = (ZT_Node *)0;
try {
*node = reinterpret_cast<ZT_Node *>(new ZeroTier::Node(uptr, tptr, callbacks, now));
return ZT_RESULT_OK;

@ -913,7 +919,7 @@ uint64_t ZT_Node_address(ZT_Node *node)

const ZT_Identity *ZT_Node_identity(ZT_Node *node)
{
return (const ZT_Identity *) (&(reinterpret_cast<ZeroTier::Node *>(node)->identity()));
return (const ZT_Identity *)(&(reinterpret_cast<ZeroTier::Node *>(node)->identity()));
}

void ZT_Node_status(ZT_Node *node, ZT_NodeStatus *status)

@ -928,7 +934,7 @@ ZT_PeerList *ZT_Node_peers(ZT_Node *node)
try {
return reinterpret_cast<ZeroTier::Node *>(node)->peers();
} catch (...) {
return (ZT_PeerList *) 0;
return (ZT_PeerList *)0;
}
}

@ -937,7 +943,7 @@ ZT_VirtualNetworkConfig *ZT_Node_networkConfig(ZT_Node *node, uint64_t nwid)
try {
return reinterpret_cast<ZeroTier::Node *>(node)->networkConfig(nwid);
} catch (...) {
return (ZT_VirtualNetworkConfig *) 0;
return (ZT_VirtualNetworkConfig *)0;
}
}

@ -946,7 +952,7 @@ ZT_VirtualNetworkList *ZT_Node_networks(ZT_Node *node)
try {
return reinterpret_cast<ZeroTier::Node *>(node)->networks();
} catch (...) {
return (ZT_VirtualNetworkList *) 0;
return (ZT_VirtualNetworkList *)0;
}
}

@ -202,7 +202,7 @@ public:
* @param nwid Network ID
* @return Network associated with ID
*/
ZT_INLINE SharedPtr<Network> network(uint64_t nwid) const noexcept
ZT_INLINE SharedPtr<Network> network(const uint64_t nwid) const noexcept
{
RWMutex::RLock l(m_networks_l);
const SharedPtr<Network> *const n = m_networks.get(nwid);

@ -274,7 +274,7 @@ public:
ZT_INLINE void stateObjectPut(void *const tPtr, ZT_StateObjectType type, const uint64_t id[2], const void *const data, const unsigned int len) noexcept
{
if (m_cb.statePutFunction)
m_cb.statePutFunction(reinterpret_cast<ZT_Node *>(this), m_uPtr, tPtr, type, id, data, (int) len);
m_cb.statePutFunction(reinterpret_cast<ZT_Node *>(this), m_uPtr, tPtr, type, id, data, (int)len);
}

/**

@ -366,7 +366,7 @@ private:
{}

ZT_INLINE unsigned long hashCode() const noexcept
{ return (unsigned long) (nwid + address); }
{ return (unsigned long)(nwid + address); }

ZT_INLINE bool operator==(const p_LocalControllerAuth &a) const noexcept
{ return ((a.nwid == nwid) && (a.address == address)); }
@ -199,9 +199,10 @@ typedef unsigned uint128_t __attribute__((mode(TI)));
#endif

// Macro to print very verbose tracing information to standard error.
#define ZT_VA_ARGS(...) , ##__VA_ARGS__
#define ZT_DEBUG_SPEW
#ifdef ZT_DEBUG_SPEW
#define ZT_SPEW(f,...) fprintf(stderr,"%s:%d(%s): " f ZT_EOL_S,__FILE__,__LINE__,__FUNCTION__,__VA_ARGS__)
#define ZT_SPEW(f,...) fprintf(stderr,"%s:%d(%s): " f ZT_EOL_S,__FILE__,__LINE__,__FUNCTION__ ZT_VA_ARGS(__VA_ARGS__))
#else
#define ZT_SPEW(f,...)
#endif
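
The ZT_SPEW change above is the classic empty-variadic-macro fix: ", ##__VA_ARGS__" (a GCC/Clang preprocessor extension) swallows the leading comma when the macro is invoked with only a format string, so calls such as ZT_SPEW("node initialized!") compile. A tiny self-contained check of the same trick with an illustrative LOG macro, not the ZeroTier one:

    #include <cstdio>

    // ", ##__VA_ARGS__" drops the trailing comma when no extra arguments are given.
    #define LOG_VA_ARGS(...) , ##__VA_ARGS__
    #define LOG(f, ...) std::fprintf(stderr, "%s:%d: " f "\n", __FILE__, __LINE__ LOG_VA_ARGS(__VA_ARGS__))

    int main()
    {
        LOG("no extra arguments");   // would not compile with a bare ",__VA_ARGS__"
        LOG("value is %d", 42);      // normal variadic use is unchanged
        return 0;
    }
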
116
node/Peer.cpp

@ -131,7 +131,7 @@ void Peer::received(
RR->t->learnedNewPath(tPtr, 0x582fabdd, packetId, m_id, path->address(), old);
} else {
path->sent(now, hello(tPtr, path->localSocket(), path->address(), now));
RR->t->tryingNewPath(tPtr, 0xb7747ddd, m_id, path->address(), path->address(), packetId, (uint8_t) verb, m_id);
RR->t->tryingNewPath(tPtr, 0xb7747ddd, m_id, path->address(), path->address(), packetId, (uint8_t)verb, m_id);
}
}
}

@ -170,7 +170,7 @@ unsigned int Peer::hello(void *tPtr, int64_t localSocket, const InetAddress &atA
outp.wI8(ii, ZEROTIER_VERSION_MAJOR);
outp.wI8(ii, ZEROTIER_VERSION_MINOR);
outp.wI16(ii, ZEROTIER_VERSION_REVISION);
outp.wI64(ii, (uint64_t) now);
outp.wI64(ii, (uint64_t)now);
outp.wO(ii, RR->identity);
outp.wO(ii, atAddress);

@ -188,8 +188,8 @@ unsigned int Peer::hello(void *tPtr, int64_t localSocket, const InetAddress &atA
const int cryptSectionStart = ii;
FCV<uint8_t, 4096> md;
Dictionary::append(md, ZT_PROTO_HELLO_NODE_META_INSTANCE_ID, RR->instanceId);
outp.wI16(ii, (uint16_t) md.size());
outp.wB(ii, md.data(), (unsigned int) md.size());
outp.wI16(ii, (uint16_t)md.size());
outp.wB(ii, md.data(), (unsigned int)md.size());

if (unlikely((ii + ZT_HMACSHA384_LEN) > ZT_BUF_SIZE)) // sanity check: should be impossible
return 0;

@ -213,9 +213,7 @@ unsigned int Peer::hello(void *tPtr, int64_t localSocket, const InetAddress &atA
p1305.finish(polyMac);
Utils::storeAsIsEndian<uint64_t>(outp.unsafeData + ZT_PROTO_PACKET_MAC_INDEX, polyMac[0]);

if (likely(RR->node->putPacket(tPtr, localSocket, atAddress, outp.unsafeData, ii)))
return ii;
return 0;
return (likely(RR->node->putPacket(tPtr, localSocket, atAddress, outp.unsafeData, ii))) ? ii : 0;
}

void Peer::pulse(void *const tPtr, const int64_t now, const bool isRoot)
@ -263,47 +261,83 @@ void Peer::pulse(void *const tPtr, const int64_t now, const bool isRoot)
// Attempt up to ZT_NAT_T_MAX_QUEUED_ATTEMPTS_PER_PULSE queued addresses.

unsigned int attempts = 0;
do {
for(;;) {
p_TryQueueItem &qi = m_tryQueue.front();

if (qi.target.isInetAddr()) {
// Skip entry if it overlaps with any currently active IP.
for (unsigned int i = 0;i < m_alivePathCount;++i) {
if (m_paths[i]->address().ipsEqual(qi.target.ip()))
goto next_tryQueue_item;
goto discard_queue_item;
}
}

if (qi.target.type == ZT_ENDPOINT_TYPE_IP_UDP) {
++attempts;
if (qi.privilegedPortTrialIteration < 0) {
if (qi.iteration < 0) {

// If iteration is less than zero, try to contact the original address.
// It may be set to a larger negative value to try multiple times such
// as e.g. -3 to try 3 times.
sent(now, m_sendProbe(tPtr, -1, qi.target.ip(), nullptr, 0, now));
if ((qi.target.ip().isV4()) && (qi.target.ip().port() < 1024)) {
qi.privilegedPortTrialIteration = 0;
if (m_tryQueue.size() > 1)
m_tryQueue.splice(m_tryQueue.end(),m_tryQueue,m_tryQueue.begin());
continue;
} // else goto next_tryQueue_item;
} else if (qi.privilegedPortTrialIteration < 1023) {
uint16_t ports[ZT_NAT_T_MAX_QUEUED_ATTEMPTS_PER_PULSE];
unsigned int pn = 0;
while ((pn < ZT_NAT_T_MAX_QUEUED_ATTEMPTS_PER_PULSE) && (qi.privilegedPortTrialIteration < 1023)) {
const uint16_t p = RR->randomPrivilegedPortOrder[qi.privilegedPortTrialIteration++];
if ((unsigned int)p != qi.target.ip().port())
ports[pn++] = p;
++qi.iteration;
goto requeue_item;

} else if (qi.target.ip().isV4() && (m_alivePathCount == 0)) {
// When iteration reaches zero the queue item is dropped unless it's
// IPv4 and we have no direct paths. In that case some heavier NAT-t
// strategies are attempted.

if (qi.target.ip().port() < 1024) {

// If the source port is privileged, we actually scan every possible
// privileged port in random order slowly over multiple iterations
// of pulse(). This is done in batches of ZT_NAT_T_PORT_SCAN_MAX.
uint16_t ports[ZT_NAT_T_PORT_SCAN_MAX];
unsigned int pn = 0;
while ((pn < ZT_NAT_T_PORT_SCAN_MAX) && (qi.iteration < 1023)) {
const uint16_t p = RR->randomPrivilegedPortOrder[qi.iteration++];
if ((unsigned int)p != qi.target.ip().port())
ports[pn++] = p;
}
if (pn > 0)
sent(now, m_sendProbe(tPtr, -1, qi.target.ip(), ports, pn, now));
if (qi.iteration < 1023)
goto requeue_item;

} else {

// For un-privileged ports we'll try ZT_NAT_T_PORT_SCAN_MAX ports
// beyond the one we were sent to catch some sequentially assigning
// symmetric NATs.
InetAddress tmp(qi.target.ip());
unsigned int p = tmp.port() + 1 + (unsigned int)qi.iteration++;
if (p > 65535)
p -= 64512; // wrap back to 1024
tmp.setPort(p);
sent(now, m_sendProbe(tPtr, -1, tmp, nullptr, 0, now));
if (qi.iteration < ZT_NAT_T_PORT_SCAN_MAX)
goto requeue_item;

}
sent(now, m_sendProbe(tPtr, -1, qi.target.ip(), ports, pn, now));
if (qi.privilegedPortTrialIteration < 1023) {
if (m_tryQueue.size() > 1)
m_tryQueue.splice(m_tryQueue.end(),m_tryQueue,m_tryQueue.begin());
continue;
} // else goto next_tryQueue_item;
}
}

next_tryQueue_item:
// Discard front item unless the code skips to requeue_item.
discard_queue_item:
m_tryQueue.pop_front();
} while ((attempts < ZT_NAT_T_MAX_QUEUED_ATTEMPTS_PER_PULSE) && (!m_tryQueue.empty()));
if ((m_tryQueue.empty()) || (attempts >= ZT_NAT_T_PORT_SCAN_MAX))
break;
else continue;

// If the code skips here the front item is instead moved to the back.
requeue_item:
if (m_tryQueue.size() > 1)
m_tryQueue.splice(m_tryQueue.end(), m_tryQueue, m_tryQueue.begin());
if (attempts >= ZT_NAT_T_PORT_SCAN_MAX)
break;
else continue;
}
}

// Do keepalive on all currently active paths, sending HELLO to the first
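
The unprivileged-port branch above walks forward from the original port one candidate per iteration, and the "p -= 64512" wrap is just 65536 - 1024, so the scan rolls over 65535 back to 1024 and never lands in the privileged range. A tiny worked example of that increment-and-wrap step (the function name and sample values are illustrative):

    #include <cstdio>

    // Next candidate port when guessing a sequentially assigning symmetric NAT:
    // original port + 1 + iteration, wrapped back into the non-privileged range.
    static unsigned int nextScanPort(unsigned int originalPort, unsigned int iteration)
    {
        unsigned int p = originalPort + 1 + iteration;
        if (p > 65535)
            p -= 64512; // 65536 - 1024: roll over to 1024, skipping privileged ports
        return p;
    }

    int main()
    {
        std::printf("%u\n", nextScanPort(50000, 0)); // 50001
        std::printf("%u\n", nextScanPort(65535, 0)); // wraps to 1024
        std::printf("%u\n", nextScanPort(65530, 9)); // 65540 wraps to 1028
        return 0;
    }
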
@ -338,7 +372,7 @@ void Peer::pulse(void *const tPtr, const int64_t now, const bool isRoot)
}
}

void Peer::contact(void *tPtr, const int64_t now, const Endpoint &ep)
void Peer::contact(void *tPtr, const int64_t now, const Endpoint &ep, int tries)
{
static uint8_t foo = 0;
RWMutex::Lock l(m_lock);

@ -364,12 +398,12 @@ void Peer::contact(void *tPtr, const int64_t now, const Endpoint &ep)
for (List<p_TryQueueItem>::iterator i(m_tryQueue.begin());i != m_tryQueue.end();++i) {
if (i->target.isSameAddress(ep)) {
i->target = ep;
i->privilegedPortTrialIteration = -1;
i->iteration = -tries;
return;
}
}

m_tryQueue.push_back(p_TryQueueItem(ep));
m_tryQueue.push_back(p_TryQueueItem(ep, -tries));
}

void Peer::resetWithinScope(void *tPtr, InetAddress::IpScope scope, int inetAddressFamily, int64_t now)

@ -415,14 +449,14 @@ void Peer::save(void *tPtr) const
uint8_t buf[8 + ZT_PEER_MARSHAL_SIZE_MAX];

// Prefix each saved peer with the current timestamp.
Utils::storeBigEndian<uint64_t>(buf, (uint64_t) RR->node->now());
Utils::storeBigEndian<uint64_t>(buf, (uint64_t)RR->node->now());

const int len = marshal(buf + 8);
if (len > 0) {
uint64_t id[2];
id[0] = m_id.address().toInt();
id[1] = 0;
RR->node->stateObjectPut(tPtr, ZT_STATE_OBJECT_PEER, id, buf, (unsigned int) len + 8);
RR->node->stateObjectPut(tPtr, ZT_STATE_OBJECT_PEER, id, buf, (unsigned int)len + 8);
}
}

@ -463,13 +497,13 @@ int Peer::marshal(uint8_t data[ZT_PEER_MARSHAL_SIZE_MAX]) const noexcept
data[p++] = 0;
}

Utils::storeBigEndian(data + p, (uint16_t) m_vProto);
Utils::storeBigEndian(data + p, (uint16_t)m_vProto);
p += 2;
Utils::storeBigEndian(data + p, (uint16_t) m_vMajor);
Utils::storeBigEndian(data + p, (uint16_t)m_vMajor);
p += 2;
Utils::storeBigEndian(data + p, (uint16_t) m_vMinor);
Utils::storeBigEndian(data + p, (uint16_t)m_vMinor);
p += 2;
Utils::storeBigEndian(data + p, (uint16_t) m_vRevision);
Utils::storeBigEndian(data + p, (uint16_t)m_vRevision);
p += 2;

data[p++] = 0;

@ -539,7 +573,7 @@ int Peer::unmarshal(const uint8_t *restrict data, const int len) noexcept
p += 2;
m_vRevision = Utils::loadBigEndian<uint16_t>(data + p);
p += 2;
p += 2 + (int) Utils::loadBigEndian<uint16_t>(data + p);
p += 2 + (int)Utils::loadBigEndian<uint16_t>(data + p);

m_deriveSecondaryIdentityKeys();

@ -231,8 +231,9 @@ public:
* @param tPtr Thread pointer to be handed through to any callbacks called as a result of this call
* @param now Current time
* @param ep Endpoint to attempt to contact
* @param tries Number of times to try (default: 1)
*/
void contact(void *tPtr, int64_t now, const Endpoint &ep);
void contact(void *tPtr, int64_t now, const Endpoint &ep, int tries = 1);

/**
* Reset paths within a given IP scope and address family

@ -525,16 +526,16 @@ private:
{
ZT_INLINE p_TryQueueItem() :
target(),
privilegedPortTrialIteration(-1)
iteration(0)
{}

ZT_INLINE p_TryQueueItem(const Endpoint &t) :
ZT_INLINE p_TryQueueItem(const Endpoint &t, int iter) :
target(t),
privilegedPortTrialIteration(-1)
iteration(iter)
{}

Endpoint target;
int privilegedPortTrialIteration;
int iteration;
};

List<p_TryQueueItem> m_tryQueue;
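
Tying the Peer.cpp and Peer.hpp changes together: contact(..., tries) queues the endpoint with iteration = -tries, and pulse() treats a negative iteration as "probe the original address directly and count up toward zero" before any port guessing begins. A simplified sketch of that counter convention, with stand-in types rather than the real p_TryQueueItem:

    #include <cstdio>

    struct TryItem {
        int iteration; // < 0: that many direct attempts remain; >= 0: scan progress
    };

    // One pulse() pass over a queued item, reduced to the counter handling only.
    static void pulseOnce(TryItem &qi)
    {
        if (qi.iteration < 0) {
            std::printf("direct probe to the original address (%d left)\n", -qi.iteration);
            ++qi.iteration; // consume one direct attempt, moving toward zero
        } else {
            std::printf("fall back to port guessing, iteration %d\n", qi.iteration);
            ++qi.iteration;
        }
    }

    int main()
    {
        TryItem qi = {-3}; // as if contact(..., tries = 3) had queued this endpoint
        for (int i = 0; i < 5; ++i)
            pulseOnce(qi);
        return 0;
    }
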
@ -147,39 +147,28 @@ public:
}

/**
* Apply a function or function object to all peers
*
* This locks the peer map during execution, so calls to get() etc. during
* eachPeer() will deadlock.
*
* @param f Function to apply
* @tparam F Function or function object type
* @param allPeers vector to fill with all current peers
*/
template<typename F>
ZT_INLINE void eachPeerWithRoot(F f) const
ZT_INLINE void getAllPeers(Vector< SharedPtr<Peer> > &allPeers) const
{
allPeers.clear();
RWMutex::RLock l(m_peers_l);

Vector<uintptr_t> rootPeerPtrs;
rootPeerPtrs.reserve(m_rootPeers.size());
for(Vector< SharedPtr<Peer> >::const_iterator rp(m_rootPeers.begin());rp != m_rootPeers.end();++rp)
rootPeerPtrs.push_back((uintptr_t)rp->ptr());
std::sort(rootPeerPtrs.begin(),rootPeerPtrs.end());

allPeers.reserve(m_peers.size());
for(Map< Address,SharedPtr<Peer> >::const_iterator i(m_peers.begin());i != m_peers.end();++i)
f(i->second,std::binary_search(rootPeerPtrs.begin(),rootPeerPtrs.end(),(uintptr_t)i->second.ptr()));
allPeers.push_back(i->second);
}

/**
* @param allPeers vector to fill with all current peers
*/
ZT_INLINE void getAllPeers(Vector< SharedPtr<Peer> > &allPeers) const
ZT_INLINE void getAllPeers(Vector< SharedPtr<Peer> > &allPeers,Vector< SharedPtr<Peer> > &rootPeers) const
{
RWMutex::RLock l(m_peers_l);
allPeers.clear();
RWMutex::RLock l(m_peers_l);
allPeers.reserve(m_peers.size());
for(Map< Address,SharedPtr<Peer> >::const_iterator i(m_peers.begin());i != m_peers.end();++i)
allPeers.push_back(i->second);
rootPeers = m_rootPeers;
}

/**