Yet more perf, security hardening, and compile fix tweaks.

This commit is contained in:
Adam Ierymenko 2020-02-20 09:33:11 -08:00
parent a4e24c8390
commit 320c8429c2
No known key found for this signature in database
GPG key ID: C8877CF2D7A5D7F3
10 changed files with 91 additions and 35 deletions

View file

@@ -13,17 +13,24 @@
#include "Buf.hpp" #include "Buf.hpp"
#ifdef __WINDOWS__
#define sched_yield() Sleep(0)
#endif
namespace ZeroTier { namespace ZeroTier {
static std::atomic<uintptr_t> s_pool(0); static std::atomic<uintptr_t> s_pool(0);
#define ZT_ATOMIC_PTR_LOCKED (~((uintptr_t)0))
void *Buf::operator new(std::size_t sz) void *Buf::operator new(std::size_t sz)
{ {
uintptr_t bb; uintptr_t bb;
for (;;) { for (;;) {
bb = s_pool.exchange(~((uintptr_t)0)); bb = s_pool.exchange(ZT_ATOMIC_PTR_LOCKED);
if (bb != ~((uintptr_t)0)) if (bb != ZT_ATOMIC_PTR_LOCKED)
break; break;
sched_yield();
} }
Buf *b; Buf *b;
@@ -46,9 +53,10 @@ void Buf::operator delete(void *ptr)
if (ptr) { if (ptr) {
uintptr_t bb; uintptr_t bb;
for (;;) { for (;;) {
bb = s_pool.exchange(~((uintptr_t)0)); bb = s_pool.exchange(ZT_ATOMIC_PTR_LOCKED);
if (bb != ~((uintptr_t)0)) if (bb != ZT_ATOMIC_PTR_LOCKED)
break; break;
sched_yield();
} }
((Buf *)ptr)->__nextInPool = bb; ((Buf *)ptr)->__nextInPool = bb;
@@ -60,11 +68,12 @@ void Buf::freePool() noexcept
{ {
uintptr_t bb; uintptr_t bb;
for (;;) { for (;;) {
bb = s_pool.exchange(~((uintptr_t)0)); bb = s_pool.exchange(ZT_ATOMIC_PTR_LOCKED);
if (bb != ~((uintptr_t)0)) if (bb != ZT_ATOMIC_PTR_LOCKED)
break; break;
sched_yield();
} }
s_pool.store((uintptr_t)0); s_pool.store(0);
while (bb != 0) { while (bb != 0) {
uintptr_t next = ((Buf *)bb)->__nextInPool; uintptr_t next = ((Buf *)bb)->__nextInPool;

View file

@@ -16,7 +16,7 @@ set(core_headers
ECC384.hpp ECC384.hpp
Expect.hpp Expect.hpp
FCV.hpp FCV.hpp
H.hpp Hash.hpp
Hashtable.hpp Hashtable.hpp
Identity.hpp Identity.hpp
InetAddress.hpp InetAddress.hpp

View file

@@ -40,7 +40,10 @@ namespace ZeroTier {
class RuntimeEnvironment; class RuntimeEnvironment;
/** /**
* Certificate indicating ownership of a network identifier * Certificate indicating ownership of a "thing" such as an IP address
*
* These are used in conjunction with the rules engine to make IP addresses and
* other identifiers un-spoofable.
*/ */
class CertificateOfOwnership : public Credential class CertificateOfOwnership : public Credential
{ {
@@ -96,15 +99,39 @@ public:
return this->_owns(THING_MAC_ADDRESS,tmp,6); return this->_owns(THING_MAC_ADDRESS,tmp,6);
} }
/**
* Add an IP address to this certificate
*
* @param ip IPv4 or IPv6 address
*/
void addThing(const InetAddress &ip); void addThing(const InetAddress &ip);
/**
* Add an Ethernet MAC address
*
* ZeroTier MAC addresses are always un-spoofable. This could in theory be
* used to make bridged MAC addresses un-spoofable as well, but it's not
* currently implemented.
*
* @param mac 48-bit MAC address
*/
void addThing(const MAC &mac); void addThing(const MAC &mac);
/** /**
* Sign this certificate
*
* @param signer Signing identity, must have private key * @param signer Signing identity, must have private key
* @return True if signature was successful * @return True if signature was successful
*/ */
bool sign(const Identity &signer); bool sign(const Identity &signer);
/**
* Verify certificate signature
*
* @param RR Runtime environment
* @param tPtr Thread pointer to be handed through to any callbacks resulting from this call
* @return Credential verification result: OK, bad signature, or identity needed
*/
ZT_ALWAYS_INLINE Credential::VerifyResult verify(const RuntimeEnvironment *RR,void *tPtr) const { return _verify(RR,tPtr,*this); } ZT_ALWAYS_INLINE Credential::VerifyResult verify(const RuntimeEnvironment *RR,void *tPtr) const { return _verify(RR,tPtr,*this); }
static constexpr int marshalSizeMax() noexcept { return ZT_CERTIFICATEOFOWNERSHIP_MARSHAL_SIZE_MAX; } static constexpr int marshalSizeMax() noexcept { return ZT_CERTIFICATEOFOWNERSHIP_MARSHAL_SIZE_MAX; }

View file

@@ -63,7 +63,7 @@ public:
COMPLETE, COMPLETE,
/** /**
* This fragment duplicates another with the same fragment number for this message * We already have this fragment number or the message is complete
*/ */
ERR_DUPLICATE_FRAGMENT, ERR_DUPLICATE_FRAGMENT,

View file

@@ -11,8 +11,8 @@
*/ */
/****/ /****/
#ifndef ZT_H_HPP #ifndef ZT_HASH_HPP
#define ZT_H_HPP #define ZT_HASH_HPP
#include "Constants.hpp" #include "Constants.hpp"
#include "TriviallyCopyable.hpp" #include "TriviallyCopyable.hpp"
@@ -30,15 +30,15 @@ namespace ZeroTier {
* @tparam BITS Bits in hash, must be a multiple of 64 * @tparam BITS Bits in hash, must be a multiple of 64
*/ */
template<unsigned int BITS> template<unsigned int BITS>
class H : public TriviallyCopyable class Hash : public TriviallyCopyable
{ {
public: public:
ZT_ALWAYS_INLINE H() noexcept {} ZT_ALWAYS_INLINE Hash() noexcept {}
/** /**
* @param h Hash value of size BITS / 8 * @param h Hash value of size BITS / 8
*/ */
explicit ZT_ALWAYS_INLINE H(const void *h) noexcept { memcpy(_h,h,BITS / 8); } explicit ZT_ALWAYS_INLINE Hash(const void *h) noexcept { memcpy(_h,h,BITS / 8); }
/** /**
* @param h Hash value of size BITS / 8 * @param h Hash value of size BITS / 8
@@ -70,12 +70,12 @@ public:
return false; return false;
} }
ZT_ALWAYS_INLINE bool operator==(const H &h) const noexcept { return memcmp(_h,h._h,BITS / 8) == 0; } ZT_ALWAYS_INLINE bool operator==(const Hash &h) const noexcept { return memcmp(_h,h._h,BITS / 8) == 0; }
ZT_ALWAYS_INLINE bool operator!=(const H &h) const noexcept { return memcmp(_h,h._h,BITS / 8) != 0; } ZT_ALWAYS_INLINE bool operator!=(const Hash &h) const noexcept { return memcmp(_h,h._h,BITS / 8) != 0; }
ZT_ALWAYS_INLINE bool operator<(const H &h) const noexcept { return memcmp(_h,h._h,BITS / 8) < 0; } ZT_ALWAYS_INLINE bool operator<(const Hash &h) const noexcept { return memcmp(_h,h._h,BITS / 8) < 0; }
ZT_ALWAYS_INLINE bool operator>(const H &h) const noexcept { return memcmp(_h,h._h,BITS / 8) > 0; } ZT_ALWAYS_INLINE bool operator>(const Hash &h) const noexcept { return memcmp(_h,h._h,BITS / 8) > 0; }
ZT_ALWAYS_INLINE bool operator<=(const H &h) const noexcept { return memcmp(_h,h._h,BITS / 8) <= 0; } ZT_ALWAYS_INLINE bool operator<=(const Hash &h) const noexcept { return memcmp(_h,h._h,BITS / 8) <= 0; }
ZT_ALWAYS_INLINE bool operator>=(const H &h) const noexcept { return memcmp(_h,h._h,BITS / 8) >= 0; } ZT_ALWAYS_INLINE bool operator>=(const Hash &h) const noexcept { return memcmp(_h,h._h,BITS / 8) >= 0; }
private: private:
unsigned long _h[BITS / sizeof(unsigned long)]; unsigned long _h[BITS / sizeof(unsigned long)];

View file

@@ -24,7 +24,7 @@
#include "SHA512.hpp" #include "SHA512.hpp"
#include "ECC384.hpp" #include "ECC384.hpp"
#include "TriviallyCopyable.hpp" #include "TriviallyCopyable.hpp"
#include "H.hpp" #include "Hash.hpp"
#define ZT_IDENTITY_STRING_BUFFER_LENGTH 1024 #define ZT_IDENTITY_STRING_BUFFER_LENGTH 1024
#define ZT_IDENTITY_P384_COMPOUND_PUBLIC_KEY_SIZE (ZT_C25519_PUBLIC_KEY_LEN + ZT_ECC384_PUBLIC_KEY_SIZE) #define ZT_IDENTITY_P384_COMPOUND_PUBLIC_KEY_SIZE (ZT_C25519_PUBLIC_KEY_LEN + ZT_ECC384_PUBLIC_KEY_SIZE)
@@ -128,7 +128,7 @@ public:
* *
* @return 384-bit/48-byte hash * @return 384-bit/48-byte hash
*/ */
ZT_ALWAYS_INLINE const H<384> &hash() const noexcept { return _hash; } ZT_ALWAYS_INLINE const Hash<384> &hash() const noexcept { return _hash; }
/** /**
* Compute a hash of this identity's public and private keys. * Compute a hash of this identity's public and private keys.
@@ -248,7 +248,7 @@ private:
void _computeHash(); void _computeHash();
Address _address; Address _address;
H<384> _hash; Hash<384> _hash;
ZT_PACKED_STRUCT(struct { // don't re-order these ZT_PACKED_STRUCT(struct { // don't re-order these
uint8_t c25519[ZT_C25519_PRIVATE_KEY_LEN]; uint8_t c25519[ZT_C25519_PRIVATE_KEY_LEN];
uint8_t p384[ZT_ECC384_PRIVATE_KEY_SIZE]; uint8_t p384[ZT_ECC384_PRIVATE_KEY_SIZE];

View file

@@ -24,6 +24,13 @@ namespace ZeroTier {
/** /**
* Transfer rate and total transferred amount meter * Transfer rate and total transferred amount meter
* *
* This class is lock-free and thread-safe.
*
* This maintains a set of buckets numbered according to the current time
* modulo TUNIT. Each bucket is incremented within that time window. When
* the time moves on to a new bucket, its old contents are added to a
* total accumulator and a new counter for that bucket starts.
*
* @tparam TUNIT Unit of time in milliseconds (default: 1000 for one second) * @tparam TUNIT Unit of time in milliseconds (default: 1000 for one second)
* @tparam LSIZE Log size in units of time (default: 10 for 10s worth of data) * @tparam LSIZE Log size in units of time (default: 10 for 10s worth of data)
*/ */

View file

@@ -33,13 +33,12 @@
#endif #endif
#endif #endif
#if defined(_WIN32) || defined(_WIN64)
#ifdef _MSC_VER #ifdef _MSC_VER
#pragma warning(disable : 4290) #pragma warning(disable : 4290)
#pragma warning(disable : 4996) #pragma warning(disable : 4996)
#pragma warning(disable : 4101) #pragma warning(disable : 4101)
#endif #endif
#if defined(_WIN32) || defined(_WIN64)
#ifndef __WINDOWS__ #ifndef __WINDOWS__
#define __WINDOWS__ #define __WINDOWS__
#endif #endif
@@ -81,20 +80,27 @@
#define __BSD__ #define __BSD__
#endif #endif
#include <sys/endian.h> #include <sys/endian.h>
#ifndef __BYTE_ORDER #if (!defined(__BYTE_ORDER)) && (defined(_BYTE_ORDER))
#define __BYTE_ORDER _BYTE_ORDER #define __BYTE_ORDER _BYTE_ORDER
#define __LITTLE_ENDIAN _LITTLE_ENDIAN #define __LITTLE_ENDIAN _LITTLE_ENDIAN
#define __BIG_ENDIAN _BIG_ENDIAN #define __BIG_ENDIAN _BIG_ENDIAN
#endif #endif
#endif #endif
#ifdef __NetBSD__ #ifdef __NetBSD__
#ifndef RTF_MULTICAST #ifndef RTF_MULTICAST
#define RTF_MULTICAST 0x20000000 #define RTF_MULTICAST 0x20000000
#endif #endif
#endif #endif
// Avoid unaligned type casts on all but x86/x64 architecture. #if (defined(__amd64) || defined(__amd64__) || defined(__x86_64) || defined(__x86_64__) || defined(__AMD64) || defined(__AMD64__) || defined(_M_X64))
#if (!(defined(__amd64__) || defined(__amd64) || defined(__x86_64__) || defined(__x86_64) || defined(_M_AMD64) || defined(_M_X64) || defined(i386) || defined(__i386) || defined(__i386__) || defined(__i486__) || defined(__i586__) || defined(__i686__) || defined(_M_IX86) || defined(__X86__) || defined(_X86_) || defined(__I86__) || defined(__INTEL__) || defined(__386))) #define ZT_ARCH_X64
#endif
// As far as we know it's only generally safe to do unaligned type casts in all
// cases on x86 and x64 architectures. Others such as ARM and MIPS will generate
// a fault or exhibit undefined behavior that varies by vendor.
#if (!(defined(ZT_ARCH_X64) || defined(i386) || defined(__i386) || defined(__i386__) || defined(__i486__) || defined(__i586__) || defined(__i686__) || defined(_M_IX86) || defined(__X86__) || defined(_X86_) || defined(__I86__) || defined(__INTEL__) || defined(__386)))
#ifndef ZT_NO_UNALIGNED_ACCESS #ifndef ZT_NO_UNALIGNED_ACCESS
#define ZT_NO_UNALIGNED_ACCESS #define ZT_NO_UNALIGNED_ACCESS
#endif #endif
@@ -109,6 +115,10 @@
#define __LITTLE_ENDIAN 1234 #define __LITTLE_ENDIAN 1234
#define __BYTE_ORDER 1234 #define __BYTE_ORDER 1234
#endif #endif
// It would probably be safe to assume LE everywhere except on very specific architectures as there
// are few BE chips remaining in the wild that are powerful enough to run this, but for now we'll
// try to include endian.h and error out if it doesn't exist.
#ifndef __BYTE_ORDER #ifndef __BYTE_ORDER
#include <endian.h> #include <endian.h>
#endif #endif
@@ -143,7 +153,10 @@
#endif #endif
#endif #endif
#ifndef __CPP11__ #ifndef __CPP11__
/* TODO: will need some kind of very basic atomic<> implementation if we want to compile on pre-c++11 compilers */ // TODO: we'll need to "polyfill" a subset of std::atomic for integers if we want to build on pre-C++11 compilers.
// Beyond that defining nullptr, constexpr, and noexcept should allow us to still build on these. So far we've
// avoided deeper C++11 features like lambdas in the core until we're 100% sure all the ancient targets are gone.
#error need pre-c++11 std::atomic implementation
#define nullptr (0) #define nullptr (0)
#define constexpr ZT_ALWAYS_INLINE #define constexpr ZT_ALWAYS_INLINE
#define noexcept throw() #define noexcept throw()

View file

@@ -30,7 +30,7 @@
#include "Hashtable.hpp" #include "Hashtable.hpp"
#include "SharedPtr.hpp" #include "SharedPtr.hpp"
#include "ScopedPtr.hpp" #include "ScopedPtr.hpp"
#include "H.hpp" #include "Hash.hpp"
namespace ZeroTier { namespace ZeroTier {
@@ -94,7 +94,7 @@ public:
* @param hash Identity hash * @param hash Identity hash
* @return Peer or NULL if no peer is currently in memory for this hash (cache is not checked in this case) * @return Peer or NULL if no peer is currently in memory for this hash (cache is not checked in this case)
*/ */
ZT_ALWAYS_INLINE SharedPtr<Peer> peerByHash(const H<384> &hash) ZT_ALWAYS_INLINE SharedPtr<Peer> peerByHash(const Hash<384> &hash)
{ {
RWMutex::RLock _l(_peers_l); RWMutex::RLock _l(_peers_l);
const SharedPtr<Peer> *const ap = _peersByIdentityHash.get(hash); const SharedPtr<Peer> *const ap = _peersByIdentityHash.get(hash);
@@ -365,7 +365,7 @@ private:
Hashtable< Address,SharedPtr<Peer> > _peers; Hashtable< Address,SharedPtr<Peer> > _peers;
Hashtable< uint64_t,SharedPtr<Peer> > _peersByIncomingProbe; Hashtable< uint64_t,SharedPtr<Peer> > _peersByIncomingProbe;
Hashtable< H<384>,SharedPtr<Peer> > _peersByIdentityHash; Hashtable< Hash<384>,SharedPtr<Peer> > _peersByIdentityHash;
Hashtable< uint64_t,SharedPtr<Path> > _paths; Hashtable< uint64_t,SharedPtr<Path> > _paths;
std::set< Identity > _roots; // locked by _peers_l std::set< Identity > _roots; // locked by _peers_l
std::vector< SharedPtr<Peer> > _rootPeers; // locked by _peers_l std::vector< SharedPtr<Peer> > _rootPeers; // locked by _peers_l

View file

@@ -25,7 +25,7 @@
#include "Constants.hpp" #include "Constants.hpp"
#if (defined(__amd64) || defined(__amd64__) || defined(__x86_64) || defined(__x86_64__) || defined(__AMD64) || defined(__AMD64__) || defined(_M_X64)) #ifdef ZT_ARCH_X64
#include <emmintrin.h> #include <emmintrin.h>
#include <xmmintrin.h> #include <xmmintrin.h>
#include <immintrin.h> #include <immintrin.h>
@@ -35,7 +35,7 @@ namespace ZeroTier {
namespace Utils { namespace Utils {
#if (defined(__amd64) || defined(__amd64__) || defined(__x86_64) || defined(__x86_64__) || defined(__AMD64) || defined(__AMD64__) || defined(_M_X64)) #ifdef ZT_ARCH_X64
struct CPUIDRegisters struct CPUIDRegisters
{ {
uint32_t eax,ebx,ecx,edx; uint32_t eax,ebx,ecx,edx;