mirror of https://github.com/zerotier/ZeroTierOne.git
Wiring up addroot/removeroot
parent 0de194dab3
commit 5c6bf9d0a4
15 changed files with 310 additions and 224 deletions
@@ -34,7 +34,11 @@ func Status(basePath, authToken string, args []string, jsonOutput bool) {
	}
	fmt.Printf("%.10x: %s %s\n", uint64(status.Address), online, status.Version)
	fmt.Printf("\tidentity:\t%s\n", status.Identity.String())
-	fmt.Printf("\tports:\tprimary: %d secondary: %d\n", status.Config.Settings.PrimaryPort, status.Config.Settings.SecondaryPort)
+	if status.Config.Settings.SecondaryPort > 0 && status.Config.Settings.SecondaryPort < 65536 {
+		fmt.Printf("\tports:\t%d %d\n", status.Config.Settings.PrimaryPort, status.Config.Settings.SecondaryPort)
+	} else {
+		fmt.Printf("\tports:\t%d\n", status.Config.Settings.PrimaryPort)
+	}
	fmt.Printf("\tport search:\t%s\n", enabledDisabled(status.Config.Settings.PortSearch))
	fmt.Printf("\tport mapping (uPnP/NAT-PMP):\t%s\n", enabledDisabled(status.Config.Settings.PortMapping))
	fmt.Printf("\tblacklisted interface prefixes:\t")

@@ -134,7 +134,7 @@ func main() {
	case "peers", "listpeers":
		authTokenRequired(authToken)
		cli.Peers(basePath, authToken, cmdArgs, *jflag)
-	case "roots", "listroots", "listmoons":
+	case "roots", "listroots":
		authTokenRequired(authToken)
		cli.Roots(basePath, authToken, cmdArgs, *jflag)
	case "addroot":

@@ -226,6 +226,12 @@ func apiCheckAuth(out http.ResponseWriter, req *http.Request, token string) bool
	return false
}

+type peerMutableFields struct {
+	Identity  *Identity    `json:"identity"`
+	Role      *int         `json:"role"`
+	Bootstrap *InetAddress `json:"bootstrap,omitempty"`
+}
+
// createAPIServer creates and starts an HTTP server for a given node
func createAPIServer(basePath string, node *Node) (*http.Server, *http.Server, error) {
	// Read authorization token, automatically generating one if it's missing

@@ -360,12 +366,35 @@ func createAPIServer(basePath string, node *Node) (*http.Server, *http.Server, error) {
		}
	}

+	// Right now POST/PUT is only used with peers to add or remove root servers.
	if req.Method == http.MethodPost || req.Method == http.MethodPut {
		if queriedID == 0 {
			_ = apiSendObj(out, req, http.StatusNotFound, &APIErr{"peer not found"})
			return
		}
-	} else if req.Method == http.MethodGet || req.Method == http.MethodHead {
+		var peerChanges peerMutableFields
+		if apiReadObj(out, req, &peerChanges) == nil {
+			if peerChanges.Role != nil || peerChanges.Bootstrap != nil {
+				peers := node.Peers()
+				for _, p := range peers {
+					if p.Address == queriedID && (peerChanges.Identity == nil || peerChanges.Identity.Equals(p.Identity)) {
+						if peerChanges.Role != nil && *peerChanges.Role != p.Role {
+							if *peerChanges.Role == PeerRoleRoot {
+								_ = node.AddRoot(p.Identity, peerChanges.Bootstrap)
+							} else {
+								node.RemoveRoot(p.Identity)
+							}
+						}
+						break
+					}
+				}
+			}
+		} else {
+			return
+		}
+	}
+
+	if req.Method == http.MethodGet || req.Method == http.MethodHead || req.Method == http.MethodPost || req.Method == http.MethodPut {
		peers := node.Peers()
		if queriedID != 0 {
			for _, p := range peers {
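
For illustration, a minimal client-side sketch of driving the handler above. The /peer/<address> path, the local port 9993, and the bearer-token header are assumptions for the sketch, not taken from this diff; only the "role" JSON field name comes from peerMutableFields.

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

// setPeerRole POSTs a peerMutableFields-style body for one peer.
func setPeerRole(authToken string, address uint64, role int) error {
	body, err := json.Marshal(map[string]int{"role": role}) // field name from the diff
	if err != nil {
		return err
	}
	url := fmt.Sprintf("http://127.0.0.1:9993/peer/%.10x", address) // endpoint shape assumed
	req, err := http.NewRequest(http.MethodPost, url, bytes.NewReader(body))
	if err != nil {
		return err
	}
	req.Header.Set("Authorization", "bearer "+authToken) // header shape assumed
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	return resp.Body.Close()
}

func main() {
	_ = setPeerRole("token", 0xdeadbeef00, 1) // role constant for "root" assumed
}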
@@ -17,6 +17,7 @@ package zerotier
import "C"

import (
+	"bytes"
	"encoding/hex"
	"encoding/json"
	"fmt"

@@ -51,7 +52,7 @@ type Identity struct {
func identityFinalizer(obj interface{}) {
	id, _ := obj.(*Identity)
	if id != nil && uintptr(id.cid) != 0 {
-		defer C.ZT_Identity_delete(id.cid)
+		C.ZT_Identity_delete(id.cid)
	}
}

@@ -72,12 +73,25 @@ func newIdentityFromCIdentity(cid unsafe.Pointer) (*Identity, error) {
	}

	id.cid = cid

	runtime.SetFinalizer(id, identityFinalizer)

	return id, nil
}

+// cIdentity returns a pointer to the core ZT_Identity instance or nil/0 on error.
+func (id *Identity) cIdentity() unsafe.Pointer {
+	if uintptr(id.cid) == 0 {
+		idCStr := C.CString(id.String())
+		defer C.free(unsafe.Pointer(idCStr))
+		id.cid = C.ZT_Identity_fromString(idCStr)
+		if uintptr(id.cid) == 0 {
+			return nil
+		}
+		runtime.SetFinalizer(id, identityFinalizer)
+	}
+	return id.cid
+}
+
// NewIdentity generates a new identity of the selected type
func NewIdentity(identityType int) (*Identity, error) {
	return newIdentityFromCIdentity(C.ZT_Identity_new(C.enum_ZT_Identity_Type(identityType)))
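
cIdentity() above follows a common cgo pattern: materialize the C-side object on first use, then attach a finalizer so it is deleted when the Go wrapper is collected. A runnable sketch of just that pattern, with the cgo calls replaced by hypothetical stubs:

package main

import (
	"fmt"
	"runtime"
)

type handle int

// allocHandle and freeHandle stand in for ZT_Identity_fromString and
// ZT_Identity_delete; they are stubs so this sketch runs without cgo.
func allocHandle() handle { fmt.Println("alloc"); return 1 }
func freeHandle(h handle) { fmt.Println("free", h) }

type wrapper struct{ h handle }

// ensure lazily creates the underlying handle and registers cleanup,
// mirroring cIdentity()'s check-allocate-SetFinalizer sequence.
func (w *wrapper) ensure() handle {
	if w.h == 0 {
		w.h = allocHandle()
		runtime.SetFinalizer(w, func(x *wrapper) { freeHandle(x.h) })
	}
	return w.h
}

func main() {
	w := &wrapper{}
	fmt.Println(w.ensure()) // prints "alloc" then 1; later calls reuse the handle
	runtime.KeepAlive(w)
}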
@@ -140,8 +154,6 @@ func NewIdentityFromString(s string) (*Identity, error) {
	}

-	runtime.SetFinalizer(id, identityFinalizer)
-
	return id, nil
}

@@ -184,26 +196,18 @@ func (id *Identity) String() string {

// LocallyValidate performs local self-validation of this identity
func (id *Identity) LocallyValidate() bool {
+	id.cIdentity()
	if uintptr(id.cid) == 0 {
-		idCStr := C.CString(id.String())
-		defer C.free(unsafe.Pointer(idCStr))
-		id.cid = C.ZT_Identity_fromString(idCStr)
-		if uintptr(id.cid) == 0 {
-			return false
-		}
+		return false
	}
	return C.ZT_Identity_validate(id.cid) != 0
}

// Sign signs a message with this identity
func (id *Identity) Sign(msg []byte) ([]byte, error) {
+	id.cIdentity()
	if uintptr(id.cid) == 0 {
-		idCStr := C.CString(id.String())
-		defer C.free(unsafe.Pointer(idCStr))
-		id.cid = C.ZT_Identity_fromString(idCStr)
-		if uintptr(id.cid) == 0 {
-			return nil, ErrInvalidKey
-		}
+		return nil, ErrInvalidKey
	}

	var dataP unsafe.Pointer

@@ -225,13 +229,9 @@ func (id *Identity) Verify(msg, sig []byte) bool {
		return false
	}

+	id.cIdentity()
	if uintptr(id.cid) == 0 {
-		idCStr := C.CString(id.String())
-		defer C.free(unsafe.Pointer(idCStr))
-		id.cid = C.ZT_Identity_fromString(idCStr)
-		if uintptr(id.cid) == 0 {
-			return false
-		}
+		return false
	}

	var dataP unsafe.Pointer

@@ -241,6 +241,17 @@ func (id *Identity) Verify(msg, sig []byte) bool {
	return C.ZT_Identity_verify(id.cid, dataP, C.uint(len(msg)), unsafe.Pointer(&sig[0]), C.uint(len(sig))) != 0
}

+// Equals performs a deep equality test between this and another identity
+func (id *Identity) Equals(id2 *Identity) bool {
+	if id2 == nil {
+		return id == nil
+	}
+	if id == nil {
+		return false
+	}
+	return id.address == id2.address && id.idtype == id2.idtype && bytes.Equal(id.publicKey, id2.publicKey) && bytes.Equal(id.privateKey, id2.privateKey)
+}
+
// MarshalJSON marshals this Identity in its string format (private key is never included)
func (id *Identity) MarshalJSON() ([]byte, error) {
	return []byte("\"" + id.String() + "\""), nil

@@ -74,8 +74,12 @@ func sockaddrStorageToUDPAddr2(ss unsafe.Pointer) *net.UDPAddr {
	return sockaddrStorageToUDPAddr((*C.struct_sockaddr_storage)(ss))
}

-func makeSockaddrStorage(ip net.IP, port int, ss *C.struct_sockaddr_storage) bool {
+func zeroSockaddrStorage(ss *C.struct_sockaddr_storage) {
	C.memset(unsafe.Pointer(ss), 0, C.sizeof_struct_sockaddr_storage)
+}
+
+func makeSockaddrStorage(ip net.IP, port int, ss *C.struct_sockaddr_storage) bool {
+	zeroSockaddrStorage(ss)
	if len(ip) == 4 {
		sa4 := (*C.struct_sockaddr_in)(unsafe.Pointer(ss))
		sa4.sin_family = syscall.AF_INET

@@ -542,6 +542,32 @@ func (n *Node) Leave(nwid NetworkID) error {
	return nil
}

+// AddRoot adds a root server with an optional bootstrap address for establishing first contact.
+// If you're already using roots backed by proper global LF data stores the bootstrap address may
+// be unnecessary as your node can probably find the new root automatically.
+func (n *Node) AddRoot(id *Identity, bootstrap *InetAddress) error {
+	if id == nil {
+		return ErrInvalidParameter
+	}
+	var cBootstrap C.struct_sockaddr_storage
+	if bootstrap != nil {
+		makeSockaddrStorage(bootstrap.IP, bootstrap.Port, &cBootstrap)
+	} else {
+		zeroSockaddrStorage(&cBootstrap)
+	}
+	C.ZT_Node_addRoot(n.zn, nil, id.cIdentity(), &cBootstrap)
+	return nil
+}
+
+// RemoveRoot removes a root server for this node.
+// This doesn't instantly close paths to the given peer or forget about it. It just
+// demotes it to a normal peer.
+func (n *Node) RemoveRoot(id *Identity) {
+	if id != nil {
+		C.ZT_Node_removeRoot(n.zn, nil, id.cIdentity())
+	}
+}
+
// GetNetwork looks up a network by ID or returns nil if not joined
func (n *Node) GetNetwork(nwid NetworkID) *Network {
	n.networksLock.RLock()
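
A hedged usage sketch of the two new methods, written against this package's types as shown in the diff (Node, Identity, and InetAddress with IP and Port fields); the identity string and bootstrap address are placeholders, and a "net" import is assumed:

// makeRootTemporarily promotes a peer to root, then demotes it again.
func makeRootTemporarily(node *Node, rootIdentityStr string) error {
	id, err := NewIdentityFromString(rootIdentityStr) // placeholder identity string
	if err != nil {
		return err
	}
	// A bootstrap address helps first contact; per the AddRoot comment above it
	// may be unnecessary when the root is discoverable already.
	bootstrap := &InetAddress{IP: net.ParseIP("198.51.100.1"), Port: 9993}
	if err := node.AddRoot(id, bootstrap); err != nil {
		return err
	}
	node.RemoveRoot(id) // demote back to a normal peer
	return nil
}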
@@ -883,13 +909,10 @@ func goZtEvent(gn unsafe.Pointer, eventType C.int, data unsafe.Pointer) {
	if node == nil {
		return
	}

	switch eventType {
	case C.ZT_EVENT_OFFLINE:
		atomic.StoreUint32(&node.online, 0)
	case C.ZT_EVENT_ONLINE:
		atomic.StoreUint32(&node.online, 1)
	case C.ZT_EVENT_TRACE:
		node.handleTrace(C.GoString((*C.char)(data)))
	}
}

node/Buf.cpp (96 changed lines)
@@ -13,104 +13,12 @@
#include "Buf.hpp"

#ifndef __GNUC__
#include <atomic>
#endif

namespace ZeroTier {

#ifdef __GNUC__
-static uintptr_t s_pool = 0;
+uintptr_t Buf_pool = 0;
#else
-static std::atomic<uintptr_t> s_pool(0);
+std::atomic<uintptr_t> Buf_pool(0);
#endif

-void Buf::operator delete(void *ptr,std::size_t sz)
-{
-	if (ptr) {
-		uintptr_t bb;
-		const uintptr_t locked = ~((uintptr_t)0);
-		for (;;) {
-#ifdef __GNUC__
-			bb = __sync_fetch_and_or(&s_pool,locked); // get value of s_pool and "lock" by filling with all 1's
-#else
-			bb = s_pool.fetch_or(locked);
-#endif
-			if (bb != locked)
-				break;
-		}
-
-		((Buf *)ptr)->__nextInPool = bb;
-#ifdef __GNUC__
-		__sync_fetch_and_and(&s_pool,(uintptr_t)ptr);
-#else
-		s_pool.store((uintptr_t)ptr);
-#endif
-	}
-}
-
-SharedPtr<Buf> Buf::get()
-{
-	uintptr_t bb;
-	const uintptr_t locked = ~((uintptr_t)0);
-	for (;;) {
-#ifdef __GNUC__
-		bb = __sync_fetch_and_or(&s_pool,locked); // get value of s_pool and "lock" by filling with all 1's
-#else
-		bb = s_pool.fetch_or(locked);
-#endif
-		if (bb != locked)
-			break;
-	}
-
-	Buf *b;
-	if (bb == 0) {
-#ifdef __GNUC__
-		__sync_fetch_and_and(&s_pool,bb);
-#else
-		s_pool.store(bb);
-#endif
-		b = (Buf *)malloc(sizeof(Buf));
-		if (!b)
-			return SharedPtr<Buf>();
-	} else {
-		b = (Buf *)bb;
-#ifdef __GNUC__
-		__sync_fetch_and_and(&s_pool,b->__nextInPool);
-#else
-		s_pool.store(b->__nextInPool);
-#endif
-	}
-
-	b->__refCount.zero();
-	return SharedPtr<Buf>(b);
-}
-
-void Buf::freePool()
-{
-	uintptr_t bb;
-	const uintptr_t locked = ~((uintptr_t)0);
-	for (;;) {
-#ifdef __GNUC__
-		bb = __sync_fetch_and_or(&s_pool,locked); // get value of s_pool and "lock" by filling with all 1's
-#else
-		bb = s_pool.fetch_or(locked);
-#endif
-		if (bb != locked)
-			break;
-	}
-
-#ifdef __GNUC__
-	__sync_fetch_and_and(&s_pool,(uintptr_t)0);
-#else
-	s_pool.store((uintptr_t)0);
-#endif
-
-	while (bb != 0) {
-		uintptr_t next = ((Buf *)bb)->__nextInPool;
-		free((void *)bb);
-		bb = next;
-	}
-}
-
} // namespace ZeroTier
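
Everything deleted here reappears inline in Buf.hpp below; the algorithm itself is unchanged: the pool's head pointer doubles as a spinlock, acquired by atomically swapping in all one bits. A runnable Go transliteration of that discipline, using slice indices instead of raw pointers so it stays GC-safe (all names here are illustrative, not from the repository):

package main

import (
	"fmt"
	"sync/atomic"
)

const locked = ^uintptr(0) // all 1s, same role as in the C++ code

// arena is a free list of slots. head is a 1-based slot index, 0 when the
// list is empty, or locked while some goroutine owns the list.
type arena struct {
	head uintptr
	next []uintptr // next[i] = 1-based index of the slot after slot i
}

func newArena(n int) *arena {
	a := &arena{next: make([]uintptr, n), head: 1}
	for i := 0; i < n-1; i++ {
		a.next[i] = uintptr(i + 2)
	}
	return a
}

// lock spins until it owns the head, like the __sync_fetch_and_or loops above
// (swapping in all 1s is equivalent to fetch-or with all 1s).
func (a *arena) lock() uintptr {
	for {
		if h := atomic.SwapUintptr(&a.head, locked); h != locked {
			return h
		}
	}
}

// get pops a slot, like Buf::get; the C++ code mallocs when the pool is empty.
func (a *arena) get() (int, bool) {
	h := a.lock()
	if h == 0 {
		atomic.StoreUintptr(&a.head, 0) // unlock: still empty
		return -1, false
	}
	atomic.StoreUintptr(&a.head, a.next[h-1]) // publish new head, unlocking
	return int(h - 1), true
}

// put pushes a slot back, like Buf::operator delete.
func (a *arena) put(i int) {
	h := a.lock()
	a.next[i] = h
	atomic.StoreUintptr(&a.head, uintptr(i+1))
}

func main() {
	a := newArena(4)
	i, _ := a.get()
	j, _ := a.get()
	a.put(i)
	k, _ := a.get()
	fmt.Println(i, j, k) // 0 1 0: LIFO reuse, like the Buf pool
}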
node/Buf.hpp (210 changed lines)
@@ -24,6 +24,10 @@
#include <cstring>
#include <cstdlib>

+#ifndef __GNUC__
+#include <atomic>
+#endif
+
// Buffers are 16384 bytes in size because this is the smallest size that can hold any packet
// and is a power of two. It needs to be a power of two because masking is significantly faster
// than integer division modulus.

@@ -32,6 +36,12 @@

namespace ZeroTier {

+#ifdef __GNUC__
+extern uintptr_t Buf_pool;
+#else
+extern std::atomic<uintptr_t> Buf_pool;
+#endif
+
/**
 * Buffer and methods for branch-free bounds-checked data assembly and parsing
 *

@@ -62,28 +72,93 @@ namespace ZeroTier {
 * reads to ensure that overflow did not occur.
 *
 * Buf uses a lock-free pool for extremely fast allocation and deallocation.
+ *
+ * Buf can optionally take a template parameter that will be placed in the 'data'
+ * union as 'fields.' This must be a basic plain data type and must be no larger than
+ * ZT_BUF_MEM_SIZE. It's typically a packed struct.
+ *
+ * @tparam U Type to overlap with data bytes in data union (can't be larger than ZT_BUF_MEM_SIZE)
 */
+template<typename U = void>
class Buf
{
-	friend class SharedPtr<Buf>;
+	friend class SharedPtr< Buf<U> >;

private:
	// Direct construction isn't allowed; use get().
	ZT_ALWAYS_INLINE Buf()
	{}

-	ZT_ALWAYS_INLINE Buf(const Buf &b)
-	{}
+	template<typename X>
+	ZT_ALWAYS_INLINE Buf(const Buf<X> &b)
+	{ memcpy(data.bytes,b.data.bytes,ZT_BUF_MEM_SIZE); }

public:
-	static void operator delete(void *ptr,std::size_t sz);
+	static void operator delete(void *ptr,std::size_t sz)
+	{
+		if (ptr) {
+			uintptr_t bb;
+			const uintptr_t locked = ~((uintptr_t)0);
+			for (;;) {
+#ifdef __GNUC__
+				bb = __sync_fetch_and_or(&Buf_pool,locked); // get value of s_pool and "lock" by filling with all 1's
+#else
+				bb = s_pool.fetch_or(locked);
+#endif
+				if (bb != locked)
+					break;
+			}
+
+			((Buf *)ptr)->__nextInPool = bb;
+#ifdef __GNUC__
+			__sync_fetch_and_and(&Buf_pool,(uintptr_t)ptr);
+#else
+			s_pool.store((uintptr_t)ptr);
+#endif
+		}
+	}

	/**
	 * Get obtains a buffer from the pool or allocates a new buffer if the pool is empty
	 *
	 * @return Buffer
	 */
-	static SharedPtr<Buf> get();
+	static ZT_ALWAYS_INLINE SharedPtr< Buf<U> > get()
+	{
+		uintptr_t bb;
+		const uintptr_t locked = ~((uintptr_t)0);
+		for (;;) {
+#ifdef __GNUC__
+			bb = __sync_fetch_and_or(&Buf_pool,locked); // get value of s_pool and "lock" by filling with all 1's
+#else
+			bb = s_pool.fetch_or(locked);
+#endif
+			if (bb != locked)
+				break;
+		}
+
+		Buf *b;
+		if (bb == 0) {
+#ifdef __GNUC__
+			__sync_fetch_and_and(&Buf_pool,bb);
+#else
+			s_pool.store(bb);
+#endif
+			b = (Buf *)malloc(sizeof(Buf));
+			if (!b)
+				return SharedPtr<Buf>();
+		} else {
+			b = (Buf *)bb;
+#ifdef __GNUC__
+			__sync_fetch_and_and(&Buf_pool,b->__nextInPool);
+#else
+			s_pool.store(b->__nextInPool);
+#endif
+		}
+
+		b->__refCount.zero();
+		return SharedPtr<Buf>(b);
+	}

	/**
	 * Free buffers in the pool
@@ -92,7 +167,32 @@ public:
	 * and outstanding buffers will still be returned to the pool. This just
	 * frees buffers currently held in reserve.
	 */
-	static void freePool();
+	static inline void freePool()
+	{
+		uintptr_t bb;
+		const uintptr_t locked = ~((uintptr_t)0);
+		for (;;) {
+#ifdef __GNUC__
+			bb = __sync_fetch_and_or(&Buf_pool,locked); // get value of s_pool and "lock" by filling with all 1's
+#else
+			bb = s_pool.fetch_or(locked);
+#endif
+			if (bb != locked)
+				break;
+		}
+
+#ifdef __GNUC__
+		__sync_fetch_and_and(&Buf_pool,(uintptr_t)0);
+#else
+		s_pool.store((uintptr_t)0);
+#endif
+
+		while (bb != 0) {
+			uintptr_t next = ((Buf *)bb)->__nextInPool;
+			free((void *)bb);
+			bb = next;
+		}
+	}

	/**
	 * Check for overflow beyond the size of the buffer
@@ -119,9 +219,19 @@ public:
	static ZT_ALWAYS_INLINE bool readOverflow(const int &ii,const unsigned int size)
	{ return ((ii - (int)size) > 0); }

-	////////////////////////////////////////////////////////////////////////////
-	// Read methods
-	////////////////////////////////////////////////////////////////////////////
+	template<typename X>
+	ZT_ALWAYS_INLINE Buf &operator=(const Buf<X> &b) const
+	{
+		memcpy(data.bytes,b.data.bytes,ZT_BUF_MEM_SIZE);
+		return *this;
+	}
+
+	/**
+	 * Zero memory
+	 *
+	 * For performance reasons Buf does not do this on every get().
+	 */
+	ZT_ALWAYS_INLINE void clear() { memset(data.bytes,0,ZT_BUF_MEM_SIZE); }

	/**
	 * Read a byte
@@ -132,7 +242,7 @@ public:
	ZT_ALWAYS_INLINE uint8_t rI8(int &ii) const
	{
		const unsigned int s = (unsigned int)ii++;
-		return data[s & ZT_BUF_MEM_MASK];
+		return data.bytes[s & ZT_BUF_MEM_MASK];
	}

	/**
@@ -147,10 +257,10 @@ public:
		ii += 2;
#ifdef ZT_NO_UNALIGNED_ACCESS
		return (
-			((uint16_t)data[s] << 8U) |
-			(uint16_t)data[s + 1]);
+			((uint16_t)data.bytes[s] << 8U) |
+			(uint16_t)data.bytes[s + 1]);
#else
-		return Utils::ntoh(*reinterpret_cast<const uint16_t *>(data + s));
+		return Utils::ntoh(*reinterpret_cast<const uint16_t *>(data.bytes + s));
#endif
	}

@@ -166,12 +276,12 @@ public:
		ii += 4;
#ifdef ZT_NO_UNALIGNED_ACCESS
		return (
-			((uint32_t)data[s] << 24U) |
-			((uint32_t)data[s + 1] << 16U) |
-			((uint32_t)data[s + 2] << 8U) |
-			(uint32_t)data[s + 3]);
+			((uint32_t)data.bytes[s] << 24U) |
+			((uint32_t)data.bytes[s + 1] << 16U) |
+			((uint32_t)data.bytes[s + 2] << 8U) |
+			(uint32_t)data.bytes[s + 3]);
#else
-		return Utils::ntoh(*reinterpret_cast<const uint32_t *>(data + s));
+		return Utils::ntoh(*reinterpret_cast<const uint32_t *>(data.bytes + s));
#endif
	}

@@ -187,16 +297,16 @@ public:
		ii += 8;
#ifdef ZT_NO_UNALIGNED_ACCESS
		return (
-			((uint64_t)data[s] << 56U) |
-			((uint64_t)data[s + 1] << 48U) |
-			((uint64_t)data[s + 2] << 40U) |
-			((uint64_t)data[s + 3] << 32U) |
-			((uint64_t)data[s + 4] << 24U) |
-			((uint64_t)data[s + 5] << 16U) |
-			((uint64_t)data[s + 6] << 8U) |
-			(uint64_t)data[s + 7]);
+			((uint64_t)data.bytes[s] << 56U) |
+			((uint64_t)data.bytes[s + 1] << 48U) |
+			((uint64_t)data.bytes[s + 2] << 40U) |
+			((uint64_t)data.bytes[s + 3] << 32U) |
+			((uint64_t)data.bytes[s + 4] << 24U) |
+			((uint64_t)data.bytes[s + 5] << 16U) |
+			((uint64_t)data.bytes[s + 6] << 8U) |
+			(uint64_t)data.bytes[s + 7]);
#else
-		return Utils::ntoh(*reinterpret_cast<const uint64_t *>(data + s));
+		return Utils::ntoh(*reinterpret_cast<const uint64_t *>(data.bytes + s));
#endif
	}

@@ -219,7 +329,7 @@ public:
	ZT_ALWAYS_INLINE int rO(int &ii,T &obj) const
	{
		if (ii < ZT_BUF_MEM_SIZE) {
-			int ms = obj.unmarshal(data + ii,ZT_BUF_MEM_SIZE - ii);
+			int ms = obj.unmarshal(data.bytes + ii,ZT_BUF_MEM_SIZE - ii);
			if (ms > 0)
				ii += ms;
			return ms;
@@ -240,10 +350,10 @@ public:
	 */
	ZT_ALWAYS_INLINE char *rS(int &ii,char *const buf,const unsigned int bufSize) const
	{
-		const char *const s = (const char *)(data + ii);
+		const char *const s = (const char *)(data.bytes + ii);
		const int sii = ii;
		while (ii < ZT_BUF_MEM_SIZE) {
-			if (data[ii++] == 0) {
+			if (data.bytes[ii++] == 0) {
				memcpy(buf,s,ii - sii);
				return buf;
			}
@@ -266,9 +376,9 @@ public:
	 */
	ZT_ALWAYS_INLINE const char *rSnc(int &ii) const
	{
-		const char *const s = (const char *)(data + ii);
+		const char *const s = (const char *)(data.bytes + ii);
		while (ii < ZT_BUF_MEM_SIZE) {
-			if (data[ii++] == 0)
+			if (data.bytes[ii++] == 0)
				return s;
		}
		return nullptr;
@@ -287,7 +397,7 @@ public:
	 */
	ZT_ALWAYS_INLINE void *rB(int &ii,void *bytes,unsigned int len) const
	{
-		const void *const b = (const void *)(data + ii);
+		const void *const b = (const void *)(data.bytes + ii);
		if ((ii += (int)len) <= ZT_BUF_MEM_SIZE) {
			memcpy(bytes,b,len);
			return bytes;
@@ -310,14 +420,10 @@ public:
	 */
	ZT_ALWAYS_INLINE const void *rBnc(int &ii,unsigned int len) const
	{
-		const void *const b = (const void *)(data + ii);
+		const void *const b = (const void *)(data.bytes + ii);
		return ((ii += (int)len) <= ZT_BUF_MEM_SIZE) ? b : nullptr;
	}

-	////////////////////////////////////////////////////////////////////////////
-	// Write methods
-	////////////////////////////////////////////////////////////////////////////
-
	/**
	 * Write a byte
	 *
@@ -344,7 +450,7 @@ public:
		data[s] = (uint8_t)(n >> 8U);
		data[s + 1] = (uint8_t)n;
#else
-		*reinterpret_cast<uint16_t *>(data + s) = Utils::hton(n);
+		*reinterpret_cast<uint16_t *>(data.bytes + s) = Utils::hton(n);
#endif
	}

@@ -364,7 +470,7 @@ public:
		data[s + 2] = (uint8_t)(n >> 8U);
		data[s + 3] = (uint8_t)n;
#else
-		*reinterpret_cast<uint32_t *>(data + s) = Utils::hton(n);
+		*reinterpret_cast<uint32_t *>(data.bytes + s) = Utils::hton(n);
#endif
	}

@@ -388,7 +494,7 @@ public:
		data[s + 6] = (uint8_t)(n >> 8U);
		data[s + 7] = (uint8_t)n;
#else
-		*reinterpret_cast<uint64_t *>(data + s) = Utils::hton(n);
+		*reinterpret_cast<uint64_t *>(data.bytes + s) = Utils::hton(n);
#endif
	}

@@ -404,7 +510,7 @@ public:
	{
		const unsigned int s = (unsigned int)ii;
		if ((s + T::marshalSizeMax()) <= ZT_BUF_MEM_SIZE) {
-			int ms = t.marshal(data + s);
+			int ms = t.marshal(data.bytes + s);
			if (ms > 0)
				ii += ms;
		} else {
@@ -442,28 +548,22 @@ public:
	{
		unsigned int s = (unsigned int)ii;
		if ((ii += (int)len) <= ZT_BUF_MEM_SIZE)
-			memcpy(data + s,bytes,len);
+			memcpy(data.bytes + s,bytes,len);
	}

-	////////////////////////////////////////////////////////////////////////////
-
-	ZT_ALWAYS_INLINE Buf &operator=(const Buf &b)
-	{
-		if (&b != this)
-			memcpy(data,b.data,ZT_BUF_MEM_SIZE);
-		return *this;
-	}
-
	/**
-	 * Raw buffer
+	 * Raw data and fields (if U template parameter is set)
	 *
	 * The extra eight bytes permit silent overflow of integer types without reading or writing
	 * beyond Buf's memory and without branching or extra masks. They can be ignored otherwise.
	 */
-	uint8_t data[ZT_BUF_MEM_SIZE + 8];
+	ZT_PACKED_STRUCT(union {
+		uint8_t bytes[ZT_BUF_MEM_SIZE + 8];
+		U fields;
+	}) data;

private:
-	volatile uintptr_t __nextInPool;
+	volatile uintptr_t __nextInPool; // next item in free pool if this Buf is in Buf_pool
	AtomicCounter __refCount;
};
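
The new data union lets the same bytes be read either raw or through a typed fields overlay. Go has no unions, so this sketch emulates the overlay with unsafe; the header struct is hypothetical, standing in for the U template parameter:

package main

import (
	"encoding/binary"
	"fmt"
	"unsafe"
)

// header is a hypothetical packed wire layout (the role of U/fields).
type header struct {
	PacketID [8]byte
	Verb     byte
}

func main() {
	var data [16384 + 8]byte // raw view, like data.bytes
	binary.BigEndian.PutUint64(data[0:8], 0x0102030405060708)
	data[8] = 'A'

	h := (*header)(unsafe.Pointer(&data[0])) // typed view, like data.fields
	fmt.Printf("%x %c\n", h.PacketID, h.Verb) // 0102030405060708 A
}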
@@ -233,9 +233,6 @@

// Exceptions thrown in core ZT code
#define ZT_EXCEPTION_OUT_OF_BOUNDS 100
#define ZT_EXCEPTION_OUT_OF_MEMORY 101
#define ZT_EXCEPTION_PRIVATE_KEY_REQUIRED 102
#define ZT_EXCEPTION_INVALID_ARGUMENT 103
#define ZT_EXCEPTION_INVALID_SERIALIZED_DATA_INVALID_TYPE 200
#define ZT_EXCEPTION_INVALID_SERIALIZED_DATA_OVERFLOW 201
#define ZT_EXCEPTION_INVALID_SERIALIZED_DATA_INVALID_CRYPTOGRAPHIC_TOKEN 202

@@ -83,7 +83,7 @@ int Locator::unmarshal(const uint8_t *restrict data,const int len)
	if (ec > ZT_LOCATOR_MAX_ENDPOINTS)
		return -1;
	_endpointCount = ec;
-	for (int i = 0; i < ec; ++i) {
+	for (unsigned int i = 0; i < ec; ++i) {
		int tmp = _at[i].unmarshal(data + p,len - p);
		if (tmp < 0)
			return -1;
@@ -97,7 +97,7 @@ int Locator::unmarshal(const uint8_t *restrict data,const int len)
	if (sl > ZT_SIGNATURE_BUFFER_SIZE)
		return -1;
	_signatureLength = sl;
-	if ((p + sl) > len)
+	if ((p + (int)sl) > len)
		return -1;
	memcpy(_signature,data + p,sl);
	p += (int)sl;

node/MAC.hpp (71 changed lines)
@@ -35,11 +35,11 @@ public:
	ZT_ALWAYS_INLINE MAC(const MAC &m) : _m(m._m) {}

	ZT_ALWAYS_INLINE MAC(const unsigned char a,const unsigned char b,const unsigned char c,const unsigned char d,const unsigned char e,const unsigned char f) :
-		_m( ((((uint64_t)a) & 0xffULL) << 40) |
-		    ((((uint64_t)b) & 0xffULL) << 32) |
-		    ((((uint64_t)c) & 0xffULL) << 24) |
-		    ((((uint64_t)d) & 0xffULL) << 16) |
-		    ((((uint64_t)e) & 0xffULL) << 8) |
+		_m( ((((uint64_t)a) & 0xffULL) << 40U) |
+		    ((((uint64_t)b) & 0xffULL) << 32U) |
+		    ((((uint64_t)c) & 0xffULL) << 24U) |
+		    ((((uint64_t)d) & 0xffULL) << 16U) |
+		    ((((uint64_t)e) & 0xffULL) << 8U) |
		    (((uint64_t)f) & 0xffULL) ) {}
	ZT_ALWAYS_INLINE MAC(const void *bits,unsigned int len) { setTo(bits,len); }
	ZT_ALWAYS_INLINE MAC(const Address &ztaddr,uint64_t nwid) { fromAddress(ztaddr,nwid); }

@@ -70,13 +70,13 @@ public:
			_m = 0ULL;
			return;
		}
-		const unsigned char *b = (const unsigned char *)bits;
-		_m = ((((uint64_t)*b) & 0xff) << 40); ++b;
-		_m |= ((((uint64_t)*b) & 0xff) << 32); ++b;
-		_m |= ((((uint64_t)*b) & 0xff) << 24); ++b;
-		_m |= ((((uint64_t)*b) & 0xff) << 16); ++b;
-		_m |= ((((uint64_t)*b) & 0xff) << 8); ++b;
-		_m |= (((uint64_t)*b) & 0xff);
+		const uint8_t *const b = (const uint8_t *)bits;
+		_m = (uint64_t)b[0] << 40U;
+		_m |= (uint64_t)b[1] << 32U;
+		_m |= (uint64_t)b[2] << 24U;
+		_m |= (uint64_t)b[3] << 16U;
+		_m |= (uint64_t)b[4] << 8U;
+		_m |= (uint64_t)b[5];
	}

	/**
@@ -87,13 +87,13 @@ public:
	{
		if (len < 6)
			return;
-		unsigned char *b = (unsigned char *)buf;
-		*(b++) = (unsigned char)((_m >> 40) & 0xff);
-		*(b++) = (unsigned char)((_m >> 32) & 0xff);
-		*(b++) = (unsigned char)((_m >> 24) & 0xff);
-		*(b++) = (unsigned char)((_m >> 16) & 0xff);
-		*(b++) = (unsigned char)((_m >> 8) & 0xff);
-		*b = (unsigned char)(_m & 0xff);
+		uint8_t *const b = (uint8_t *)buf;
+		b[0] = (uint8_t)(_m >> 40U);
+		b[1] = (uint8_t)(_m >> 32U);
+		b[2] = (uint8_t)(_m >> 24U);
+		b[3] = (uint8_t)(_m >> 16U);
+		b[4] = (uint8_t)(_m >> 8U);
+		b[5] = (uint8_t)_m;
	}
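
setTo and copyTo above are plain big-endian packing of a 48-bit MAC into the low bits of a 64-bit integer. The same arithmetic in Go, for reference (function names are illustrative):

package main

import "fmt"

// macFromBytes packs six bytes into the low 48 bits, most significant first.
func macFromBytes(b [6]byte) uint64 {
	return uint64(b[0])<<40 | uint64(b[1])<<32 | uint64(b[2])<<24 |
		uint64(b[3])<<16 | uint64(b[4])<<8 | uint64(b[5])
}

// macToBytes is the inverse, mirroring copyTo.
func macToBytes(m uint64) (b [6]byte) {
	b[0], b[1], b[2] = byte(m>>40), byte(m>>32), byte(m>>24)
	b[3], b[4], b[5] = byte(m>>16), byte(m>>8), byte(m)
	return
}

func main() {
	in := [6]byte{0x02, 0xde, 0xad, 0xbe, 0xef, 0x01}
	m := macFromBytes(in)
	fmt.Printf("%012x %v\n", m, macToBytes(m) == in) // 02deadbeef01 true
}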
@@ -104,7 +104,7 @@ public:
	template<unsigned int C>
	ZT_ALWAYS_INLINE void appendTo(Buffer<C> &b) const
	{
-		unsigned char *p = (unsigned char *)b.appendField(6);
+		uint8_t *p = (uint8_t *)b.appendField(6);
		*(p++) = (unsigned char)((_m >> 40) & 0xff);
		*(p++) = (unsigned char)((_m >> 32) & 0xff);
		*(p++) = (unsigned char)((_m >> 24) & 0xff);
@@ -123,11 +123,6 @@ public:
	 */
	ZT_ALWAYS_INLINE bool isMulticast() const { return ((_m & 0x010000000000ULL) != 0ULL); }

-	/**
-	 * @param True if this is a locally-administered MAC
-	 */
-	ZT_ALWAYS_INLINE bool isLocallyAdministered() const { return ((_m & 0x020000000000ULL) != 0ULL); }
-
	/**
	 * Set this MAC to a MAC derived from an address and a network ID
	 *
@@ -187,25 +182,25 @@ public:

	ZT_ALWAYS_INLINE unsigned long hashCode() const { return (unsigned long)_m; }

-	inline char *toString(char buf[18]) const
+	ZT_ALWAYS_INLINE char *toString(char buf[18]) const
	{
-		buf[0] = Utils::HEXCHARS[(_m >> 44) & 0xf];
-		buf[1] = Utils::HEXCHARS[(_m >> 40) & 0xf];
+		buf[0] = Utils::HEXCHARS[(_m >> 44U) & 0xfU];
+		buf[1] = Utils::HEXCHARS[(_m >> 40U) & 0xfU];
		buf[2] = ':';
-		buf[3] = Utils::HEXCHARS[(_m >> 36) & 0xf];
-		buf[4] = Utils::HEXCHARS[(_m >> 32) & 0xf];
+		buf[3] = Utils::HEXCHARS[(_m >> 36U) & 0xfU];
+		buf[4] = Utils::HEXCHARS[(_m >> 32U) & 0xfU];
		buf[5] = ':';
-		buf[6] = Utils::HEXCHARS[(_m >> 28) & 0xf];
-		buf[7] = Utils::HEXCHARS[(_m >> 24) & 0xf];
+		buf[6] = Utils::HEXCHARS[(_m >> 28U) & 0xfU];
+		buf[7] = Utils::HEXCHARS[(_m >> 24U) & 0xfU];
		buf[8] = ':';
-		buf[9] = Utils::HEXCHARS[(_m >> 20) & 0xf];
-		buf[10] = Utils::HEXCHARS[(_m >> 16) & 0xf];
+		buf[9] = Utils::HEXCHARS[(_m >> 20U) & 0xfU];
+		buf[10] = Utils::HEXCHARS[(_m >> 16U) & 0xfU];
		buf[11] = ':';
-		buf[12] = Utils::HEXCHARS[(_m >> 12) & 0xf];
-		buf[13] = Utils::HEXCHARS[(_m >> 8) & 0xf];
+		buf[12] = Utils::HEXCHARS[(_m >> 12U) & 0xfU];
+		buf[13] = Utils::HEXCHARS[(_m >> 8U) & 0xfU];
		buf[14] = ':';
-		buf[15] = Utils::HEXCHARS[(_m >> 4) & 0xf];
-		buf[16] = Utils::HEXCHARS[_m & 0xf];
+		buf[15] = Utils::HEXCHARS[(_m >> 4U) & 0xfU];
+		buf[16] = Utils::HEXCHARS[_m & 0xfU];
		buf[17] = (char)0;
		return buf;
	}

@@ -254,6 +254,21 @@
 */
#define ZT_PROTO_NODE_META_WILL_RELAY_TO "r"

+/**
+ * X coordinate of your node (sent in OK(HELLO))
+ */
+#define ZT_PROTO_NODE_META_LOCATION_X "gX"
+
+/**
+ * Y coordinate of your node (sent in OK(HELLO))
+ */
+#define ZT_PROTO_NODE_META_LOCATION_Y "gY"
+
+/**
+ * Z coordinate of your node (sent in OK(HELLO))
+ */
+#define ZT_PROTO_NODE_META_LOCATION_Z "gZ"
+
// ---------------------------------------------------------------------------

namespace ZeroTier {

@@ -59,7 +59,7 @@ public:
	 *
	 * @param renv Runtime environment
	 */
-	Peer(const RuntimeEnvironment *renv);
+	explicit Peer(const RuntimeEnvironment *renv);

	ZT_ALWAYS_INLINE ~Peer() { Utils::burn(_key,sizeof(_key)); }

@@ -96,7 +96,7 @@ public:
	 * @param signer Signing identity, must have private key
	 * @return True if signature was successful
	 */
-	inline bool sign(const Identity &signer)
+	ZT_ALWAYS_INLINE bool sign(const Identity &signer)
	{
		if (signer.hasPrivate()) {
			Buffer<sizeof(Revocation) + 64> tmp;

@@ -85,7 +85,7 @@ static unsigned long _Utils_itoa(unsigned long n,char *s)
	unsigned long pos = _Utils_itoa(n / 10,s);
	if (pos >= 22) // sanity check, should be impossible
		pos = 22;
-	s[pos] = '0' + (char)(n % 10);
+	s[pos] = (char)('0' + (n % 10));
	return pos + 1;
}
char *decimal(unsigned long n,char s[24])