Commit a4ae4941c3 — zerotier/ZeroTierOne (mirror of https://github.com/zerotier/ZeroTierOne.git)
Parent: 8a6ef33c4a

Try to ignore temporary IPv6 addresses, fix Peer serialization, add an endpoint cache to Peer, some more Go stuff.

14 changed files with 315 additions and 201 deletions

@@ -27,6 +27,10 @@
#include <unistd.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <sys/types.h>
#include <sys/ioctl.h>
#include <ifaddrs.h>
#include <netinet6/in6_var.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <errno.h>

@@ -684,3 +688,26 @@ extern "C" void ZT_GoTap_setMtu(ZT_GoTap *tap,unsigned int mtu)
{
reinterpret_cast<EthernetTap *>(tap)->setMtu(mtu);
}

extern "C" int ZT_isTemporaryV6Address(const char *ifname,const struct sockaddr_storage *a)
{
#ifndef __WINDOWS__
static ZT_SOCKET s_tmpV6Socket = ZT_INVALID_SOCKET;
static std::mutex s_lock;
std::lock_guard<std::mutex> l(s_lock);
if (s_tmpV6Socket == ZT_INVALID_SOCKET) {
s_tmpV6Socket = socket(AF_INET6,SOCK_DGRAM,0);
if (s_tmpV6Socket <= 0)
return 0;
}
struct in6_ifreq ifr;
strncpy(ifr.ifr_name,ifname,sizeof(ifr.ifr_name));
memcpy(&(ifr.ifr_addr),a,sizeof(sockaddr_in6));
if (ioctl(s_tmpV6Socket,SIOCGIFAFLAG_IN6,&ifr) < 0) {
return 0;
}
return ((ifr.ifr_ifru.ifru_flags6 & IN6_IFF_TEMPORARY) != 0) ? 1 : 0;
#else
return 0;
#endif
}

@@ -48,6 +48,8 @@ void ZT_GoTap_deviceName(ZT_GoTap *tap,char nbuf[256]);
void ZT_GoTap_setFriendlyName(ZT_GoTap *tap,const char *friendlyName);
void ZT_GoTap_setMtu(ZT_GoTap *tap,unsigned int mtu);

int ZT_isTemporaryV6Address(const char *ifname,const struct sockaddr_storage *a);

#ifdef __cplusplus
}
#endif

@@ -7,6 +7,7 @@ package zerotier
// static inline const struct sockaddr_storage *_getSS(const ZT_Endpoint *ep) { return &(ep->value.ss); }
// static inline void _setSS(ZT_Endpoint *ep,const void *ss) { memcpy(&(ep->value.ss),ss,sizeof(struct sockaddr_storage)); }
import "C"

import (
"encoding/json"
"unsafe"

@@ -131,3 +132,7 @@ func (ep *Endpoint) UnmarshalJSON(j []byte) error {
*ep = *ep2
return nil
}

func (ep *Endpoint) setFromCEndpoint(cp *C.ZT_Endpoint) {
ep.cep = *cp
}

@@ -18,14 +18,15 @@ import "C"

import (
"bytes"
"encoding/json"
"fmt"
"strings"
"unsafe"
)

type Fingerprint struct {
Address Address `json:"address"`
Hash []byte `json:"hash"`
Address Address
Hash []byte
}

func NewFingerprintFromString(fps string) (*Fingerprint, error) {

@@ -82,3 +83,19 @@ func (fp *Fingerprint) cFingerprint() *C.ZT_Fingerprint {
copy((*[48]byte)(unsafe.Pointer(&apifp.hash[0]))[:], fp.Hash[:])
return &apifp
}

func (fp *Fingerprint) MarshalJSON() ([]byte, error) {
return []byte("\"" + fp.String() + "\""), nil
}

func (fp *Fingerprint) UnmarshalJSON(j []byte) error {
var s string
err := json.Unmarshal(j, &s)
if err != nil {
return err
}
fp2, err := NewFingerprintFromString(s)
fp.Address = fp2.Address
fp.Hash = fp2.Hash
return err
}

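These two methods make a Fingerprint marshal to and from its canonical string form, which is also why the struct's json field tags are dropped in the earlier hunk. A minimal, self-contained sketch of that marshal-as-string pattern follows; the fingerprint type and its "address-hash" string layout are illustrative stand-ins, not the real ZeroTier types:

package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

// fingerprint is a stand-in type: an address plus a hash, rendered as a
// single "address-hash" string when marshaled to JSON.
type fingerprint struct {
	Address string
	Hash    string
}

func (f *fingerprint) String() string { return f.Address + "-" + f.Hash }

// MarshalJSON emits the fingerprint as one JSON string.
func (f *fingerprint) MarshalJSON() ([]byte, error) {
	return json.Marshal(f.String())
}

// UnmarshalJSON accepts the string form and splits it back into fields.
func (f *fingerprint) UnmarshalJSON(j []byte) error {
	var s string
	if err := json.Unmarshal(j, &s); err != nil {
		return err
	}
	parts := strings.SplitN(s, "-", 2)
	if len(parts) != 2 {
		return fmt.Errorf("invalid fingerprint %q", s)
	}
	f.Address, f.Hash = parts[0], parts[1]
	return nil
}

func main() {
	in := &fingerprint{Address: "89e92ceee5", Hash: "abcd"}
	j, _ := json.Marshal(in)
	fmt.Println(string(j)) // "89e92ceee5-abcd"
	var out fingerprint
	_ = json.Unmarshal(j, &out)
	fmt.Println(out.Address, out.Hash)
}
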
@@ -197,34 +197,6 @@ func (ina *InetAddress) UnmarshalJSON(j []byte) error {
return nil
}

func (ina *InetAddress) unmarshalZT(b []byte) (int, error) {
if len(b) <= 0 {
return 0, ErrInvalidInetAddress
}
switch b[0] {
case 0:
ina.IP = nil
ina.Port = 0
return 1, nil
case 4:
if len(b) != 7 {
return 0, ErrInvalidInetAddress
}
ina.IP = []byte{b[1], b[2], b[3], b[4]}
ina.Port = int(binary.BigEndian.Uint16(b[5:7]))
return 7, nil
case 6:
if len(b) != 19 {
return 0, ErrInvalidInetAddress
}
ina.IP = append(make([]byte, 0, 16), b[1:17]...)
ina.Port = int(binary.BigEndian.Uint16(b[17:19]))
return 19, nil
default:
return 0, ErrInvalidInetAddress
}
}

// key returns a short array suitable for use as a map[] key for this IP
func (ina *InetAddress) key() (k [3]uint64) {
copy(((*[16]byte)(unsafe.Pointer(&k[0])))[:], ina.IP)

@@ -40,9 +40,6 @@ type LocalConfigSettings struct {
// SecondaryPort is the secondary UDP port, set to 0 to disable (picked at random by default)
SecondaryPort int `json:"secondaryPort"`

// PortSearch causes ZeroTier to try other ports automatically if it can't bind to configured ports
PortSearch bool `json:"portSearch"`

// PortMapping enables uPnP and NAT-PMP support
PortMapping bool `json:"portMapping"`

@@ -77,7 +74,7 @@ type LocalConfig struct {
}

// Read this local config from a file, initializing to defaults if the file does not exist.
func (lc *LocalConfig) Read(p string, saveDefaultsIfNotExist bool, isTotallyNewNode bool) error {
func (lc *LocalConfig) Read(p string, saveDefaultsIfNotExist, isTotallyNewNode bool) error {
// Initialize defaults, which may be replaced if we read a file from disk.
if !lc.initialized {
lc.initialized = true

@@ -88,7 +85,6 @@ func (lc *LocalConfig) Read(p string, saveDefaultsIfNotExist bool, isTotallyNewN

lc.Settings.PrimaryPort = 9993
lc.Settings.SecondaryPort = unassignedPrivilegedPorts[randomUInt()%uint(len(unassignedPrivilegedPorts))]
lc.Settings.PortSearch = true
lc.Settings.PortMapping = true
lc.Settings.LogSizeMax = 128

@@ -17,18 +17,17 @@ package zerotier
import "C"

import (
"encoding/json"
"runtime"
"unsafe"
)

type Locator struct {
cl unsafe.Pointer
}

func locatorFinalizer(obj interface{}) {
if obj != nil {
C.ZT_Locator_delete(obj.(Locator).cl)
}
Timestamp int64 `json:"timestamp"`
Fingerprint *Fingerprint `json:"fingerprint"`
Endpoints []Endpoint `json:"endpoints"`
String string `json:"string"`
cl unsafe.Pointer
}

func NewLocator(ts int64, endpoints []Endpoint, signer *Identity) (*Locator, error) {

@@ -44,9 +43,10 @@ func NewLocator(ts int64, endpoints []Endpoint, signer *Identity) (*Locator, err
if uintptr(loc) == 0 {
return nil, ErrInvalidParameter
}

goloc := new(Locator)
goloc.cl = unsafe.Pointer(loc)
runtime.SetFinalizer(goloc, locatorFinalizer)
goloc.init()
return goloc, nil
}

@@ -58,9 +58,10 @@ func NewLocatorFromBytes(lb []byte) (*Locator, error) {
if uintptr(loc) == 0 {
return nil, ErrInvalidParameter
}

goloc := new(Locator)
goloc.cl = unsafe.Pointer(loc)
runtime.SetFinalizer(goloc, locatorFinalizer)
goloc.init()
return goloc, nil
}

@@ -74,41 +75,73 @@ func NewLocatorFromString(s string) (*Locator, error) {
if loc == nil {
return nil, ErrInvalidParameter
}

goloc := new(Locator)
goloc.cl = unsafe.Pointer(loc)
runtime.SetFinalizer(goloc, locatorFinalizer)
goloc.init()
return goloc, nil
}

// GetInfo gets information about this locator, also validating its signature if 'id' is non-nil.
// If 'id' is nil the 'valid' return value is undefined.
func (loc *Locator) GetInfo(id *Identity) (ts int64, fp *Fingerprint, eps []Endpoint, valid bool, err error) {
ts = int64(C.ZT_Locator_timestamp(loc.cl))
cfp := C.ZT_Locator_fingerprint(loc.cl)
if uintptr(unsafe.Pointer(cfp)) == 0 {
err = ErrInternal
return
func (loc *Locator) Validate(id *Identity) bool {
if id == nil {
return false
}
fp = newFingerprintFromCFingerprint(cfp)
epc := int(C.ZT_Locator_endpointCount(loc.cl))
eps = make([]Endpoint, epc)
for i := 0; i < epc; i++ {
eps[i].cep = *C.ZT_Locator_endpoint(loc.cl, C.uint(i))
}
if id != nil {
id.initCIdentityPtr()
valid = C.ZT_Locator_verify(loc.cl, id.cid) != 0
}
return
id.initCIdentityPtr()
return C.ZT_Locator_verify(loc.cl, id.cid) != 0
}

func (loc *Locator) String() string {
func (loc *Locator) Bytes() []byte {
var buf [4096]byte
C.ZT_Locator_toString(loc.cl, (*C.char)(unsafe.Pointer(&buf[0])), 4096)
for i := range buf {
if buf[i] == 0 {
return string(buf[0:i])
}
bl := C.ZT_Locator_marshal(loc.cl, unsafe.Pointer(&buf[0]), 4096)
if bl <= 0 {
return nil
}
return ""
return buf[0:int(bl)]
}

func (loc *Locator) MarshalJSON() ([]byte, error) {
return json.Marshal(loc)
}

func (loc *Locator) UnmarshalJSON(j []byte) error {
C.ZT_Locator_delete(loc.cl)
loc.cl = unsafe.Pointer(nil)

err := json.Unmarshal(j, loc)
if err != nil {
return err
}

sb := []byte(loc.String)
sb = append(sb, 0)
cl := C.ZT_Locator_fromString((*C.char)(unsafe.Pointer(&sb[0])))
if cl == nil {
return ErrInvalidParameter
}
loc.cl = cl
return loc.init()
}

func locatorFinalizer(obj interface{}) {
if obj != nil {
C.ZT_Locator_delete(obj.(Locator).cl)
}
}

func (loc *Locator) init() error {
loc.Timestamp = int64(C.ZT_Locator_timestamp(loc.cl))
cfp := C.ZT_Locator_fingerprint(loc.cl)
if uintptr(unsafe.Pointer(cfp)) == 0 {
return ErrInternal
}
loc.Fingerprint = newFingerprintFromCFingerprint(cfp)
epc := int(C.ZT_Locator_endpointCount(loc.cl))
loc.Endpoints = make([]Endpoint, epc)
for i := 0; i < epc; i++ {
loc.Endpoints[i].cep = *C.ZT_Locator_endpoint(loc.cl, C.uint(i))
}
var buf [4096]byte
loc.String = C.GoString(C.ZT_Locator_toString(loc.cl, (*C.char)(unsafe.Pointer(&buf[0])), 4096))
runtime.SetFinalizer(loc, locatorFinalizer)
return nil
}

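After this refactor a Locator exposes plain data fields plus its canonical string, and UnmarshalJSON rebuilds the underlying C object from that string. A general pitfall with this pattern is that calling json.Marshal or json.Unmarshal on the same type from inside MarshalJSON/UnmarshalJSON recurses; a common way around that is an alias type with no methods. Below is a small, self-contained illustration of the round trip under that approach, with a hypothetical locator struct standing in for the real one:

package main

import (
	"encoding/json"
	"fmt"
)

// locator is a stand-in: exported fields plus a canonical string form.
type locator struct {
	Timestamp int64    `json:"timestamp"`
	Endpoints []string `json:"endpoints"`
	String    string   `json:"string"`
}

// locatorAlias has the same fields but none of the methods, so encoding/json
// falls back to its default behavior and the custom methods do not recurse.
type locatorAlias locator

func (l *locator) MarshalJSON() ([]byte, error) {
	return json.Marshal((*locatorAlias)(l))
}

func (l *locator) UnmarshalJSON(j []byte) error {
	if err := json.Unmarshal(j, (*locatorAlias)(l)); err != nil {
		return err
	}
	// The real code would re-parse l.String into the underlying C locator
	// object here and refresh the derived fields from it.
	return nil
}

func main() {
	in := &locator{Timestamp: 1, Endpoints: []string{"192.0.2.1/9993"}, String: "opaque-canonical-form"}
	j, _ := json.Marshal(in)
	var out locator
	if err := json.Unmarshal(j, &out); err != nil {
		panic(err)
	}
	fmt.Println(out.Timestamp, out.Endpoints, out.String)
}
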
@@ -18,6 +18,7 @@ package zerotier
// #cgo linux android LDFLAGS: ${SRCDIR}/../../../build/go/native/libzt_go_native.a ${SRCDIR}/../../../build/node/libzt_core.a ${SRCDIR}/../../../build/osdep/libzt_osdep.a -lstdc++ -lpthread -lm
// #include "../../native/GoGlue.h"
import "C"

import (
"bytes"
"errors"

@@ -195,9 +196,12 @@ func NewNode(basePath string) (n *Node, err error) {
}
n.localConfigPath = path.Join(basePath, "local.conf")

_, identitySecretNotFoundErr := os.Stat(path.Join(basePath, "identity.secret"))
// Read local configuration, initializing with defaults if not found. We
// check for identity.secret's existence to determine if this is a new
// node or one that already existed. This influences some of the defaults.
_, isTotallyNewNode := os.Stat(path.Join(basePath, "identity.secret"))
n.localConfig = new(LocalConfig)
err = n.localConfig.Read(n.localConfigPath, true, identitySecretNotFoundErr != nil)
err = n.localConfig.Read(n.localConfigPath, true, isTotallyNewNode != nil)
if err != nil {
return
}

@@ -220,59 +224,45 @@ func NewNode(basePath string) (n *Node, err error) {
n.errLog = nullLogger
}

if n.localConfig.Settings.PortSearch {
portsChanged := false

portCheckCount := 0
origPort := n.localConfig.Settings.PrimaryPort
portsChanged := false
portCheckCount := 0
origPort := n.localConfig.Settings.PrimaryPort
for portCheckCount < 256 {
portCheckCount++
if checkPort(n.localConfig.Settings.PrimaryPort) {
if n.localConfig.Settings.PrimaryPort != origPort {
n.infoLog.Printf("primary port %d unavailable, found port %d and saved in local.conf", origPort, n.localConfig.Settings.PrimaryPort)
}
break
}
n.localConfig.Settings.PrimaryPort = int(4096 + (randomUInt() % 16384))
portsChanged = true
}
if portCheckCount == 256 {
return nil, errors.New("unable to bind to primary port: tried configured port and 256 other random ports")
}
if n.localConfig.Settings.SecondaryPort > 0 {
portCheckCount = 0
origPort = n.localConfig.Settings.SecondaryPort
for portCheckCount < 256 {
portCheckCount++
if checkPort(n.localConfig.Settings.PrimaryPort) {
if n.localConfig.Settings.PrimaryPort != origPort {
n.infoLog.Printf("primary port %d unavailable, found port %d and saved in local.conf", origPort, n.localConfig.Settings.PrimaryPort)
if checkPort(n.localConfig.Settings.SecondaryPort) {
if n.localConfig.Settings.SecondaryPort != origPort {
n.infoLog.Printf("secondary port %d unavailable, found port %d (port search enabled)", origPort, n.localConfig.Settings.SecondaryPort)
}
break
}
n.localConfig.Settings.PrimaryPort = int(4096 + (randomUInt() % 16384))
n.infoLog.Printf("secondary port %d unavailable, trying a random port (port search enabled)", n.localConfig.Settings.SecondaryPort)
if portCheckCount <= 64 {
n.localConfig.Settings.SecondaryPort = unassignedPrivilegedPorts[randomUInt()%uint(len(unassignedPrivilegedPorts))]
} else {
n.localConfig.Settings.SecondaryPort = int(16384 + (randomUInt() % 16384))
}
portsChanged = true
}
if portCheckCount == 256 {
return nil, errors.New("unable to bind to primary port: tried configured port and 256 other random ports")
}

if n.localConfig.Settings.SecondaryPort > 0 {
portCheckCount = 0
origPort = n.localConfig.Settings.SecondaryPort
for portCheckCount < 256 {
portCheckCount++
if checkPort(n.localConfig.Settings.SecondaryPort) {
if n.localConfig.Settings.SecondaryPort != origPort {
n.infoLog.Printf("secondary port %d unavailable, found port %d (port search enabled)", origPort, n.localConfig.Settings.SecondaryPort)
}
break
}
n.infoLog.Printf("secondary port %d unavailable, trying a random port (port search enabled)", n.localConfig.Settings.SecondaryPort)
if portCheckCount <= 64 {
n.localConfig.Settings.SecondaryPort = unassignedPrivilegedPorts[randomUInt()%uint(len(unassignedPrivilegedPorts))]
} else {
n.localConfig.Settings.SecondaryPort = int(16384 + (randomUInt() % 16384))
}
portsChanged = true
}
}

if portsChanged {
_ = n.localConfig.Write(n.localConfigPath)
}
} else {
if !checkPort(n.localConfig.Settings.PrimaryPort) {
return nil, errors.New("unable to bind to primary port")
}
if n.localConfig.Settings.SecondaryPort > 0 && n.localConfig.Settings.SecondaryPort < 65536 {
if !checkPort(n.localConfig.Settings.SecondaryPort) {
n.infoLog.Printf("WARNING: unable to bind secondary port %d", n.localConfig.Settings.SecondaryPort)
}
}
}
if portsChanged {
_ = n.localConfig.Write(n.localConfigPath)
}

n.namedSocketApiServer, n.tcpApiServer, err = createAPIServer(basePath, n)

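The block above is the port-search behavior: keep the configured primary port if it can be bound, otherwise try up to 256 random high ports and remember that the configuration changed so it can be written back to local.conf. A rough, self-contained sketch of that strategy, where a plain UDP bind stands in for checkPort and everything else is illustrative:

package main

import (
	"errors"
	"fmt"
	"math/rand"
	"net"
)

// canBindUDP reports whether a UDP socket can be bound to the given port.
func canBindUDP(port int) bool {
	c, err := net.ListenUDP("udp", &net.UDPAddr{Port: port})
	if err != nil {
		return false
	}
	_ = c.Close()
	return true
}

// searchPort tries the configured port first, then up to 256 random ports in
// the 4096..20479 range, mirroring the loop in the hunk above. It reports the
// chosen port and whether it differs from the configured one.
func searchPort(configured int) (int, bool, error) {
	port := configured
	changed := false
	for attempts := 0; attempts < 256; attempts++ {
		if canBindUDP(port) {
			return port, changed, nil
		}
		port = 4096 + rand.Intn(16384)
		changed = true
	}
	return 0, changed, errors.New("tried configured port and 256 random ports")
}

func main() {
	port, changed, err := searchPort(9993)
	if err != nil {
		panic(err)
	}
	fmt.Printf("using port %d (changed from configured: %v)\n", port, changed)
}
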
@@ -481,34 +471,10 @@ func (n *Node) Peers() []*Peer {
pl := C.ZT_Node_peers(n.zn)
if pl != nil {
for i := uintptr(0); i < uintptr(pl.peerCount); i++ {
p := (*C.ZT_Peer)(unsafe.Pointer(uintptr(unsafe.Pointer(pl.peers)) + (i * C.sizeof_ZT_Peer)))
p2 := new(Peer)
p2.Address = Address(p.address)
p2.Identity, _ = newIdentityFromCIdentity(unsafe.Pointer(p.identity))
p2.Fingerprint.Address = p2.Address
copy(p2.Fingerprint.Hash[:], ((*[48]byte)(unsafe.Pointer(&p.fingerprint.hash[0])))[:])
p2.Version = [3]int{int(p.versionMajor), int(p.versionMinor), int(p.versionRev)}
p2.Latency = int(p.latency)
p2.Root = p.root != 0

p2.Paths = make([]Path, 0, int(p.pathCount))
for j := 0; j < len(p2.Paths); j++ {
pt := (*C.ZT_Path)(unsafe.Pointer(uintptr(unsafe.Pointer(p.paths)) + uintptr(j*C.sizeof_ZT_Path)))
if pt.alive != 0 {
ep := Endpoint{pt.endpoint}
a := ep.InetAddress()
if a != nil {
p2.Paths = append(p2.Paths, Path{
IP: a.IP,
Port: a.Port,
LastSend: int64(pt.lastSend),
LastReceive: int64(pt.lastReceive),
})
}
}
p, _ := newPeerFromCPeer((*C.ZT_Peer)(unsafe.Pointer(uintptr(unsafe.Pointer(pl.peers)) + (i * C.sizeof_ZT_Peer))))
if p != nil {
peers = append(peers, p)
}

peers = append(peers, p2)
}
C.ZT_freeQueryResult(unsafe.Pointer(pl))
}

@@ -524,7 +490,8 @@ func (n *Node) runMaintenance() {
n.localConfigLock.RLock()
defer n.localConfigLock.RUnlock()

// Get local physical interface addresses, excluding blacklisted and ZeroTier-created interfaces
// Get local physical interface addresses, excluding blacklisted and
// ZeroTier-created interfaces.
interfaceAddresses := make(map[string]net.IP)
ifs, _ := net.Interfaces()
if len(ifs) > 0 {

@@ -541,8 +508,21 @@ func (n *Node) runMaintenance() {
addrs, _ := i.Addrs()
for _, a := range addrs {
ipn, _ := a.(*net.IPNet)
if ipn != nil && len(ipn.IP) > 0 && !ipn.IP.IsLinkLocalUnicast() && !ipn.IP.IsMulticast() {
interfaceAddresses[ipn.IP.String()] = ipn.IP
if ipn != nil && len(ipn.IP) > 0 && ipn.IP.IsGlobalUnicast() {
isTemporary := false
if len(ipn.IP) == 16 {
var ss C.struct_sockaddr_storage
if makeSockaddrStorage(ipn.IP, 0, &ss) {
cIfName := C.CString(i.Name)
if C.ZT_isTemporaryV6Address(cIfName, &ss) != 0 {
isTemporary = true
}
C.free(unsafe.Pointer(cIfName))
}
}
if !isTemporary {
interfaceAddresses[ipn.IP.String()] = ipn.IP
}
}
}
}

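The filter above now keeps only global-unicast addresses and, for IPv6, asks the native ZT_isTemporaryV6Address helper whether the address is an RFC 4941 temporary (privacy) address before advertising it. A pure-Go sketch of the same filtering, with a stub predicate standing in for the cgo call:

package main

import (
	"fmt"
	"net"
)

// isTemporaryV6 is a placeholder for the cgo call to ZT_isTemporaryV6Address;
// a real implementation needs the per-address interface flags from the OS.
func isTemporaryV6(ifName string, ip net.IP) bool {
	return false
}

// usableAddresses collects global-unicast interface addresses, skipping
// temporary IPv6 addresses, keyed by their string form as in the hunk above.
func usableAddresses() map[string]net.IP {
	out := make(map[string]net.IP)
	ifs, _ := net.Interfaces()
	for _, i := range ifs {
		addrs, _ := i.Addrs()
		for _, a := range addrs {
			ipn, ok := a.(*net.IPNet)
			if !ok || len(ipn.IP) == 0 || !ipn.IP.IsGlobalUnicast() {
				continue
			}
			if ipn.IP.To4() == nil && isTemporaryV6(i.Name, ipn.IP) {
				continue
			}
			out[ipn.IP.String()] = ipn.IP
		}
	}
	return out
}

func main() {
	for s := range usableAddresses() {
		fmt.Println(s)
	}
}
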
@@ -13,12 +13,18 @@

package zerotier

import "net"
// #include "../../native/GoGlue.h"
import "C"

// Path is a path to another peer on the network
type Path struct {
IP net.IP `json:"ip"`
Port int `json:"port"`
LastSend int64 `json:"lastSend"`
LastReceive int64 `json:"lastReceive"`
Endpoint Endpoint `json:"endpoint"`
LastSend int64 `json:"lastSend"`
LastReceive int64 `json:"lastReceive"`
}

func (p *Path) setFromCPath(cp *C.ZT_Path) {
p.Endpoint.setFromCEndpoint(&(cp.endpoint))
p.LastSend = int64(cp.lastSend)
p.LastReceive = int64(cp.lastReceive)
}

@@ -13,15 +13,40 @@

package zerotier

// #include "../../native/GoGlue.h"
import "C"

import "unsafe"

// Peer is another ZeroTier node
type Peer struct {
Address Address `json:"address"`
Identity *Identity `json:"identity"`
Fingerprint Fingerprint `json:"fingerprint"`
Version [3]int `json:"version"`
Latency int `json:"latency"`
Root bool `json:"root"`
Paths []Path `json:"paths,omitempty"`
LocatorTimestamp int64 `json:"locatorTimestamp"`
LocatorEndpoints []Endpoint `json:"locatorEndpoints,omitempty"`
Address Address `json:"address"`
Identity *Identity `json:"identity"`
Fingerprint *Fingerprint `json:"fingerprint"`
Version [3]int `json:"version"`
Latency int `json:"latency"`
Root bool `json:"root"`
Paths []Path `json:"paths,omitempty"`
Locator *Locator `json:"locator,omitempty"`
}

func newPeerFromCPeer(cp *C.ZT_Peer) (p *Peer, err error) {
p = new(Peer)
p.Address = Address(cp.address)
p.Identity, err = newIdentityFromCIdentity(cp.identity)
if err != nil {
return
}
p.Fingerprint = newFingerprintFromCFingerprint(&(cp.fingerprint))
p.Version[0] = int(cp.versionMajor)
p.Version[1] = int(cp.versionMinor)
p.Version[2] = int(cp.versionRev)
p.Latency = int(cp.latency)
p.Root = cp.root != 0
p.Paths = make([]Path, int(cp.pathCount))
for i := range p.Paths {
p.Paths[i].setFromCPath(&(cp.paths[i]))
}
p.Locator, err = NewLocatorFromBytes(C.GoBytes(unsafe.Pointer(cp.locator), C.int(cp.locatorSize)))
return
}

@@ -1258,19 +1258,14 @@ typedef struct
ZT_Path *paths;

/**
* Timestamp of peer's locator or 0 if none on file
* Size of locator in bytes or 0 if none
*/
int64_t locatorTimestamp;
unsigned int locatorSize;

/**
* Number of endpoints in locator
* Serialized locator or NULL if none
*/
unsigned int locatorEndpointCount;

/**
* Endpoints in peer's locator
*/
ZT_Endpoint *locatorEndpoints;
const void *locator;
} ZT_Peer;

/**

@@ -389,7 +389,7 @@ ZT_PeerList *Node::peers() const
(sizeof(ZT_Peer) * peers.size()) +
((sizeof(ZT_Path) * ZT_MAX_PEER_NETWORK_PATHS) * peers.size()) +
(sizeof(Identity) * peers.size()) +
((sizeof(ZT_Endpoint) * ZT_LOCATOR_MAX_ENDPOINTS) * peers.size());
(ZT_LOCATOR_MARSHAL_SIZE_MAX * peers.size());
char *buf = (char *)malloc(bufSize);
if (!buf)
return nullptr;

@@ -402,7 +402,7 @@ ZT_PeerList *Node::peers() const
buf += (sizeof(ZT_Path) * ZT_MAX_PEER_NETWORK_PATHS) * peers.size();
Identity *identities = reinterpret_cast<Identity *>(buf);
buf += sizeof(Identity) * peers.size();
ZT_Endpoint *locatorEndpoint = reinterpret_cast<ZT_Endpoint *>(buf);
uint8_t *locatorBuf = reinterpret_cast<uint8_t *>(buf);

const int64_t now = m_now;

@@ -430,7 +430,7 @@ ZT_PeerList *Node::peers() const
p->networkCount = 0;
// TODO: enumerate network memberships

Vector<SharedPtr<Path> > paths;
Vector< SharedPtr<Path> > paths;
(*pi)->getAllPaths(paths);
p->pathCount = (unsigned int)paths.size();
p->paths = peerPath;

@@ -446,11 +446,12 @@ ZT_PeerList *Node::peers() const

const SharedPtr<const Locator> loc((*pi)->locator());
if (loc) {
p->locatorTimestamp = loc->timestamp();
p->locatorEndpointCount = (unsigned int)loc->endpoints().size();
p->locatorEndpoints = locatorEndpoint;
for (Vector<Endpoint>::const_iterator ep(loc->endpoints().begin());ep != loc->endpoints().end();++ep)
*(locatorEndpoint++) = *ep;
const int ls = loc->marshal(locatorBuf);
if (ls > 0) {
p->locatorSize = (unsigned int)ls;
p->locator = locatorBuf;
locatorBuf += ls;
}
}

++pl->peerCount;

@@ -128,6 +128,22 @@ void Peer::received(
// Re-prioritize paths to include the new one.
m_prioritizePaths(now);

// Add or update entry in the endpoint cache. If this endpoint
// is already present, its lastSeen time is updated. Otherwise
// it replaces the lowest ranked entry.
std::sort(m_endpointCache, m_endpointCache + ZT_PEER_ENDPOINT_CACHE_SIZE);
Endpoint thisEndpoint(path->address());
for (unsigned int i = 0;;++i) {
if (i == (ZT_PEER_ENDPOINT_CACHE_SIZE - 1)) {
m_endpointCache[i].target = thisEndpoint;
m_endpointCache[i].lastSeen = now;
break;
} else if (m_endpointCache[i].target == thisEndpoint) {
m_endpointCache[i].lastSeen = now;
break;
}
}

RR->t->learnedNewPath(tPtr, 0x582fabdd, packetId, m_id, path->address(), old);
} else {
path->sent(now, hello(tPtr, path->localSocket(), path->address(), now));

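The cache holds a small fixed number of recently seen endpoints: entries are re-sorted, an endpoint already present just gets its lastSeen refreshed, and a new endpoint takes over the slot judged lowest ranked. A hedged Go sketch of that update step, using a string as a stand-in for an Endpoint and replacing the least recently seen slot:

package main

import (
	"fmt"
	"sort"
)

const cacheSize = 8 // stand-in for ZT_PEER_ENDPOINT_CACHE_SIZE

type cacheEntry struct {
	target   string // stand-in for an Endpoint
	lastSeen int64
}

// noteEndpoint records that target was just seen: entries are kept sorted
// most-recently-seen first, an existing entry only gets its lastSeen bumped,
// and otherwise the least recently seen slot (the last one) is replaced.
func noteEndpoint(cache *[cacheSize]cacheEntry, target string, now int64) {
	sort.Slice(cache[:], func(a, b int) bool {
		return cache[a].lastSeen > cache[b].lastSeen
	})
	for i := range cache {
		if cache[i].target == target || i == cacheSize-1 {
			cache[i].target = target
			cache[i].lastSeen = now
			return
		}
	}
}

func main() {
	var cache [cacheSize]cacheEntry
	noteEndpoint(&cache, "192.0.2.1/9993", 1000)
	noteEndpoint(&cache, "192.0.2.2/9993", 2000)
	noteEndpoint(&cache, "192.0.2.1/9993", 3000)
	fmt.Println(cache[:3])
}
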
@@ -255,8 +271,8 @@ void Peer::pulse(void *const tPtr, const int64_t now, const bool isRoot)
}
}

for(unsigned int i=0;i<ZT_PEER_ENDPOINT_CACHE_SIZE;++i) {
if ((m_endpointCache[i].firstSeen > 0) && (m_endpointCache[i].target.type == ZT_ENDPOINT_TYPE_IP_UDP)) {
for (unsigned int i = 0;i < ZT_PEER_ENDPOINT_CACHE_SIZE;++i) {
if ((m_endpointCache[i].lastSeen > 0) && (m_endpointCache[i].target.type == ZT_ENDPOINT_TYPE_IP_UDP)) {
if (RR->node->shouldUsePathForZeroTierTraffic(tPtr, m_id, -1, m_endpointCache[i].target.ip())) {
int64_t &lt = m_lastTried[m_endpointCache[i].target];
if ((now - lt) > ZT_PATH_MIN_TRY_INTERVAL) {

@@ -352,7 +368,7 @@ void Peer::pulse(void *const tPtr, const int64_t now, const bool isRoot)
// Discard front item unless the code skips to requeue_item.
discard_queue_item:
m_tryQueue.pop_front();
if (attempts >= std::min((unsigned int)m_tryQueue.size(),(unsigned int)ZT_NAT_T_PORT_SCAN_MAX))
if (attempts >= std::min((unsigned int)m_tryQueue.size(), (unsigned int)ZT_NAT_T_PORT_SCAN_MAX))
break;
else continue;

@@ -360,7 +376,7 @@ void Peer::pulse(void *const tPtr, const int64_t now, const bool isRoot)
requeue_item:
if (m_tryQueue.size() > 1) // no point in doing this splice if there's only one item
m_tryQueue.splice(m_tryQueue.end(), m_tryQueue, m_tryQueue.begin());
if (attempts >= std::min((unsigned int)m_tryQueue.size(),(unsigned int)ZT_NAT_T_PORT_SCAN_MAX))
if (attempts >= std::min((unsigned int)m_tryQueue.size(), (unsigned int)ZT_NAT_T_PORT_SCAN_MAX))
break;
else continue;
}

@@ -398,7 +414,7 @@ void Peer::pulse(void *const tPtr, const int64_t now, const bool isRoot)
}

// Clean m_lastTried
for (Map<Endpoint,int64_t>::iterator i(m_lastTried.begin());i!=m_lastTried.end();) {
for (Map<Endpoint, int64_t>::iterator i(m_lastTried.begin());i != m_lastTried.end();) {
if ((now - i->second) > (ZT_PATH_MIN_TRY_INTERVAL * 4))
m_lastTried.erase(i++);
else ++i;

@@ -506,7 +522,7 @@ int Peer::marshal(uint8_t data[ZT_PEER_MARSHAL_SIZE_MAX]) const noexcept
if (!m_identityKey)
return -1;

data[0] = 0; // serialized peer version
data[0] = 16; // serialized peer version

// Include our identity's address to detect if this changes and require
// recomputation of m_identityKey.

@@ -515,11 +531,11 @@ int Peer::marshal(uint8_t data[ZT_PEER_MARSHAL_SIZE_MAX]) const noexcept
// SECURITY: encryption in place is only to protect secrets if they are
// cached to local storage. It's not used over the wire. Dumb ECB is fine
// because secret keys are random and have no structure to reveal.
RR->localCacheSymmetric.encrypt(m_identityKey->secret, data + 6);
RR->localCacheSymmetric.encrypt(m_identityKey->secret + 22, data + 17);
RR->localCacheSymmetric.encrypt(m_identityKey->secret + 38, data + 33);
RR->localCacheSymmetric.encrypt(m_identityKey->secret, data + 1 + ZT_ADDRESS_LENGTH);
RR->localCacheSymmetric.encrypt(m_identityKey->secret + 16, data + 1 + ZT_ADDRESS_LENGTH + 16);
RR->localCacheSymmetric.encrypt(m_identityKey->secret + 32, data + 1 + ZT_ADDRESS_LENGTH + 32);

int p = 54;
int p = 1 + ZT_ADDRESS_LENGTH + 48;

int s = m_id.marshal(data + p, false);
if (s < 0)

@@ -536,6 +552,21 @@ int Peer::marshal(uint8_t data[ZT_PEER_MARSHAL_SIZE_MAX]) const noexcept
data[p++] = 0;
}

unsigned int cachedEndpointCount = 0;
for (unsigned int i = 0;i < ZT_PEER_ENDPOINT_CACHE_SIZE;++i) {
if (m_endpointCache[i].lastSeen > 0)
++cachedEndpointCount;
}
Utils::storeBigEndian(data + p, (uint16_t)cachedEndpointCount);
p += 2;
for (unsigned int i = 0;i < ZT_PEER_ENDPOINT_CACHE_SIZE;++i) {
Utils::storeBigEndian(data + p, (uint64_t)m_endpointCache[i].lastSeen);
s = m_endpointCache[i].target.marshal(data + p);
if (s <= 0)
return -1;
p += s;
}

Utils::storeBigEndian(data + p, (uint16_t)m_vProto);
p += 2;
Utils::storeBigEndian(data + p, (uint16_t)m_vMajor);

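Together with the unmarshal hunk further down, this defines the cached-endpoint section of a serialized peer: a big-endian 16-bit entry count, then for each entry a 64-bit lastSeen timestamp followed by a marshaled endpoint. A small sketch of that framing with encoding/binary, where the endpoint payload is just a length-prefixed string for illustration:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

type cachedEndpoint struct {
	lastSeen int64
	target   string // stand-in for a marshaled Endpoint
}

// marshalCache writes a uint16 entry count, then for each entry a uint64
// lastSeen followed by the endpoint bytes, all big-endian, echoing the
// layout the unmarshal hunk below reads back.
func marshalCache(entries []cachedEndpoint) []byte {
	var b bytes.Buffer
	_ = binary.Write(&b, binary.BigEndian, uint16(len(entries)))
	for _, e := range entries {
		_ = binary.Write(&b, binary.BigEndian, uint64(e.lastSeen))
		_ = binary.Write(&b, binary.BigEndian, uint16(len(e.target)))
		b.WriteString(e.target)
	}
	return b.Bytes()
}

func main() {
	out := marshalCache([]cachedEndpoint{{lastSeen: 1700000000000, target: "192.0.2.1/9993"}})
	fmt.Printf("%d bytes: % x\n", len(out), out)
}
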
@@ -555,7 +586,7 @@ int Peer::unmarshal(const uint8_t *restrict data, const int len) noexcept
{
RWMutex::Lock l(m_lock);

if ((len <= 54) || (data[0] != 0))
if ((len <= (1 + ZT_ADDRESS_LENGTH + 48)) || (data[0] != 16))
return -1;

m_identityKey.zero();

@@ -565,16 +596,16 @@ int Peer::unmarshal(const uint8_t *restrict data, const int len) noexcept
if (Address(data + 1) == RR->identity.address()) {
uint8_t k[ZT_SYMMETRIC_KEY_SIZE];
static_assert(ZT_SYMMETRIC_KEY_SIZE == 48, "marshal() and unmarshal() must be revisited if ZT_SYMMETRIC_KEY_SIZE is changed");
RR->localCacheSymmetric.decrypt(data + 1, k);
RR->localCacheSymmetric.decrypt(data + 17, k + 16);
RR->localCacheSymmetric.decrypt(data + 33, k + 32);
RR->localCacheSymmetric.decrypt(data + 1 + ZT_ADDRESS_LENGTH, k);
RR->localCacheSymmetric.decrypt(data + 1 + ZT_ADDRESS_LENGTH + 16, k + 16);
RR->localCacheSymmetric.decrypt(data + 1 + ZT_ADDRESS_LENGTH + 32, k + 32);
m_identityKey.set(new SymmetricKey(RR->node->now(), k));
Utils::burn(k, sizeof(k));
}

int p = 49;
int p = 1 + ZT_ADDRESS_LENGTH + 48;

int s = m_id.unmarshal(data + 38, len - 38);
int s = m_id.unmarshal(data + p, len - p);
if (s < 0)
return s;
p += s;

@@ -587,6 +618,8 @@ int Peer::unmarshal(const uint8_t *restrict data, const int len) noexcept
Utils::burn(k, sizeof(k));
}

if (p >= len)
return -1;
if (data[p] == 0) {
++p;
m_locator.zero();

@@ -602,6 +635,21 @@ int Peer::unmarshal(const uint8_t *restrict data, const int len) noexcept
return -1;
}

const unsigned int cachedEndpointCount = Utils::loadBigEndian<uint16_t>(data + p);
p += 2;
for (unsigned int i = 0;i < cachedEndpointCount;++i) {
if (i < ZT_PEER_ENDPOINT_CACHE_SIZE) {
if ((p + 8) >= len)
return -1;
m_endpointCache[i].lastSeen = (int64_t)Utils::loadBigEndian<uint64_t>(data + p);
p += 8;
s = m_endpointCache[i].target.unmarshal(data + p, len - p);
if (s <= 0)
return -1;
p += s;
}
}

if ((p + 10) > len)
return -1;
m_vProto = Utils::loadBigEndian<uint16_t>(data + p);

@@ -32,7 +32,15 @@
#include "SymmetricKey.hpp"
#include "Containers.hpp"

#define ZT_PEER_MARSHAL_SIZE_MAX (1 + ZT_ADDRESS_LENGTH + ZT_SYMMETRIC_KEY_SIZE + ZT_IDENTITY_MARSHAL_SIZE_MAX + 1 + ZT_LOCATOR_MARSHAL_SIZE_MAX + 1 + (ZT_MAX_PEER_NETWORK_PATHS * ZT_ENDPOINT_MARSHAL_SIZE_MAX) + (2*4) + 2)
#define ZT_PEER_MARSHAL_SIZE_MAX ( \
1 + \
ZT_ADDRESS_LENGTH + \
ZT_SYMMETRIC_KEY_SIZE + \
ZT_IDENTITY_MARSHAL_SIZE_MAX + \
1 + ZT_LOCATOR_MARSHAL_SIZE_MAX + \
2 + ((8 + ZT_ENDPOINT_MARSHAL_SIZE_MAX) * ZT_PEER_ENDPOINT_CACHE_SIZE) + \
(2 * 4) + \
2 )

#define ZT_PEER_DEDUP_BUFFER_SIZE 1024
#define ZT_PEER_DEDUP_BUFFER_MASK 1023U

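The reworked macro is simply the sum of the worst-case size of each serialized section of a peer. A throwaway sketch of the same arithmetic, with the component sizes passed in as parameters since the real constants live elsewhere in the tree; the values used in main below are placeholders, not the actual ones:

package main

import "fmt"

// peerMarshalSizeMax mirrors the structure of ZT_PEER_MARSHAL_SIZE_MAX:
// version byte + address + symmetric key + identity + optional locator +
// endpoint cache section + the remaining fixed fields ((2*4) + 2 bytes).
func peerMarshalSizeMax(addressLen, symmetricKeySize, identityMax, locatorMax, endpointMax, cacheSize int) int {
	return 1 +
		addressLen +
		symmetricKeySize +
		identityMax +
		1 + locatorMax +
		2 + (8+endpointMax)*cacheSize +
		(2 * 4) +
		2
}

func main() {
	// Placeholder component sizes purely to show the arithmetic.
	fmt.Println(peerMarshalSizeMax(5, 48, 512, 2048, 128, 8))
}
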
@@ -514,13 +522,12 @@ private:
struct p_EndpointCacheItem
{
Endpoint target;
uint64_t timesSeen;
int64_t firstSeen;
int64_t lastSeen;

ZT_INLINE bool operator<(const p_EndpointCacheItem &ci) const noexcept
{ return (ci.timesSeen < timesSeen) || ((ci.timesSeen == timesSeen) && (ci.firstSeen < firstSeen)); }
{ return lastSeen < ci.lastSeen; }

ZT_INLINE p_EndpointCacheItem() noexcept : target(), timesSeen(0), firstSeen(0)
ZT_INLINE p_EndpointCacheItem() noexcept : target(), lastSeen(0)
{}
};