Mirror of https://github.com/zerotier/ZeroTierOne.git (synced 2025-06-08 05:23:44 +02:00)
A bunch of simplification in SymmetricKey and the ephemeral re-keying logic. Not on yet, but the structure is all there.
This commit is contained in:
parent de6fadc12d
commit 8d9067e982
39 changed files with 771 additions and 882 deletions
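The commit message says most of the simplification lives in SymmetricKey and the re-keying path. As a rough, hypothetical sketch only, inferred from the calls that appear in the core/Peer.cpp hunks further down (`SymmetricKey(cc.ticks, k)` / `init(cc.ticks, k)`, `timestamp()`, `odometer()`, `->secret`), a key holder of that shape could look roughly like this; the real core/SymmetricKey.hpp is not part of this diff and certainly differs:

```c++
// Hypothetical sketch of a SymmetricKey-like holder, inferred from usage in the hunks
// below; NOT the actual core/SymmetricKey.hpp. The 48-byte size is an assumption.
#include <atomic>
#include <cstdint>
#include <cstring>

class SymmetricKeySketch {
public:
	void init(const int64_t ticks, const uint8_t key[48]) noexcept {
		std::memcpy(secret, key, sizeof(secret));   // raw shared secret
		m_ts = ticks;                               // when this key was established
		m_odometer.store(0, std::memory_order_relaxed);
	}
	int64_t timestamp() const noexcept { return m_ts; }                                            // age check for re-keying
	uint64_t odometer() const noexcept { return m_odometer.load(std::memory_order_relaxed); }      // usage check for re-keying
	uint64_t use() noexcept { return m_odometer.fetch_add(1, std::memory_order_relaxed); }         // count one message sent under this key

	uint8_t secret[48];
private:
	int64_t m_ts = 0;
	std::atomic< uint64_t > m_odometer { 0 };
};
```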
6 attic/root/thirdparty/cpp-httplib/README.md (vendored)
@@ -154,20 +154,20 @@ auto res = cli.Post("/post", params);
### PUT

```c++
res = cli.Put("/resource/foo", "text", "text/plain");
res = cli.Put("/resource/s_arbitraryByte", "text", "text/plain");
```

### DELETE

```c++
res = cli.Delete("/resource/foo");
res = cli.Delete("/resource/s_arbitraryByte");
```

### OPTIONS

```c++
res = cli.Options("*");
res = cli.Options("/resource/foo");
res = cli.Options("/resource/s_arbitraryByte");
```

### Connection Timeout
22 attic/root/thirdparty/json/README.md (vendored)
@@ -338,7 +338,7 @@ We designed the JSON class to behave just like an STL container. In fact, it sat
```cpp
// create an array using push_back
json j;
j.push_back("foo");
j.push_back("s_arbitraryByte");
j.push_back(1);
j.push_back(true);

@@ -358,10 +358,10 @@ for (auto& element : j) {
// getter/setter
const std::string tmp = j[0];
j[1] = 42;
bool foo = j.at(2);
bool s_arbitraryByte = j.at(2);

// comparison
j == "[\"foo\", 1, true]"_json; // true
j == "[\"s_arbitraryByte\", 1, true]"_json; // true

// other stuff
j.size(); // 3 entries

@@ -379,7 +379,7 @@ j.is_string();
// create an object
json o;
o["foo"] = 23;
o["s_arbitraryByte"] = 23;
o["bar"] = false;
o["baz"] = 3.141;

@@ -392,16 +392,16 @@ for (json::iterator it = o.begin(); it != o.end(); ++it) {
}

// find an entry
if (o.find("foo") != o.end()) {
// there is an entry with key "foo"
if (o.find("s_arbitraryByte") != o.end()) {
// there is an entry with key "s_arbitraryByte"
}

// or simpler using count()
int foo_present = o.count("foo"); // 1
int foo_present = o.count("s_arbitraryByte"); // 1
int fob_present = o.count("fob"); // 0

// delete an entry
o.erase("foo");
o.erase("s_arbitraryByte");
```

@@ -475,7 +475,7 @@ The library supports **JSON Pointer** ([RFC 6901](https://tools.ietf.org/html/rf
// a JSON value
json j_original = R"({
"baz": ["one", "two", "three"],
"foo": "bar"
"s_arbitraryByte": "bar"
})"_json;

// access members with a JSON pointer (RFC 6901)

@@ -486,7 +486,7 @@ j_original["/baz/1"_json_pointer];
json j_patch = R"([
{ "op": "replace", "path": "/baz", "value": "boo" },
{ "op": "add", "path": "/hello", "value": ["world"] },
{ "op": "remove", "path": "/foo"}
{ "op": "remove", "path": "/s_arbitraryByte"}
])"_json;

// apply the patch

@@ -501,7 +501,7 @@ json::diff(j_result, j_original);
// [
// { "op":" replace", "path": "/baz", "value": ["one", "two", "three"] },
// { "op": "remove","path": "/hello" },
// { "op": "add", "path": "/foo", "value": "bar" }
// { "op": "add", "path": "/s_arbitraryByte", "value": "bar" }
// ]
```
6 controller/thirdparty/cpp-httplib/README.md (vendored)
@@ -154,20 +154,20 @@ auto res = cli.Post("/post", params);
### PUT

```c++
res = cli.Put("/resource/foo", "text", "text/plain");
res = cli.Put("/resource/s_arbitraryByte", "text", "text/plain");
```

### DELETE

```c++
res = cli.Delete("/resource/foo");
res = cli.Delete("/resource/s_arbitraryByte");
```

### OPTIONS

```c++
res = cli.Options("*");
res = cli.Options("/resource/foo");
res = cli.Options("/resource/s_arbitraryByte");
```

### Connection Timeout
22 controller/thirdparty/json/README.md (vendored)
@@ -338,7 +338,7 @@ We designed the JSON class to behave just like an STL container. In fact, it sat
```cpp
// create an array using push_back
json j;
j.push_back("foo");
j.push_back("s_arbitraryByte");
j.push_back(1);
j.push_back(true);

@@ -358,10 +358,10 @@ for (auto& element : j) {
// getter/setter
const std::string tmp = j[0];
j[1] = 42;
bool foo = j.at(2);
bool s_arbitraryByte = j.at(2);

// comparison
j == "[\"foo\", 1, true]"_json; // true
j == "[\"s_arbitraryByte\", 1, true]"_json; // true

// other stuff
j.size(); // 3 entries

@@ -379,7 +379,7 @@ j.is_string();
// create an object
json o;
o["foo"] = 23;
o["s_arbitraryByte"] = 23;
o["bar"] = false;
o["baz"] = 3.141;

@@ -392,16 +392,16 @@ for (json::iterator it = o.begin(); it != o.end(); ++it) {
}

// find an entry
if (o.find("foo") != o.end()) {
// there is an entry with key "foo"
if (o.find("s_arbitraryByte") != o.end()) {
// there is an entry with key "s_arbitraryByte"
}

// or simpler using count()
int foo_present = o.count("foo"); // 1
int foo_present = o.count("s_arbitraryByte"); // 1
int fob_present = o.count("fob"); // 0

// delete an entry
o.erase("foo");
o.erase("s_arbitraryByte");
```

@@ -475,7 +475,7 @@ The library supports **JSON Pointer** ([RFC 6901](https://tools.ietf.org/html/rf
// a JSON value
json j_original = R"({
"baz": ["one", "two", "three"],
"foo": "bar"
"s_arbitraryByte": "bar"
})"_json;

// access members with a JSON pointer (RFC 6901)

@@ -486,7 +486,7 @@ j_original["/baz/1"_json_pointer];
json j_patch = R"([
{ "op": "replace", "path": "/baz", "value": "boo" },
{ "op": "add", "path": "/hello", "value": ["world"] },
{ "op": "remove", "path": "/foo"}
{ "op": "remove", "path": "/s_arbitraryByte"}
])"_json;

// apply the patch

@@ -501,7 +501,7 @@ json::diff(j_result, j_original);
// [
// { "op":" replace", "path": "/baz", "value": ["one", "two", "three"] },
// { "op": "remove","path": "/hello" },
// { "op": "add", "path": "/foo", "value": "bar" }
// { "op": "add", "path": "/s_arbitraryByte", "value": "bar" }
// ]
```
@@ -121,7 +121,7 @@ ZT_MAYBE_UNUSED enum ZT_ResultCode ZT_Node_processWirePacket(
const void *packetData,
unsigned int packetLength,
int isZtBuffer,
volatile int64_t *nextBackgroundTaskDeadline)
volatile int64_t *)
{
try {
ZeroTier::CallContext cc(clock, ticks, tptr);

@@ -149,7 +149,7 @@ ZT_MAYBE_UNUSED enum ZT_ResultCode ZT_Node_processVirtualNetworkFrame(
const void *frameData,
unsigned int frameLength,
int isZtBuffer,
volatile int64_t *nextBackgroundTaskDeadline)
volatile int64_t *)
{
try {
ZeroTier::CallContext cc(clock, ticks, tptr);

@@ -269,9 +269,9 @@ ZT_MAYBE_UNUSED const ZT_Identity *ZT_Node_identity(ZT_Node *node)
ZT_MAYBE_UNUSED void ZT_Node_status(
ZT_Node *node,
int64_t clock,
int64_t ticks,
void *tptr,
int64_t,
int64_t,
void *,
ZT_NodeStatus *status)
{
try {

@@ -295,9 +295,9 @@ ZT_MAYBE_UNUSED ZT_PeerList *ZT_Node_peers(
ZT_MAYBE_UNUSED ZT_VirtualNetworkConfig *ZT_Node_networkConfig(
ZT_Node *node,
int64_t clock,
int64_t ticks,
void *tptr,
int64_t,
int64_t,
void *,
uint64_t nwid)
{
try {

@@ -328,9 +328,9 @@ ZT_MAYBE_UNUSED void ZT_Node_setNetworkUserPtr(
ZT_MAYBE_UNUSED void ZT_Node_setInterfaceAddresses(
ZT_Node *node,
int64_t clock,
int64_t ticks,
void *tptr,
int64_t,
int64_t,
void *,
const ZT_InterfaceAddress *addrs,
unsigned int addrCount)
{

@@ -374,9 +374,9 @@ ZT_MAYBE_UNUSED enum ZT_ResultCode ZT_Node_deleteCertificate(
ZT_MAYBE_UNUSED ZT_CertificateList *ZT_Node_listCertificates(
ZT_Node *node,
int64_t clock,
int64_t ticks,
void *tptr)
int64_t,
int64_t,
void *)
{
try {
return reinterpret_cast<ZeroTier::Node *>(node)->listCertificates();

@@ -417,7 +417,7 @@ ZT_MAYBE_UNUSED void ZT_Node_setController(
ZT_MAYBE_UNUSED ZT_Locator *ZT_Locator_create(
int64_t rev,
const ZT_Endpoint *endpoints,
const ZT_EndpointAttributes *endpointAttributes,
const ZT_EndpointAttributes *,
unsigned int endpointCount,
const ZT_Identity *signer)
{
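A recurring change in the hunks above is that parameters these functions no longer read (the clock/ticks/tptr trio, `nextBackgroundTaskDeadline`, `endpointAttributes`) lose their names but keep their types. A tiny, hypothetical illustration of why: an unnamed parameter keeps the exported signature and ABI intact while avoiding unused-parameter warnings.

```c++
// Hypothetical example, not from this codebase: the unnamed third parameter stays in the
// signature (callers are unaffected), but the compiler no longer reports it as unused.
int processSomething(const void *data, unsigned int len, volatile int64_t * /*deadline*/)
{
	return ((data != nullptr) && (len > 0)) ? 0 : -1;
}
```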
@@ -24,7 +24,6 @@ set(core_headers
Defragmenter.hpp
Dictionary.hpp
ECC384.hpp
EphemeralKey.hpp
Expect.hpp
FCV.hpp
Fingerprint.hpp
@@ -18,6 +18,24 @@
namespace ZeroTier {

CapabilityCredential::CapabilityCredential(
const uint32_t id,
const uint64_t nwid,
const int64_t timestamp,
const ZT_VirtualNetworkRule *const rules,
const unsigned int ruleCount) noexcept:
m_nwid(nwid),
m_timestamp(timestamp),
m_id(id),
m_ruleCount((ruleCount < ZT_MAX_CAPABILITY_RULES) ? ruleCount : ZT_MAX_CAPABILITY_RULES),
m_signatureLength(0)
{
Utils::zero< sizeof(m_rules) >(m_rules);
if (m_ruleCount > 0)
Utils::copy(m_rules, rules, sizeof(ZT_VirtualNetworkRule) * m_ruleCount);
Utils::zero< sizeof(m_signature) >(m_signature);
}

bool CapabilityCredential::sign(const Identity &from, const Address &to) noexcept
{
uint8_t buf[ZT_CAPABILITY_MARSHAL_SIZE_MAX + 16];
@@ -65,16 +65,12 @@ public:
* @param rules Network flow rules for this capability
* @param ruleCount Number of flow rules
*/
ZT_INLINE CapabilityCredential(const uint32_t id, const uint64_t nwid, const int64_t timestamp, const ZT_VirtualNetworkRule *const rules, const unsigned int ruleCount) noexcept: // NOLINT(cppcoreguidelines-pro-type-member-init,hicpp-member-init)
m_nwid(nwid),
m_timestamp(timestamp),
m_id(id),
m_ruleCount((ruleCount < ZT_MAX_CAPABILITY_RULES) ? ruleCount : ZT_MAX_CAPABILITY_RULES),
m_signatureLength(0)
{
if (m_ruleCount > 0)
Utils::copy(m_rules, rules, sizeof(ZT_VirtualNetworkRule) * m_ruleCount);
}
CapabilityCredential(
const uint32_t id,
const uint64_t nwid,
const int64_t timestamp,
const ZT_VirtualNetworkRule *const rules,
const unsigned int ruleCount) noexcept;

/**
* @return Rules -- see ruleCount() for size of array

@@ -139,7 +135,6 @@ public:
{ return ZT_CAPABILITY_MARSHAL_SIZE_MAX; }

int marshal(uint8_t data[ZT_CAPABILITY_MARSHAL_SIZE_MAX], bool forSign = false) const noexcept;
int unmarshal(const uint8_t *data, int len) noexcept;

/**
@@ -144,7 +144,7 @@
/**
* Delay between calls to the pulse() method in Peer for each peer
*/
#define ZT_PEER_PULSE_INTERVAL 8000
#define ZT_PEER_PULSE_INTERVAL 10000

/**
* Interval between HELLOs to peers.

@@ -165,16 +165,6 @@
*/
#define ZT_PEER_GLOBAL_TIMEOUT 2592000000LL

/**
* Interval between sort/prioritize of paths for a peer
*/
#define ZT_PEER_PRIORITIZE_PATHS_INTERVAL 5000

/**
* Number of previous endpoints to cache in peer records.
*/
#define ZT_PEER_ENDPOINT_CACHE_SIZE 8

/**
* Delay between requests for updated network autoconf information
*
@@ -18,10 +18,6 @@
#include "TriviallyCopyable.hpp"
#include "CallContext.hpp"

#include <string>
#include <memory>
#include <stdexcept>

namespace ZeroTier {

class CapabilityCredential;
@@ -1,137 +0,0 @@
/*
* Copyright (c)2013-2021 ZeroTier, Inc.
*
* Use of this software is governed by the Business Source License included
* in the LICENSE.TXT file in the project's root directory.
*
* Change Date: 2026-01-01
*
* On the date above, in accordance with the Business Source License, use
* of this software will be governed by version 2.0 of the Apache License.
*/
/****/

#ifndef ZT_EPHEMERALKEY_HPP
#define ZT_EPHEMERALKEY_HPP

#include "Constants.hpp"
#include "C25519.hpp"
#include "ECC384.hpp"
#include "SHA512.hpp"
#include "Utils.hpp"

#define ZT_EPHEMERALKEY_PUBLIC_SIZE (1 + ZT_C25519_ECDH_PUBLIC_KEY_SIZE + ZT_ECC384_PUBLIC_KEY_SIZE)

namespace ZeroTier {

/**
* Container for ephemeral key pair sets used in forward secrecy negotiation.
*
* The ephemeral public key consists of public key(s) prefixed by a type byte.
* In the current version there are two keys: a Curve25519 ECDH public key and
* a NIST P-384 public key. Both are sent, and key agreement is performed by
* agreeing with both and then hashing the results together with the long-lived
* identity shared secret to produce a shared symmetric ephemeral key.
*
* Unlike identities the private key never leaves this class. It dies when
* a new key pair is generated or when the node is shut down.
*
* Each peer holds a copy of its current ephemeral key. This is re-generated
* after one half ZT_SYMMETRIC_KEY_TTL or after the the symmetric key has
* been used one half of ZT_SYMMETRIC_KEY_TTL_MESSAGES times. Half the total
* TTL is chosen to provide plenty of margin.
*/
class EphemeralKey
{
public:
enum Type
{
TYPE_NIL = 0,
TYPE_C25519_P384 = 1
};

/**
* The ephemeral public key(s)
*
* This is sent with HELLO or OK(HELLO) and is re-written when
* generate() is called. Its size is static.
*/
const uint8_t pub[ZT_EPHEMERALKEY_PUBLIC_SIZE];

/**
* Create an uninitialized ephemeral key (must call generate())
*/
ZT_INLINE EphemeralKey() noexcept:
pub()
{
const_cast<uint8_t *>(pub)[0] = (uint8_t) TYPE_NIL;
Utils::memoryLock(this, sizeof(EphemeralKey));
}

ZT_INLINE ~EphemeralKey() noexcept
{
Utils::burn(m_priv, sizeof(m_priv));
Utils::memoryUnlock(this, sizeof(EphemeralKey));
}

/**
* @return True if this ephemeral key has been initialized with generate()
*/
ZT_INLINE operator bool() const noexcept
{ return pub[0] != (uint8_t) TYPE_NIL; }

/**
* Generate or re-generate key pair.
*/
ZT_INLINE void generate() noexcept
{
uint8_t *const p = const_cast<uint8_t *>(pub);
p[0] = (uint8_t) TYPE_C25519_P384;
C25519::generateC25519(p + 1, m_priv);
ECC384GenerateKey(p + 1 + ZT_C25519_ECDH_PUBLIC_KEY_SIZE, m_priv + ZT_C25519_ECDH_PRIVATE_KEY_SIZE);
}

/**
* Execute key agreement with another ephemeral public key set.
*
* Final key is produced by hashing the two ECDH keys followed by
* the identity secret key with SHA384.
*
* @param identityKey Raw identity key shared between this node and peer
* @param otherPub Other public key (prefixed by type)
* @param key Key buffer to fill with symmetric key
* @return True on success
*/
ZT_INLINE bool agree(const uint8_t identityKey[ZT_SYMMETRIC_KEY_SIZE], const uint8_t *otherPub, const unsigned int otherPubLength, uint8_t key[ZT_SYMMETRIC_KEY_SIZE]) const noexcept
{
if ((otherPubLength < ZT_EPHEMERALKEY_PUBLIC_SIZE) || (otherPub[0] != (uint8_t) TYPE_C25519_P384))
return false;
uint8_t tmp[ZT_C25519_ECDH_SHARED_SECRET_SIZE + ZT_ECC384_SHARED_SECRET_SIZE];
C25519::agree(m_priv, otherPub + 1, tmp);
if (!ECC384ECDH(otherPub + 1 + ZT_C25519_ECDH_PUBLIC_KEY_SIZE, m_priv + ZT_C25519_ECDH_PRIVATE_KEY_SIZE, tmp + ZT_C25519_ECDH_SHARED_SECRET_SIZE))
return false;
SHA384(key, tmp, ZT_C25519_ECDH_SHARED_SECRET_SIZE + ZT_ECC384_SHARED_SECRET_SIZE, identityKey, ZT_SYMMETRIC_KEY_SIZE);
Utils::burn(tmp, ZT_C25519_ECDH_SHARED_SECRET_SIZE + ZT_ECC384_SHARED_SECRET_SIZE);
return true;
}

/**
* Check and see if an acknowledgement hash returned via OK(HELLO) matches our public key.
*
* @param ackHash Hash provided in OK(HELLO)
* @return True if this matches the hash of this ephemeral key
*/
ZT_INLINE bool acknowledged(const uint8_t ackHash[ZT_SHA384_DIGEST_SIZE]) const noexcept
{
uint8_t h[ZT_SHA384_DIGEST_SIZE];
SHA384(h, pub, ZT_EPHEMERALKEY_PUBLIC_SIZE);
return Utils::secureEq(ackHash, h, ZT_SHA384_DIGEST_SIZE);
}

private:
uint8_t m_priv[ZT_C25519_ECDH_PRIVATE_KEY_SIZE + ZT_ECC384_PRIVATE_KEY_SIZE];
};

} // namespace ZeroTier

#endif
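The header deleted above documents the intended flow: generate a C25519 + P-384 pair, send the type-prefixed public blob in HELLO, and on OK(HELLO) run both ECDH agreements and hash the results with the long-lived identity key. A minimal sketch of that driving code, using only names from the removed header; the surrounding HELLO plumbing and the `identityKey`/`peerPub` inputs are assumptions:

```c++
// Sketch only: how the removed EphemeralKey was meant to be driven.
// identityKey, peerPub and peerPubLen would come from the HELLO / OK(HELLO) exchange.
static bool deriveEphemeralSessionKey(const uint8_t identityKey[ZT_SYMMETRIC_KEY_SIZE],
                                      const uint8_t *peerPub, const unsigned int peerPubLen,
                                      uint8_t sessionKey[ZT_SYMMETRIC_KEY_SIZE])
{
	EphemeralKey mine;
	mine.generate();   // fresh C25519 + NIST P-384 pair; mine.pub would go out in HELLO
	// agree() runs both ECDH agreements and SHA384-hashes the two shared secrets together
	// with the long-lived identity key to produce the ephemeral symmetric key.
	return mine.agree(identityKey, peerPub, peerPubLen, sessionKey);
}
```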
@@ -29,10 +29,10 @@ namespace {
// This is the memory-intensive hash function used to compute v0 identities from v0 public keys.
#define ZT_V0_IDENTITY_GEN_MEMORY 2097152

void identityV0ProofOfWorkFrankenhash(const void *const restrict publicKey, unsigned int publicKeyBytes, void *const restrict digest, void *const restrict genmem) noexcept
void identityV0ProofOfWorkFrankenhash(const void *const restrict c25519CombinedPublicKey, void *const restrict digest, void *const restrict genmem) noexcept
{
// Digest publicKey[] to obtain initial digest
SHA512(digest, publicKey, publicKeyBytes);
SHA512(digest, c25519CombinedPublicKey, ZT_C25519_COMBINED_PUBLIC_KEY_SIZE);

// Initialize genmem[] using Salsa20 in a CBC-like configuration since
// ordinary Salsa20 is randomly seek-able. This is good for a cipher

@@ -55,8 +55,8 @@ void identityV0ProofOfWorkFrankenhash(const void *const restrict publicKey, unsi
// Render final digest using genmem as a lookup table
for (unsigned long i = 0; i < (ZT_V0_IDENTITY_GEN_MEMORY / sizeof(uint64_t));) {
unsigned long idx1 = (unsigned long)(Utils::ntoh(((uint64_t *)genmem)[i++]) % (64 / sizeof(uint64_t))); // NOLINT(hicpp-use-auto,modernize-use-auto)
unsigned long idx2 = (unsigned long)(Utils::ntoh(((uint64_t *)genmem)[i++]) % (ZT_V0_IDENTITY_GEN_MEMORY / sizeof(uint64_t))); // NOLINT(hicpp-use-auto,modernize-use-auto)
unsigned long idx1 = (unsigned long)(Utils::ntoh(((uint64_t *)genmem)[i++]) % (64 / sizeof(uint64_t)));
unsigned long idx2 = (unsigned long)(Utils::ntoh(((uint64_t *)genmem)[i++]) % (ZT_V0_IDENTITY_GEN_MEMORY / sizeof(uint64_t)));
uint64_t tmp = ((uint64_t *)genmem)[idx2];
((uint64_t *)genmem)[idx2] = ((uint64_t *)digest)[idx1];
((uint64_t *)digest)[idx1] = tmp;

@@ -71,12 +71,12 @@ struct identityV0ProofOfWorkCriteria
ZT_INLINE bool operator()(const uint8_t pub[ZT_C25519_COMBINED_PUBLIC_KEY_SIZE]) const noexcept
{
identityV0ProofOfWorkFrankenhash(pub, ZT_C25519_COMBINED_PUBLIC_KEY_SIZE, digest, genmem);
identityV0ProofOfWorkFrankenhash(pub, digest, genmem);
return (digest[0] < 17);
}

unsigned char *digest;
char *genmem;
unsigned char *restrict digest;
char *restrict genmem;
};

void v1ChallengeFromPub(const uint8_t pub[ZT_IDENTITY_P384_COMPOUND_PUBLIC_KEY_SIZE], uint64_t challenge[4])

@@ -168,7 +168,7 @@ bool Identity::locallyValidate() const noexcept
char *const genmem = (char *)malloc(ZT_V0_IDENTITY_GEN_MEMORY);
if (!genmem)
return false;
identityV0ProofOfWorkFrankenhash(m_pub, ZT_C25519_COMBINED_PUBLIC_KEY_SIZE, digest, genmem);
identityV0ProofOfWorkFrankenhash(m_pub, digest, genmem);
free(genmem);
return ((Address(digest + 59) == m_fp.address) && (digest[0] < 17));
}

@@ -254,7 +254,7 @@ bool Identity::verify(const void *data, unsigned int len, const void *sig, unsig
bool Identity::agree(const Identity &id, uint8_t key[ZT_SYMMETRIC_KEY_SIZE]) const
{
uint8_t rawkey[128], h[64];
if (likely(m_hasPrivate)) {
if (m_hasPrivate) {
if ((m_type == C25519) || (id.m_type == C25519)) {
// If we are a C25519 key we can agree with another C25519 key or with only the
// C25519 portion of a type 1 P-384 key.
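The comment above states the agreement compatibility rule: if either identity is plain C25519, only the C25519 halves can be used; full hybrid agreement needs both sides to be type 1 (C25519 + P-384). Restated as a tiny predicate, a sketch only (the `type()` accessor name is assumed from context; the diff itself only shows the private `m_type` field):

```c++
// Sketch: true when agreement must fall back to the Curve25519 portion only.
static bool c25519OnlyAgreement(const Identity &a, const Identity &b)
{
	return (a.type() == Identity::C25519) || (b.type() == Identity::C25519);
}
```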
@@ -62,16 +62,10 @@ public:
static const Identity NIL;

ZT_INLINE Identity() noexcept
{
Utils::memoryLock(this, sizeof(Identity));
memoryZero(this);
}
{ memoryZero(this); }

ZT_INLINE Identity(const Identity &id) noexcept
{
Utils::memoryLock(this, sizeof(Identity));
Utils::copy< sizeof(Identity) >(this, &id);
}
{ Utils::copy< sizeof(Identity) >(this, &id); }

/**
* Construct identity from string

@@ -82,16 +76,10 @@ public:
* @param str Identity in canonical string format
*/
explicit ZT_INLINE Identity(const char *str)
{
Utils::memoryLock(this, sizeof(Identity));
fromString(str);
}
{ fromString(str); }

ZT_INLINE ~Identity()
{
Utils::memoryUnlock(this, sizeof(Identity));
Utils::burn(reinterpret_cast<void *>(&this->m_priv), sizeof(this->m_priv));
}
{ Utils::burn(reinterpret_cast<void *>(&this->m_priv), sizeof(this->m_priv)); }

ZT_INLINE Identity &operator=(const Identity &id) noexcept
{
@@ -134,7 +134,7 @@ public:
/**
* @return Endpoints specified in locator
*/
ZT_INLINE const Vector< std::pair< Endpoint, SharedPtr< const EndpointAttributes > > > &endpoints() const noexcept
ZT_INLINE const Vector <std::pair< Endpoint, SharedPtr< const EndpointAttributes > >> &endpoints() const noexcept
{ return m_endpoints; }

/**

@@ -201,6 +201,7 @@ public:
static constexpr int marshalSizeMax() noexcept
{ return ZT_LOCATOR_MARSHAL_SIZE_MAX; }

int marshal(uint8_t data[ZT_LOCATOR_MARSHAL_SIZE_MAX], bool excludeSignature = false) const noexcept;
int unmarshal(const uint8_t *data, int len) noexcept;

@@ -208,7 +209,7 @@ public:
{
const unsigned long es = (unsigned long)m_endpoints.size();
if ((m_revision == l.m_revision) && (m_signer == l.m_signer) && (es == (unsigned long)l.m_endpoints.size()) && (m_signature == l.m_signature)) {
for(unsigned long i=0;i<es;++i) {
for (unsigned long i = 0; i < es; ++i) {
if (m_endpoints[i].first != l.m_endpoints[i].first)
return false;
if (!m_endpoints[i].second) {

@@ -223,6 +224,7 @@ public:
}
return false;
}

ZT_INLINE bool operator!=(const Locator &l) const noexcept
{ return !(*this == l); }

@@ -231,7 +233,7 @@ private:
int64_t m_revision;
Fingerprint m_signer;
Vector< std::pair< Endpoint, SharedPtr< const EndpointAttributes > > > m_endpoints;
Vector <std::pair< Endpoint, SharedPtr< const EndpointAttributes > >> m_endpoints;
FCV< uint8_t, ZT_SIGNATURE_BUFFER_SIZE > m_signature;
std::atomic< int > __refCount;
};
@@ -264,7 +264,7 @@ private:
uint64_t m_mac;
};

static_assert(sizeof(MAC) == sizeof(uint64_t),"MAC contains unnecessary padding");
static_assert(sizeof(MAC) == sizeof(uint64_t), "MAC contains unnecessary padding");

} // namespace ZeroTier
@@ -211,8 +211,7 @@ public:
m_hti(m.m_remoteCaps.begin()),
m_parent(m),
m_nconf(nconf)
{
}
{}

ZT_INLINE CapabilityCredential *next() noexcept
{
@@ -101,12 +101,14 @@ class MembershipCredential : public Credential
friend class Credential;

public:
static constexpr ZT_CredentialType credentialType() noexcept { return ZT_CREDENTIAL_TYPE_COM; }
static constexpr ZT_CredentialType credentialType() noexcept
{ return ZT_CREDENTIAL_TYPE_COM; }

/**
* Create an empty certificate of membership
*/
ZT_INLINE MembershipCredential() noexcept { memoryZero(this); }
ZT_INLINE MembershipCredential() noexcept
{ memoryZero(this); }

/**
* Create from required fields common to all networks

@@ -121,17 +123,20 @@ public:
/**
* @return True if there's something here
*/
ZT_INLINE operator bool() const noexcept { return (m_networkId != 0); }
ZT_INLINE operator bool() const noexcept
{ return (m_networkId != 0); }

/**
* @return Credential ID, always 0 for COMs
*/
ZT_INLINE uint32_t id() const noexcept { return 0; }
ZT_INLINE uint32_t id() const noexcept
{ return 0; }

/**
* @return Timestamp for this cert and maximum delta for timestamp
*/
ZT_INLINE int64_t timestamp() const noexcept { return m_timestamp; }
ZT_INLINE int64_t timestamp() const noexcept
{ return m_timestamp; }

ZT_INLINE int64_t revision() const noexcept
{ return m_timestamp; }

@@ -139,17 +144,20 @@ public:
/**
* @return Maximum allowed difference between timestamps
*/
ZT_INLINE int64_t timestampMaxDelta() const noexcept { return m_timestampMaxDelta; }
ZT_INLINE int64_t timestampMaxDelta() const noexcept
{ return m_timestampMaxDelta; }

/**
* @return Fingerprint of identity to which this cert was issued
*/
ZT_INLINE const Fingerprint &issuedTo() const noexcept { return m_issuedTo; }
ZT_INLINE const Fingerprint &issuedTo() const noexcept
{ return m_issuedTo; }

/**
* @return Network ID for which this cert was issued
*/
ZT_INLINE uint64_t networkId() const noexcept { return m_networkId; }
ZT_INLINE uint64_t networkId() const noexcept
{ return m_networkId; }

/**
* Compare two certificates for parameter agreement

@@ -180,28 +188,34 @@ public:
* @param RR Runtime environment for looking up peers
* @param tPtr Thread pointer to be handed through to any callbacks called as a result of this call
*/
ZT_INLINE Credential::VerifyResult verify(const Context &ctx, const CallContext &cc) const { return s_verify(ctx, cc, *this); }
ZT_INLINE Credential::VerifyResult verify(const Context &ctx, const CallContext &cc) const
{ return s_verify(ctx, cc, *this); }

static constexpr int marshalSizeMax() noexcept
{ return ZT_MEMBERSHIP_CREDENTIAL_MARSHAL_SIZE_MAX; }

// NOTE: right now we use v1 serialization format which works with both ZeroTier 1.x and 2.x. V2 format
// will be switched on once 1.x is pretty much dead and out of support.
static constexpr int marshalSizeMax() noexcept { return ZT_MEMBERSHIP_CREDENTIAL_MARSHAL_SIZE_MAX; }
int marshal(uint8_t data[ZT_MEMBERSHIP_CREDENTIAL_MARSHAL_SIZE_MAX], bool v2 = false) const noexcept;
int unmarshal(const uint8_t *data,int len) noexcept;
int unmarshal(const uint8_t *data, int len) noexcept;

private:
unsigned int m_fillSigningBuf(uint64_t *buf) const noexcept;

struct p_Qualifier
{
ZT_INLINE p_Qualifier() noexcept : id(0), value(0), delta(0) {}
ZT_INLINE p_Qualifier(const uint64_t id_, const uint64_t value_, const uint64_t delta_) noexcept : id(id_), value(value_), delta(delta_) {}
ZT_INLINE p_Qualifier() noexcept: id(0), value(0), delta(0)
{}

ZT_INLINE p_Qualifier(const uint64_t id_, const uint64_t value_, const uint64_t delta_) noexcept: id(id_), value(value_), delta(delta_)
{}

uint64_t id;
uint64_t value;
uint64_t delta;
ZT_INLINE bool operator<(const p_Qualifier &q) const noexcept { return (id < q.id); } // sort order
ZT_INLINE bool operator<(const p_Qualifier &q) const noexcept
{ return (id < q.id); } // sort order
};

FCV<p_Qualifier,ZT_MEMBERSHIP_CREDENTIAL_MAX_ADDITIONAL_QUALIFIERS> m_additionalQualifiers;
FCV< p_Qualifier, ZT_MEMBERSHIP_CREDENTIAL_MAX_ADDITIONAL_QUALIFIERS > m_additionalQualifiers;
int64_t m_timestamp;
int64_t m_timestampMaxDelta;
uint64_t m_networkId;
@@ -58,7 +58,7 @@ public:
// the log size and then if it's a new bucket setting it or otherwise adding
// to it.
const unsigned long bucket = ((unsigned long)(ts / TUNIT)) % LSIZE;
if (m_bucket.exchange(bucket, std::memory_order_relaxed) != bucket) {
if (unlikely(m_bucket.exchange(bucket, std::memory_order_relaxed) != bucket)) {
m_totalExclCounts.fetch_add(m_counts[bucket].exchange(count, std::memory_order_relaxed), std::memory_order_relaxed);
} else {
m_counts[bucket].fetch_add(count, std::memory_order_relaxed);
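The hunk above is the hot path of a small time-bucketed rate meter: the bucket index is derived from the timestamp, and when time rolls into a new bucket the stale value found there (from one full ring ago) is folded into a running total while the bucket restarts with the new count. A stand-alone sketch of the same idea, with assumed names and template parameters (the real class is not shown in this diff):

```c++
// Stand-alone sketch of the bucket-rotation pattern in the hunk above; names are assumptions.
#include <atomic>
#include <cstdint>

template< int64_t TUNIT = 1000, unsigned long LSIZE = 10 >
class MeterSketch {
public:
	void log(const int64_t ts, const uint64_t count) noexcept {
		const unsigned long bucket = ((unsigned long)(ts / TUNIT)) % LSIZE;
		if (m_bucket.exchange(bucket, std::memory_order_relaxed) != bucket) {
			// New bucket: retire its stale contents into the running total, restart it at 'count'.
			m_totalExclCounts.fetch_add(m_counts[bucket].exchange(count, std::memory_order_relaxed), std::memory_order_relaxed);
		} else {
			m_counts[bucket].fetch_add(count, std::memory_order_relaxed);
		}
	}
private:
	std::atomic< unsigned long > m_bucket { 0 };
	std::atomic< uint64_t > m_totalExclCounts { 0 };
	std::atomic< uint64_t > m_counts[LSIZE] {};
};
```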
@@ -40,8 +40,11 @@ namespace ZeroTier {
class MulticastGroup : public TriviallyCopyable
{
public:
ZT_INLINE MulticastGroup() noexcept : m_mac(), m_adi(0) {}
ZT_INLINE MulticastGroup(const MAC &m,uint32_t a) noexcept : m_mac(m), m_adi(a) {}
ZT_INLINE MulticastGroup() noexcept: m_mac(), m_adi(0)
{}

ZT_INLINE MulticastGroup(const MAC &m, uint32_t a) noexcept: m_mac(m), m_adi(a)
{}

/**
* Derive the multicast group used for address resolution (ARP/NDP) for an IP

@@ -56,14 +59,14 @@ public:
// the Multicast Group ADI field. Making V4 ARP work is basically why
// ADI was added, as well as handling other things that want mindless
// Ethernet broadcast to all.
return MulticastGroup(MAC(0xffffffffffffULL),Utils::ntoh(*((const uint32_t *)ip.rawIpData())));
return MulticastGroup(MAC(0xffffffffffffULL), Utils::ntoh(*((const uint32_t *)ip.rawIpData())));
} else if (ip.isV6()) {
// IPv6 is better designed in this respect. We can compute the IPv6
// multicast address directly from the IP address, and it gives us
// 24 bits of uniqueness. Collisions aren't likely to be common enough
// to care about.
const uint8_t *const a = reinterpret_cast<const uint8_t *>(ip.rawIpData()); // NOLINT(hicpp-use-auto,modernize-use-auto)
return MulticastGroup(MAC(0x33,0x33,0xff,a[13],a[14],a[15]),0);
return MulticastGroup(MAC(0x33, 0x33, 0xff, a[13], a[14], a[15]), 0);
}
return MulticastGroup(); // NOLINT(modernize-return-braced-init-list)
}
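The comments above capture the two derivation rules: IPv4 ARP maps to the Ethernet broadcast MAC with the IPv4 address carried in the ADI field, while IPv6 ND maps to a 33:33:ff MAC built from the last three address bytes with ADI 0. A stand-alone restatement on raw bytes, as a sketch (the ZeroTier types are deliberately elided):

```c++
// Sketch restating the derivation rules above with plain bytes; not the real MulticastGroup code.
#include <cstdint>
#include <cstring>

struct GroupSketch { uint8_t mac[6]; uint32_t adi; };

static GroupSketch arpGroupForV4(const uint8_t ip4[4]) {
	GroupSketch g;
	std::memset(g.mac, 0xff, 6);                                   // Ethernet broadcast MAC
	g.adi = ((uint32_t)ip4[0] << 24) | ((uint32_t)ip4[1] << 16) |  // ADI = the IPv4 address itself,
	        ((uint32_t)ip4[2] << 8) | (uint32_t)ip4[3];            // e.g. 10.1.2.3 -> 0x0a010203
	return g;
}

static GroupSketch ndpGroupForV6(const uint8_t ip6[16]) {
	// 33:33:ff:xx:xx:xx from the last three address bytes; no ADI needed for IPv6.
	GroupSketch g = { { 0x33, 0x33, 0xff, ip6[13], ip6[14], ip6[15] }, 0 };
	return g;
}
```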
@@ -71,15 +74,21 @@ public:
/**
* @return Ethernet MAC portion of multicast group
*/
ZT_INLINE const MAC &mac() const noexcept { return m_mac; }
ZT_INLINE const MAC &mac() const noexcept
{ return m_mac; }

/**
* @return Additional distinguishing information, which is normally zero except for IPv4 ARP where it's the IPv4 address
*/
ZT_INLINE uint32_t adi() const { return m_adi; }
ZT_INLINE uint32_t adi() const
{ return m_adi; }

ZT_INLINE bool operator==(const MulticastGroup &g) const noexcept
{ return ((m_mac == g.m_mac) && (m_adi == g.m_adi)); }

ZT_INLINE bool operator!=(const MulticastGroup &g) const noexcept
{ return ((m_mac != g.m_mac) || (m_adi != g.m_adi)); }

ZT_INLINE bool operator==(const MulticastGroup &g) const noexcept { return ((m_mac == g.m_mac) && (m_adi == g.m_adi)); }
ZT_INLINE bool operator!=(const MulticastGroup &g) const noexcept { return ((m_mac != g.m_mac) || (m_adi != g.m_adi)); }
ZT_INLINE bool operator<(const MulticastGroup &g) const noexcept
{
if (m_mac < g.m_mac)

@@ -88,11 +97,18 @@ public:
return (m_adi < g.m_adi);
return false;
}
ZT_INLINE bool operator>(const MulticastGroup &g) const noexcept { return (g < *this); }
ZT_INLINE bool operator<=(const MulticastGroup &g) const noexcept { return !(g < *this); }
ZT_INLINE bool operator>=(const MulticastGroup &g) const noexcept { return !(*this < g); }

ZT_INLINE unsigned long hashCode() const noexcept { return (m_mac.hashCode() + (unsigned long)m_adi); }
ZT_INLINE bool operator>(const MulticastGroup &g) const noexcept
{ return (g < *this); }

ZT_INLINE bool operator<=(const MulticastGroup &g) const noexcept
{ return !(g < *this); }

ZT_INLINE bool operator>=(const MulticastGroup &g) const noexcept
{ return !(*this < g); }

ZT_INLINE unsigned long hashCode() const noexcept
{ return (m_mac.hashCode() + (unsigned long)m_adi); }

private:
MAC m_mac;
@@ -67,12 +67,9 @@ struct _NodeObjects
} // anonymous namespace

Node::Node(
void *uPtr,
const struct ZT_Node_Callbacks *callbacks,
const CallContext &cc) :
Node::Node(void *uPtr, const struct ZT_Node_Callbacks *callbacks, const CallContext &cc) :
m_ctx(this),
m_store(&m_ctx),
m_store(m_ctx),
m_objects(nullptr),
m_lastPeerPulse(0),
m_lastHousekeepingRun(0),

@@ -167,9 +164,7 @@ void Node::shutdown(const CallContext &cc)
m_ctx.topology->saveAll(cc);
}

ZT_ResultCode Node::processBackgroundTasks(
const CallContext &cc,
volatile int64_t *nextBackgroundTaskDeadline)
ZT_ResultCode Node::processBackgroundTasks(const CallContext &cc, volatile int64_t *nextBackgroundTaskDeadline)
{
Mutex::Lock bl(m_backgroundTasksLock);

@@ -179,7 +174,7 @@ ZT_ResultCode Node::processBackgroundTasks(
// certificates. This also happens on demand when the trust store is changed.
if ((cc.ticks - m_lastTrustStoreUpdate) >= ZT_TRUSTSTORE_UPDATE_PERIOD) {
m_lastTrustStoreUpdate = cc.ticks;
if (m_ctx.ts->update(cc.ticks, nullptr))
if (unlikely(m_ctx.ts->update(cc.ticks, nullptr)))
m_ctx.topology->trustStoreChanged(cc);
}

@@ -196,7 +191,6 @@ ZT_ResultCode Node::processBackgroundTasks(
if ((cc.ticks - m_lastHousekeepingRun) >= ZT_HOUSEKEEPING_PERIOD) {
m_lastHousekeepingRun = cc.ticks;
ZT_SPEW("running housekeeping...");

m_ctx.topology->doPeriodicTasks(cc);
m_ctx.sa->clean(cc);
}

@@ -215,12 +209,13 @@ ZT_ResultCode Node::processBackgroundTasks(
bool online = false;
for (Vector< SharedPtr< Peer > >::iterator p(allPeers.begin()); p != allPeers.end(); ++p) {
const bool isRoot = std::binary_search(rootPeers.begin(), rootPeers.end(), *p);
(*p)->pulse(m_ctx, cc, isRoot);
online |= ((isRoot || rootPeers.empty()) && (*p)->directlyConnected(cc));
(*p)->pulse(m_ctx, cc);
if (!online) {
online = ((std::binary_search(rootPeers.begin(), rootPeers.end(), *p) || rootPeers.empty()) && (*p)->directlyConnected());
}
}

if (m_online.exchange(online, std::memory_order_relaxed) != online)
if (unlikely(m_online.exchange(online, std::memory_order_relaxed) != online))
postEvent(cc.tPtr, online ? ZT_EVENT_ONLINE : ZT_EVENT_OFFLINE);

ZT_SPEW("ranking roots...");

@@ -238,11 +233,7 @@ ZT_ResultCode Node::processBackgroundTasks(
return ZT_RESULT_OK;
}

ZT_ResultCode Node::join(
uint64_t nwid,
const ZT_Fingerprint *controllerFingerprint,
void *uptr,
const CallContext &cc)
ZT_ResultCode Node::join(uint64_t nwid, const ZT_Fingerprint *controllerFingerprint, void *uptr, const CallContext &cc)
{
Mutex::Lock l(m_allNetworks_l);

@@ -265,10 +256,7 @@ ZT_ResultCode Node::join(
return ZT_RESULT_OK;
}

ZT_ResultCode Node::leave(
uint64_t nwid,
void **uptr,
const CallContext &cc)
ZT_ResultCode Node::leave(uint64_t nwid, void **uptr, const CallContext &cc)
{
Mutex::Lock l(m_allNetworks_l);

@@ -302,11 +290,7 @@ ZT_ResultCode Node::leave(
}
}

ZT_ResultCode Node::multicastSubscribe(
const CallContext &cc,
uint64_t nwid,
uint64_t multicastGroup,
unsigned long multicastAdi)
ZT_ResultCode Node::multicastSubscribe(const CallContext &cc, uint64_t nwid, uint64_t multicastGroup, unsigned long multicastAdi)
{
ZT_SPEW("multicast subscribe to %s:%lu", MAC(multicastGroup).toString().c_str(), multicastAdi);
const SharedPtr< Network > nw(m_ctx.networks->get(nwid));

@@ -318,11 +302,7 @@ ZT_ResultCode Node::multicastSubscribe(
}
}

ZT_ResultCode Node::multicastUnsubscribe(
const CallContext &cc,
uint64_t nwid,
uint64_t multicastGroup,
unsigned long multicastAdi)
ZT_ResultCode Node::multicastUnsubscribe(const CallContext &cc, uint64_t nwid, uint64_t multicastGroup, unsigned long multicastAdi)
{
ZT_SPEW("multicast unsubscribe from %s:%lu", MAC(multicastGroup).toString().c_str(), multicastAdi);
const SharedPtr< Network > nw(m_ctx.networks->get(nwid));

@@ -385,11 +365,12 @@ ZT_PeerList *Node::peers(const CallContext &cc) const
pl->p_identities.push_front(pp.identity());
p.identity = reinterpret_cast<const ZT_Identity *>(&(pl->p_identities.front()));
p.fingerprint = &(pl->p_identities.front().fingerprint());
if (pp.remoteVersionKnown()) {
p.versionMajor = (int)pp.remoteVersionMajor();
p.versionMinor = (int)pp.remoteVersionMinor();
p.versionRev = (int)pp.remoteVersionRevision();
p.versionProto = (int)pp.remoteVersionProtocol();
uint16_t vProto, vMajor, vMinor, vRevision;
if (pp.remoteVersion(vProto, vMajor, vMinor, vRevision)) {
p.versionMajor = (int)vMajor;
p.versionMinor = (int)vMinor;
p.versionRev = (int)vRevision;
p.versionProto = (int)vProto;
} else {
p.versionMajor = -1;
p.versionMinor = -1;

@@ -612,7 +593,7 @@ void Node::setController(void *networkControllerInstance)
m_ctx.localNetworkController->init(m_ctx.identity, this);
}

bool Node::shouldUsePathForZeroTierTraffic(void *tPtr, const Identity &id, const int64_t localSocket, const InetAddress &remoteAddress)
bool Node::filterPotentialPath(void *tPtr, const Identity &id, int64_t localSocket, const InetAddress &remoteAddress)
{
{
Mutex::Lock l(m_allNetworks_l);
@@ -127,7 +127,7 @@ public:
* @param md Event data or NULL if none
* @param mdSize Size of event data
*/
ZT_INLINE void postEvent(void *tPtr, ZT_Event ev, const void *md = nullptr, const unsigned int mdSize = 0) noexcept
ZT_INLINE void postEvent(void *const tPtr, const ZT_Event ev, const void *const md = nullptr, const unsigned int mdSize = 0) noexcept
{ m_ctx.cb.eventCallback(reinterpret_cast<ZT_Node *>(this), m_ctx.uPtr, tPtr, ev, md, mdSize); }

/**

@@ -141,7 +141,7 @@ public:
* @param remoteAddress Remote address
* @return True if path should be used
*/
bool shouldUsePathForZeroTierTraffic(void *tPtr, const Identity &id, int64_t localSocket, const InetAddress &remoteAddress);
bool filterPotentialPath(void *tPtr, const Identity &id, int64_t localSocket, const InetAddress &remoteAddress);

/**
* Query callback for a physical address for a peer
* Query callback for a physical address for a peer
|
||||
|
|
|
@ -155,7 +155,6 @@ public:
|
|||
{ return ZT_CERTIFICATEOFOWNERSHIP_MARSHAL_SIZE_MAX; }
|
||||
|
||||
int marshal(uint8_t data[ZT_CERTIFICATEOFOWNERSHIP_MARSHAL_SIZE_MAX], bool forSign = false) const noexcept;
|
||||
|
||||
int unmarshal(const uint8_t *data, int len) noexcept;
|
||||
|
||||
// Provides natural sort order by ID
486 core/Peer.cpp
@@ -24,17 +24,20 @@
namespace ZeroTier {

// An arbitrary byte to send in single byte probes, incremented on each probe.
static uint8_t s_arbitraryByte = (uint8_t)Utils::random();

Peer::Peer() :
m_ephemeralPairTimestamp(0),
m_key((uintptr_t)&m_identityKey),
m_keyRenegotiationNeeded(false),
m_lastReceive(0),
m_lastSend(0),
m_lastSentHello(0),
m_lastWhoisRequestReceived(0),
m_lastEchoRequestReceived(0),
m_lastPrioritizedPaths(0),
m_lastProbeReceived(0),
m_alivePathCount(0),
m_tryQueue(),
m_bestPath(0),
m_vProto(0),
m_vMajor(0),
m_vMinor(0),

@@ -48,14 +51,12 @@ bool Peer::init(const Context &ctx, const CallContext &cc, const Identity &peerI
{
RWMutex::Lock l(m_lock);

if (m_id) // already initialized sanity check
return false;
m_id = peerIdentity;

uint8_t k[ZT_SYMMETRIC_KEY_SIZE];
if (!ctx.identity.agree(peerIdentity, k))
if (unlikely(!ctx.identity.agree(peerIdentity, k)))
return false;
m_identityKey.set(new SymmetricKey(cc.ticks, k));
m_identityKey.init(cc.ticks, k);
Utils::burn(k, sizeof(k));

m_deriveSecondaryIdentityKeys();

@@ -71,12 +72,15 @@ void Peer::received(
const uint64_t packetId,
const unsigned int payloadLength,
const Protocol::Verb verb,
const Protocol::Verb inReVerb)
const Protocol::Verb /*inReVerb*/)
{
m_lastReceive = cc.ticks;
m_lastReceive.store(cc.ticks, std::memory_order_relaxed);
m_inMeter.log(cc.ticks, payloadLength);

if (hops == 0) {
// NOTE: in the most common scenario we will be talking via the best path.
// This does a check without a full mutex lock and if so there's nothing more
// to do, which speeds things up in that case.
if ((hops == 0) && ((uintptr_t)path.ptr() != m_bestPath.load(std::memory_order_relaxed))) {
RWMutex::RMaybeWLock l(m_lock);

// If this matches an existing path, skip path learning stuff. For the small number

@@ -87,62 +91,43 @@ void Peer::received(
}

// If we made it here, we don't already know this path.
if (ctx.node->shouldUsePathForZeroTierTraffic(cc.tPtr, m_id, path->localSocket(), path->address())) {
if (ctx.node->filterPotentialPath(cc.tPtr, m_id, path->localSocket(), path->address())) {
// SECURITY: note that if we've made it here we expected this OK, see Expect.hpp.
// There is replay protection in effect for OK responses.
if (verb == Protocol::VERB_OK) {
// If we're learning a new path convert the lock to an exclusive write lock.
// Acquire write access to the object and thus path set.
l.writing();

// If the path list is full, replace the least recently active path. Otherwise append new path.
unsigned int newPathIdx = 0;
unsigned int newPathIdx;
if (m_alivePathCount == ZT_MAX_PEER_NETWORK_PATHS) {
int64_t lastReceiveTimeMax = 0;
for (unsigned int i = 0; i < m_alivePathCount; ++i) {
if ((m_paths[i]->address().as.sa.sa_family == path->address().as.sa.sa_family) &&
(m_paths[i]->localSocket() == path->localSocket()) && // TODO: should be localInterface when multipath is integrated
(m_paths[i]->address().ipsEqual2(path->address()))) {
// Replace older path if everything is the same except the port number, since NAT/firewall reboots
// and other wacky stuff can change port number assignments.
m_paths[i] = path;
return;
} else if (m_paths[i]->lastIn() >= lastReceiveTimeMax) {
lastReceiveTimeMax = m_paths[i]->lastIn();
newPathIdx = i;
}
m_prioritizePaths(cc);
if (m_alivePathCount == ZT_MAX_PEER_NETWORK_PATHS) {
newPathIdx = ZT_MAX_PEER_NETWORK_PATHS - 1;
} else {
newPathIdx = m_alivePathCount++;
}
} else {
newPathIdx = m_alivePathCount++;
}

InetAddress old;
if (m_paths[newPathIdx])
old = m_paths[newPathIdx]->address();
// Save a reference to the current path in case we replace it. This
// should technically never happen, but this ensures safety if it does.
const SharedPtr< Path > currentBest(reinterpret_cast<Path *>(m_bestPath.load(std::memory_order_acquire)));

SharedPtr< Path > old;
old.move(m_paths[newPathIdx]);
m_paths[newPathIdx] = path;

// Re-prioritize paths to include the new one.
m_prioritizePaths(cc);

// Add or update entry in the endpoint cache. If this endpoint
// is already present, its timesSeen count is incremented. Otherwise
// it replaces the lowest ranked entry.
std::sort(m_endpointCache, m_endpointCache + ZT_PEER_ENDPOINT_CACHE_SIZE);
Endpoint thisEndpoint(path->address());
for (unsigned int i = 0;; ++i) {
if (i == (ZT_PEER_ENDPOINT_CACHE_SIZE - 1)) {
m_endpointCache[i].target = thisEndpoint;
m_endpointCache[i].lastSeen = cc.ticks;
break;
} else if (m_endpointCache[i].target == thisEndpoint) {
m_endpointCache[i].lastSeen = cc.ticks;
break;
}
}

ctx.t->learnedNewPath(cc, 0x582fabdd, packetId, m_id, path->address(), old);
ctx.t->learnedNewPath(cc, 0x582fabdd, packetId, m_id, path->address(), (old) ? old->address() : InetAddress());
} else {
path->sent(cc, hello(ctx, cc, path->localSocket(), path->address()));
ctx.t->tryingNewPath(cc, 0xb7747ddd, m_id, path->address(), path->address(), packetId, (uint8_t)verb, m_id);
int64_t &lt = m_lastTried[Endpoint(path->address())];
if ((cc.ticks - lt) < ZT_PATH_MIN_TRY_INTERVAL) {
lt = cc.ticks;
path->sent(cc, m_hello(ctx, cc, path->localSocket(), path->address(), false));
ctx.t->tryingNewPath(cc, 0xb7747ddd, m_id, path->address(), path->address(), packetId, (uint8_t)verb, m_id);
}
}
}
}

@@ -150,97 +135,35 @@ void Peer::received(
void Peer::send(const Context &ctx, const CallContext &cc, const void *data, unsigned int len) noexcept
{
SharedPtr< Path > via(this->path(cc));
if (via) {
via->send(ctx, cc, data, len);
SharedPtr< Path > via(reinterpret_cast<Path *>(m_bestPath.load(std::memory_order_acquire)));
if (likely(via)) {
if (likely(via->send(ctx, cc, data, len)))
this->sent(cc, len);
} else {
const SharedPtr< Peer > root(ctx.topology->root());
if ((root) && (root.ptr() != this)) {
if (likely((root) && (root.ptr() != this))) {
via = root->path(cc);
if (via) {
via->send(ctx, cc, data, len);
root->relayed(cc, len);
} else {
return;
if (likely(via)) {
if (likely(via->send(ctx, cc, data, len))) {
root->relayed(cc, len);
this->sent(cc, len);
}
}
} else {
return;
}
}
sent(cc, len);
}

unsigned int Peer::hello(const Context &ctx, const CallContext &cc, int64_t localSocket, const InetAddress &atAddress)
{
Buf outp;

const uint64_t packetId = m_identityKey->nextMessage(ctx.identity.address(), m_id.address());
int ii = Protocol::newPacket(outp, packetId, m_id.address(), ctx.identity.address(), Protocol::VERB_HELLO);

outp.wI8(ii, ZT_PROTO_VERSION);
outp.wI8(ii, ZEROTIER_VERSION_MAJOR);
outp.wI8(ii, ZEROTIER_VERSION_MINOR);
outp.wI16(ii, ZEROTIER_VERSION_REVISION);
outp.wI64(ii, (uint64_t)cc.clock);
outp.wO(ii, ctx.identity);
outp.wO(ii, atAddress);

const int ivStart = ii;
outp.wR(ii, 12);

// LEGACY: the six reserved bytes after the IV exist for legacy compatibility with v1.x nodes.
// Once those are dead they'll become just reserved bytes for future use as flags etc.
outp.wI32(ii, 0); // reserved bytes
void *const legacyMoonCountStart = outp.unsafeData + ii;
outp.wI16(ii, 0);
const uint64_t legacySalsaIv = packetId & ZT_CONST_TO_BE_UINT64(0xfffffffffffffff8ULL);
Salsa20(m_identityKey->secret, &legacySalsaIv).crypt12(legacyMoonCountStart, legacyMoonCountStart, 2);

const int cryptSectionStart = ii;
FCV< uint8_t, 4096 > md;
Dictionary::append(md, ZT_PROTO_HELLO_NODE_META_INSTANCE_ID, ctx.instanceId);
outp.wI16(ii, (uint16_t)md.size());
outp.wB(ii, md.data(), (unsigned int)md.size());

if (unlikely((ii + ZT_HMACSHA384_LEN) > ZT_BUF_SIZE)) // sanity check: should be impossible
return 0;

AES::CTR ctr(m_helloCipher);
void *const cryptSection = outp.unsafeData + ii;
ctr.init(outp.unsafeData + ivStart, 0, cryptSection);
ctr.crypt(cryptSection, ii - cryptSectionStart);
ctr.finish();

HMACSHA384(m_helloMacKey, outp.unsafeData, ii, outp.unsafeData + ii);
ii += ZT_HMACSHA384_LEN;

// LEGACY: we also need Poly1305 for v1.x peers.
uint8_t polyKey[ZT_POLY1305_KEY_SIZE], perPacketKey[ZT_SALSA20_KEY_SIZE];
Protocol::salsa2012DeriveKey(m_identityKey->secret, perPacketKey, outp, ii);
Salsa20(perPacketKey, &packetId).crypt12(Utils::ZERO256, polyKey, sizeof(polyKey));
Poly1305 p1305(polyKey);
p1305.update(outp.unsafeData + ZT_PROTO_PACKET_ENCRYPTED_SECTION_START, ii - ZT_PROTO_PACKET_ENCRYPTED_SECTION_START);
uint64_t polyMac[2];
p1305.finish(polyMac);
Utils::storeMachineEndian< uint64_t >(outp.unsafeData + ZT_PROTO_PACKET_MAC_INDEX, polyMac[0]);

return (likely(ctx.cb.wirePacketSendFunction(reinterpret_cast<ZT_Node *>(ctx.node), ctx.uPtr, cc.tPtr, localSocket, reinterpret_cast<const ZT_InetAddress *>(&atAddress), outp.unsafeData, ii, 0) == 0)) ? ii : 0;
}

void Peer::pulse(const Context &ctx, const CallContext &cc, const bool isRoot)
void Peer::pulse(const Context &ctx, const CallContext &cc)
{
RWMutex::Lock l(m_lock);

// Grab current key (this is never NULL).
SymmetricKey *const key = reinterpret_cast<SymmetricKey *>(m_key.load(std::memory_order_relaxed));

// Determine if we need a new ephemeral key pair and if a new HELLO needs
// to be sent. The latter happens every ZT_PEER_HELLO_INTERVAL or if a new
// ephemeral key pair is generated.
bool needHello = false;
if ((m_vProto >= 11) && (((cc.ticks - m_ephemeralPairTimestamp) >= (ZT_SYMMETRIC_KEY_TTL / 2)) || ((m_ephemeralKeys[0]) && (m_ephemeralKeys[0]->odometer() >= (ZT_SYMMETRIC_KEY_TTL_MESSAGES / 2))))) {
m_ephemeralPair.generate();
needHello = true;
} else if ((cc.ticks - m_lastSentHello) >= ZT_PEER_HELLO_INTERVAL) {
needHello = true;
}
bool needHello = (((m_vProto >= 20) && (m_keyRenegotiationNeeded || (key == &m_identityKey) || ((cc.ticks - key->timestamp()) >= (ZT_SYMMETRIC_KEY_TTL / 2)) || (key->odometer() > (ZT_SYMMETRIC_KEY_TTL_MESSAGES / 2)))) || ((cc.ticks - m_lastSentHello) >= ZT_PEER_HELLO_INTERVAL));

// Prioritize paths and more importantly for here forget dead ones.
m_prioritizePaths(cc);
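The single `needHello` expression in the hunk above is the new re-keying trigger. Unpacked into equivalent, more readable form (a sketch for explanation only, not a proposed change to the diff):

```c++
// Equivalent logic, spelled out (sketch):
bool needHello = false;
if (m_vProto >= 20) {
	const bool renegotiationRequested = m_keyRenegotiationNeeded;                        // flagged elsewhere
	const bool stillOnIdentityKey = (key == &m_identityKey);                             // no ephemeral key established yet
	const bool keyTooOld = (cc.ticks - key->timestamp()) >= (ZT_SYMMETRIC_KEY_TTL / 2);
	const bool keyOverused = key->odometer() > (ZT_SYMMETRIC_KEY_TTL_MESSAGES / 2);
	needHello = renegotiationRequested || stillOnIdentityKey || keyTooOld || keyOverused;
}
if ((cc.ticks - m_lastSentHello) >= ZT_PEER_HELLO_INTERVAL)
	needHello = true;   // periodic HELLO regardless of key state
```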
@@ -252,9 +175,9 @@ void Peer::pulse(const Context &ctx, const CallContext &cc, const bool isRoot)
// callback (if one was supplied).

if (m_locator) {
for (Vector< std::pair<Endpoint, SharedPtr< const Locator::EndpointAttributes > > >::const_iterator ep(m_locator->endpoints().begin()); ep != m_locator->endpoints().end(); ++ep) {
for (Vector< std::pair< Endpoint, SharedPtr< const Locator::EndpointAttributes > > >::const_iterator ep(m_locator->endpoints().begin()); ep != m_locator->endpoints().end(); ++ep) {
if (ep->first.type == ZT_ENDPOINT_TYPE_IP_UDP) {
if (ctx.node->shouldUsePathForZeroTierTraffic(cc.tPtr, m_id, -1, ep->first.ip())) {
if (ctx.node->filterPotentialPath(cc.tPtr, m_id, -1, ep->first.ip())) {
int64_t &lt = m_lastTried[ep->first];
if ((cc.ticks - lt) > ZT_PATH_MIN_TRY_INTERVAL) {
lt = cc.ticks;

@@ -266,22 +189,9 @@ void Peer::pulse(const Context &ctx, const CallContext &cc, const bool isRoot)
}
}

for (unsigned int i = 0; i < ZT_PEER_ENDPOINT_CACHE_SIZE; ++i) {
if ((m_endpointCache[i].lastSeen > 0) && (m_endpointCache[i].target.type == ZT_ENDPOINT_TYPE_IP_UDP)) {
if (ctx.node->shouldUsePathForZeroTierTraffic(cc.tPtr, m_id, -1, m_endpointCache[i].target.ip())) {
int64_t &lt = m_lastTried[m_endpointCache[i].target];
if ((cc.ticks - lt) > ZT_PATH_MIN_TRY_INTERVAL) {
lt = cc.ticks;
ctx.t->tryingNewPath(cc, 0x84b22343, m_id, m_endpointCache[i].target.ip(), InetAddress::NIL, 0, 0, Identity::NIL);
sent(cc, m_sendProbe(ctx, cc, -1, m_endpointCache[i].target.ip(), nullptr, 0));
}
}
}
}

InetAddress addr;
if (ctx.node->externalPathLookup(cc.tPtr, m_id, -1, addr)) {
if ((addr) && ctx.node->shouldUsePathForZeroTierTraffic(cc.tPtr, m_id, -1, addr)) {
if ((addr) && ctx.node->filterPotentialPath(cc.tPtr, m_id, -1, addr)) {
int64_t &lt = m_lastTried[Endpoint(addr)];
if ((cc.ticks - lt) > ZT_PATH_MIN_TRY_INTERVAL) {
lt = cc.ticks;

@@ -292,11 +202,6 @@ void Peer::pulse(const Context &ctx, const CallContext &cc, const bool isRoot)
}
}
} else {
// Attempt up to ZT_NAT_T_MAX_QUEUED_ATTEMPTS_PER_PULSE queued addresses.

// Note that m_lastTried is checked when contact() is called and something
// is added to the try queue, not here.

unsigned int attempts = 0;
for (;;) {
p_TryQueueItem &qi = m_tryQueue.front();

@@ -310,6 +215,8 @@ void Peer::pulse(const Context &ctx, const CallContext &cc, const bool isRoot)
}

if (qi.target.type == ZT_ENDPOINT_TYPE_IP_UDP) {
// TODO: need to send something like a NOP for older target nodes.

++attempts;
if (qi.iteration < 0) {

@@ -379,16 +286,19 @@ void Peer::pulse(const Context &ctx, const CallContext &cc, const bool isRoot)
// Do keepalive on all currently active paths, sending HELLO to the first
// if needHello is true and sending small keepalives to others.
uint64_t randomJunk = Utils::random();
for (unsigned int i = 0; i < m_alivePathCount; ++i) {
if (needHello) {
needHello = false;
const unsigned int bytes = hello(ctx, cc, m_paths[i]->localSocket(), m_paths[i]->address());
m_paths[i]->sent(cc, bytes);
sent(cc, bytes);
m_lastSentHello = cc.ticks;
const unsigned int bytes = m_hello(ctx, cc, m_paths[i]->localSocket(), m_paths[i]->address(), m_keyRenegotiationNeeded);
if (bytes) {
m_paths[i]->sent(cc, bytes);
sent(cc, bytes);
m_lastSentHello = cc.ticks;
m_keyRenegotiationNeeded = false;
}
} else if ((cc.ticks - m_paths[i]->lastOut()) >= ZT_PATH_KEEPALIVE_PERIOD) {
m_paths[i]->send(ctx, cc, reinterpret_cast<uint8_t *>(&randomJunk) + (i & 7U), 1);
m_paths[i]->send(ctx, cc, &s_arbitraryByte, 1);
++s_arbitraryByte;
sent(cc, 1);
}
}

@@ -399,18 +309,21 @@ void Peer::pulse(const Context &ctx, const CallContext &cc, const bool isRoot)
if (root) {
const SharedPtr< Path > via(root->path(cc));
if (via) {
const unsigned int bytes = hello(ctx, cc, via->localSocket(), via->address());
via->sent(cc, bytes);
root->relayed(cc, bytes);
sent(cc, bytes);
m_lastSentHello = cc.ticks;
const unsigned int bytes = m_hello(ctx, cc, via->localSocket(), via->address(), m_keyRenegotiationNeeded);
if (bytes) {
via->sent(cc, bytes);
root->relayed(cc, bytes);
sent(cc, bytes);
m_lastSentHello = cc.ticks;
m_keyRenegotiationNeeded = false;
}
}
}
}

// Clean m_lastTried
for (Map< Endpoint, int64_t >::iterator i(m_lastTried.begin()); i != m_lastTried.end();) {
if ((cc.ticks - i->second) > (ZT_PATH_MIN_TRY_INTERVAL * 4))
if ((cc.ticks - i->second) > (ZT_PATH_MIN_TRY_INTERVAL * 3))
m_lastTried.erase(i++);
else ++i;
}

@@ -418,14 +331,10 @@ void Peer::pulse(const Context &ctx, const CallContext &cc, const bool isRoot)
void Peer::contact(const Context &ctx, const CallContext &cc, const Endpoint &ep, int tries)
{
static uint8_t foo = 0;
RWMutex::Lock l(m_lock);

// See if there's already a path to this endpoint and if so ignore it.
if (ep.isInetAddr()) {
if ((cc.ticks - m_lastPrioritizedPaths) > ZT_PEER_PRIORITIZE_PATHS_INTERVAL) {
m_prioritizePaths(cc);
}
for (unsigned int i = 0; i < m_alivePathCount; ++i) {
if (m_paths[i]->address().ipsEqual(ep.ip()))
return;

@@ -441,8 +350,8 @@ void Peer::contact(const Context &ctx, const CallContext &cc, const Endpoint &ep
// For IPv4 addresses we send a tiny packet with a low TTL, which helps to
// traverse some NAT types. It has no effect otherwise.
if (ep.isInetAddr() && ep.ip().isV4()) {
++foo;
ctx.cb.wirePacketSendFunction(reinterpret_cast<ZT_Node *>(ctx.node), ctx.uPtr, cc.tPtr, -1, reinterpret_cast<const ZT_InetAddress *>(&ep.ip()), &foo, 1, 2);
ctx.cb.wirePacketSendFunction(reinterpret_cast<ZT_Node *>(ctx.node), ctx.uPtr, cc.tPtr, -1, reinterpret_cast<const ZT_InetAddress *>(&ep.ip()), &s_arbitraryByte, 1, 2);
++s_arbitraryByte;
}

// Make sure address is not already in the try queue. If so just update it.

@@ -475,26 +384,6 @@ void Peer::resetWithinScope(const Context &ctx, const CallContext &cc, InetAddre
m_paths[pc++].zero();
}

bool Peer::directlyConnected(const CallContext &cc)
{
if ((cc.ticks - m_lastPrioritizedPaths) > ZT_PEER_PRIORITIZE_PATHS_INTERVAL) {
RWMutex::Lock l(m_lock);
m_prioritizePaths(cc);
return m_alivePathCount > 0;
} else {
RWMutex::RLock l(m_lock);
return m_alivePathCount > 0;
}
}

void Peer::getAllPaths(Vector< SharedPtr< Path > > &paths)
{
RWMutex::RLock l(m_lock);
paths.clear();
paths.reserve(m_alivePathCount);
paths.assign(m_paths, m_paths + m_alivePathCount);
|
||||
}
|
||||
|
||||
void Peer::save(const Context &ctx, const CallContext &cc) const
|
||||
{
|
||||
uint8_t buf[8 + ZT_PEER_MARSHAL_SIZE_MAX];
|
||||
|
@ -515,9 +404,6 @@ int Peer::marshal(const Context &ctx, uint8_t data[ZT_PEER_MARSHAL_SIZE_MAX]) co
|
|||
{
|
||||
RWMutex::RLock l(m_lock);
|
||||
|
||||
if (!m_identityKey)
|
||||
return -1;
|
||||
|
||||
data[0] = 16; // serialized peer version
|
||||
|
||||
// Include our identity's address to detect if this changes and require
|
||||
|
@ -527,9 +413,9 @@ int Peer::marshal(const Context &ctx, uint8_t data[ZT_PEER_MARSHAL_SIZE_MAX]) co
|
|||
// SECURITY: encryption in place is only to protect secrets if they are
|
||||
// cached to local storage. It's not used over the wire. Dumb ECB is fine
|
||||
// because secret keys are random and have no structure to reveal.
|
||||
ctx.localSecretCipher.encrypt(m_identityKey->secret, data + 1 + ZT_ADDRESS_LENGTH);
|
||||
ctx.localSecretCipher.encrypt(m_identityKey->secret + 16, data + 1 + ZT_ADDRESS_LENGTH + 16);
|
||||
ctx.localSecretCipher.encrypt(m_identityKey->secret + 32, data + 1 + ZT_ADDRESS_LENGTH + 32);
|
||||
ctx.localSecretCipher.encrypt(m_identityKey.key(), data + 1 + ZT_ADDRESS_LENGTH);
|
||||
ctx.localSecretCipher.encrypt(m_identityKey.key() + 16, data + 1 + ZT_ADDRESS_LENGTH + 16);
|
||||
ctx.localSecretCipher.encrypt(m_identityKey.key() + 32, data + 1 + ZT_ADDRESS_LENGTH + 32);
|
||||
|
||||
int p = 1 + ZT_ADDRESS_LENGTH + 48;
|
||||
|
||||
|
@ -548,21 +434,6 @@ int Peer::marshal(const Context &ctx, uint8_t data[ZT_PEER_MARSHAL_SIZE_MAX]) co
|
|||
data[p++] = 0;
|
||||
}
|
||||
|
||||
unsigned int cachedEndpointCount = 0;
|
||||
for (unsigned int i = 0; i < ZT_PEER_ENDPOINT_CACHE_SIZE; ++i) {
|
||||
if (m_endpointCache[i].lastSeen > 0)
|
||||
++cachedEndpointCount;
|
||||
}
|
||||
Utils::storeBigEndian(data + p, (uint16_t)cachedEndpointCount);
|
||||
p += 2;
|
||||
for (unsigned int i = 0; i < ZT_PEER_ENDPOINT_CACHE_SIZE; ++i) {
|
||||
Utils::storeBigEndian(data + p, (uint64_t)m_endpointCache[i].lastSeen);
|
||||
s = m_endpointCache[i].target.marshal(data + p);
|
||||
if (s <= 0)
|
||||
return -1;
|
||||
p += s;
|
||||
}
|
||||
|
||||
Utils::storeBigEndian(data + p, (uint16_t)m_vProto);
|
||||
p += 2;
|
||||
Utils::storeBigEndian(data + p, (uint16_t)m_vMajor);
|
||||
|
@ -585,18 +456,22 @@ int Peer::unmarshal(const Context &ctx, const int64_t ticks, const uint8_t *rest
|
|||
if ((len <= (1 + ZT_ADDRESS_LENGTH + 48)) || (data[0] != 16))
|
||||
return -1;
|
||||
|
||||
m_identityKey.zero();
|
||||
m_ephemeralKeys[0].zero();
|
||||
m_ephemeralKeys[1].zero();
|
||||
for (unsigned int i = 0; i < ZT_PEER_EPHEMERAL_KEY_BUFFER_SIZE; ++i) {
|
||||
m_ephemeralKeysSent[i].creationTime = -1;
|
||||
m_ephemeralSessions[i].established = false;
|
||||
}
|
||||
m_key.store((uintptr_t)&m_identityKey, std::memory_order_relaxed);
|
||||
|
||||
bool identityKeyRestored = false;
|
||||
if (Address(data + 1) == ctx.identity.address()) {
|
||||
uint8_t k[ZT_SYMMETRIC_KEY_SIZE];
|
||||
static_assert(ZT_SYMMETRIC_KEY_SIZE == 48, "marshal() and unmarshal() must be revisited if ZT_SYMMETRIC_KEY_SIZE is changed");
|
||||
ctx.localSecretCipher.decrypt(data + 1 + ZT_ADDRESS_LENGTH, k);
|
||||
ctx.localSecretCipher.decrypt(data + 1 + ZT_ADDRESS_LENGTH + 16, k + 16);
|
||||
ctx.localSecretCipher.decrypt(data + 1 + ZT_ADDRESS_LENGTH + 32, k + 32);
|
||||
m_identityKey.set(new SymmetricKey(ticks, k));
|
||||
m_identityKey.init(ticks, k);
|
||||
Utils::burn(k, sizeof(k));
|
||||
identityKeyRestored = true;
|
||||
}
|
||||
|
||||
int p = 1 + ZT_ADDRESS_LENGTH + 48;
|
||||
|
@ -606,11 +481,11 @@ int Peer::unmarshal(const Context &ctx, const int64_t ticks, const uint8_t *rest
|
|||
return s;
|
||||
p += s;
|
||||
|
||||
if (!m_identityKey) {
|
||||
if (!identityKeyRestored) {
|
||||
uint8_t k[ZT_SYMMETRIC_KEY_SIZE];
|
||||
if (!ctx.identity.agree(m_id, k))
|
||||
return -1;
|
||||
m_identityKey.set(new SymmetricKey(ticks, k));
|
||||
m_identityKey.init(ticks, k);
|
||||
Utils::burn(k, sizeof(k));
|
||||
}
|
||||
|
||||
|
@ -631,21 +506,6 @@ int Peer::unmarshal(const Context &ctx, const int64_t ticks, const uint8_t *rest
|
|||
return -1;
|
||||
}
|
||||
|
||||
const unsigned int cachedEndpointCount = Utils::loadBigEndian< uint16_t >(data + p);
|
||||
p += 2;
|
||||
for (unsigned int i = 0; i < cachedEndpointCount; ++i) {
|
||||
if (i < ZT_PEER_ENDPOINT_CACHE_SIZE) {
|
||||
if ((p + 8) >= len)
|
||||
return -1;
|
||||
m_endpointCache[i].lastSeen = (int64_t)Utils::loadBigEndian< uint64_t >(data + p);
|
||||
p += 8;
|
||||
s = m_endpointCache[i].target.unmarshal(data + p, len - p);
|
||||
if (s <= 0)
|
||||
return -1;
|
||||
p += s;
|
||||
}
|
||||
}
|
||||
|
||||
if ((p + 10) > len)
|
||||
return -1;
|
||||
m_vProto = Utils::loadBigEndian< uint16_t >(data + p);
|
||||
|
@ -667,48 +527,60 @@ struct _PathPriorityComparisonOperator
|
|||
{
|
||||
ZT_INLINE bool operator()(const SharedPtr< Path > &a, const SharedPtr< Path > &b) const noexcept
|
||||
{
|
||||
// Sort in descending order of most recent receive time.
|
||||
return (a->lastIn() > b->lastIn());
|
||||
if (a) {
|
||||
if (b)
|
||||
return (a->lastIn() > b->lastIn());
|
||||
else return true;
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
void Peer::m_prioritizePaths(const CallContext &cc)
|
||||
{
|
||||
// assumes _lock is locked for writing
|
||||
m_lastPrioritizedPaths = cc.ticks;
|
||||
// assumes m_lock is locked
|
||||
|
||||
if (m_alivePathCount > 0) {
|
||||
// Sort paths in descending order of priority.
|
||||
std::sort(m_paths, m_paths + m_alivePathCount, _PathPriorityComparisonOperator());
|
||||
// Need to hold the current best just in case we drop it before changing the atomic.
|
||||
const SharedPtr< Path > oldBest(reinterpret_cast<Path *>(m_bestPath.load(std::memory_order_acquire)));
|
||||
|
||||
// Let go of paths that have expired.
|
||||
for (unsigned int i = 0; i < ZT_MAX_PEER_NETWORK_PATHS; ++i) {
|
||||
if ((!m_paths[i]) || (!m_paths[i]->alive(cc))) {
|
||||
m_alivePathCount = i;
|
||||
for (; i < ZT_MAX_PEER_NETWORK_PATHS; ++i)
|
||||
m_paths[i].zero();
|
||||
break;
|
||||
// Clean and reprioritize paths.
|
||||
if (m_alivePathCount != 0) {
|
||||
unsigned int newCnt = 0;
|
||||
for (unsigned int i = 0; i < m_alivePathCount; ++i) {
|
||||
if ((m_paths[i]) && (m_paths[i]->alive(cc))) {
|
||||
if (i != newCnt)
|
||||
m_paths[newCnt].move(m_paths[i]);
|
||||
++newCnt;
|
||||
}
|
||||
}
|
||||
for (unsigned int i = newCnt; i < m_alivePathCount; ++i)
|
||||
m_paths[i].zero();
|
||||
m_alivePathCount = newCnt;
|
||||
|
||||
std::sort(m_paths, m_paths + newCnt, _PathPriorityComparisonOperator());
|
||||
}
|
||||
|
||||
// Update atomic holding pointer to best path.
|
||||
m_bestPath.store((m_alivePathCount != 0) ? (uintptr_t)m_paths[0].ptr() : (uintptr_t)0, std::memory_order_release);
|
||||
}
|
||||
|
||||
unsigned int Peer::m_sendProbe(const Context &ctx, const CallContext &cc, int64_t localSocket, const InetAddress &atAddress, const uint16_t *ports, const unsigned int numPorts)
|
||||
{
|
||||
// Assumes m_lock is locked
|
||||
const SharedPtr< SymmetricKey > k(m_key());
|
||||
const uint64_t packetId = k->nextMessage(ctx.identity.address(), m_id.address());
|
||||
|
||||
// SECURITY: we use the long-lived identity key here since this is used for
|
||||
// trial contacts, etc. It contains no meaningful payload so who cares if
|
||||
// some future attacker compromises it.
|
||||
|
||||
uint8_t p[ZT_PROTO_MIN_PACKET_LENGTH];
|
||||
Utils::storeMachineEndian< uint64_t >(p + ZT_PROTO_PACKET_ID_INDEX, packetId);
|
||||
Utils::storeMachineEndian< uint64_t >(p + ZT_PROTO_PACKET_ID_INDEX, m_identityKey.nextMessage(ctx.identity.address(), m_id.address()));
|
||||
m_id.address().copyTo(p + ZT_PROTO_PACKET_DESTINATION_INDEX);
|
||||
ctx.identity.address().copyTo(p + ZT_PROTO_PACKET_SOURCE_INDEX);
|
||||
p[ZT_PROTO_PACKET_FLAGS_INDEX] = 0;
|
||||
p[ZT_PROTO_PACKET_VERB_INDEX] = Protocol::VERB_ECHO;
|
||||
p[ZT_PROTO_PACKET_VERB_INDEX] = Protocol::VERB_NOP;
|
||||
|
||||
Protocol::armor(p, ZT_PROTO_MIN_PACKET_LENGTH, k, cipher());
|
||||
|
||||
ctx.expect->sending(packetId, cc.ticks);
|
||||
ctx.expect->sending(Protocol::armor(p, ZT_PROTO_MIN_PACKET_LENGTH, m_identityKey, cipher()), cc.ticks);
|
||||
|
||||
if (numPorts > 0) {
|
||||
InetAddress tmp(atAddress);
|
||||
|
@ -725,11 +597,123 @@ unsigned int Peer::m_sendProbe(const Context &ctx, const CallContext &cc, int64_
|
|||
|
||||
void Peer::m_deriveSecondaryIdentityKeys() noexcept
|
||||
{
|
||||
// This is called in init() and unmarshal() to use KBKDF to derive keys
|
||||
// for encrypting the dictionary portion of HELLOs and HELLO HMAC from the
|
||||
// primary long-lived identity key.
|
||||
|
||||
uint8_t hk[ZT_SYMMETRIC_KEY_SIZE];
|
||||
KBKDFHMACSHA384(m_identityKey->secret, ZT_KBKDF_LABEL_HELLO_DICTIONARY_ENCRYPT, 0, 0, hk);
|
||||
KBKDFHMACSHA384(m_identityKey.key(), ZT_KBKDF_LABEL_HELLO_DICTIONARY_ENCRYPT, 0, 0, hk);
|
||||
m_helloCipher.init(hk);
|
||||
Utils::burn(hk, sizeof(hk));
|
||||
KBKDFHMACSHA384(m_identityKey->secret, ZT_KBKDF_LABEL_PACKET_HMAC, 0, 0, m_helloMacKey);
|
||||
|
||||
KBKDFHMACSHA384(m_identityKey.key(), ZT_KBKDF_LABEL_PACKET_HMAC, 0, 0, m_helloMacKey);
|
||||
}
|
||||
|
||||
unsigned int Peer::m_hello(const Context &ctx, const CallContext &cc, int64_t localSocket, const InetAddress &atAddress, const bool forceNewKey)
|
||||
{
|
||||
// assumes m_lock is at least locked for reading
|
||||
|
||||
/* SECURITY: note that HELLO is sent mostly in the clear and always uses
|
||||
* the long-lived identity key. This allows us to always bootstrap regardless
|
||||
* of ephemeral key state. HELLO contains nothing particularly sensitive,
|
||||
* though part of the message is encrypted with another derived key just to
|
||||
* conceal things like ephemeral public keys for defense in depth. HELLO is
|
||||
* always sent with the old salsa/poly algorithm (but minus salsa of course
|
||||
* as it's plaintext), but terminates with an additional HMAC-SHA3
|
||||
* authenticator to add extra hardness to the key exchange. The use of HMAC
|
||||
* here is also needed to satisfy some FIPS/NIST type requirements. */
|
||||
|
||||
// Pick or generate an ephemeral key to send with this HELLO.
|
||||
p_EphemeralPrivate *ephemeral;
|
||||
{
|
||||
p_EphemeralPrivate *earliest = m_ephemeralKeysSent;
|
||||
p_EphemeralPrivate *latest = nullptr;
|
||||
int64_t earliestEphemeralPrivate = 9223372036854775807LL;
|
||||
int64_t latestEphemeralPrivate = 0;
|
||||
for (unsigned int k = 0; k < ZT_PEER_EPHEMERAL_KEY_BUFFER_SIZE; ++k) {
|
||||
const int64_t ct = m_ephemeralKeysSent[k].creationTime;
|
||||
if (ct <= earliestEphemeralPrivate) {
|
||||
earliestEphemeralPrivate = ct;
|
||||
earliest = m_ephemeralKeysSent + k;
|
||||
} else if (ct >= latestEphemeralPrivate) { // creationTime will be -1 if not initialized
|
||||
latestEphemeralPrivate = ct;
|
||||
latest = m_ephemeralKeysSent + k;
|
||||
}
|
||||
}
|
||||
|
||||
if ((latest != nullptr) && (!forceNewKey) && ((cc.ticks - latest->creationTime) < (ZT_SYMMETRIC_KEY_TTL / 2))) {
|
||||
ephemeral = latest;
|
||||
} else {
|
||||
earliest->creationTime = cc.ticks;
|
||||
earliest->pub.type = ZT_PROTO_EPHEMERAL_KEY_TYPE_C25519_P384;
|
||||
C25519::generateC25519(earliest->pub.c25519Public, earliest->c25519Private);
|
||||
ECC384GenerateKey(earliest->pub.p384Public, earliest->p384Private);
|
||||
SHA384(earliest->sha384OfPublic, &earliest->pub, sizeof(earliest->pub));
|
||||
ephemeral = earliest;
|
||||
}
|
||||
}
|
||||
|
||||
// Initialize packet and add basic fields like identity and sent-to address.
|
||||
Buf outp;
|
||||
const uint64_t packetId = m_identityKey.nextMessage(ctx.identity.address(), m_id.address());
|
||||
int ii = Protocol::newPacket(outp, packetId, m_id.address(), ctx.identity.address(), Protocol::VERB_HELLO);
|
||||
outp.wI8(ii, ZT_PROTO_VERSION);
|
||||
outp.wI8(ii, ZEROTIER_VERSION_MAJOR);
|
||||
outp.wI8(ii, ZEROTIER_VERSION_MINOR);
|
||||
outp.wI16(ii, ZEROTIER_VERSION_REVISION);
|
||||
outp.wI64(ii, (uint64_t)cc.clock);
|
||||
outp.wO(ii, ctx.identity);
|
||||
outp.wO(ii, atAddress);
|
||||
|
||||
// Add 12 random bytes to act as an IV for the encrypted dictionary field.
|
||||
const int ivStart = ii;
|
||||
outp.wR(ii, 12);
|
||||
|
||||
// LEGACY: the six reserved bytes after the IV exist for legacy compatibility with v1.x nodes.
|
||||
// Once those are dead they'll become just reserved bytes for future use as flags etc.
|
||||
outp.wI32(ii, 0); // reserved bytes
|
||||
void *const legacyMoonCountStart = outp.unsafeData + ii;
|
||||
outp.wI16(ii, 0);
|
||||
const uint64_t legacySalsaIv = packetId & ZT_CONST_TO_BE_UINT64(0xfffffffffffffff8ULL);
|
||||
Salsa20(m_identityKey.key(), &legacySalsaIv).crypt12(legacyMoonCountStart, legacyMoonCountStart, 2);
|
||||
|
||||
// Append dictionary containing meta-data and ephemeral key info.
|
||||
const int cryptSectionStart = ii;
|
||||
FCV< uint8_t, 2048 > md;
|
||||
Dictionary::append(md, ZT_PROTO_HELLO_NODE_META_INSTANCE_ID, ctx.instanceId);
|
||||
// TODO: add other fields and ephemeral key info
|
||||
outp.wI16(ii, (uint16_t)md.size());
|
||||
outp.wB(ii, md.data(), (unsigned int)md.size());
|
||||
|
||||
if (unlikely((ii + ZT_HMACSHA384_LEN) > ZT_BUF_SIZE)) // sanity check, should be impossible
|
||||
return 0;
|
||||
|
||||
// Encrypt the meta-data dictionary using a derived static key and the IV
|
||||
// we generated above. This isn't strictly necessary as the data in there is
|
||||
// not "secret," but it's not a bad idea to hide it for defense in depth. In
|
||||
// particular this means that the public keys exchanged for ephemeral keying
|
||||
// are concealed from any observer.
|
||||
AES::CTR ctr(m_helloCipher);
|
||||
void *const cryptSection = outp.unsafeData + ii;
|
||||
ctr.init(outp.unsafeData + ivStart, 0, cryptSection);
|
||||
ctr.crypt(cryptSection, ii - cryptSectionStart);
|
||||
ctr.finish();
|
||||
|
||||
// Add HMAC at the end for strong verification by v2 nodes.
|
||||
HMACSHA384(m_helloMacKey, outp.unsafeData, ii, outp.unsafeData + ii);
|
||||
ii += ZT_HMACSHA384_LEN;
|
||||
|
||||
// Add poly1305 MAC for v1 nodes.
|
||||
uint8_t polyKey[ZT_POLY1305_KEY_SIZE], perPacketKey[ZT_SALSA20_KEY_SIZE];
|
||||
Protocol::salsa2012DeriveKey(m_identityKey.key(), perPacketKey, outp, ii);
|
||||
Salsa20(perPacketKey, &packetId).crypt12(Utils::ZERO256, polyKey, sizeof(polyKey));
|
||||
Poly1305 p1305(polyKey);
|
||||
p1305.update(outp.unsafeData + ZT_PROTO_PACKET_ENCRYPTED_SECTION_START, ii - ZT_PROTO_PACKET_ENCRYPTED_SECTION_START);
|
||||
uint64_t polyMac[2];
|
||||
p1305.finish(polyMac);
|
||||
Utils::storeMachineEndian< uint64_t >(outp.unsafeData + ZT_PROTO_PACKET_MAC_INDEX, polyMac[0]);
|
||||
|
||||
return (likely(ctx.cb.wirePacketSendFunction(reinterpret_cast<ZT_Node *>(ctx.node), ctx.uPtr, cc.tPtr, localSocket, reinterpret_cast<const ZT_InetAddress *>(&atAddress), outp.unsafeData, ii, 0) == 0)) ? (unsigned int)ii : 0U;
|
||||
}
|
||||
|
||||
} // namespace ZeroTier
|
||||
261 core/Peer.hpp
@ -28,7 +28,6 @@
|
|||
#include "Locator.hpp"
|
||||
#include "Protocol.hpp"
|
||||
#include "AES.hpp"
|
||||
#include "EphemeralKey.hpp"
|
||||
#include "SymmetricKey.hpp"
|
||||
#include "Containers.hpp"
|
||||
|
||||
|
@ -38,12 +37,13 @@
|
|||
ZT_SYMMETRIC_KEY_SIZE + \
|
||||
ZT_IDENTITY_MARSHAL_SIZE_MAX + \
|
||||
1 + ZT_LOCATOR_MARSHAL_SIZE_MAX + \
|
||||
2 + ((8 + ZT_ENDPOINT_MARSHAL_SIZE_MAX) * ZT_PEER_ENDPOINT_CACHE_SIZE) + \
|
||||
(2 * 4) + \
|
||||
2 )
|
||||
|
||||
#define ZT_PEER_DEDUP_BUFFER_SIZE 1024
|
||||
#define ZT_PEER_DEDUP_BUFFER_MASK 1023U
|
||||
#define ZT_PEER_EPHEMERAL_KEY_BUFFER_SIZE 3
|
||||
#define ZT_PEER_EPHEMERAL_KEY_COUNT_MAX (ZT_PEER_EPHEMERAL_KEY_BUFFER_SIZE + 1)
|
||||
|
||||
namespace ZeroTier {
|
||||
|
||||
|
@ -91,7 +91,7 @@ public:
|
|||
/**
|
||||
* @return Current locator or NULL if no locator is known
|
||||
*/
|
||||
ZT_INLINE const SharedPtr< const Locator > &locator() const noexcept
|
||||
ZT_INLINE const SharedPtr< const Locator > locator() const noexcept
|
||||
{
|
||||
RWMutex::RLock l(m_lock);
|
||||
return m_locator;
|
||||
|
@ -107,7 +107,7 @@ public:
|
|||
* @param verify If true, verify locator's signature and structure
|
||||
* @return New locator or previous if it was not replaced.
|
||||
*/
|
||||
ZT_INLINE SharedPtr< const Locator > setLocator(const SharedPtr< const Locator > &loc, bool verify) noexcept
|
||||
ZT_INLINE SharedPtr< const Locator > setLocator(const SharedPtr< const Locator > &loc, const bool verify) noexcept
|
||||
{
|
||||
RWMutex::Lock l(m_lock);
|
||||
if ((loc) && ((!m_locator) || (m_locator->revision() < loc->revision()))) {
|
||||
|
@ -146,7 +146,7 @@ public:
|
|||
*/
|
||||
ZT_INLINE void sent(const CallContext &cc, const unsigned int bytes) noexcept
|
||||
{
|
||||
m_lastSend = cc.ticks;
|
||||
m_lastSend.store(cc.ticks, std::memory_order_relaxed);
|
||||
m_outMeter.log(cc.ticks, bytes);
|
||||
}
|
||||
|
||||
|
@ -164,19 +164,7 @@ public:
|
|||
* @return Current best path or NULL if there is no direct path
|
||||
*/
|
||||
ZT_INLINE SharedPtr< Path > path(const CallContext &cc) noexcept
|
||||
{
|
||||
if (likely((cc.ticks - m_lastPrioritizedPaths) < ZT_PEER_PRIORITIZE_PATHS_INTERVAL)) {
|
||||
RWMutex::RLock l(m_lock);
|
||||
if (m_alivePathCount > 0)
|
||||
return m_paths[0];
|
||||
} else {
|
||||
RWMutex::Lock l(m_lock);
|
||||
m_prioritizePaths(cc);
|
||||
if (m_alivePathCount > 0)
|
||||
return m_paths[0];
|
||||
}
|
||||
return SharedPtr< Path >();
|
||||
}
|
||||
{ return SharedPtr< Path >(reinterpret_cast<Path *>(m_bestPath.load(std::memory_order_acquire))); }
|
||||
|
||||
/**
|
||||
* Send data to this peer over a specific path only
|
||||
|
@ -203,24 +191,16 @@ public:
|
|||
void send(const Context &ctx, const CallContext &cc, const void *data, unsigned int len) noexcept;
|
||||
|
||||
/**
|
||||
* Send a HELLO to this peer at a specified physical address.
|
||||
*
|
||||
* @param localSocket Local source socket
|
||||
* @param atAddress Destination address
|
||||
* @return Number of bytes sent
|
||||
* Do ping, probes, re-keying, and keepalive with this peer, as needed.
|
||||
*/
|
||||
unsigned int hello(const Context &ctx, const CallContext &cc, int64_t localSocket, const InetAddress &atAddress);
|
||||
|
||||
/**
|
||||
* Ping this peer if needed and/or perform other periodic tasks.
|
||||
*
|
||||
* @param isRoot True if this peer is a root
|
||||
*/
|
||||
void pulse(const Context &ctx, const CallContext &cc, bool isRoot);
|
||||
void pulse(const Context &ctx, const CallContext &cc);
|
||||
|
||||
/**
|
||||
* Attempt to contact this peer at a given endpoint.
|
||||
*
|
||||
* The attempt doesn't happen immediately. It's added to a queue for the
|
||||
* next invocation of pulse().
|
||||
*
|
||||
* @param ep Endpoint to attempt to contact
|
||||
* @param tries Number of times to try (default: 1)
|
||||
*/
|
||||
|
@ -270,56 +250,77 @@ public:
|
|||
{
|
||||
//if (m_vProto >= 11)
|
||||
// return ZT_PROTO_CIPHER_SUITE__AES_GMAC_SIV;
|
||||
return ZT_PROTO_CIPHER_SUITE__POLY1305_SALSA2012;
|
||||
return ZT_PROTO_CIPHER_POLY1305_SALSA2012;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return The permanent shared key for this peer computed by simple identity agreement
|
||||
*/
|
||||
ZT_INLINE SharedPtr< SymmetricKey > identityKey() noexcept
|
||||
ZT_INLINE SymmetricKey &identityKey() noexcept
|
||||
{ return m_identityKey; }
|
||||
|
||||
/**
|
||||
* @return AES instance for HELLO dictionary / encrypted section encryption/decryption
|
||||
*/
|
||||
ZT_INLINE const AES &identityHelloDictionaryEncryptionCipher() noexcept
|
||||
ZT_INLINE const AES &identityHelloDictionaryEncryptionCipher() const noexcept
|
||||
{ return m_helloCipher; }
|
||||
|
||||
/**
|
||||
* @return Key for HMAC on HELLOs
|
||||
*/
|
||||
ZT_INLINE const uint8_t *identityHelloHmacKey() noexcept
|
||||
ZT_INLINE const uint8_t *identityHelloHmacKey() const noexcept
|
||||
{ return m_helloMacKey; }
|
||||
|
||||
/**
|
||||
* @return Raw identity key bytes
|
||||
*/
|
||||
ZT_INLINE const uint8_t *rawIdentityKey() noexcept
|
||||
{
|
||||
RWMutex::RLock l(m_lock);
|
||||
return m_identityKey->secret;
|
||||
}
|
||||
ZT_INLINE const uint8_t *rawIdentityKey() const noexcept
|
||||
{ return m_identityKey.key(); }
|
||||
|
||||
/**
|
||||
* @return Current best key: either the latest ephemeral or the identity key
|
||||
*/
|
||||
ZT_INLINE SharedPtr< SymmetricKey > key() noexcept
|
||||
ZT_INLINE SymmetricKey &key() noexcept
|
||||
{ return *reinterpret_cast<SymmetricKey *>(m_key.load(std::memory_order_relaxed)); }
|
||||
|
||||
/**
|
||||
* Get keys other than a key we have already tried.
|
||||
*
|
||||
* This is used when a packet arrives that doesn't decrypt with the preferred
|
||||
* key. It fills notYetTried[] with other keys that haven't been tried yet,
|
||||
* which can include the identity key and any older session keys.
|
||||
*
|
||||
* @param alreadyTried Key we've already tried or NULL if none
|
||||
* @param notYetTried All keys known (long lived or session) other than alreadyTried
|
||||
* @return Number of pointers written to notYetTried[]
|
||||
*/
|
||||
ZT_INLINE int getOtherKeys(const SymmetricKey *const alreadyTried, SymmetricKey *notYetTried[ZT_PEER_EPHEMERAL_KEY_COUNT_MAX]) noexcept
|
||||
{
|
||||
RWMutex::RLock l(m_lock);
|
||||
return m_key();
|
||||
int cnt = 0;
|
||||
if (alreadyTried != &m_identityKey)
|
||||
notYetTried[cnt++] = &m_identityKey;
|
||||
for (unsigned int k=0;k<ZT_PEER_EPHEMERAL_KEY_BUFFER_SIZE;++k) {
|
||||
SymmetricKey *const kk = &m_ephemeralSessions[k].key;
|
||||
if (m_ephemeralSessions[k].established && (alreadyTried != kk))
|
||||
notYetTried[cnt++] = kk;
|
||||
}
|
||||
return cnt;
|
||||
}
|
||||
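// Editor's illustrative sketch (assumption, not part of this commit): a typical
// caller of getOtherKeys() when the preferred key fails to authenticate a packet.
// tryDecrypt() is a hypothetical placeholder for the packet decrypt/verify step.
//
//   SymmetricKey *candidates[ZT_PEER_EPHEMERAL_KEY_COUNT_MAX];
//   SymmetricKey &preferred = peer->key();
//   if (!tryDecrypt(pkt, preferred)) {
//     const int n = peer->getOtherKeys(&preferred, candidates);
//     for (int i = 0; i < n; ++i) {
//       if (tryDecrypt(pkt, *candidates[i])) {
//         peer->setKeyRenegotiationNeeded(); // keys are out of sync; rekey at next pulse()
//         break;
//       }
//     }
//   }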
|
||||
/**
|
||||
* Check whether a key is ephemeral
|
||||
* Set a flag ordering a key renegotiation ASAP.
|
||||
*
|
||||
* This is used to check whether a packet is received with forward secrecy enabled
|
||||
* or not.
|
||||
*
|
||||
* @param k Key to check
|
||||
* @return True if this key is ephemeral, false if it's the long-lived identity key
|
||||
* This can be called if there's any hint of an issue with the current key.
|
||||
* It's also called if any of the secondary possible keys returned by
|
||||
* getOtherKeys() decrypt a valid packet, indicating a desynchronization
|
||||
* in which key should be used.
|
||||
*/
|
||||
ZT_INLINE bool isEphemeral(const SharedPtr< SymmetricKey > &k) const noexcept
|
||||
{ return m_identityKey != k; }
|
||||
ZT_INLINE void setKeyRenegotiationNeeded() noexcept
|
||||
{
|
||||
RWMutex::Lock l(m_lock);
|
||||
m_keyRenegotiationNeeded = true;
|
||||
}
|
||||
|
||||
/**
|
||||
* Set the currently known remote version of this peer's client
|
||||
|
@ -331,51 +332,60 @@ public:
|
|||
*/
|
||||
ZT_INLINE void setRemoteVersion(unsigned int vproto, unsigned int vmaj, unsigned int vmin, unsigned int vrev) noexcept
|
||||
{
|
||||
RWMutex::Lock l(m_lock);
|
||||
m_vProto = (uint16_t)vproto;
|
||||
m_vMajor = (uint16_t)vmaj;
|
||||
m_vMinor = (uint16_t)vmin;
|
||||
m_vRevision = (uint16_t)vrev;
|
||||
}
|
||||
|
||||
ZT_INLINE unsigned int remoteVersionProtocol() const noexcept
|
||||
{ return m_vProto; }
|
||||
|
||||
ZT_INLINE unsigned int remoteVersionMajor() const noexcept
|
||||
{ return m_vMajor; }
|
||||
|
||||
ZT_INLINE unsigned int remoteVersionMinor() const noexcept
|
||||
{ return m_vMinor; }
|
||||
|
||||
ZT_INLINE unsigned int remoteVersionRevision() const noexcept
|
||||
{ return m_vRevision; }
|
||||
|
||||
ZT_INLINE bool remoteVersionKnown() const noexcept
|
||||
{ return (m_vMajor > 0) || (m_vMinor > 0) || (m_vRevision > 0); }
|
||||
/**
|
||||
* Get the remote version of this peer.
|
||||
*
|
||||
* If false is returned, the value of the value-result variables is
|
||||
* undefined.
|
||||
*
|
||||
* @param vProto Set to protocol version
|
||||
* @param vMajor Set to major version
|
||||
* @param vMinor Set to minor version
|
||||
* @param vRevision Set to revision
|
||||
* @return True if remote version is known
|
||||
*/
|
||||
ZT_INLINE bool remoteVersion(uint16_t &vProto, uint16_t &vMajor, uint16_t &vMinor, uint16_t &vRevision)
|
||||
{
|
||||
RWMutex::RLock l(m_lock);
|
||||
return (((vProto = m_vProto)|(vMajor = m_vMajor)|(vMinor = m_vMinor)|(vRevision = m_vRevision)) != 0);
|
||||
}
|
||||
|
||||
/**
|
||||
* @return True if there is at least one alive direct path
|
||||
*/
|
||||
bool directlyConnected(const CallContext &cc);
|
||||
ZT_INLINE bool directlyConnected() const noexcept
|
||||
{
|
||||
RWMutex::RLock l(m_lock);
|
||||
return m_alivePathCount > 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get all paths
|
||||
*
|
||||
* @param paths Vector of paths with the first path being the current preferred path
|
||||
*/
|
||||
void getAllPaths(Vector< SharedPtr< Path > > &paths);
|
||||
ZT_INLINE void getAllPaths(Vector< SharedPtr< Path > > &paths) const
|
||||
{
|
||||
RWMutex::RLock l(m_lock);
|
||||
paths.assign(m_paths, m_paths + m_alivePathCount);
|
||||
}
|
||||
|
||||
/**
|
||||
* Save the latest version of this peer to the data store
|
||||
*/
|
||||
void save(const Context &ctx, const CallContext &cc) const;
|
||||
|
||||
// NOTE: peer marshal/unmarshal only saves/restores the identity, locator, most
|
||||
// recent bootstrap address, and version information.
|
||||
static constexpr int marshalSizeMax() noexcept
|
||||
{ return ZT_PEER_MARSHAL_SIZE_MAX; }
|
||||
|
||||
int marshal(const Context &ctx, uint8_t data[ZT_PEER_MARSHAL_SIZE_MAX]) const noexcept;
|
||||
|
||||
int unmarshal(const Context &ctx, int64_t ticks, const uint8_t *restrict data, int len) noexcept;
|
||||
|
||||
/**
|
||||
|
@ -383,8 +393,8 @@ public:
|
|||
*/
|
||||
ZT_INLINE bool rateGateInboundWhoisRequest(CallContext &cc) noexcept
|
||||
{
|
||||
if ((cc.ticks - m_lastWhoisRequestReceived) >= ZT_PEER_WHOIS_RATE_LIMIT) {
|
||||
m_lastWhoisRequestReceived = cc.ticks;
|
||||
if ((cc.ticks - m_lastWhoisRequestReceived.load(std::memory_order_relaxed)) >= ZT_PEER_WHOIS_RATE_LIMIT) {
|
||||
m_lastWhoisRequestReceived.store(cc.ticks, std::memory_order_relaxed);
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
|
@ -395,8 +405,8 @@ public:
|
|||
*/
|
||||
ZT_INLINE bool rateGateEchoRequest(CallContext &cc) noexcept
|
||||
{
|
||||
if ((cc.ticks - m_lastEchoRequestReceived) >= ZT_PEER_GENERAL_RATE_LIMIT) {
|
||||
m_lastEchoRequestReceived = cc.ticks;
|
||||
if ((cc.ticks - m_lastEchoRequestReceived.load(std::memory_order_relaxed)) >= ZT_PEER_GENERAL_RATE_LIMIT) {
|
||||
m_lastEchoRequestReceived.store(cc.ticks, std::memory_order_relaxed);
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
|
@ -407,8 +417,8 @@ public:
|
|||
*/
|
||||
ZT_INLINE bool rateGateProbeRequest(CallContext &cc) noexcept
|
||||
{
|
||||
if ((cc.ticks - m_lastProbeReceived) > ZT_PEER_PROBE_RESPONSE_RATE_LIMIT) {
|
||||
m_lastProbeReceived = cc.ticks;
|
||||
if ((cc.ticks - m_lastProbeReceived.load(std::memory_order_relaxed)) > ZT_PEER_PROBE_RESPONSE_RATE_LIMIT) {
|
||||
m_lastProbeReceived.store(cc.ticks, std::memory_order_relaxed);
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
|
@ -424,27 +434,53 @@ public:
|
|||
* @return True if this is a duplicate
|
||||
*/
|
||||
ZT_INLINE bool deduplicateIncomingPacket(const uint64_t packetId) noexcept
|
||||
{
|
||||
// TODO: should take instance ID into account too, but this isn't fully wired.
|
||||
return m_dedup[Utils::hash32((uint32_t)packetId) & ZT_PEER_DEDUP_BUFFER_MASK].exchange(packetId) == packetId;
|
||||
}
|
||||
{ return m_dedup[Utils::hash32((uint32_t)packetId) & ZT_PEER_DEDUP_BUFFER_MASK].exchange(packetId, std::memory_order_relaxed) == packetId; }
|
||||
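// Editor's note (assumption, not part of this commit): m_dedup is a fixed-size
// table indexed by a hash of the low 32 bits of the packet ID. exchange() stores
// the new ID and returns the slot's previous occupant atomically, so true is
// returned only when the slot already held this exact 64-bit ID. Two different
// IDs that land in the same slot simply overwrite each other, which can miss a
// duplicate but cannot flag a genuinely new ID unless the full 64-bit value repeats.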
|
||||
private:
|
||||
struct p_EphemeralPublic
|
||||
{
|
||||
uint8_t type;
|
||||
uint8_t c25519Public[ZT_C25519_ECDH_PUBLIC_KEY_SIZE];
|
||||
uint8_t p384Public[ZT_ECC384_PUBLIC_KEY_SIZE];
|
||||
};
|
||||
|
||||
static_assert(sizeof(p_EphemeralPublic) == (1 + ZT_C25519_ECDH_PUBLIC_KEY_SIZE + ZT_ECC384_PUBLIC_KEY_SIZE), "p_EphemeralPublic has extra padding");
|
||||
|
||||
struct p_EphemeralPrivate
|
||||
{
|
||||
ZT_INLINE p_EphemeralPrivate() noexcept: creationTime(-1)
|
||||
{}
|
||||
|
||||
ZT_INLINE ~p_EphemeralPrivate()
|
||||
{ Utils::burn(this, sizeof(p_EphemeralPublic)); }
|
||||
|
||||
int64_t creationTime;
|
||||
uint64_t sha384OfPublic[6];
|
||||
p_EphemeralPublic pub;
|
||||
uint8_t c25519Private[ZT_C25519_ECDH_PRIVATE_KEY_SIZE];
|
||||
uint8_t p384Private[ZT_ECC384_PRIVATE_KEY_SIZE];
|
||||
};
|
||||
|
||||
struct p_EphemeralSession
|
||||
{
|
||||
ZT_INLINE p_EphemeralSession() noexcept: established(false)
|
||||
{}
|
||||
|
||||
uint64_t sha384OfPeerPublic[6];
|
||||
SymmetricKey key;
|
||||
bool established;
|
||||
};
|
||||
|
||||
void m_prioritizePaths(const CallContext &cc);
|
||||
unsigned int m_sendProbe(const Context &ctx, const CallContext &cc, int64_t localSocket, const InetAddress &atAddress, const uint16_t *ports, unsigned int numPorts);
|
||||
void m_deriveSecondaryIdentityKeys() noexcept;
|
||||
unsigned int m_hello(const Context &ctx, const CallContext &cc, int64_t localSocket, const InetAddress &atAddress, bool forceNewKey);
|
||||
|
||||
ZT_INLINE SharedPtr< SymmetricKey > m_key() noexcept
|
||||
{
|
||||
// assumes m_lock is locked (for read at least)
|
||||
return (m_ephemeralKeys[0]) ? m_ephemeralKeys[0] : m_identityKey;
|
||||
}
|
||||
|
||||
// Read/write mutex for non-atomic non-const fields.
|
||||
// Guards all fields except those otherwise indicated (and atomics of course).
|
||||
RWMutex m_lock;
|
||||
|
||||
// Static identity key
|
||||
SharedPtr< SymmetricKey > m_identityKey;
|
||||
// Long lived key resulting from agreement with this peer's identity.
|
||||
SymmetricKey m_identityKey;
|
||||
|
||||
// Cipher for encrypting or decrypting the encrypted section of HELLO packets.
|
||||
AES m_helloCipher;
|
||||
|
@ -452,17 +488,25 @@ private:
|
|||
// Key for HELLO HMAC-SHA384
|
||||
uint8_t m_helloMacKey[ZT_SYMMETRIC_KEY_SIZE];
|
||||
|
||||
// Currently active ephemeral public key pair
|
||||
EphemeralKey m_ephemeralPair;
|
||||
int64_t m_ephemeralPairTimestamp;
|
||||
// Keys we have generated and sent.
|
||||
p_EphemeralPrivate m_ephemeralKeysSent[ZT_PEER_EPHEMERAL_KEY_BUFFER_SIZE];
|
||||
|
||||
// Current and previous ephemeral key
|
||||
SharedPtr< SymmetricKey > m_ephemeralKeys[2];
|
||||
// Sessions created when OK(HELLO) is received.
|
||||
p_EphemeralSession m_ephemeralSessions[ZT_PEER_EPHEMERAL_KEY_BUFFER_SIZE];
|
||||
|
||||
// Pointer to active key (SymmetricKey).
|
||||
std::atomic< uintptr_t > m_key;
|
||||
|
||||
// Flag indicating that we should rekey at next pulse().
|
||||
bool m_keyRenegotiationNeeded;
|
||||
|
||||
// This peer's public identity.
|
||||
Identity m_id;
|
||||
|
||||
// This peer's most recent (by revision) locator, or NULL if none on file.
|
||||
SharedPtr< const Locator > m_locator;
|
||||
|
||||
// the last time something was sent or received from this peer (direct or indirect).
|
||||
// The last time something was received or sent.
|
||||
std::atomic< int64_t > m_lastReceive;
|
||||
std::atomic< int64_t > m_lastSend;
|
||||
|
||||
|
@ -475,13 +519,10 @@ private:
|
|||
// The last time an ECHO request was received from this peer (anti-DOS / anti-flood).
|
||||
std::atomic< int64_t > m_lastEchoRequestReceived;
|
||||
|
||||
// The last time we sorted paths in order of preference. (This happens pretty often.)
|
||||
std::atomic< int64_t > m_lastPrioritizedPaths;
|
||||
|
||||
// The last time we got a probe from this peer.
|
||||
std::atomic< int64_t > m_lastProbeReceived;
|
||||
|
||||
// Deduplication buffer
|
||||
// Deduplication buffer.
|
||||
std::atomic< uint64_t > m_dedup[ZT_PEER_DEDUP_BUFFER_SIZE];
|
||||
|
||||
// Meters measuring actual bandwidth in, out, and relayed via this peer (mostly if this is a root).
|
||||
|
@ -492,27 +533,15 @@ private:
|
|||
// Direct paths sorted in descending order of preference.
|
||||
SharedPtr< Path > m_paths[ZT_MAX_PEER_NETWORK_PATHS];
|
||||
|
||||
// Number of paths current alive (number of non-NULL entries in _paths).
|
||||
// Size of m_paths[] in non-NULL paths (max: MAX_PEER_NETWORK_PATHS).
|
||||
unsigned int m_alivePathCount;
|
||||
|
||||
// Current best path (pointer to Path).
|
||||
std::atomic<uintptr_t> m_bestPath;
|
||||
|
||||
// For SharedPtr<>
|
||||
std::atomic< int > __refCount;
|
||||
|
||||
struct p_EndpointCacheItem
|
||||
{
|
||||
Endpoint target;
|
||||
int64_t lastSeen;
|
||||
|
||||
ZT_INLINE bool operator<(const p_EndpointCacheItem &ci) const noexcept
|
||||
{ return lastSeen < ci.lastSeen; }
|
||||
|
||||
ZT_INLINE p_EndpointCacheItem() noexcept: target(), lastSeen(0)
|
||||
{}
|
||||
};
|
||||
|
||||
// Endpoint cache sorted in ascending order of times seen followed by first seen time.
|
||||
p_EndpointCacheItem m_endpointCache[ZT_PEER_ENDPOINT_CACHE_SIZE];
|
||||
|
||||
struct p_TryQueueItem
|
||||
{
|
||||
ZT_INLINE p_TryQueueItem() :
|
||||
|
@ -529,9 +558,13 @@ private:
|
|||
int iteration;
|
||||
};
|
||||
|
||||
// Queue of endpoints to try.
|
||||
List< p_TryQueueItem > m_tryQueue;
|
||||
|
||||
// Time each endpoint was last tried, for rate limiting.
|
||||
Map< Endpoint, int64_t > m_lastTried;
|
||||
|
||||
// Version of remote peer, if known.
|
||||
uint16_t m_vProto;
|
||||
uint16_t m_vMajor;
|
||||
uint16_t m_vMinor;
|
||||
|
|
|
@ -32,9 +32,7 @@ public:
|
|||
{ this->init(key); }
|
||||
|
||||
void init(const void *key) noexcept;
|
||||
|
||||
void update(const void *data, unsigned int len) noexcept;
|
||||
|
||||
void finish(void *auth) noexcept;
|
||||
|
||||
static ZT_INLINE void compute(void *const auth, const void *const data, const unsigned int len, const void *const key) noexcept
|
||||
|
|
|
@ -106,6 +106,8 @@
|
|||
* 9 - 1.2.0 ... 1.2.14
|
||||
* 10 - 1.4.0 ... 1.4.6
|
||||
* + Contained early pre-alpha versions of multipath, which are deprecated
|
||||
* 11 - 1.6.0 ... 2.0.0
|
||||
* + Supports AES-GMAC-SIV symmetric crypto, backported from v2 tree.
|
||||
* 20 - 2.0.0 ... CURRENT
|
||||
* + New more WAN-efficient P2P-assisted multicast algorithm
|
||||
* + HELLO and OK(HELLO) include an extra HMAC to harden authentication
|
||||
|
@ -156,22 +158,27 @@
|
|||
/**
|
||||
* NONE/Poly1305 (used for HELLO for backward compatibility)
|
||||
*/
|
||||
#define ZT_PROTO_CIPHER_SUITE__POLY1305_NONE 0
|
||||
#define ZT_PROTO_CIPHER_POLY1305_NONE 0
|
||||
|
||||
/**
|
||||
* Salsa2012/Poly1305 (legacy)
|
||||
*/
|
||||
#define ZT_PROTO_CIPHER_SUITE__POLY1305_SALSA2012 1
|
||||
#define ZT_PROTO_CIPHER_POLY1305_SALSA2012 1
|
||||
|
||||
/**
|
||||
* Deprecated, not currently used.
|
||||
*/
|
||||
#define ZT_PROTO_CIPHER_SUITE__NONE 2
|
||||
#define ZT_PROTO_CIPHER_NONE 2
|
||||
|
||||
/**
|
||||
* AES-GMAC-SIV
|
||||
*/
|
||||
#define ZT_PROTO_CIPHER_SUITE__AES_GMAC_SIV 3
|
||||
#define ZT_PROTO_CIPHER_AES_GMAC_SIV 3
|
||||
|
||||
/**
|
||||
* Ephemeral key consisting of both a C25519 and a NIST P-384 key pair.
|
||||
*/
|
||||
#define ZT_PROTO_EPHEMERAL_KEY_TYPE_C25519_P384 1
|
||||
|
||||
/**
|
||||
* Minimum viable length for a fragment
|
||||
|
@ -245,8 +252,8 @@
|
|||
#define ZT_PROTO_HELLO_NODE_META_EPHEMERAL_PUBLIC "e"
|
||||
#define ZT_PROTO_HELLO_NODE_META_EPHEMERAL_ACK "E"
|
||||
|
||||
static_assert(ZT_PROTO_MAX_PACKET_LENGTH < ZT_BUF_MEM_SIZE,"maximum packet length won't fit in Buf");
|
||||
static_assert(ZT_PROTO_PACKET_ENCRYPTED_SECTION_START == (ZT_PROTO_MIN_PACKET_LENGTH-1),"encrypted packet section must start right before protocol verb at one less than minimum packet size");
|
||||
static_assert(ZT_PROTO_MAX_PACKET_LENGTH < ZT_BUF_MEM_SIZE, "maximum packet length won't fit in Buf");
|
||||
static_assert(ZT_PROTO_PACKET_ENCRYPTED_SECTION_START == (ZT_PROTO_MIN_PACKET_LENGTH - 1), "encrypted packet section must start right before protocol verb at one less than minimum packet size");
|
||||
|
||||
namespace ZeroTier {
|
||||
namespace Protocol {
|
||||
|
@ -328,17 +335,20 @@ enum Verb
|
|||
* Dictionary fields (defines start with ZT_PROTO_HELLO_NODE_META_):
|
||||
*
|
||||
* INSTANCE_ID - a 64-bit unique value generated on each node start
|
||||
* PREFERRED_CIPHER_MODE - preferred symmetric encryption mode
|
||||
* LOCATOR - signed record enumerating this node's trusted contact points
|
||||
* EPHEMERAL_PUBLIC - Ephemeral public key(s)
|
||||
*
|
||||
* OK will contain EPHEMERAL_PUBLIC (of the sender) and:
|
||||
* OK will contain EPHEMERAL_PUBLIC of the responding node and:
|
||||
*
|
||||
* EPHEMERAL_ACK - SHA384 of EPHEMERAL_PUBLIC received
|
||||
* EPHEMERAL_ACK - SHA384(EPHEMERAL_PUBLIC from HELLO)
|
||||
*
|
||||
* The following optional fields may also be present:
|
||||
*
|
||||
* PREFERRED_CIPHER - preferred symmetric encryption mode
|
||||
* HOSTNAME - arbitrary short host name for this node
|
||||
* ARCH - system architecture (CPU type, bits, etc.)
|
||||
* OSNAME - system operating system name
|
||||
* OSVERSION - operating system version
|
||||
* CONTACT - arbitrary short contact information string for this node
|
||||
* SOFTWARE_VENDOR - short name or description of vendor, such as a URL
|
||||
* COMPLIANCE - bit mask containing bits for e.g. a FIPS-compliant node
|
||||
|
@ -674,31 +684,53 @@ enum Verb
|
|||
};
|
||||
|
||||
#ifdef ZT_DEBUG_SPEW
|
||||
|
||||
static ZT_INLINE const char *verbName(const Verb v) noexcept
|
||||
{
|
||||
switch(v) {
|
||||
case VERB_NOP: return "NOP";
|
||||
case VERB_HELLO: return "HELLO";
|
||||
case VERB_ERROR: return "ERROR";
|
||||
case VERB_OK: return "OK";
|
||||
case VERB_WHOIS: return "WHOIS";
|
||||
case VERB_RENDEZVOUS: return "RENDEZVOUS";
|
||||
case VERB_FRAME: return "FRAME";
|
||||
case VERB_EXT_FRAME: return "EXT_FRAME";
|
||||
case VERB_ECHO: return "ECHO";
|
||||
case VERB_MULTICAST_LIKE: return "MULTICAST_LIKE";
|
||||
case VERB_NETWORK_CREDENTIALS: return "NETWORK_CREDENTIALS";
|
||||
case VERB_NETWORK_CONFIG_REQUEST: return "NETWORK_CONFIG_REQUEST";
|
||||
case VERB_NETWORK_CONFIG: return "NETWORK_CONFIG";
|
||||
case VERB_MULTICAST_GATHER: return "MULTICAST_GATHER";
|
||||
case VERB_MULTICAST_FRAME_deprecated: return "MULTICAST_FRAME_deprecated";
|
||||
case VERB_PUSH_DIRECT_PATHS: return "PUSH_DIRECT_PATHS";
|
||||
case VERB_USER_MESSAGE: return "USER_MESSAGE";
|
||||
case VERB_MULTICAST: return "MULTICAST";
|
||||
case VERB_ENCAP: return "ENCAP";
|
||||
default: return "(unknown)";
|
||||
switch (v) {
|
||||
case VERB_NOP:
|
||||
return "NOP";
|
||||
case VERB_HELLO:
|
||||
return "HELLO";
|
||||
case VERB_ERROR:
|
||||
return "ERROR";
|
||||
case VERB_OK:
|
||||
return "OK";
|
||||
case VERB_WHOIS:
|
||||
return "WHOIS";
|
||||
case VERB_RENDEZVOUS:
|
||||
return "RENDEZVOUS";
|
||||
case VERB_FRAME:
|
||||
return "FRAME";
|
||||
case VERB_EXT_FRAME:
|
||||
return "EXT_FRAME";
|
||||
case VERB_ECHO:
|
||||
return "ECHO";
|
||||
case VERB_MULTICAST_LIKE:
|
||||
return "MULTICAST_LIKE";
|
||||
case VERB_NETWORK_CREDENTIALS:
|
||||
return "NETWORK_CREDENTIALS";
|
||||
case VERB_NETWORK_CONFIG_REQUEST:
|
||||
return "NETWORK_CONFIG_REQUEST";
|
||||
case VERB_NETWORK_CONFIG:
|
||||
return "NETWORK_CONFIG";
|
||||
case VERB_MULTICAST_GATHER:
|
||||
return "MULTICAST_GATHER";
|
||||
case VERB_MULTICAST_FRAME_deprecated:
|
||||
return "MULTICAST_FRAME_deprecated";
|
||||
case VERB_PUSH_DIRECT_PATHS:
|
||||
return "PUSH_DIRECT_PATHS";
|
||||
case VERB_USER_MESSAGE:
|
||||
return "USER_MESSAGE";
|
||||
case VERB_MULTICAST:
|
||||
return "MULTICAST";
|
||||
case VERB_ENCAP:
|
||||
return "ENCAP";
|
||||
default:
|
||||
return "(unknown)";
|
||||
}
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
/**
|
||||
|
@ -784,7 +816,7 @@ enum NetworkConfigFlag
|
|||
* @param in Input key (32 bytes)
|
||||
* @param out Output buffer (32 bytes)
|
||||
*/
|
||||
static ZT_INLINE void salsa2012DeriveKey(const uint8_t *const in,uint8_t *const out,const Buf &packet,const unsigned int packetSize) noexcept
|
||||
static ZT_INLINE void salsa2012DeriveKey(const uint8_t *const in, uint8_t *const out, const Buf &packet, const unsigned int packetSize) noexcept
|
||||
{
|
||||
// IV and source/destination addresses. Using the addresses divides the
|
||||
// key space into two halves-- A->B and B->A (since order will change).
|
||||
|
@ -827,7 +859,7 @@ static ZT_INLINE void salsa2012DeriveKey(const uint8_t *const in,uint8_t *const
|
|||
* @param verb Protocol verb
|
||||
* @return Index of packet start
|
||||
*/
|
||||
static ZT_INLINE int newPacket(uint8_t pkt[28],const uint64_t packetId,const Address destination,const Address source,const Verb verb) noexcept
|
||||
static ZT_INLINE int newPacket(uint8_t pkt[28], const uint64_t packetId, const Address destination, const Address source, const Verb verb) noexcept
|
||||
{
|
||||
Utils::storeMachineEndian< uint64_t >(pkt + ZT_PROTO_PACKET_ID_INDEX, packetId);
|
||||
destination.copyTo(pkt + ZT_PROTO_PACKET_DESTINATION_INDEX);
|
||||
|
@ -837,7 +869,9 @@ static ZT_INLINE int newPacket(uint8_t pkt[28],const uint64_t packetId,const Add
|
|||
pkt[ZT_PROTO_PACKET_VERB_INDEX] = (uint8_t)verb;
|
||||
return ZT_PROTO_PACKET_VERB_INDEX + 1;
|
||||
}
|
||||
static ZT_INLINE int newPacket(Buf &pkt,const uint64_t packetId,const Address destination,const Address source,const Verb verb) noexcept { return newPacket(pkt.unsafeData,packetId,destination,source,verb); }
|
||||
|
||||
static ZT_INLINE int newPacket(Buf &pkt, const uint64_t packetId, const Address destination, const Address source, const Verb verb) noexcept
|
||||
{ return newPacket(pkt.unsafeData, packetId, destination, source, verb); }
|
||||
|
||||
/**
|
||||
* Encrypt and compute packet MAC
|
||||
|
@ -846,9 +880,11 @@ static ZT_INLINE int newPacket(Buf &pkt,const uint64_t packetId,const Address de
|
|||
* @param packetSize Packet size, must be at least ZT_PROTO_MIN_PACKET_LENGTH or crash will occur
|
||||
* @param key Key to use for encryption
|
||||
* @param cipherSuite Cipher suite to use for AEAD encryption or just MAC
|
||||
* @return Packet ID of packet (which may change!)
|
||||
*/
|
||||
static ZT_INLINE void armor(uint8_t *const pkt,const int packetSize,const SharedPtr<SymmetricKey> &key,const uint8_t cipherSuite) noexcept
|
||||
static ZT_INLINE uint64_t armor(uint8_t *const pkt, const int packetSize, const SymmetricKey &key, const uint8_t cipherSuite) noexcept
|
||||
{
|
||||
// TODO
|
||||
#if 0
|
||||
Protocol::Header &ph = pkt.as<Protocol::Header>(); // NOLINT(hicpp-use-auto,modernize-use-auto)
|
||||
ph.flags = (ph.flags & 0xc7U) | ((cipherSuite << 3U) & 0x38U); // flags: FFCCCHHH where CCC is cipher
|
||||
|
@ -889,6 +925,7 @@ static ZT_INLINE void armor(uint8_t *const pkt,const int packetSize,const Shared
|
|||
} break;
|
||||
}
|
||||
#endif
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -903,7 +940,7 @@ static ZT_INLINE void armor(uint8_t *const pkt,const int packetSize,const Shared
|
|||
* @param packetSize Total size of packet in bytes (including headers)
|
||||
* @return New size of packet after compression or original size of compression wasn't helpful
|
||||
*/
|
||||
static ZT_INLINE int compress(SharedPtr<Buf> &pkt,int packetSize) noexcept
|
||||
static ZT_INLINE int compress(Buf &pkt, int packetSize) noexcept
|
||||
{
|
||||
// TODO
|
||||
return packetSize;
|
||||
|
|
|
@ -117,7 +117,6 @@ public:
|
|||
{ return ZT_REVOCATION_MARSHAL_SIZE_MAX; }
|
||||
|
||||
int marshal(uint8_t data[ZT_REVOCATION_MARSHAL_SIZE_MAX], bool forSign = false) const noexcept;
|
||||
|
||||
int unmarshal(const uint8_t *restrict data, int len) noexcept;
|
||||
|
||||
private:
|
||||
|
|
|
@ -24,7 +24,6 @@
|
|||
namespace ZeroTier {
|
||||
|
||||
class Identity;
|
||||
|
||||
class Context;
|
||||
|
||||
/**
|
||||
|
|
|
@ -20,7 +20,7 @@
|
|||
namespace ZeroTier {
|
||||
|
||||
/**
|
||||
* An introspective reference counted pointer.
|
||||
* An intrusive reference counted pointer.
|
||||
*
|
||||
* Classes must have an atomic<int> field called __refCount and set this class
|
||||
* as a friend to be used with this.
|
||||
|
@ -35,17 +35,17 @@ public:
|
|||
explicit ZT_INLINE SharedPtr(T *obj) noexcept: m_ptr(obj)
|
||||
{ if (likely(obj != nullptr)) const_cast<std::atomic< int > *>(&(obj->__refCount))->fetch_add(1, std::memory_order_acquire); }
|
||||
|
||||
ZT_INLINE SharedPtr(const SharedPtr &sp) noexcept: m_ptr(sp._getAndInc())
|
||||
ZT_INLINE SharedPtr(const SharedPtr &sp) noexcept: m_ptr(sp.m_acquire())
|
||||
{}
|
||||
|
||||
ZT_INLINE ~SharedPtr()
|
||||
{ _release(); }
|
||||
{ m_release(); }
|
||||
|
||||
ZT_INLINE SharedPtr &operator=(const SharedPtr &sp)
|
||||
{
|
||||
if (likely(m_ptr != sp.m_ptr)) {
|
||||
T *const p = sp._getAndInc();
|
||||
_release();
|
||||
T *const p = sp.m_acquire();
|
||||
m_release();
|
||||
m_ptr = p;
|
||||
}
|
||||
return *this;
|
||||
|
@ -53,12 +53,15 @@ public:
|
|||
|
||||
ZT_INLINE void set(T *ptr) noexcept
|
||||
{
|
||||
_release();
|
||||
m_release();
|
||||
const_cast<std::atomic< int > *>(&((m_ptr = ptr)->__refCount))->fetch_add(1, std::memory_order_acquire);
|
||||
}
|
||||
|
||||
/**
|
||||
* Swap with another pointer 'for free' without ref count overhead
|
||||
* Swap with another pointer.
|
||||
*
|
||||
* This is much faster than using assignment as it requires no atomic
|
||||
* operations at all.
|
||||
*
|
||||
* @param with Pointer to swap with
|
||||
*/
|
||||
|
@ -70,16 +73,17 @@ public:
|
|||
}
|
||||
|
||||
/**
|
||||
* Set this value to one from another pointer and set that pointer to zero (take ownership from)
|
||||
* Move pointer from another SharedPtr to this one, zeroing target.
|
||||
*
|
||||
* This is faster than setting and zeroing the source pointer since it
|
||||
* avoids a synchronized reference count change.
|
||||
* This is faster than assignment as it saves one atomically synchronized
|
||||
* increment. If this pointer is null there are no atomic operations at
|
||||
* all.
|
||||
*
|
||||
* @param from Origin pointer; will be zeroed
|
||||
* @param from Source pointer; will be changed to NULL
|
||||
*/
|
||||
ZT_INLINE void move(SharedPtr &from)
|
||||
{
|
||||
_release();
|
||||
m_release();
|
||||
m_ptr = from.m_ptr;
|
||||
from.m_ptr = nullptr;
|
||||
}
|
||||
|
@ -104,21 +108,19 @@ public:
|
|||
*/
|
||||
ZT_INLINE void zero()
|
||||
{
|
||||
_release();
|
||||
m_release();
|
||||
m_ptr = nullptr;
|
||||
}
|
||||
|
||||
/**
|
||||
* Set pointer to NULL and delete object if reference count is only 1
|
||||
* Return held object and null this pointer if reference count is one.
|
||||
*
|
||||
* This can be called periodically to implement something like a weak
|
||||
* reference as it exists in other more managed languages like Java,
|
||||
* but with the caveat that it only works if there is only one remaining
|
||||
* SharedPtr to be treated as weak.
|
||||
* If the reference count is one, the reference count is changed to zero
|
||||
* and the object is returned. It is not deleted; the caller must do that
|
||||
* if that is desired. This pointer will be set to NULL. If the reference
|
||||
* count is not one nothing happens and NULL is returned.
|
||||
*
|
||||
* This does not delete the object. It returns it as a naked pointer.
|
||||
*
|
||||
* @return Pointer to T if reference count was only one (this shared ptr is left NULL)
|
||||
* @return Pointer or NULL if more than one reference
|
||||
*/
|
||||
ZT_INLINE T *weakGC()
|
||||
{
|
||||
|
@ -136,20 +138,8 @@ public:
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the current reference count for this object, which can change at any time
|
||||
*
|
||||
* @return Number of references according to this object's ref count or 0 if NULL
|
||||
*/
|
||||
ZT_INLINE int references() noexcept
|
||||
{
|
||||
if (likely(m_ptr != nullptr))
|
||||
return m_ptr->__refCount.load(std::memory_order_relaxed);
|
||||
return 0;
|
||||
}
|
||||
|
||||
ZT_INLINE unsigned long hashCode() const noexcept
|
||||
{ return (unsigned long)Utils::hash64((uint64_t)((uintptr_t)m_ptr)); }
|
||||
{ return (unsigned long)((uintptr_t)m_ptr + (uintptr_t)Utils::hash32((uint32_t)m_ptr)); }
|
||||
|
||||
ZT_INLINE bool operator==(const SharedPtr &sp) const noexcept
|
||||
{ return (m_ptr == sp.m_ptr); }
|
||||
|
@ -170,14 +160,14 @@ public:
|
|||
{ return (reinterpret_cast<const uint8_t *>(m_ptr) <= reinterpret_cast<const uint8_t *>(sp.m_ptr)); }
|
||||
|
||||
private:
|
||||
ZT_INLINE T *_getAndInc() const noexcept
|
||||
ZT_INLINE T *m_acquire() const noexcept
|
||||
{
|
||||
if (likely(m_ptr != nullptr))
|
||||
const_cast<std::atomic< int > *>(&(m_ptr->__refCount))->fetch_add(1, std::memory_order_acquire);
|
||||
return m_ptr;
|
||||
}
|
||||
|
||||
ZT_INLINE void _release() const noexcept
|
||||
ZT_INLINE void m_release() const noexcept
|
||||
{
|
||||
if (unlikely((m_ptr != nullptr)&&(const_cast<std::atomic< int > *>(&(m_ptr->__refCount))->fetch_sub(1, std::memory_order_release) <= 1)))
|
||||
delete m_ptr;
|
||||
|
|
|
@ -27,7 +27,7 @@ namespace ZeroTier {
|
|||
class Store
|
||||
{
|
||||
public:
|
||||
ZT_INLINE Store(const Context *const renv): RR(renv)
|
||||
ZT_INLINE Store(const Context &ctx): m_ctx(ctx)
|
||||
{}
|
||||
|
||||
/**
|
||||
|
@ -38,12 +38,12 @@ public:
|
|||
* @param idSize Size of object ID in qwords
|
||||
* @return Data or empty vector if not found
|
||||
*/
|
||||
ZT_INLINE Vector< uint8_t > get(const CallContext &cc, ZT_StateObjectType type, const uint64_t *id, unsigned int idSize) const
|
||||
ZT_INLINE Vector< uint8_t > get(const CallContext &cc, ZT_StateObjectType type, const uint64_t *const id, unsigned int idSize) const
|
||||
{
|
||||
Vector< uint8_t > dv;
|
||||
void *data = nullptr;
|
||||
void (*freeFunc)(void *) = nullptr;
|
||||
const int r = RR->cb.stateGetFunction(reinterpret_cast<ZT_Node *>(RR->node), RR->uPtr, cc.tPtr, type, id, idSize, &data, &freeFunc);
|
||||
const int r = m_ctx.cb.stateGetFunction(reinterpret_cast<ZT_Node *>(m_ctx.node), m_ctx.uPtr, cc.tPtr, type, id, idSize, &data, &freeFunc);
|
||||
if (r > 0)
|
||||
dv.assign(reinterpret_cast<const uint8_t *>(data), reinterpret_cast<const uint8_t *>(data) + r);
|
||||
if ((data) && (freeFunc))
|
||||
|
@ -61,7 +61,7 @@ public:
|
|||
* @param len Length of data
|
||||
*/
|
||||
ZT_INLINE void put(const CallContext &cc, ZT_StateObjectType type, const uint64_t *const id, const unsigned int idSize, const void *const data, const unsigned int len) noexcept
|
||||
{ RR->cb.statePutFunction(reinterpret_cast<ZT_Node *>(this), RR->uPtr, cc.tPtr, type, id, idSize, data, (int)len); }
|
||||
{ m_ctx.cb.statePutFunction(reinterpret_cast<ZT_Node *>(this), m_ctx.uPtr, cc.tPtr, type, id, idSize, data, (int)len); }
|
||||
|
||||
/**
|
||||
* Erase a state object from the object store
|
||||
|
@ -71,10 +71,10 @@ public:
|
|||
* @param idSize Size of object ID in qwords
|
||||
*/
|
||||
ZT_INLINE void erase(const CallContext &cc, ZT_StateObjectType type, const uint64_t *const id, const unsigned int idSize) noexcept
|
||||
{ RR->cb.statePutFunction(reinterpret_cast<ZT_Node *>(this), RR->uPtr, cc.tPtr, type, id, idSize, nullptr, -1); }
|
||||
{ m_ctx.cb.statePutFunction(reinterpret_cast<ZT_Node *>(this), m_ctx.uPtr, cc.tPtr, type, id, idSize, nullptr, -1); }
|
||||
|
||||
private:
|
||||
const Context *RR;
|
||||
const Context &m_ctx;
|
||||
};
|
||||
|
||||
} // namespace ZeroTier
|
||||
|
|
|
@ -16,9 +16,7 @@
|
|||
|
||||
#include "Constants.hpp"
|
||||
#include "Utils.hpp"
|
||||
#include "InetAddress.hpp"
|
||||
#include "AES.hpp"
|
||||
#include "SharedPtr.hpp"
|
||||
#include "Address.hpp"
|
||||
|
||||
namespace ZeroTier {
|
||||
|
@ -28,54 +26,69 @@ namespace ZeroTier {
|
|||
*/
|
||||
class SymmetricKey
|
||||
{
|
||||
friend class SharedPtr< SymmetricKey >;
|
||||
|
||||
public:
|
||||
/**
|
||||
* Secret key
|
||||
* Construct an uninitialized key (init() must be called)
|
||||
*/
|
||||
const uint8_t secret[ZT_SYMMETRIC_KEY_SIZE];
|
||||
|
||||
/**
|
||||
* Symmetric cipher keyed with this key
|
||||
*/
|
||||
const AES cipher;
|
||||
ZT_INLINE SymmetricKey():
|
||||
m_secret(),
|
||||
m_ts(-1),
|
||||
m_initialNonce(0),
|
||||
m_cipher(),
|
||||
m_nonce(0)
|
||||
{}
|
||||
|
||||
/**
|
||||
* Construct a new symmetric key
|
||||
*
|
||||
* SECURITY: we use a best effort method to construct the nonce's starting point so as
|
||||
* to avoid nonce duplication across invocations. The most significant bits are the
|
||||
* number of seconds since epoch but with the most significant bit masked to zero.
|
||||
* The least significant bits are random. Key life time is limited to 2^31 messages
|
||||
* per key as per the AES-GMAC-SIV spec, and this is a SIV mode anyway so nonce repetition
|
||||
* is non-catastrophic.
|
||||
*
|
||||
* The masking of the most significant bit is because we bisect the nonce space by
|
||||
* which direction the message is going. If the sender's ZeroTier address is
|
||||
* numerically greater than the receiver, this bit is flipped. This means that
|
||||
* two sides of a conversation that have created their key instances at the same
|
||||
* time are much less likely to duplicate nonces when sending packets from either
|
||||
* end.
|
||||
*
|
||||
* @param ts Current time
|
||||
* @param key 48-bit / 384-byte key
|
||||
* SECURITY: the MSB of the nonce is always 0 because this bit is set to 0
|
||||
* or 1 depending on which "direction" data is moving. See nextMessage().
|
||||
*
|
||||
* @param ts Key timestamp
|
||||
* @param key Key (must be 48 bytes / 384 bits)
|
||||
*/
|
||||
explicit ZT_INLINE SymmetricKey(const int64_t ts, const void *const key) noexcept:
|
||||
secret(),
|
||||
cipher(key), // AES-256 uses first 256 bits of 384-bit key
|
||||
m_initialNonce(((((uint64_t)ts / 1000ULL) << 32U) & 0x7fffffff00000000ULL) | (Utils::random() & 0x00000000ffffffffULL)),
|
||||
m_nonce(m_initialNonce),
|
||||
__refCount(0)
|
||||
{
|
||||
Utils::memoryLock(this, sizeof(SymmetricKey));
|
||||
Utils::copy< ZT_SYMMETRIC_KEY_SIZE >(const_cast<uint8_t *>(secret), key);
|
||||
}
|
||||
ZT_INLINE SymmetricKey(const int64_t ts, const void *const key) noexcept:
|
||||
m_secret(key),
|
||||
m_ts(ts),
|
||||
m_initialNonce(Utils::getSecureRandomU64() >> 1U),
|
||||
m_cipher(key),
|
||||
m_nonce(m_initialNonce)
|
||||
{}

ZT_INLINE SymmetricKey(const SymmetricKey &k) noexcept:
m_secret(k.m_secret),
m_ts(k.m_ts),
m_initialNonce(k.m_initialNonce),
m_cipher(k.m_secret.data),
m_nonce(k.m_nonce.load(std::memory_order_relaxed))
{}

ZT_INLINE ~SymmetricKey() noexcept
{ Utils::burn(m_secret.data, ZT_SYMMETRIC_KEY_SIZE); }

ZT_INLINE SymmetricKey &operator=(const SymmetricKey &k) noexcept
{
Utils::burn(const_cast<uint8_t *>(secret), ZT_SYMMETRIC_KEY_SIZE);
Utils::memoryUnlock(this, sizeof(SymmetricKey));
m_secret = k.m_secret;
m_ts = k.m_ts;
m_initialNonce = k.m_initialNonce;
m_cipher.init(k.m_secret.data);
m_nonce.store(k.m_nonce.load(std::memory_order_relaxed), std::memory_order_relaxed);
return *this;
}

/**
* Initialize or re-initialize a symmetric key
*
* @param ts Key timestamp
* @param key Key (must be 48 bytes / 384 bits)
*/
ZT_INLINE void init(const int64_t ts, const void *const key) noexcept
{
Utils::copy< ZT_SYMMETRIC_KEY_SIZE >(m_secret.data, key);
m_ts = ts;
m_initialNonce = Utils::getSecureRandomU64() >> 1U;
m_cipher.init(key);
m_nonce.store(m_initialNonce, std::memory_order_relaxed);
}

/**
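The two constructors above embody the old and new initial-nonce schemes: the old one packs seconds-since-epoch into the high 31 bits with random low bits, the new one simply takes a 63-bit random value so the top bit stays free for the direction flag. A side-by-side sketch of the two computations (randomU64() here is an illustrative stand-in for the library's CSPRNG):

```cpp
#include <cstdint>
#include <cstdio>
#include <ctime>
#include <random>

// Stand-in for a cryptographically secure random source.
static uint64_t randomU64()
{
	static std::random_device rd;
	return ((uint64_t)rd() << 32U) | (uint64_t)rd();
}

int main()
{
	const int64_t ts = (int64_t)std::time(nullptr) * 1000; // milliseconds, as in the old constructor

	// Old scheme: seconds since epoch in the high 31 bits (MSB masked to zero), random low 32 bits.
	const uint64_t oldNonce = ((((uint64_t)ts / 1000ULL) << 32U) & 0x7fffffff00000000ULL) | (randomU64() & 0x00000000ffffffffULL);

	// New scheme: a 63-bit random starting point; the shift keeps the MSB zero so it can
	// later carry the traffic-direction bit (see nextMessage()).
	const uint64_t newNonce = randomU64() >> 1U;

	std::printf("old: %016llx\nnew: %016llx\n", (unsigned long long)oldNonce, (unsigned long long)newNonce);
	return 0;
}
```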
@@ -86,18 +99,43 @@ public:
* @return Next unique IV for next message
*/
ZT_INLINE uint64_t nextMessage(const Address sender, const Address receiver) noexcept
{ return m_nonce.fetch_add(1) ^ (((uint64_t)(sender > receiver)) << 63U); }
{ return m_nonce.fetch_add(1, std::memory_order_relaxed) ^ (((uint64_t)(sender > receiver)) << 63U); }

/**
* Get the number of times this key has been used.
*
* This is used along with the key's initial timestamp to determine key age
* for ephemeral key rotation.
*
* @return Number of times nextMessage() has been called since object creation
*/
ZT_INLINE uint64_t odometer() const noexcept
{ return m_nonce.load() - m_initialNonce; }
{ return m_nonce.load(std::memory_order_relaxed) - m_initialNonce; }

/**
* @return Key creation timestamp or -1 if this is a long-lived key
*/
ZT_INLINE int64_t timestamp() const noexcept
{ return m_ts; }

/**
* @return 48-byte / 384-bit secret key
*/
ZT_INLINE const uint8_t *key() const noexcept
{ return m_secret.data; }

/**
* @return AES cipher (already initialized with secret key)
*/
ZT_INLINE const AES &aes() const noexcept
{ return m_cipher; }

private:
const uint64_t m_initialNonce;
Blob< ZT_SYMMETRIC_KEY_SIZE > m_secret;
int64_t m_ts;
uint64_t m_initialNonce;
AES m_cipher;
std::atomic< uint64_t > m_nonce;
std::atomic< int > __refCount;
};

} // namespace ZeroTier
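Taken together, nextMessage(), odometer(), and timestamp() are the hooks the ephemeral re-keying logic hangs off of: the nonce counter doubles as a usage odometer, and the timestamp dates the key. A hedged, self-contained sketch of how those pieces could combine (the thresholds and the re-key rule are illustrative assumptions, not ZeroTier's actual policy):

```cpp
#include <atomic>
#include <cstdint>
#include <cstdio>

// Simplified model of the per-message nonce and re-key check. Plain integers stand in
// for ZeroTier Address objects.
static std::atomic<uint64_t> nonce(0x1234);  // assume a random 63-bit starting point
static const uint64_t initialNonce = 0x1234;

static uint64_t nextMessage(uint64_t sender, uint64_t receiver)
{
	// The counter increments monotonically; the top bit encodes traffic direction, so the
	// two ends of a conversation draw nonces from disjoint halves of the space.
	return nonce.fetch_add(1, std::memory_order_relaxed) ^ (((uint64_t)(sender > receiver)) << 63U);
}

static bool shouldRekey(int64_t keyTs, int64_t now)
{
	const uint64_t odometer = nonce.load(std::memory_order_relaxed) - initialNonce;
	const uint64_t maxMessages = 1ULL << 31U;       // 2^31 messages per key (AES-GMAC-SIV guidance)
	const int64_t maxAgeMs = 60LL * 60LL * 1000LL;  // hypothetical one-hour lifetime
	if (keyTs < 0)
		return false;                               // -1 marks a long-lived (non-ephemeral) key
	return (odometer >= maxMessages) || ((now - keyTs) >= maxAgeMs);
}

int main()
{
	std::printf("%016llx\n", (unsigned long long)nextMessage(0x11, 0x22)); // direction bit clear
	std::printf("%016llx\n", (unsigned long long)nextMessage(0x22, 0x11)); // direction bit set
	std::printf("rekey? %d\n", (int)shouldRekey(1000, 2000));
	return 0;
}
```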
@@ -21,7 +21,7 @@
#include "Spinlock.hpp"

#define ZT_TINYMAP_BUCKETS 1024
#define ZT_TINYMAP_BUCKET_MASK 1023
#define ZT_TINYMAP_BUCKETS_MASK 1023
#define ZT_TINYMAP_LOCKED_POINTER (~((uintptr_t)0))

namespace ZeroTier {
@@ -67,7 +67,7 @@ public:
ZT_INLINE V get(const uint64_t key) noexcept
{
V tmp;
std::atomic<uintptr_t> &bucket = m_buckets[(key ^ (key >> 32)) & ZT_TINYMAP_BUCKET_MASK];
std::atomic<uintptr_t> &bucket = m_buckets[(key ^ (key >> 32)) & ZT_TINYMAP_BUCKETS_MASK];
for(;;) {
const uintptr_t vptr = bucket.exchange(ZT_TINYMAP_LOCKED_POINTER, std::memory_order_acquire);
if (likely(vptr != ZT_TINYMAP_LOCKED_POINTER)) {
@@ -89,7 +89,7 @@ public:

ZT_INLINE void set(const uint64_t key, const V &value)
{
std::atomic<uintptr_t> &bucket = m_buckets[(key ^ (key >> 32)) & ZT_TINYMAP_BUCKET_MASK];
std::atomic<uintptr_t> &bucket = m_buckets[(key ^ (key >> 32)) & ZT_TINYMAP_BUCKETS_MASK];
for(;;) {
uintptr_t vptr = bucket.exchange(ZT_TINYMAP_LOCKED_POINTER, std::memory_order_acquire);
if (likely(vptr != ZT_TINYMAP_LOCKED_POINTER)) {
@@ -115,7 +115,7 @@ public:

ZT_INLINE void erase(const uint64_t key)
{
std::atomic<uintptr_t> &bucket = m_buckets[(key ^ (key >> 32)) & ZT_TINYMAP_BUCKET_MASK];
std::atomic<uintptr_t> &bucket = m_buckets[(key ^ (key >> 32)) & ZT_TINYMAP_BUCKETS_MASK];
for(;;) {
uintptr_t vptr = bucket.exchange(ZT_TINYMAP_LOCKED_POINTER, std::memory_order_acquire);
if (likely(vptr != ZT_TINYMAP_LOCKED_POINTER)) {
@@ -143,6 +143,8 @@ private:
std::atomic<uintptr_t> m_buckets[ZT_TINYMAP_BUCKETS];
};

static_assert((ZT_TINYMAP_BUCKETS % (sizeof(uintptr_t) * 8)) == 0, "ZT_TINYMAP_BUCKETS is not a power of two");

} // namespace ZeroTier

#endif
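The get/set/erase hunks above all share one locking idiom: each bucket is a single atomic pointer-sized word, and atomically exchanging in the reserved ZT_TINYMAP_LOCKED_POINTER sentinel both reads the bucket and locks it. A minimal standalone sketch of that pattern (names simplified, not the actual TinyMap code):

```cpp
#include <atomic>
#include <cstdint>
#include <cstdio>

static const uintptr_t kLocked = ~((uintptr_t)0);  // reserved sentinel, like ZT_TINYMAP_LOCKED_POINTER
static std::atomic<uintptr_t> bucket(0);

// Swap the sentinel in; whatever comes out is the bucket's previous contents and we now own it.
static uintptr_t lockBucket()
{
	for (;;) {
		const uintptr_t v = bucket.exchange(kLocked, std::memory_order_acquire);
		if (v != kLocked)
			return v;  // acquired; v is the (possibly null) entry value
		// another thread holds the bucket: spin and try again
	}
}

// Writing any non-sentinel value back releases the lock and publishes the new contents.
static void unlockBucket(const uintptr_t v)
{
	bucket.store(v, std::memory_order_release);
}

int main()
{
	const uintptr_t prev = lockBucket();
	unlockBucket(prev == 0 ? (uintptr_t)0x1000 : prev); // e.g. install a new entry
	std::printf("bucket now holds: %p\n", (void *)bucket.load());
	return 0;
}
```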
@@ -131,12 +131,17 @@ void Topology::trustStoreChanged(const CallContext &cc)
SharedPtr< Peer > root(this->peer(cc, r->first.address(), true));
if (!root) {
root.set(new Peer());
root->init(m_ctx, cc, r->first);
root = this->add(cc, root);
if (root->init(m_ctx, cc, r->first)) {
root = this->add(cc, root);
} else {
root.zero();
}
}
if (root) {
newRootList.push_back(root);
if (r->second)
root->setLocator(r->second, true);
}
newRootList.push_back(root);
if (r->second)
root->setLocator(r->second, true);
}

{
@@ -168,7 +168,7 @@ const uint64_t ZERO256[4] = {0, 0, 0, 0};
const char HEXCHARS[16] = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'};
const uint64_t s_mapNonce = getSecureRandomU64();

bool secureEq(const void *a, const void *b, unsigned int len) noexcept
bool secureEq(const void *const a, const void *const b, const unsigned int len) noexcept
{
uint8_t diff = 0;
for (unsigned int i = 0; i < len; ++i)
@@ -176,7 +176,7 @@ bool secureEq(const void *a, const void *b, unsigned int len) noexcept
return (diff == 0);
}

void burn(volatile void *ptr, unsigned int len)
void burn(volatile void *const ptr, const unsigned int len)
{
static volatile uintptr_t foo = 0;
Utils::zero((void *)ptr, len);
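The secureEq() change above only tightens const qualifiers; the body remains the usual constant-time comparison, where every byte is visited regardless of where the first mismatch occurs so timing does not leak the position of a difference. A self-contained sketch of that technique (the XOR-accumulate line is the standard way to fill in the loop body elided by the hunk context):

```cpp
#include <cstdint>
#include <cstdio>

// Accumulate differences across the whole buffer instead of returning early.
static bool constantTimeEq(const void *a, const void *b, unsigned int len)
{
	uint8_t diff = 0;
	for (unsigned int i = 0; i < len; ++i)
		diff |= reinterpret_cast<const uint8_t *>(a)[i] ^ reinterpret_cast<const uint8_t *>(b)[i];
	return (diff == 0);
}

int main()
{
	const uint8_t x[4] = {1, 2, 3, 4};
	const uint8_t y[4] = {1, 2, 3, 5};
	std::printf("%d %d\n", (int)constantTimeEq(x, x, 4), (int)constantTimeEq(x, y, 4));
	return 0;
}
```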
@@ -323,7 +323,6 @@ void getSecureRandom(void *const buf, unsigned int bytes) noexcept

if (unlikely(!initialized)) {
initialized = true;
Utils::memoryLock(randomState, sizeof(randomState));
Utils::zero< sizeof(randomState) >(randomState);
#ifdef __WINDOWS__
HCRYPTPROV cryptProvider = NULL;
@@ -92,40 +92,6 @@ extern const char HEXCHARS[16];
*/
extern const uint64_t s_mapNonce;

/**
* Lock memory to prevent swapping out to secondary storage (if possible)
*
* This is used to attempt to prevent the swapping out of long-term stored secure
* credentials like secret keys. It isn't supported on all platforms and may not
* be absolutely guaranteed to work, but it's a countermeasure.
*
* @param p Memory to lock
* @param l Size of memory
*/
static ZT_INLINE void memoryLock(const void *const p, const unsigned int l) noexcept
{
#ifdef __WINDOWS__
VirtualLock(reinterpret_cast<LPVOID>(const_cast<void*>(p)), l);
#else
mlock(p, l);
#endif
}

/**
* Unlock memory locked with memoryLock()
*
* @param p Memory to unlock
* @param l Size of memory
*/
static ZT_INLINE void memoryUnlock(const void *const p, const unsigned int l) noexcept
{
#ifdef __WINDOWS__
VirtualUnlock(reinterpret_cast<LPVOID>(const_cast<void*>(p)), l);
#else
munlock(p, l);
#endif
}

/**
* Perform a time-invariant binary comparison
*
@@ -254,24 +220,12 @@ bool scopy(char *dest, unsigned int len, const char *src) noexcept;
/**
* Check if a buffer's contents are all zero
*/
static ZT_INLINE bool allZero(const void *const b, unsigned int l) noexcept
static ZT_INLINE bool allZero(const void *const b, const unsigned int l) noexcept
{
const uint8_t *p = reinterpret_cast<const uint8_t *>(b);

#ifndef ZT_NO_UNALIGNED_ACCESS
while (l >= 8) {
if (*reinterpret_cast<const uint64_t *>(p) != 0)
return false;
p += 8;
l -= 8;
}
#endif

for (unsigned int i = 0; i < l; ++i) {
if (reinterpret_cast<const uint8_t *>(p)[i] != 0)
for (unsigned int i=0;i<l;++i) {
if (reinterpret_cast<const uint8_t *>(b)[i] != 0)
return false;
}

return true;
}
21
core/VL1.cpp
@@ -206,7 +206,7 @@ void VL1::onRemotePacket(CallContext &cc, const int64_t localSocket, const InetA
int pktSize = 0;

static_assert(ZT_PROTO_PACKET_VERB_INDEX < ZT_PROTO_MIN_PACKET_LENGTH, "overflow");
if (unlikely(((cipher == ZT_PROTO_CIPHER_SUITE__POLY1305_NONE) || (cipher == ZT_PROTO_CIPHER_SUITE__NONE)) && ((hdr[ZT_PROTO_PACKET_VERB_INDEX] & ZT_PROTO_VERB_MASK) == Protocol::VERB_HELLO))) {
if (unlikely(((cipher == ZT_PROTO_CIPHER_POLY1305_NONE) || (cipher == ZT_PROTO_CIPHER_NONE)) && ((hdr[ZT_PROTO_PACKET_VERB_INDEX] & ZT_PROTO_VERB_MASK) == Protocol::VERB_HELLO))) {
// Handle unencrypted HELLO packets.
pktSize = pktv.mergeCopy(*pkt);
if (unlikely(pktSize < ZT_PROTO_MIN_PACKET_LENGTH)) {
@@ -228,7 +228,7 @@ void VL1::onRemotePacket(CallContext &cc, const int64_t localSocket, const InetA
if (likely(peer)) {
switch (cipher) {

case ZT_PROTO_CIPHER_SUITE__POLY1305_NONE: {
case ZT_PROTO_CIPHER_POLY1305_NONE: {
uint8_t perPacketKey[ZT_SALSA20_KEY_SIZE];
Protocol::salsa2012DeriveKey(peer->rawIdentityKey(), perPacketKey, *pktv[0].b, pktv.totalSize());
p_PolyCopyFunction s20cf(perPacketKey, &packetId);
@@ -252,7 +252,7 @@ void VL1::onRemotePacket(CallContext &cc, const int64_t localSocket, const InetA
}
break;

case ZT_PROTO_CIPHER_SUITE__POLY1305_SALSA2012: {
case ZT_PROTO_CIPHER_POLY1305_SALSA2012: {
uint8_t perPacketKey[ZT_SALSA20_KEY_SIZE];
Protocol::salsa2012DeriveKey(peer->rawIdentityKey(), perPacketKey, *pktv[0].b, pktv.totalSize());
p_SalsaPolyCopyFunction s20cf(perPacketKey, &packetId);
@@ -276,12 +276,12 @@ void VL1::onRemotePacket(CallContext &cc, const int64_t localSocket, const InetA
}
break;

case ZT_PROTO_CIPHER_SUITE__NONE: {
case ZT_PROTO_CIPHER_NONE: {
// TODO
}
break;

case ZT_PROTO_CIPHER_SUITE__AES_GMAC_SIV: {
case ZT_PROTO_CIPHER_AES_GMAC_SIV: {
// TODO
}
break;
@@ -455,19 +455,18 @@ void VL1::m_sendPendingWhois(CallContext &cc)
}

if (!toSend.empty()) {
const SharedPtr< SymmetricKey > key(root->key());
SymmetricKey &key = root->key();
uint8_t outp[ZT_DEFAULT_UDP_MTU - ZT_PROTO_MIN_PACKET_LENGTH];
Vector< Address >::iterator a(toSend.begin());
while (a != toSend.end()) {
const uint64_t packetId = key->nextMessage(m_ctx.identity.address(), root->address());
const uint64_t packetId = key.nextMessage(m_ctx.identity.address(), root->address());
int p = Protocol::newPacket(outp, packetId, root->address(), m_ctx.identity.address(), Protocol::VERB_WHOIS);
while ((a != toSend.end()) && (p < (sizeof(outp) - ZT_ADDRESS_LENGTH))) {
a->copyTo(outp + p);
++a;
p += ZT_ADDRESS_LENGTH;
}
Protocol::armor(outp, p, key, root->cipher());
m_ctx.expect->sending(packetId, cc.ticks);
m_ctx.expect->sending(Protocol::armor(outp, p, key, root->cipher()), cc.ticks);
root->send(m_ctx, cc, outp, p, rootPath);
}
}
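The WHOIS hunk above keeps the batching loop intact while switching the key to a plain reference and registering the packet ID returned by Protocol::armor(). The batching itself, packing as many fixed-size addresses as fit under the buffer limit before sending and starting the next packet, can be sketched standalone (constants here are simplified stand-ins, not the real protocol sizes):

```cpp
#include <cstdint>
#include <cstdio>
#include <vector>

static const unsigned int kAddressLen = 5;  // stand-in for ZT_ADDRESS_LENGTH
static const unsigned int kBufSize = 64;    // stand-in for the MTU-limited packet buffer
static const unsigned int kHeaderLen = 8;   // stand-in for the packet header size

int main()
{
	std::vector<uint32_t> toSend(30, 0);     // 30 addresses queued for WHOIS
	auto a = toSend.begin();
	while (a != toSend.end()) {
		unsigned int p = kHeaderLen;         // newPacket() would have written the header here
		unsigned int n = 0;
		// Pack as many whole addresses as fit, then flush this packet and start another.
		while ((a != toSend.end()) && (p < (kBufSize - kAddressLen))) {
			++a;                             // copyTo() would append the address bytes here
			p += kAddressLen;
			++n;
		}
		std::printf("WHOIS packet: %u addresses, %u bytes\n", n, p);
	}
	return 0;
}
```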
@@ -578,7 +577,7 @@ SharedPtr< Peer > VL1::m_HELLO(CallContext &cc, const SharedPtr< Path > &path, B
return SharedPtr< Peer >();
}

const SharedPtr< SymmetricKey > key(peer->identityKey());
SymmetricKey &key = peer->key();

if (protoVersion >= 11) {
// V2.x and newer supports an encrypted section and has a new OK format.
@@ -609,7 +608,7 @@ SharedPtr< Peer > VL1::m_HELLO(CallContext &cc, const SharedPtr< Path > &path, B
}
}

Protocol::newPacket(pkt, key->nextMessage(m_ctx.identity.address(), peer->address()), peer->address(), m_ctx.identity.address(), Protocol::VERB_OK);
Protocol::newPacket(pkt, key.nextMessage(m_ctx.identity.address(), peer->address()), peer->address(), m_ctx.identity.address(), Protocol::VERB_OK);
ii = ZT_PROTO_PACKET_PAYLOAD_START;
pkt.wI8(ii, Protocol::VERB_HELLO);
pkt.wI64(ii, packetId);
@@ -65,8 +65,6 @@ public:
void onRemotePacket(CallContext &cc, int64_t localSocket, const InetAddress &fromAddr, SharedPtr< Buf > &data, unsigned int len) noexcept;

private:
const Context &m_ctx;

void m_relay(CallContext &cc, const SharedPtr< Path > &path, Address destination, SharedPtr< Buf > &pkt, int pktSize);
void m_sendPendingWhois(CallContext &cc);
SharedPtr< Peer > m_HELLO(CallContext &cc, const SharedPtr< Path > &path, Buf &pkt, int packetSize);
@@ -79,6 +77,8 @@ private:
bool m_USER_MESSAGE(CallContext &cc, uint64_t packetId, unsigned int auth, const SharedPtr< Path > &path, const SharedPtr< Peer > &peer, Buf &pkt, int packetSize);
bool m_ENCAP(CallContext &cc, uint64_t packetId, unsigned int auth, const SharedPtr< Path > &path, const SharedPtr< Peer > &peer, Buf &pkt, int packetSize);

const Context &m_ctx;

// Defragmentation engine for handling inbound packets with more than one fragment.
Defragmenter< ZT_MAX_PACKET_FRAGMENTS > m_inputPacketAssembler;
@@ -54,21 +54,13 @@ public:

protected:
bool m_FRAME(CallContext &cc, uint64_t packetId, unsigned int auth, const SharedPtr< Path > &path, SharedPtr< Peer > &peer, Buf &pkt, int packetSize);

bool m_EXT_FRAME(CallContext &cc, uint64_t packetId, unsigned int auth, const SharedPtr< Path > &path, SharedPtr< Peer > &peer, Buf &pkt, int packetSize);

bool m_MULTICAST_LIKE(CallContext &cc, uint64_t packetId, unsigned int auth, const SharedPtr< Path > &path, SharedPtr< Peer > &peer, Buf &pkt, int packetSize);

bool m_NETWORK_CREDENTIALS(CallContext &cc, uint64_t packetId, unsigned int auth, const SharedPtr< Path > &path, SharedPtr< Peer > &peer, Buf &pkt, int packetSize);

bool m_NETWORK_CONFIG_REQUEST(CallContext &cc, uint64_t packetId, unsigned int auth, const SharedPtr< Path > &path, SharedPtr< Peer > &peer, Buf &pkt, int packetSize);

bool m_NETWORK_CONFIG(CallContext &cc, uint64_t packetId, unsigned int auth, const SharedPtr< Path > &path, SharedPtr< Peer > &peer, Buf &pkt, int packetSize);

bool m_MULTICAST_GATHER(CallContext &cc, uint64_t packetId, unsigned int auth, const SharedPtr< Path > &path, SharedPtr< Peer > &peer, Buf &pkt, int packetSize);

bool m_MULTICAST_FRAME_deprecated(CallContext &cc, uint64_t packetId, unsigned int auth, const SharedPtr< Path > &path, SharedPtr< Peer > &peer, Buf &pkt, int packetSize);

bool m_MULTICAST(CallContext &cc, uint64_t packetId, unsigned int auth, const SharedPtr< Path > &path, SharedPtr< Peer > &peer, Buf &pkt, int packetSize);

private:
@@ -61,7 +61,7 @@ pub use dictionary::*;
base64_serde_type!(Base64Standard, base64::URL_SAFE_NO_PAD);

/// Recommended minimum thread stack size for background threads.
pub const RECOMMENDED_THREAD_STACK_SIZE: usize = 262144;
pub const RECOMMENDED_THREAD_STACK_SIZE: usize = 524288;

/// Default TCP and UDP port.
pub const DEFAULT_PORT: u16 = ztcore::ZT_DEFAULT_PORT as u16;