Mirror of https://github.com/zerotier/ZeroTierOne.git (synced 2025-04-21 14:36:55 +02:00)

Commit 90b5acfca6 (parent 741f7814c2): Docs, simplification.

6 changed files with 44 additions and 62 deletions
@@ -51,10 +51,12 @@ Commands:
   peer <address> [command] [option] - Peer management commands
     show                            Show peer details (default)
     try <endpoint> [...]            Try peer at explicit endpoint
     locator <locator>               Explicitly update peer locator
   roots                             List root peers
   root [command] - Root management commands
     add <identity> [endpoint]       Designate a peer as a root
     remove <address>                Un-designate a peer as a root
     subscribe <url>                 Subscribe to a root set
   set [option] [value] - Get or set a core config option
     port <port>                     Primary P2P port
     secondaryport <port/0>          Secondary P2P port (0 to disable)
@@ -1823,26 +1823,23 @@ ZT_SDK_API enum ZT_ResultCode ZT_Node_multicastUnsubscribe(
     unsigned long multicastAdi);
 
 /**
- * Add a root node or update its locator
+ * Designate a peer as a root, adding if not already known
  *
- * ZeroTier does not take possession of the id or loc objects. The caller
- * must still eventually delete them with ZT_Identity_delete() and
- * ZT_Locator_delete().
+ * ZeroTier does not take possession of the 'id' object. It still must be
+ * deleted if it was allocated.
  *
  * @param node Node instance
  * @param tptr Thread pointer to pass to functions/callbacks resulting from this call
  * @param id Identity of root to add
- * @param loc Root locator
  * @return OK (0) or error code if an error condition has occurred
  */
 ZT_SDK_API enum ZT_ResultCode ZT_Node_addRoot(
     ZT_Node *node,
     void *tptr,
-    const ZT_Identity *id,
-    const ZT_Locator *loc);
+    const ZT_Identity *id);
 
 /**
- * Remove a root
+ * Un-designate a peer as a root
  *
  * This doesn't fully remove the peer from the peer list. It just removes
  * its root trust flag. If there is no longer any need to communicate with it
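For callers, the identity is now the only argument. A minimal caller-side sketch of the new form (hypothetical helper, not part of the SDK; the include path is assumed, and only ZT_Node_addRoot, ZT_ResultCode, and ZT_RESULT_OK are taken from this header):

    #include <cstdio>
    #include "ZeroTierCore.h" // SDK header; exact path assumed

    // Hypothetical helper: designate 'id' as a root under the new
    // one-argument signature. Per the updated doc comment, ZeroTier does
    // not take possession of 'id'; the caller still owns it and must
    // eventually release it (e.g. with ZT_Identity_delete()).
    static bool designateRoot(ZT_Node *node, void *tptr, const ZT_Identity *id)
    {
        // New signature: no ZT_Locator argument.
        const enum ZT_ResultCode rc = ZT_Node_addRoot(node, tptr, id);
        if (rc != ZT_RESULT_OK) {
            std::fprintf(stderr, "addRoot failed: %d\n", (int)rc);
            return false;
        }
        return true;
    }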
@@ -349,13 +349,9 @@ ZT_ResultCode Node::multicastUnsubscribe(uint64_t nwid, uint64_t multicastGroup,
     } else return ZT_RESULT_ERROR_NETWORK_NOT_FOUND;
 }
 
-ZT_ResultCode Node::addRoot(void *tPtr, const ZT_Identity *id, const ZT_Locator *loc)
+ZT_ResultCode Node::addRoot(void *tPtr, const ZT_Identity *id)
 {
-    if ((!id) || (!loc))
-        return ZT_RESULT_ERROR_BAD_PARAMETER;
-    const SharedPtr<const Locator> locator(new Locator(*reinterpret_cast<const Locator *>(loc)));
-    // SECURITY: locator credential validation happens in Topology.cpp in addRoot().
-    return RR->topology->addRoot(tPtr, *reinterpret_cast<const Identity *>(id), locator) ? ZT_RESULT_OK : ZT_RESULT_ERROR_INVALID_CREDENTIAL;
+    return (RR->topology->addRoot(tPtr, *reinterpret_cast<const Identity *>(id))) ? ZT_RESULT_OK : ZT_RESULT_ERROR_BAD_PARAMETER;
 }
 
 ZT_ResultCode Node::removeRoot(void *tPtr, const uint64_t address)
@@ -891,10 +887,10 @@ enum ZT_ResultCode ZT_Node_multicastUnsubscribe(ZT_Node *node, uint64_t nwid, ui
     }
 }
 
-enum ZT_ResultCode ZT_Node_addRoot(ZT_Node *node, void *tptr, const ZT_Identity *id, const ZT_Locator *loc)
+enum ZT_ResultCode ZT_Node_addRoot(ZT_Node *node, void *tptr, const ZT_Identity *id)
 {
     try {
-        return reinterpret_cast<ZeroTier::Node *>(node)->addRoot(tptr, id, loc);
+        return reinterpret_cast<ZeroTier::Node *>(node)->addRoot(tptr, id);
     } catch (std::bad_alloc &exc) {
         return ZT_RESULT_FATAL_ERROR_OUT_OF_MEMORY;
     } catch (...) {
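This wrapper also shows the exception-barrier idiom used throughout the C API: C++ exceptions must not cross the C ABI, so each ZT_Node_* function catches them and maps them to result codes. A standalone sketch of the same shape (toy types, not ZeroTier's):

    #include <new>

    enum ResultCode { RESULT_OK = 0, RESULT_FATAL_OOM = 1, RESULT_FATAL_INTERNAL = 2 };

    struct Core { int doWork() { return 0; } };

    // C-linkage entry point: translate any C++ exception into an error
    // code before it can escape across the C boundary.
    extern "C" ResultCode core_doWork(Core *c)
    {
        try {
            c->doWork();
            return RESULT_OK;
        } catch (std::bad_alloc &) {
            return RESULT_FATAL_OOM;   // allocation failure gets its own code
        } catch (...) {
            return RESULT_FATAL_INTERNAL;
        }
    }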
@@ -100,8 +100,7 @@ public:
 
     ZT_ResultCode addRoot(
         void *tptr,
-        const ZT_Identity *id,
-        const ZT_Locator *loc);
+        const ZT_Identity *id);
 
     ZT_ResultCode removeRoot(
         void *tptr,
@@ -29,20 +29,11 @@ Topology::Topology(const RuntimeEnvironment *renv, void *tPtr) :
             Identity id;
             int l = id.unmarshal(dptr, drem);
             if ((l > 0) && (id)) {
+                m_roots.insert(id);
+                ZT_SPEW("restored root %s", id.address().toString().c_str());
                 if ((drem -= l) <= 0)
                     break;
-                Locator *const loc = new Locator();
-                l = loc->unmarshal(dptr, drem);
-                if (l > 0) {
-                    m_roots[id].set(loc);
-                    dptr += l;
-                    ZT_SPEW("loaded root %s", id.address().toString().c_str());
-                    if ((drem -= l) <= 0)
-                        break;
-                } else {
-                    delete loc;
-                }
             } else break;
         }
     }
     m_updateRootPeers(tPtr);
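The restore loop now simply consumes identities marshaled back-to-back until the buffer runs out. A self-contained sketch of that consume-until-exhausted pattern (toy Record type standing in for Identity; not ZeroTier code):

    #include <cstdint>
    #include <set>
    #include <string>

    // Toy record with the same unmarshal contract as the loop above:
    // returns bytes consumed on success, <= 0 on failure.
    struct Record {
        std::string name;
        int unmarshal(const uint8_t *data, int len) {
            if (len < 1) return -1;
            const int n = (int)data[0];              // 1-byte length prefix
            if ((n <= 0) || (len < 1 + n)) return -1;
            name.assign((const char *)data + 1, (size_t)n);
            return 1 + n;
        }
        bool operator<(const Record &r) const { return name < r.name; }
    };

    // Consume concatenated records until the buffer is exhausted: advance
    // the read pointer, decrement the remainder, stop on a malformed tail.
    static std::set<Record> restoreAll(const uint8_t *dptr, int drem)
    {
        std::set<Record> out;
        for (;;) {
            Record r;
            const int l = r.unmarshal(dptr, drem);
            if (l > 0) {
                out.insert(r);
                dptr += l;                           // step past this record
                if ((drem -= l) <= 0)
                    break;                           // buffer exhausted
            } else break;                            // invalid tail: stop
        }
        return out;
    }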
@@ -73,15 +64,21 @@ struct p_RootSortComparisonOperator
     }
 };
 
-bool Topology::addRoot(void *const tPtr, const Identity &id, const SharedPtr<const Locator> &loc)
+SharedPtr<Peer> Topology::addRoot(void *const tPtr, const Identity &id)
 {
-    if ((id == RR->identity) || (!id) || (!loc) || (!loc->verify(id)) || (!id.locallyValidate()))
-        return false;
-    RWMutex::Lock l1(m_peers_l);
-    m_roots[id] = loc;
-    m_updateRootPeers(tPtr);
-    m_writeRootList(tPtr);
-    return true;
+    if ((id != RR->identity) && id.locallyValidate()) {
+        RWMutex::Lock l1(m_peers_l);
+        m_roots.insert(id);
+
+        m_updateRootPeers(tPtr);
+        m_writeRootList(tPtr);
+
+        for(Vector< SharedPtr<Peer> >::const_iterator p(m_rootPeers.begin());p!=m_rootPeers.end();++p) {
+            if ((*p)->identity() == id)
+                return *p;
+        }
+    }
+    return SharedPtr<Peer>();
 }
 
 bool Topology::removeRoot(void *const tPtr, Address address)
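Returning the live SharedPtr<Peer> instead of a bool lets the caller act on the root peer immediately after designating it. A standalone sketch of the same insert, rebuild, then look-up shape using standard containers (toy types; ZeroTier's Set, Vector, and SharedPtr are its own classes):

    #include <memory>
    #include <set>
    #include <vector>

    struct Peer {
        int identity;                 // toy identity; ZeroTier uses a full Identity object
        explicit Peer(int id) : identity(id) {}
    };

    struct Topo {
        std::set<int> roots;                           // analog of Set<Identity> m_roots
        std::vector<std::shared_ptr<Peer>> rootPeers;  // analog of m_rootPeers

        // Rebuild rootPeers from roots (stands in for m_updateRootPeers()).
        void updateRootPeers() {
            rootPeers.clear();
            for (int id : roots)
                rootPeers.push_back(std::make_shared<Peer>(id));
        }

        // Same shape as the new Topology::addRoot(): validate, insert into
        // the set, rebuild the peer list, then return the matching live peer.
        std::shared_ptr<Peer> addRoot(int id) {
            if (id != 0) {                             // stands in for the identity checks
                roots.insert(id);
                updateRootPeers();
                for (const auto &p : rootPeers) {
                    if (p->identity == id)
                        return p;
                }
            }
            return nullptr;                            // "some problem occurred"
        }
    };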
@@ -89,7 +86,7 @@ bool Topology::removeRoot(void *const tPtr, Address address)
     RWMutex::Lock l1(m_peers_l);
     for (Vector<SharedPtr<Peer> >::const_iterator r(m_rootPeers.begin());r != m_rootPeers.end();++r) {
         if ((*r)->address() == address) {
-            Map<Identity, SharedPtr<const Locator> >::iterator rr(m_roots.find((*r)->identity()));
+            Set<Identity>::iterator rr(m_roots.find((*r)->identity()));
             if (rr != m_roots.end()) {
                 m_roots.erase(rr);
                 m_updateRootPeers(tPtr);
@@ -175,14 +172,10 @@ void Topology::m_writeRootList(void *tPtr)
     uint8_t *const roots = (uint8_t *)malloc((ZT_IDENTITY_MARSHAL_SIZE_MAX + ZT_LOCATOR_MARSHAL_SIZE_MAX + 2) * m_roots.size());
     if (roots) { // sanity check
         int p = 0;
-        for (Map<Identity, SharedPtr<const Locator> >::const_iterator r(m_roots.begin());r != m_roots.end();++r) {
-            int pp = r->first.marshal(roots + p, false);
-            if (pp > 0) {
+        for (Set<Identity>::const_iterator r(m_roots.begin());r != m_roots.end();++r) {
+            const int pp = r->marshal(roots + p, false);
+            if (pp > 0)
                 p += pp;
-                pp = r->second->marshal(roots + p);
-                if (pp > 0)
-                    p += pp;
-            }
         }
         uint64_t id[2];
         id[0] = 0;
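m_writeRootList() keeps its pattern of one worst-case allocation followed by back-to-back marshaling, just without the per-root locator. A toy illustration of that allocate-then-pack pattern (hypothetical marshalValue; MARSHAL_SIZE_MAX stands in for the ZT_*_MARSHAL_SIZE_MAX bounds):

    #include <cstdint>
    #include <set>
    #include <vector>

    static const int MARSHAL_SIZE_MAX = 256;   // worst case per element

    // Toy marshal: a 1-byte length prefix followed by 4 little-endian bytes.
    static int marshalValue(uint32_t v, uint8_t *out)
    {
        out[0] = 4;
        for (int i = 0; i < 4; ++i)
            out[1 + i] = (uint8_t)(v >> (8 * i));
        return 5;                              // bytes written
    }

    // Same shape as m_writeRootList(): size the buffer for the worst case,
    // then marshal elements back-to-back, advancing offset p by each
    // element's actual size pp.
    static std::vector<uint8_t> writeAll(const std::set<uint32_t> &roots)
    {
        std::vector<uint8_t> buf(MARSHAL_SIZE_MAX * roots.size());
        int p = 0;
        for (uint32_t v : roots) {
            const int pp = marshalValue(v, buf.data() + p);
            if (pp > 0)
                p += pp;
        }
        buf.resize((size_t)p);                 // trim to the bytes actually used
        return buf;
    }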
@@ -196,22 +189,21 @@ void Topology::m_updateRootPeers(void *tPtr)
 {
     // assumes m_peers_l is locked for write
     Vector<SharedPtr<Peer> > rp;
-    for (Map<Identity, SharedPtr<const Locator> >::iterator r(m_roots.begin());r != m_roots.end();++r) {
-        Map<Address, SharedPtr<Peer> >::iterator pp(m_peers.find(r->first.address()));
+    for (Set<Identity>::iterator r(m_roots.begin());r != m_roots.end();++r) {
+        Map<Address, SharedPtr<Peer> >::iterator pp(m_peers.find(r->address()));
         SharedPtr<Peer> p;
         if (pp != m_peers.end())
             p = pp->second;
 
         if (!p)
-            m_loadCached(tPtr, r->first.address(), p);
+            m_loadCached(tPtr, r->address(), p);
 
-        if ((!p) || (p->identity() != r->first)) {
+        if ((!p) || (p->identity() != *r)) {
             p.set(new Peer(RR));
-            p->init(r->first);
-            m_peers[r->first.address()] = p;
+            p->init(*r);
+            m_peers[r->address()] = p;
         }
 
-        p->setLocator(r->second);
         rp.push_back(p);
     }
     m_rootPeers.swap(rp);
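The loop body is a find-or-create: resolve each root identity to a live peer from the in-memory map, fall back to the cache, and only construct and register a new Peer when neither source matches. The same shape with standard containers (toy types; the cache lookup is elided):

    #include <map>
    #include <memory>
    #include <string>

    struct Node { std::string identity; };
    using NodePtr = std::shared_ptr<Node>;

    // Same find-or-create shape as the loop in m_updateRootPeers(): check
    // the live map first, then (in the real code) a cache loader, and only
    // construct and register a fresh object when nothing matched.
    static NodePtr findOrCreate(std::map<std::string, NodePtr> &peers,
                                const std::string &identity)
    {
        NodePtr p;
        auto it = peers.find(identity);
        if (it != peers.end())
            p = it->second;

        // (A cache lookup would go here, as m_loadCached() does above.)

        if ((!p) || (p->identity != identity)) {
            p = std::make_shared<Node>();
            p->identity = identity;
            peers[identity] = p;      // register the newly created peer
        }
        return p;
    }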
@@ -172,17 +172,13 @@ public:
     }
 
     /**
-     * Add or update a root server and its locator
-     *
-     * This also validates the identity and checks the locator signature,
-     * returning false if either of these is not valid.
+     * Flag a peer as a root, adding the peer if it is not known
      *
      * @param tPtr Thread pointer
-     * @param id Root identity
-     * @param loc Root locator
-     * @return True if identity and locator are valid and root was added / updated
+     * @param id Root identity (will be locally validated)
+     * @return Root peer or NULL if some problem occurred
      */
-    bool addRoot(void *tPtr,const Identity &id,const SharedPtr<const Locator> &loc);
+    SharedPtr<Peer> addRoot(void *tPtr, const Identity &id);
 
     /**
      * Remove a root server's identity from the root server set
@@ -239,7 +235,7 @@ private:
     RWMutex m_peers_l; // locks m_peers, m_roots, and m_rootPeers
     Map< uint64_t,SharedPtr<Path> > m_paths;
     Map< Address,SharedPtr<Peer> > m_peers;
-    Map< Identity,SharedPtr<const Locator> > m_roots;
+    Set< Identity > m_roots;
     Vector< SharedPtr<Peer> > m_rootPeers;
 };
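With the per-root locator gone, the mapped value disappears and root membership is all that remains, which is why the Map becomes a Set. A minimal before/after sketch with standard containers (toy key type; ZeroTier's Map/Set are its own container aliases):

    #include <map>
    #include <memory>
    #include <set>
    #include <string>

    struct Loc {};  // stand-in for the removed Locator payload

    // Before: identity -> locator. After: membership only.
    using RootsBefore = std::map<std::string, std::shared_ptr<const Loc>>;
    using RootsAfter  = std::set<std::string>;

    static bool isRoot(const RootsAfter &roots, const std::string &id)
    {
        return roots.find(id) != roots.end();  // a membership test is all that's left
    }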