Mirror of https://github.com/zerotier/ZeroTierOne.git (synced 2025-06-06 12:33:44 +02:00)
commit 1d9977b8b9 (parent edad580c32)

    A bit of code comment cleanup.

1 changed file with 15 additions and 26 deletions
@@ -208,6 +208,7 @@ public:
 	/**
 	 * Choose peers to send a propagating multicast to
 	 *
+	 * @param prng Random source
 	 * @param topology Topology object or mock thereof
 	 * @param nwid Network ID
 	 * @param mg Multicast group
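
The doc comment above documents a peer-picking template method. For orientation, here is a sketch of a declaration consistent with the @param list and with the identifiers used in the hunk below; the method name, parameter order, and the MulticastBloomFilter type name are assumptions, not something this diff confirms:

template<typename RNG,typename T,typename P>
inline unsigned int pickNextPropagationPeers(
	RNG &prng,                        // Random source (the body calls prng.next32())
	T &topology,                      // Topology object or mock thereof
	uint64_t nwid,                    // Network ID
	const MulticastGroup &mg,         // Multicast group
	const Address &originalSubmitter, // Original submitter of this multicast
	const Address &upstream,          // Peer that sent us this frame
	MulticastBloomFilter &bf,         // Propagation bloom filter, updated in place
	unsigned int max,                 // Maximum number of next hops to pick
	P *peers,                         // Output array with room for at least 'max' entries
	uint64_t now);                    // Current time, for LIKE expiration checks
// Returns the number of peers written into 'peers'.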
@@ -236,24 +237,25 @@ public:
 	{
 		typename std::set< P,_PeerPropagationPrioritySortOrder<P> > toConsider;
 
-		// Pick up to ZT_MULTICAST_PICK_MAX_SAMPLE_SIZE peers that have
-		// subscribed to this channel and that are not in bloom filter.
-		// Pick randomly from subscribers, but place into a set that is
-		// sorted in descending order of time of most recent unicast
-		// frame transfer. (Implicit social ordering.) Also ignore original
-		// submitter and upstream, since we know these have seen this
-		// message.
+		/* Pick up to ZT_MULTICAST_PICK_MAX_SAMPLE_SIZE peers that meet
+		 * our minimal criteria for this multicast group and place them
+		 * into a set that is sorted in descending order of time of most
+		 * recent unicast frame transfer (implicit social ordering). */
 		{
 			Mutex::Lock _l(_multicastMemberships_m);
 			std::map< MulticastChannel,std::vector<MulticastMembership> >::iterator mm(_multicastMemberships.find(MulticastChannel(nwid,mg)));
 			if ((mm != _multicastMemberships.end())&&(!mm->second.empty())) {
 				for(unsigned int stries=0;stries<ZT_MULTICAST_PICK_MAX_SAMPLE_SIZE;++stries) {
 					MulticastMembership &m = mm->second[prng.next32() % mm->second.size()];
-					if (((now - m.second) < ZT_MULTICAST_LIKE_EXPIRE)&&(!bf.contains(m.first.sum()))&&(m.first != originalSubmitter)&&(m.first != upstream)) {
+					if (
+					    ((now - m.second) < ZT_MULTICAST_LIKE_EXPIRE)&& /* LIKE is not expired */
+					    (!bf.contains(m.first.sum()))&& /* Not in propagation bloom */
+					    (m.first != originalSubmitter)&& /* Not the original submitter */
+					    (m.first != upstream) ) { /* Not where the frame came from */
 						P peer(topology.getPeer(m.first));
 						if (peer)
-							toConsider.insert(peer);
+							toConsider.insert(peer); /* Consider propagating to this peer */
 					}
 				}
 			}
 		}
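
The "implicit social ordering" above comes from the comparator that orders toConsider. This diff does not show _PeerPropagationPrioritySortOrder, but a minimal sketch matching the new comment (descending by time of most recent unicast frame transfer; the lastUnicastFrame() accessor name is an assumption) could look like:

template<typename P>
struct _PeerPropagationPrioritySortOrder
{
	// Strict weak ordering: a peer we exchanged a unicast frame with
	// more recently sorts first, so iterating the set visits the most
	// "socially active" peers before staler ones.
	inline bool operator()(const P &p1,const P &p2) const
	{
		return (p1->lastUnicastFrame() > p2->lastUnicastFrame());
	}
};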
@@ -260,5 +262,5 @@ public:
 
-		// The first peers in toConsider will be the 'best'
+		/* The first peers in toConsider will be the "best" */
 		unsigned int chosen = 0;
 		for(typename std::set< P,_PeerPropagationPrioritySortOrder<P> >::iterator i(toConsider.begin());((i!=toConsider.end())&&(chosen < max));++i)
 			bf.set((peers[chosen++] = *i)->address().sum());
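
The bf.set() call above marks each chosen peer's address hash so that later hops sampling the same group skip peers already visited. Only set() and contains() are exercised by this code; a minimal single-hash sketch of such a filter (the 512-bit size is an assumption for illustration) might be:

#include <cstring>

class MulticastBloomFilter
{
public:
	MulticastBloomFilter() { std::memset(_field,0,sizeof(_field)); }

	// Mark a hash (e.g. Address::sum()) as visited
	inline void set(unsigned int n)
	{
		n %= (sizeof(_field) * 8);
		_field[n / 8] |= (unsigned char)(1 << (n % 8));
	}

	// Test whether a hash has been marked
	inline bool contains(unsigned int n) const
	{
		n %= (sizeof(_field) * 8);
		return ((_field[n / 8] & (unsigned char)(1 << (n % 8))) != 0);
	}

private:
	unsigned char _field[64]; // 512 bits; the real size is not shown in this diff
};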
@@ -265,20 +267,7 @@ public:
 
-		// Add a supernode if there are fewer than the desired
-		// number of recipients. Note that we do not use the bloom
-		// filter to track visits to supernodes, intentionally
-		// allowing multicasts to ping pong between supernodes.
-		// Supernodes propagate even messages they've already seen,
-		// while regular nodes do not. Thus this ping-ponging will
-		// cause the supernodes to pick new starting points for
-		// peer to peer graph traversal multiple times. It's a
-		// simple, stateless way to increase supernode-driven
-		// propagation of a multicast in the event that peer to
-		// peer connectivity for its group is sparse.
-		if (chosen < max) {
-			Address avoid[2];
-			avoid[0] = originalSubmitter;
-			avoid[1] = upstream;
-			P peer = topology.getBestSupernode(avoid,2,true);
+		/* Tack on a supernode if we have no next hops */
+		if (!chosen) {
+			P peer = topology.getBestSupernode();
 			if (peer)
 				peers[chosen++] = peer;
 		}
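
Note that this last hunk is more than comment cleanup: previously a supernode was appended whenever fewer than 'max' peers were found, explicitly avoiding originalSubmitter and upstream; now the parameterless getBestSupernode() is consulted only when the peer-to-peer sample yields nothing at all. Assuming the declaration sketched earlier, a caller might use the method roughly like this (the breadth constant and the send() helper are hypothetical):

P nextHops[ZT_MULTICAST_PROPAGATION_BREADTH]; // hypothetical breadth constant
unsigned int count = pickNextPropagationPeers(
	prng,topology,nwid,mg,
	originalSubmitter,upstream,
	bloom,                                    // updated in place as peers are chosen
	ZT_MULTICAST_PROPAGATION_BREADTH,
	nextHops,now);
for(unsigned int i=0;i<count;++i)
	send(nextHops[i],frame);                  // hypothetical per-hop send helper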