Mirror of https://github.com/zerotier/ZeroTierOne.git (synced 2025-06-05 03:53:44 +02:00)

Added flow-awareness check for policies, more work on ZT_MULTIPATH_ACTIVE_BACKUP

parent b0a91c0187
commit 5453cab22b

5 changed files with 134 additions and 93 deletions
@@ -491,7 +491,7 @@
/**
 * How long before we consider a path to be dead in rapid fail-over scenarios
 */
#define ZT_MULTIPATH_ACTIVE_BACKUP_RAPID_FAILOVER_PERIOD 1000
#define ZT_MULTIPATH_ACTIVE_BACKUP_RAPID_FAILOVER_PERIOD 250

/**
 * Paths are considered expired if they have not sent us a real packet in this long
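The constant above is the freshness threshold the active-backup logic applies to a path's last inbound packet; lowering it from 1000 ms to 250 ms makes fail-over react faster. A minimal sketch of how such a threshold is checked, using a hypothetical standalone helper rather than the Peer class itself:

#include <cstdint>

// Value taken from the hunk above (250 ms after this commit).
#define ZT_MULTIPATH_ACTIVE_BACKUP_RAPID_FAILOVER_PERIOD 250

// Hypothetical helper: a path is "hot" if it delivered a real packet to us
// within the rapid fail-over window, mirroring the (now - lastIn()) checks
// made in Peer::getAppropriatePath() later in this diff.
static inline bool isPathHot(int64_t now, int64_t lastIn)
{
    return (now - lastIn) < ZT_MULTIPATH_ACTIVE_BACKUP_RAPID_FAILOVER_PERIOD;
}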
@@ -77,7 +77,8 @@ Peer::Peer(const RuntimeEnvironment *renv,const Identity &myIdentity,const Ident
_lastAggregateStatsReport(0),
_lastAggregateAllocation(0),
_virtualPathCount(0),
_roundRobinPathAssignmentIdx(0)
_roundRobinPathAssignmentIdx(0),
_pathAssignmentIdx(0)
{
if (!myIdentity.agree(peerIdentity,_key,ZT_PEER_SECRET_KEY_LENGTH))
throw ZT_EXCEPTION_INVALID_ARGUMENT;
@@ -468,6 +469,7 @@ SharedPtr<Path> Peer::getAppropriatePath(int64_t now, bool includeExpired, int64
_paths[i].p->processBackgroundPathMeasurements(now);
}
}
if (RR->sw->isFlowAware()) {
// Detect new flows and update existing records
if (_flows.count(flowId)) {
_flows[flowId]->lastSend = now;
@@ -479,6 +481,7 @@ SharedPtr<Path> Peer::getAppropriatePath(int64_t now, bool includeExpired, int64
_flows[flowId] = newFlow;
newFlow->assignedPath = nullptr;
}
}
// Construct set of virtual paths if needed
if (!_virtualPaths.size()) {
constructSetOfVirtualPaths();
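The two hunks above gate the peer's per-flow bookkeeping behind the new isFlowAware() check: known flows get their lastSend timestamp refreshed, unknown flows get a fresh record with no path assigned yet. A minimal sketch of that lookup-or-insert pattern, with a simplified Flow struct standing in for the real one (the actual ZeroTier Flow type carries more state):

#include <cstdint>
#include <map>

// Simplified stand-in for the peer's flow record.
struct Flow {
    int64_t lastSend;
    void *assignedPath; // placeholder for SharedPtr<Path>
};

// Hypothetical helper mirroring the detect-or-create logic above: refresh an
// existing flow's send time, or insert a new record with no assigned path.
static Flow *recordFlow(std::map<int64_t, Flow *> &flows, int64_t flowId, int64_t now)
{
    if (flows.count(flowId)) {
        flows[flowId]->lastSend = now;
    } else {
        Flow *newFlow = new Flow();
        newFlow->lastSend = now;
        newFlow->assignedPath = nullptr;
        flows[flowId] = newFlow;
    }
    return flows[flowId];
}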
@@ -532,45 +535,64 @@ SharedPtr<Path> Peer::getAppropriatePath(int64_t now, bool includeExpired, int64
if ((now - _paths[i].p->lastIn()) < ZT_MULTIPATH_ACTIVE_BACKUP_RAPID_FAILOVER_PERIOD) {
bFoundHotPath = true;
_activeBackupPath = _paths[i].p;
_pathAssignmentIdx = i;
_activeBackupPath->address().toString(curPathStr);
fprintf(stderr, "selected %s as the primary active-backup path to %llx\n",
curPathStr, this->_id.address().toInt());
fprintf(stderr, "selected %s as the primary active-backup path to %llx (idx=%d)\n",
curPathStr, this->_id.address().toInt(), _pathAssignmentIdx);
break;
}
}
}
}
else {
char what[128];
if ((now - _activeBackupPath->lastIn()) > ZT_MULTIPATH_ACTIVE_BACKUP_RAPID_FAILOVER_PERIOD) {
_activeBackupPath->address().toString(curPathStr); // Record path string for later debug trace
int16_t previousIdx = _pathAssignmentIdx;
SharedPtr<Path> nextAlternativePath;
// Search for a hot path, at the same time find the next path in
// a RR sequence that seems viable to use as an alternative
int searchCount = 0;
while (searchCount < ZT_MAX_PEER_NETWORK_PATHS) {
_pathAssignmentIdx++;
if (_pathAssignmentIdx == ZT_MAX_PEER_NETWORK_PATHS) {
_pathAssignmentIdx = 0;
}
searchCount++;
if (_paths[_pathAssignmentIdx].p) {
_paths[_pathAssignmentIdx].p->address().toString(what);
if (_activeBackupPath.ptr() == _paths[_pathAssignmentIdx].p.ptr()) {
continue;
}
if (!nextAlternativePath) { // Record the first viable alternative in the RR sequence
nextAlternativePath = _paths[_pathAssignmentIdx].p;
}
if ((now - _paths[_pathAssignmentIdx].p->lastIn()) < ZT_MULTIPATH_ACTIVE_BACKUP_RAPID_FAILOVER_PERIOD) {
bFoundHotPath = true;
_activeBackupPath = _paths[_pathAssignmentIdx].p;
_activeBackupPath->address().toString(newPathStr);
fprintf(stderr, "primary active-backup path %s to %llx appears to be dead, switched to %s\n",
curPathStr, this->_id.address().toInt(), newPathStr);
break;
}
}
}
if (!bFoundHotPath) {
if (nextAlternativePath) {
_activeBackupPath = nextAlternativePath;
_activeBackupPath->address().toString(curPathStr);
//fprintf(stderr, "no hot paths found to use as active-backup primary to %llx, using next best: %s\n",
// this->_id.address().toInt(), curPathStr);
}
else {
// No change
}
}
}
}
if (!_activeBackupPath) {
return SharedPtr<Path>();
}
if (!bFoundHotPath) {
_activeBackupPath->address().toString(curPathStr);
fprintf(stderr, "no hot paths available to use as active-backup primary to %llx, selected %s anyway\n",
this->_id.address().toInt(), curPathStr);
}
}
else {
if ((now - _activeBackupPath->lastIn()) > ZT_MULTIPATH_ACTIVE_BACKUP_RAPID_FAILOVER_PERIOD) {
_activeBackupPath->address().toString(curPathStr);
/* Fail-over to the first path that appears to still be active.
 * This will eventually be user-configurable */
for (int i=0; i<ZT_MAX_PEER_NETWORK_PATHS; i++) {
if (_paths[i].p) {
if (_activeBackupPath.ptr() == _paths[i].p.ptr()) {
continue;
}
if ((now - _paths[i].p->lastIn()) < ZT_MULTIPATH_ACTIVE_BACKUP_RAPID_FAILOVER_PERIOD) {
bFoundHotPath = true;
_activeBackupPath->address().toString(curPathStr); // Record path string for later debug trace
_activeBackupPath = _paths[i].p;
_activeBackupPath->address().toString(newPathStr);
}
}
}
if (bFoundHotPath) {
fprintf(stderr, "primary active-backup path %s to %llx appears to be dead, switched to path %s\n",
curPathStr, this->_id.address().toInt(), newPathStr);
}
}
}
return _activeBackupPath;
}
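The reworked fail-over branch above walks the path array starting just after the current assignment index, wraps at ZT_MAX_PEER_NETWORK_PATHS, prefers the first "hot" path, and remembers the first merely viable one as a fallback. A compact sketch of that wrap-around search over a plain array of last-receive timestamps (a simplification; the real code operates on SharedPtr<Path> slots and emits debug traces, and both macro values below are assumed for illustration):

#include <cstdint>

#define ZT_MAX_PEER_NETWORK_PATHS 16 // assumed value for illustration
#define RAPID_FAILOVER_PERIOD 250    // stands in for ZT_MULTIPATH_ACTIVE_BACKUP_RAPID_FAILOVER_PERIOD

// Hypothetical helper: starting after currentIdx, find the next path whose last
// inbound packet is recent enough to be considered hot. A lastIn value < 0
// means "no path in this slot".
static int nextActiveBackupIdx(const int64_t lastIn[], int currentIdx, int64_t now)
{
    int firstAlternative = -1;
    int idx = currentIdx;
    for (int searchCount = 0; searchCount < ZT_MAX_PEER_NETWORK_PATHS; ++searchCount) {
        idx = (idx + 1) % ZT_MAX_PEER_NETWORK_PATHS; // wrap-around, like the while loop above
        if (idx == currentIdx || lastIn[idx] < 0)
            continue;                                // skip the current path and empty slots
        if (firstAlternative < 0)
            firstAlternative = idx;                  // first viable alternative in RR order
        if ((now - lastIn[idx]) < RAPID_FAILOVER_PERIOD)
            return idx;                              // hot path found: switch immediately
    }
    return firstAlternative;                         // may be -1: keep the current path
}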
@@ -866,6 +888,7 @@ inline void Peer::processBackgroundPeerTasks(const int64_t now)
}

// Remove old flows
if (RR->sw->isFlowAware()) {
std::map<int64_t, struct Flow *>::iterator it = _flows.begin();
while (it != _flows.end()) {
if ((now - it->second->lastSend) > ZT_MULTIPATH_FLOW_EXPIRATION) {
@@ -877,6 +900,7 @@ inline void Peer::processBackgroundPeerTasks(const int64_t now)
}
}
}
}

void Peer::sendACK(void *tPtr,const SharedPtr<Path> &path,const int64_t localSocket,const InetAddress &atAddress,int64_t now)
{
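These two hunks wrap the periodic flow cleanup in the same isFlowAware() guard; records whose lastSend is older than ZT_MULTIPATH_FLOW_EXPIRATION are dropped. The body of the loop is cut off in this view, but the map-pruning idiom it implies looks roughly like the sketch below (an assumption about the elided lines, not the committed code; the expiration value is likewise assumed):

#include <cstdint>
#include <map>

#define ZT_MULTIPATH_FLOW_EXPIRATION 60000 // assumed value for illustration (ms)

struct Flow { int64_t lastSend; }; // simplified stand-in

// Sketch of the erase-while-iterating pattern the loop above implies:
// flows idle for longer than the expiration window are freed and removed.
static void removeExpiredFlows(std::map<int64_t, Flow *> &flows, int64_t now)
{
    std::map<int64_t, Flow *>::iterator it = flows.begin();
    while (it != flows.end()) {
        if ((now - it->second->lastSend) > ZT_MULTIPATH_FLOW_EXPIRATION) {
            delete it->second;
            it = flows.erase(it); // erase returns the next valid iterator
        } else {
            ++it;
        }
    }
}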
@@ -727,6 +727,7 @@ private:
int16_t _roundRobinPathAssignmentIdx;

SharedPtr<Path> _activeBackupPath;
int16_t _pathAssignmentIdx;
};

} // namespace ZeroTier
@@ -284,6 +284,14 @@ static bool _ipv6GetPayload(const uint8_t *frameData,unsigned int frameLen,unsig
return false; // overflow == invalid
}

bool Switch::isFlowAware()
{
int mode = RR->node->getMultipathMode();
return (( mode == ZT_MULTIPATH_BALANCE_RR_FLOW)
|| (mode == ZT_MULTIPATH_BALANCE_XOR_FLOW)
|| (mode == ZT_MULTIPATH_BALANCE_DYNAMIC_FLOW));
}

void Switch::onLocalEthernet(void *tPtr,const SharedPtr<Network> &network,const MAC &from,const MAC &to,unsigned int etherType,unsigned int vlanId,const void *data,unsigned int len)
{
if (!network->hasConfig())
@@ -309,8 +317,10 @@ void Switch::onLocalEthernet(void *tPtr,const SharedPtr<Network> &network,const
 * preferred virtual path and will be sent out according to what the multipath logic
 * deems appropriate. An example of this would be an ICMP packet.
 */

int64_t flowId = -1;

if (isFlowAware()) {
if (etherType == ZT_ETHERTYPE_IPV4 && (len >= 20)) {
uint16_t srcPort = 0;
uint16_t dstPort = 0;
@@ -366,6 +376,7 @@ void Switch::onLocalEthernet(void *tPtr,const SharedPtr<Network> &network,const
break;
}
}
}

if (to.isMulticast()) {
MulticastGroup multicastGroup(to,0);
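In the hunks above, onLocalEthernet defaults flowId to -1 (no preferred virtual path, e.g. an ICMP frame) and, when a flow-aware balancing policy is active, extracts the IPv4 source and destination ports so a per-flow identifier can be computed. The actual computation is not visible in this excerpt; one plausible way to fold the ports into a flow ID, shown purely as an illustrative assumption, is:

#include <cstdint>

// Illustrative only: derive a non-negative flow identifier from the transport
// ports of a locally originated frame. This is an assumed stand-in, not the
// computation ZeroTier actually performs; -1 remains the "no flow" sentinel.
static int64_t computeFlowId(uint16_t srcPort, uint16_t dstPort)
{
    // Combine the two ports so a given TCP/UDP conversation keeps mapping
    // to the same identifier for the life of the flow.
    return (int64_t)(((uint32_t)srcPort << 16) | (uint32_t)dstPort);
}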
@@ -91,6 +91,11 @@ public:
 */
void onRemotePacket(void *tPtr,const int64_t localSocket,const InetAddress &fromAddr,const void *data,unsigned int len);

/**
 * Returns whether our bonding or balancing policy is aware of flows.
 */
bool isFlowAware();

/**
 * Called when a packet comes from a local Ethernet tap
 *