Mirror of https://github.com/zerotier/ZeroTierOne.git (synced 2025-06-03 19:13:43 +02:00)
A ton of build fixes.
This commit is contained in: parent 2acd67fbc1, commit 0edb7776c7.
16 changed files with 210 additions and 232 deletions.
@@ -75,3 +75,48 @@ pub(crate) fn hash32(mut x: u32) -> u32 {
     x = x.wrapping_mul(0x846ca68b);
     x ^ x.wrapping_shr(16)
 }
+
+/// A hasher for maps that just returns u64 values as-is.
+///
+/// This should be used only for things like ZeroTier addresses that are already random
+/// and that aren't vulnerable to malicious crafting of identifiers.
+#[derive(Copy, Clone)]
+pub(crate) struct U64PassThroughHasher(u64);
+
+impl U64PassThroughHasher {
+    #[inline(always)]
+    pub fn new() -> Self {
+        Self(0)
+    }
+}
+
+impl std::hash::Hasher for U64PassThroughHasher {
+    #[inline(always)]
+    fn finish(&self) -> u64 {
+        self.0
+    }
+
+    #[inline(always)]
+    fn write(&mut self, _: &[u8]) {
+        panic!("U64PassThroughHasher can only be used with u64 and i64");
+    }
+
+    #[inline(always)]
+    fn write_u64(&mut self, i: u64) {
+        self.0 += i;
+    }
+
+    #[inline(always)]
+    fn write_i64(&mut self, i: i64) {
+        self.0 += i as u64;
+    }
+}
+
+impl std::hash::BuildHasher for U64PassThroughHasher {
+    type Hasher = Self;
+
+    #[inline(always)]
+    fn build_hasher(&self) -> Self::Hasher {
+        Self(0)
+    }
+}
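For context, a minimal sketch of how this hasher is used (illustrative map and key, not part of this commit): U64PassThroughHasher implements both Hasher and BuildHasher, so a HashMap can take an instance directly, and u64 keys then hash to themselves.

use std::collections::HashMap;

fn sketch() {
    // u64's Hash impl calls write_u64, which this hasher passes straight through.
    let mut m: HashMap<u64, &str, U64PassThroughHasher> =
        HashMap::with_hasher(U64PassThroughHasher::new());
    m.insert(0x1122334455667788, "peer");
    assert_eq!(m.get(&0x1122334455667788), Some(&"peer"));
}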
@@ -36,8 +36,9 @@ impl<O: Reusable> Pooled<O> {
     pub unsafe fn into_raw(self) -> *mut O {
         debug_assert!(!self.0.is_null());
         debug_assert_eq!(self.0.cast::<u8>(), (&mut (*self.0).obj as *mut O).cast::<u8>());
+        let ptr = self.0.cast::<O>();
         std::mem::forget(self);
-        self.0.cast()
+        ptr
     }

     /// Restore a raw pointer from into_raw() into a Pooled object.
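The fix captures the pointer before std::mem::forget(self) consumes self; the old version read self.0 after the move, which the borrow checker rejects, so this was a compile error rather than a latent bug. The same error in miniature (illustrative code, not from this repository):

struct Raw(*mut u8);

fn leak(r: Raw) -> *mut u8 {
    let p = r.0;          // copy the pointer out while `r` is still live
    std::mem::forget(r);  // `r` is moved into forget() here...
    p                     // ...so `r.0` would no longer compile past this point
}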
@@ -75,7 +75,7 @@ impl Default for Address {
 impl Hash for Address {
     #[inline(always)]
     fn hash<H: Hasher>(&self, state: &mut H) {
-        self.0.hash(state)
+        state.write_u64(self.0);
     }
 }

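Calling write_u64 directly makes the pairing with U64PassThroughHasher above explicit: the address value itself becomes the map hash. A sketch of the effect (assuming Address is the u64 newtype used in this codebase):

use std::hash::{Hash, Hasher};

fn sketch(addr: Address) {
    let mut h = U64PassThroughHasher::new();
    addr.hash(&mut h);            // feeds state.write_u64(self.0)
    let _bucket_key = h.finish(); // finish() returns the address bits unchanged
}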
@@ -221,7 +221,7 @@ impl<const L: usize> Buffer<L> {
     /// Get a structure at a given position in the buffer.
     #[inline(always)]
     pub fn struct_at<T: RawObject>(&self, ptr: usize) -> std::io::Result<&T> {
-        if (i + size_of::<T>()) <= self.0 {
+        if (ptr + size_of::<T>()) <= self.0 {
             unsafe {
                 Ok(&*self.1.as_ptr().cast::<u8>().offset(ptr as isize).cast::<T>())
             }
@@ -233,7 +233,7 @@ impl<const L: usize> Buffer<L> {
     /// Get a structure at a given position in the buffer.
     #[inline(always)]
     pub fn struct_mut_at<T: RawObject>(&mut self, ptr: usize) -> std::io::Result<&mut T> {
-        if (i + size_of::<T>()) <= self.0 {
+        if (ptr + size_of::<T>()) <= self.0 {
             unsafe {
                 Ok(&mut *self.1.as_mut_ptr().cast::<u8>().offset(ptr as isize).cast::<T>())
             }
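Both struct_at and struct_mut_at referenced a stale `i` in their bounds checks after the parameter became `ptr`, another straight build fix. The underlying pattern, restated as a standalone illustration (ZeroTier additionally constrains T with its RawObject marker trait):

use std::mem::size_of;

// A reference into raw bytes is only handed out when the whole T fits
// within the initialized length.
fn checked_view<T>(data: &[u8], at: usize) -> Option<&T> {
    if at + size_of::<T>() <= data.len() {
        // Safety (sketch): the caller must also guarantee T is valid for any
        // bit pattern and that alignment requirements are met.
        Some(unsafe { &*data.as_ptr().add(at).cast::<T>() })
    } else {
        None
    }
}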
@@ -40,9 +40,6 @@ pub const PACKET_VERB_INDEX: usize = 27;
 /// Index of destination in both fragment and full packet headers.
 pub const PACKET_DESTINATION_INDEX: usize = 8;

-/// Maximum number of paths to a remote peer.
-pub const PEER_MAX_PATHS: usize = 16;
-
 /// Mask to select cipher from header flags field.
 pub const HEADER_FLAGS_FIELD_MASK_CIPHER: u8 = 0x30;

@@ -79,7 +76,7 @@ pub const FRAGMENT_SIZE_MIN: usize = 16;
 pub const FRAGMENT_HEADER_SIZE: usize = 16;

 /// Maximum allowed number of fragments.
-pub const FRAGMENT_COUNT_MAX: usize = 16;
+pub const FRAGMENT_COUNT_MAX: usize = 8;

 /// Index of packet fragment indicator byte to detect fragments.
 pub const FRAGMENT_INDICATOR_INDEX: usize = 13;
@@ -87,8 +84,9 @@ pub const FRAGMENT_INDICATOR_INDEX: usize = 13;
 /// Byte found at FRAGMENT_INDICATOR_INDEX to indicate a fragment.
 pub const FRAGMENT_INDICATOR: u8 = 0xff;

-/// Maximum number of inbound fragments to handle at once per path.
-pub const FRAGMENT_MAX_PER_PATH: usize = 64;
+/// Maximum number of inbound fragmented packets to handle at once per path.
+/// This is a sanity limit to prevent memory exhaustion due to DOS attacks or broken peers.
+pub const FRAGMENT_MAX_INBOUND_PACKETS_PER_PATH: usize = 256;

 /// Time after which an incomplete fragmented packet expires.
 pub const FRAGMENT_EXPIRATION: i64 = 1500;
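Halving FRAGMENT_COUNT_MAX to 8 matches the hand-written eight-element [None, ...] initializer that path.rs now uses. A compile-time guard, not in this commit, could keep the two from drifting:

// Fails the build if the constant ever diverges from the literal array length.
const _: () = assert!(FRAGMENT_COUNT_MAX == 8);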
@@ -333,7 +333,7 @@ impl ToString for Endpoint {
             Endpoint::IpUdp(ip) => format!("udp:{}", ip.to_string()),
             Endpoint::IpTcp(ip) => format!("tcp:{}", ip.to_string()),
             Endpoint::Http(url) => url.clone(),
-            Endpoint::WebRTC(offer) => format!("webrtc:{}", urlencoding::encode(offer.as_str())),
+            Endpoint::WebRTC(offer) => format!("webrtc:{}", urlencoding::encode(String::from_utf8_lossy(offer.as_slice()).as_ref())),
         }
     }
 }
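The WebRTC offer is evidently stored as bytes rather than a String now, so it is decoded lossily before percent-encoding. A usage sketch with a hypothetical SDP offer:

fn sketch() {
    let offer = b"v=0\r\no=- 0 0 IN IP4 127.0.0.1\r\n".to_vec(); // hypothetical offer bytes
    let s = Endpoint::WebRTC(offer).to_string();
    assert!(s.starts_with("webrtc:v%3D0")); // '=' percent-encodes to %3D
}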
@@ -1,62 +1,27 @@
-use std::sync::Arc;
-
 use crate::vl1::node::PacketBuffer;
 use crate::vl1::constants::FRAGMENT_COUNT_MAX;
-use crate::vl1::Path;
-use crate::vl1::protocol::PacketID;

 /// Packet fragment re-assembler and container.
 /// This is only used in the receive path.
 pub(crate) struct FragmentedPacket {
-    pub id: PacketID,
     pub ts_ticks: i64,
     pub frags: [Option<PacketBuffer>; FRAGMENT_COUNT_MAX],
     pub have: u8,
     pub expecting: u8,
 }

-impl Default for FragmentedPacket {
-    fn default() -> Self {
-        Self {
-            id: 0,
-            ts_ticks: -1,
-            frags: [None; FRAGMENT_COUNT_MAX],
-            have: 0,
-            expecting: 0,
-        }
-    }
-}
-
 impl FragmentedPacket {
-    /// Return fragments to pool and reset id and ts_ticks to 0 and -1 respectively.
-    #[inline(always)]
-    pub fn clear(&mut self) {
-        self.id = 0;
-        self.ts_ticks = -1;
-        self.frags.fill(None);
-    }
-
-    /// Initialize for a new packet and log the first fragment.
-    /// This will panic if 'no' is out of bounds.
-    #[inline(always)]
-    pub fn first_fragment(&mut self, id: PacketID, ts_ticks: i64, frag: PacketBuffer, no: u8, expecting: u8) {
-        self.id = id;
-        self.ts_ticks = ts_ticks;
-        let _ = self.frags[no as usize].replace(frag);
-        self.have = 1;
-        self.expecting = expecting;
-    }
-
     /// Add a fragment to this fragment set and return true if the packet appears complete.
     /// This will panic if 'no' is out of bounds.
     #[inline(always)]
     pub fn add_fragment(&mut self, frag: PacketBuffer, no: u8, expecting: u8) -> bool {
-        if self.frags[no as usize].replace(frag).is_none() {
-            self.have = self.have.wrapping_add(1);
-            self.expecting |= expecting; // in valid streams expecting is either 0 or the (same) total
-            self.have == self.expecting
-        } else {
+        if no < FRAGMENT_COUNT_MAX as u8 {
+            if self.frags[no as usize].replace(frag).is_none() {
+                self.have = self.have.wrapping_add(1);
+                self.expecting |= expecting; // in valid streams expecting is either 0 or the (same) total
+                return self.have == self.expecting;
+            }
+        }
         false
     }
 }
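The rewritten add_fragment checks `no` itself instead of panicking (the doc comment above it still promises a panic and could be updated), ignores duplicate fragments, and reports completion only when `have` equals `expecting`. Expected behavior as a sketch, with b0, b1, dup, and b9 standing in for pooled packet buffers:

let mut p = FragmentedPacket {
    ts_ticks: 0,
    frags: [None, None, None, None, None, None, None, None],
    have: 0,
    expecting: 0,
};
assert!(!p.add_fragment(b0, 0, 2));  // first of two fragments: not complete yet
assert!(!p.add_fragment(dup, 0, 2)); // duplicate slot is ignored; have stays 1
assert!(p.add_fragment(b1, 1, 2));   // second fragment completes the packet
assert!(!p.add_fragment(b9, 99, 2)); // out of range: returns false, no panic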
@@ -9,7 +9,7 @@ use crate::vl1::Address;
 use crate::vl1::buffer::Buffer;
 use crate::crypto::c25519::{C25519_PUBLIC_KEY_SIZE, ED25519_PUBLIC_KEY_SIZE, C25519_SECRET_KEY_SIZE, ED25519_SECRET_KEY_SIZE, C25519KeyPair, Ed25519KeyPair, ED25519_SIGNATURE_SIZE};
 use crate::crypto::p521::{P521KeyPair, P521PublicKey, P521_ECDSA_SIGNATURE_SIZE, P521_PUBLIC_KEY_SIZE, P521_SECRET_KEY_SIZE};
-use crate::crypto::hash::{SHA384, SHA512, SHA512_HASH_SIZE, SHA384_HASH_SIZE};
+use crate::crypto::hash::{SHA384, SHA512, SHA512_HASH_SIZE};
 use crate::crypto::balloon;
 use crate::crypto::salsa::Salsa;
 use crate::crypto::secret::Secret;
@@ -38,8 +38,6 @@ pub const AF_INET: u8 = libc::AF_INET as u8;
 #[cfg(not(windows))]
 pub const AF_INET6: u8 = libc::AF_INET6 as u8;

-const NO_IP: [u8; 0] = [];
-
 #[repr(u8)]
 pub enum IpScope {
     None = 0,
@@ -53,8 +51,12 @@ pub enum IpScope {
 }

 /// An IPv4 or IPv6 socket address that directly encapsulates C sockaddr types.
+///
+/// The ZeroTier core uses this in preference to std::net stuff so this can be
+/// directly used via the C API or with C socket I/O functions.
+///
 /// Unfortunately this is full of unsafe because it's a union, but the code is
 /// not complex and doesn't allocate anything.
 #[repr(C)]
 pub union InetAddress {
     sa: sockaddr,
@@ -208,7 +210,7 @@ impl InetAddress {
         match self.sa.sa_family as u8 {
             AF_INET => &*(&self.sin.sin_addr.s_addr as *const u32).cast::<[u8; 4]>(),
             AF_INET6 => &*(&(self.sin6.sin6_addr) as *const in6_addr).cast::<[u8; 16]>(),
-            _ => &NO_IP
+            _ => &[]
         }
     }
 }
@@ -557,12 +559,14 @@ impl Hash for InetAddress {
 #[cfg(test)]
 mod tests {
     use std::str::FromStr;
-    use crate::vl1::inetaddress::InetAddress;
+    use crate::vl1::inetaddress::{InetAddress, sockaddr_storage};
+    use std::mem::size_of;

     #[test]
     fn layout() {
         unsafe {
+            // Make sure union is laid out such that all the sockaddr structures overlap.
+            assert_eq!(size_of::<sockaddr_storage>(), size_of::<InetAddress>());
+
             let mut tmp = InetAddress::new();
             tmp.sa.sa_family = 0xab;
             if tmp.sin.sin_family != 0xab {
@@ -1,4 +1,3 @@
-use std::cmp::Ordering;
 use std::hash::{Hash, Hasher};

 use crate::vl1::{Address, Endpoint, Identity};
@@ -123,7 +122,7 @@ impl Locator {

     #[inline(always)]
     pub fn marshal<const BL: usize>(&self, buf: &mut Buffer<BL>) -> std::io::Result<()> {
-        marshal_internal(buf, false)
+        self.marshal_internal(buf, false)
     }

     pub fn unmarshal<const BL: usize>(buf: &Buffer<BL>, cursor: &mut usize) -> std::io::Result<Self> {
@@ -54,7 +54,7 @@ impl Default for MAC {
 impl Hash for MAC {
     #[inline(always)]
     fn hash<H: Hasher>(&self, state: &mut H) {
-        self.0.hash(state)
+        state.write_u64(self.0);
     }
 }

@@ -1,5 +1,3 @@
-use std::hash::Hash;
-use std::marker::PhantomData;
 use std::str::FromStr;
 use std::sync::Arc;
 use std::time::Duration;
@@ -16,7 +14,7 @@ use crate::vl1::buffer::Buffer;
 use crate::vl1::constants::PACKET_SIZE_MAX;
 use crate::vl1::path::Path;
 use crate::vl1::peer::Peer;
-use crate::vl1::protocol::{FragmentHeader, is_fragment, PacketHeader, PacketID};
+use crate::vl1::protocol::*;
 use crate::vl1::whois::WhoisQueue;

 /// Standard packet buffer type including pool container.
@@ -100,7 +98,7 @@ pub trait VL1CallerInterface {
 }

 /// Trait implemented by VL2 to handle messages after they are unwrapped by VL1.
-pub(crate) trait VL1PacketHandler {
+pub trait VL1PacketHandler {
     /// Handle a packet, returning true if the verb was recognized.
     /// True should be returned even if the packet is not valid, since the return value is used
     /// to determine if this is a VL2 or VL1 packet. ERROR and OK should not be handled here but
@@ -135,8 +133,6 @@ impl Node {
     /// If the auto-generate identity type is not None, a new identity will be generated if
     /// no identity is currently stored in the data store.
     pub fn new<CI: VL1CallerInterface>(ci: &CI, auto_generate_identity_type: Option<crate::vl1::identity::Type>) -> Result<Self, InvalidParameterError> {
-        crate::crypto::init(); // make sure this is initialized, okay to call more than once
-
         let id = {
             let id_str = ci.load_identity();
             if id_str.is_none() {
@@ -209,7 +205,7 @@ impl Node {
     /// This should only be called once at a time. It technically won't hurt anything to
     /// call concurrently but it will waste CPU cycles.
     pub fn do_background_tasks<CI: VL1CallerInterface>(&self, ci: &CI) -> Duration {
-        let intervals = self.intervals.lock();
+        let mut intervals = self.intervals.lock();
         let tt = ci.time_ticks();

         if intervals.whois.gate(tt) {
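The `mut` here is another pure build fix: `intervals.whois.gate(tt)` mutates the gate, and mutation through a parking_lot::MutexGuard requires the guard binding itself to be mutable. The rule in a self-contained form (types are illustrative):

use parking_lot::Mutex;

struct Intervals { ticks: u64 }

fn tick(m: &Mutex<Intervals>) {
    let mut intervals = m.lock(); // without `mut`, the next line fails to compile
    intervals.ticks += 1;         // mutation goes through DerefMut on the guard
}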
@@ -249,7 +245,7 @@ impl Node {
             let p = Arc::new(Path::new(ep.clone(), local_socket, local_interface));
             self.paths.insert(ep.clone(), p.clone()).unwrap_or(p) // if another thread added one, return that instead
         }, |path| {
-            path.clone()
+            path.value().clone()
         })
     }
 }
@@ -1,25 +1,22 @@
+use std::collections::HashMap;
 use std::sync::atomic::{AtomicI64, Ordering};

-use crate::vl1::Endpoint;
-use crate::vl1::constants::{FRAGMENT_COUNT_MAX, FRAGMENT_EXPIRATION};
-use crate::vl1::fragmentedpacket::FragmentedPacket;
-use crate::vl1::protocol::{FragmentHeader, PacketID};
-use crate::vl1::node::PacketBuffer;
-
 use parking_lot::Mutex;

-struct RxState {
-    last_receive_time_ticks: i64,
-    fragmented_packet_count: usize,
-    fragmented_packets: [FragmentedPacket; FRAGMENT_COUNT_MAX],
-}
+use crate::util::U64PassThroughHasher;
+use crate::vl1::constants::*;
+use crate::vl1::Endpoint;
+use crate::vl1::fragmentedpacket::FragmentedPacket;
+use crate::vl1::node::{PacketBuffer, VL1CallerInterface};
+use crate::vl1::protocol::PacketID;

 pub struct Path {
     pub(crate) endpoint: Endpoint,
     pub(crate) local_socket: i64,
     pub(crate) local_interface: i64,
     last_send_time_ticks: AtomicI64,
-    rxs: Mutex<RxState>,
+    last_receive_time_ticks: AtomicI64,
+    fragmented_packets: Mutex<HashMap<u64, FragmentedPacket, U64PassThroughHasher>>,
 }

 impl Path {
@@ -30,11 +27,8 @@ impl Path {
             local_socket,
             local_interface,
             last_send_time_ticks: AtomicI64::new(0),
-            rxs: Mutex::new(RxState {
-                last_receive_time_ticks: 0,
-                fragmented_packet_count: 0,
-                fragmented_packets: [FragmentedPacket::default(); FRAGMENT_COUNT_MAX]
-            })
+            last_receive_time_ticks: AtomicI64::new(0),
+            fragmented_packets: Mutex::new(HashMap::with_capacity_and_hasher(8, U64PassThroughHasher::new())),
         }
     }

@@ -45,75 +39,55 @@ impl Path {

     #[inline(always)]
     pub fn send_receive_time_ticks(&self) -> i64 {
-        self.rxs.lock().last_receive_time_ticks
+        self.last_receive_time_ticks.load(Ordering::Relaxed)
     }

-    /// Receive a fragment and invoke the handler if a packet appears fully assembled.
-    /// This also updates last receive time, etc.
+    /// Receive a fragment and return a FragmentedPacket if the entire packet is assembled.
     #[inline(always)]
-    pub(crate) fn receive_fragment<F: FnOnce(&mut FragmentedPacket)>(&self, packet_id: PacketID, fragment_no: u8, fragment_expecting_count: u8, packet: PacketBuffer, time_ticks: i64, assembled_packet_handler: F) {
-        if fragment_no < FRAGMENT_COUNT_MAX as u8 {
-            let mut rxs = self.rxs.lock();
-            rxs.last_receive_time_ticks = time_ticks;
+    pub(crate) fn receive_fragment(&self, packet_id: PacketID, fragment_no: u8, fragment_expecting_count: u8, packet: PacketBuffer, time_ticks: i64) -> Option<FragmentedPacket> {
+        self.last_receive_time_ticks.store(time_ticks, Ordering::Relaxed);

-            // In most situlations this algorithms runs right through and doesn't need to iterate.
-            // If there are no fragments fpcnt will be 0 and the first loop will skip. If there are
-            // no fragments then the second loop won't be needed either since the first slot will
-            // be open. Looping only happens when there are multiple fragments in flight, which is
-            // not a common scenario for peer-to-peer links. The maximum iteration count in the
-            // worst case is only 2*FRAGMENT_COUNT_MAX and the loops are only doing integer
-            // comparisons, so the worst case is still linear.
+        let mut fp = self.fragmented_packets.lock();

-            let mut fragmented_packets_to_check = rxs.fragmented_packet_count;
-            let mut i = 0;
-            while fragmented_packets_to_check > 0 {
-                let mut f = &mut rxs.fragmented_packets[i];
-                if f.id == packet_id {
-                    if f.add_fragment(packet, fragment_no, fragment_expecting_count) {
-                        assembled_packet_handler(f);
-                        f.clear();
-                        rxs.fragmented_packet_count -= 1;
-                    }
-                    return;
-                } else if f.ts_ticks >= 0 {
-                    if (time_ticks - f.ts_ticks) > FRAGMENT_EXPIRATION {
-                        f.clear();
-                        rxs.fragmented_packet_count -= 1;
-                    }
-                    fragmented_packets_to_check -= 1;
-                }
-                i += 1;
+        // This is mostly a defense against denial of service attacks or broken peers. It will
+        // trim off about 1/3 of waiting packets if the total is over the limit.
+        let fps = fp.len();
+        if fps > FRAGMENT_MAX_INBOUND_PACKETS_PER_PATH {
+            let mut entries: Vec<(i64, u64)> = Vec::new();
+            entries.reserve(fps);
+            for f in fp.iter() {
+                entries.push((f.1.ts_ticks, *f.0));
             }
-
-            let mut oldest_ts = &mut rxs.fragmented_packets[0];
-            let mut oldest_ts_ticks = oldest_ts.ts_ticks;
-            if oldest_ts_ticks >= 0 {
-                for fidx in 1..FRAGMENT_COUNT_MAX {
-                    let ts = &mut rxs.fragmented_packets[fidx];
-                    let tst = ts.ts_ticks;
-                    if tst < oldest_ts_ticks {
-                        oldest_ts = ts;
-                        oldest_ts_ticks = tst;
-                        if tst < 0 {
-                            break;
-                        }
-                    }
-                }
-            }
+            entries.sort_unstable_by(|a, b| (*a).0.cmp(&(*b).0));
+            for i in 0..(fps / 3) {
+                let _ = fp.remove(&(*unsafe { entries.get_unchecked(i) }).1);
+            }
+        }

-            if oldest_ts_ticks < 0 {
-                rxs.fragmented_packet_count += 1;
-            } else {
-                oldest_ts.clear();
-            }
-            rxs.fragmented_packets[oldest_idx].init(packet_id, time_ticks, packet, fragment_no, fragment_expecting_count);
+        let frag = fp.entry(packet_id).or_insert_with(|| FragmentedPacket {
+            ts_ticks: time_ticks,
+            frags: [None, None, None, None, None, None, None, None],
+            have: 0,
+            expecting: 0,
+        });
+
+        if frag.add_fragment(packet, fragment_no, fragment_expecting_count) {
+            fp.remove(&packet_id)
+        } else {
+            None
+        }
     }

     /// Register receipt of "anything" else which right now includes unfragmented packets and keepalives.
     #[inline(always)]
     pub(crate) fn receive_other(&self, time_ticks: i64) {
-        self.rxs.lock().last_receive_time_ticks = time_ticks;
+        self.last_receive_time_ticks.store(time_ticks, Ordering::Relaxed);
     }

+    #[inline(always)]
+    pub fn do_background_tasks<CI: VL1CallerInterface>(&self, ct: &CI) {
+        let time_ticks = ct.time_ticks();
+        self.fragmented_packets.lock().retain(|packet_id, frag| (time_ticks - frag.ts_ticks) < FRAGMENT_EXPIRATION);
+    }
 }

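Since receive_fragment now returns the completed set instead of invoking a callback, the calling code changes shape roughly as follows. This is a hedged sketch; the surrounding names (path, packet_id, buf) and the downstream hand-off are assumptions, not code from this commit:

if let Some(assembled) = path.receive_fragment(packet_id, frag_no, frag_count, buf, time_ticks) {
    // Every expected fragment has arrived; the Option<PacketBuffer> slots in
    // assembled.frags now go to the decryption stage (see peer.rs below).
    for frag in assembled.frags.iter().flatten() {
        // ... feed each fragment buffer onward ...
    }
}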
@@ -1,11 +1,9 @@
-use std::ops::DerefMut;
 use std::sync::Arc;

 use parking_lot::Mutex;
 use aes_gmac_siv::AesGmacSiv;

 use crate::crypto::c25519::C25519KeyPair;
-use crate::crypto::hash::SHA384_HASH_SIZE;
 use crate::crypto::kbkdf::zt_kbkdf_hmac_sha384;
 use crate::crypto::p521::P521KeyPair;
 use crate::crypto::poly1305::Poly1305;
@@ -15,7 +13,6 @@ use crate::crypto::secret::Secret;
 use crate::vl1::{Identity, Path};
 use crate::vl1::buffer::Buffer;
 use crate::vl1::constants::*;
-use crate::vl1::fragmentedpacket::FragmentedPacket;
 use crate::vl1::node::*;
 use crate::vl1::protocol::*;

@@ -66,8 +63,8 @@ struct TxState {
     // The current ephemeral key pair we will share with HELLO.
     ephemeral_pair: Option<EphemeralKeyPair>,

-    // Paths to this peer sorted in descending order of quality with None entries at the end.
-    paths: [Option<Arc<Path>>; PEER_MAX_PATHS],
+    // Paths to this peer sorted in ascending order of path quality.
+    paths: Vec<Arc<Path>>,
 }

 struct RxState {
@@ -158,14 +155,14 @@ impl Peer {
                 last_send_time_ticks: 0,
                 packet_iv_counter: next_u64_secure(),
                 total_bytes: 0,
-                static_secret: TxSecret {
+                static_secret: PeerSecrets {
                     create_time_ticks: -1,
-                    usage_count: 0,
+                    encrypt_count: 0,
                     secret: static_secret.clone(),
                     aes: AesGmacSiv::new(&aes_k0.0, &aes_k1.0),
                 },
                 ephemeral_secret: None,
-                paths: [None; PEER_MAX_PATHS],
+                paths: Vec::with_capacity(4),
+                ephemeral_pair: None,
             }),
             rx: Mutex::new(RxState {
@@ -197,100 +194,101 @@ impl Peer {
         // When handling incoming packets we try any current ephemeral secret first, and if that
         // fails we fall back to the static secret. If decryption with an ephemeral secret succeeds
        // the forward secrecy flag in the receive path is set.
-        let mut secret = rx.ephemeral_secret.as_mut().unwrap_or(&mut rx.static_secret);
+        let forward_secrecy = {
+            let mut secret = &mut rx.static_secret;
             loop {
                 match header.cipher() {
                     CIPHER_NOCRYPT_POLY1305 => {
                         // Only HELLO is allowed in the clear (but still authenticated).
                         if (packet_frag0_payload_bytes[0] & VERB_MASK) == VERB_VL1_HELLO {
                             let _ = payload.append_bytes(packet_frag0_payload_bytes);
                             for f in fragments.iter() {
                                 let _ = f.as_ref().map(|f| {
                                     let _ = f.as_bytes_after(FRAGMENT_HEADER_SIZE).map(|f| {
                                         let _ = payload.append_bytes(f);
                                     });
                                 });
                             }
                         }

                         // FIPS note: for FIPS purposes the HMAC-SHA384 tag at the end of V2 HELLOs
                         // will be considered the "real" handshake authentication.
                         let key = salsa_derive_per_packet_key(&secret.secret, header, payload.len());
                         let mut salsa = Salsa::new(&key.0[0..32], header.id_bytes(), true).unwrap();
                         let mut poly1305_key = [0_u8; 32];
                         salsa.crypt_in_place(&mut poly1305_key);
                         let mut poly = Poly1305::new(&poly1305_key).unwrap();
                         poly.update(packet_frag0_payload_bytes);

                         if poly.finish()[0..8].eq(&header.message_auth) {
                             break;
                         }
                     }

                     CIPHER_SALSA2012_POLY1305 => {
                         // FIPS note: support for this mode would have to be disabled in FIPS compliant
                         // modes of operation.
                         let key = salsa_derive_per_packet_key(&secret.secret, header, payload.len());
                         let mut salsa = Salsa::new(&key.0[0..32], header.id_bytes(), true).unwrap();
                         let mut poly1305_key = [0_u8; 32];
                         salsa.crypt_in_place(&mut poly1305_key);
                         let mut poly = Poly1305::new(&poly1305_key).unwrap();

                         poly.update(packet_frag0_payload_bytes);
                         let _ = payload.append_and_init_bytes(packet_frag0_payload_bytes.len(), |b| salsa.crypt(packet_frag0_payload_bytes, b));
                         for f in fragments.iter() {
                             let _ = f.as_ref().map(|f| {
                                 let _ = f.as_bytes_after(FRAGMENT_HEADER_SIZE).map(|f| {
                                     poly.update(f);
                                     let _ = payload.append_and_init_bytes(f.len(), |b| salsa.crypt(f, b));
                                 });
                             });
                         }

                         if poly.finish()[0..8].eq(&header.message_auth) {
                             break;
                         }
                     }

                     CIPHER_AES_GMAC_SIV => {
                         secret.aes.reset();
                         secret.aes.decrypt_init(&header.aes_gmac_siv_tag());
                         secret.aes.decrypt_set_aad(&header.aad_bytes());

                         let _ = payload.append_and_init_bytes(packet_frag0_payload_bytes.len(), |b| secret.aes.decrypt(packet_frag0_payload_bytes, b));
                         for f in fragments.iter() {
                             let _ = f.as_ref().map(|f| {
                                 let _ = f.as_bytes_after(FRAGMENT_HEADER_SIZE).map(|f| {
                                     let _ = payload.append_and_init_bytes(f.len(), |b| secret.aes.decrypt(f, b));
                                 });
                             });
                         }

                         if secret.aes.decrypt_finish() {
                             break;
                         }
                     }

                     _ => {}
                 }

                 if (secret as *const PeerSecrets) != (&rx.static_secret as *const PeerSecrets) {
                     payload.clear();
                     secret = &mut rx.static_secret;
                 } else {
                     // Both ephemeral (if any) and static secret have failed, drop packet.
                     return;
                 }
             }
+            (secret as *const PeerSecrets) != (&(rx.static_secret) as *const PeerSecrets)
+        };

         // If we make it here we've successfully decrypted and authenticated the packet.

         rx.last_receive_time_ticks = time_ticks;
         rx.total_bytes += payload.len() as u64;

-        let forward_secrecy = (secret as *const PeerSecrets) != (&(rx.static_secret) as *const PeerSecrets);
-
         // Unlock rx state mutex.
         drop(rx);
@@ -1,8 +1,7 @@
-use std::intrinsics::size_of;
 use std::mem::MaybeUninit;

 use crate::vl1::Address;
-use crate::vl1::buffer::{Buffer, RawObject};
+use crate::vl1::buffer::RawObject;
 use crate::vl1::constants::*;

 pub const VERB_VL1_NOP: u8 = 0x00;
@@ -174,6 +173,6 @@ mod tests {
             flags_cipher_hops: 0,
             message_auth: [0_u8; 8],
         };
-        assert_eq!(bar.id_bytes(), [1_u8, 2, 3, 4, 5, 6, 7, 8]);
+        assert_eq!(bar.id_bytes().clone(), [1_u8, 2, 3, 4, 5, 6, 7, 8]);
     }
 }
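Assuming id_bytes() returns a reference to the eight packet ID bytes, the added .clone() dereferences it into an owned [u8; 8], which assert_eq! can compare against the array literal; there is no PartialEq between &[u8; 8] and [u8; 8], so the original line failed to compile. In isolation (return type assumed):

let id: &[u8; 8] = bar.id_bytes();                    // assumed return type
assert_eq!(id.clone(), [1_u8, 2, 3, 4, 5, 6, 7, 8]); // [u8; 8] == [u8; 8]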
@@ -14,9 +14,9 @@ pub(crate) enum QueuedPacket {
 }

 struct WhoisQueueItem {
-    packet_queue: Vec<QueuedPacket>,
     retry_gate: IntervalGate<{ WHOIS_RETRY_INTERVAL }>,
     retry_count: u16,
+    packet_queue: Vec<QueuedPacket>
 }

 pub(crate) struct WhoisQueue {
@@ -34,19 +34,18 @@ impl WhoisQueue {

     pub fn query<CI: VL1CallerInterface>(&self, node: &Node, ci: &CI, target: Address, packet: Option<QueuedPacket>) {
         let mut q = self.queue.lock();
-        if q.get_mut(&target).map_or_else(|| {
-            q.insert(target, WhoisQueueItem {
-                retry_gate: IntervalGate::new(ci.time_ticks()),
-                retry_count: 1,
-                packet_queue: packet.map_or_else(|| Vec::new(), |p| vec![p]),
-            });
-            true
-        }, |qi| {
-            let g = qi.retry_gate(ci.time_ticks());
-            qi.retry_count += g as u16;
-            let _ = packet.map(|p| qi.packet_queue.push(p));
-            g
-        }) {
+
+        let qi = q.entry(target).or_insert_with(|| WhoisQueueItem {
+            packet_queue: Vec::new(),
+            retry_gate: IntervalGate::new(0),
+            retry_count: 0,
+        });
+
+        if qi.retry_gate.gate(ci.time_ticks()) {
+            qi.retry_count += 1;
+            if packet.is_some() {
+                qi.packet_queue.push(packet.unwrap());
+            }
             self.send_whois(node, ci, &[target]);
         }
     }
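The old version called q.get_mut(&target) and then tried to q.insert(...) inside one of its closures, a second overlapping borrow of the locked map that cannot compile. The entry API does insert-or-update through a single borrow; the same pattern in miniature:

use std::collections::HashMap;

fn bump(counts: &mut HashMap<u64, u32>, key: u64) -> u32 {
    let c = counts.entry(key).or_insert(0); // one borrow covers lookup and insert
    *c += 1;
    *c
}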