Move some stuff around, cleanup.

This commit is contained in:
Adam Ierymenko 2021-08-18 21:29:44 -04:00
parent 7f64dd34b8
commit 40941a25f7
No known key found for this signature in database
GPG key ID: C8877CF2D7A5D7F3
14 changed files with 249 additions and 232 deletions

View file

@@ -312,14 +312,17 @@ impl AesGmacSiv {
}
}
/// Finish decryption and return true if authentication appears valid.
/// If this returns false the message should be dropped.
/// Finish decryption and returns the decrypted tag if the message appears valid.
#[inline(always)]
pub fn decrypt_finish(&mut self) -> bool {
pub fn decrypt_finish(&mut self) -> Option<&[u8; 16]> {
unsafe {
CCCryptorGCMFinalize(self.gmac, self.tmp.as_mut_ptr().cast(), 16);
let tmp = self.tmp.as_mut_ptr().cast::<u64>();
*self.tag.as_mut_ptr().cast::<u64>().offset(1) == *tmp ^ *tmp.offset(1)
if *self.tag.as_mut_ptr().cast::<u64>().offset(1) == *tmp ^ *tmp.offset(1) {
Some(&self.tag)
} else {
None
}
}
}
}

View file

@@ -3,7 +3,7 @@ use std::str::FromStr;
use crate::error::InvalidFormatError;
use crate::util::hex::HEX_CHARS;
use crate::vl1::constants::{ADDRESS_RESERVED_PREFIX, ADDRESS_SIZE};
use crate::vl1::protocol::{ADDRESS_RESERVED_PREFIX, ADDRESS_SIZE};
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub struct Address(u64);
@@ -96,6 +96,7 @@ impl From<[u8; ADDRESS_SIZE]> for Address {
impl From<u64> for Address {
#[inline(always)]
fn from(i: u64) -> Address {
debug_assert!((i >> 24) == 0);
Address(i)
}
}

View file

@@ -83,22 +83,6 @@ impl<const L: usize> Buffer<L> {
self.0 == 0
}
/// Explicitly set the size of the data in this buffer, returning an error on overflow.
/// If the new size is larger than the old size, the new space is zeroed.
#[inline(always)]
pub fn set_size(&mut self, new_size: usize) -> std::io::Result<()> {
if new_size <= L {
let old_size = self.0;
self.0 = new_size;
if old_size < new_size {
self.1[old_size..new_size].fill(0);
}
Ok(())
} else {
Err(std::io::Error::new(std::io::ErrorKind::UnexpectedEof, OVERFLOW_ERR_MSG))
}
}
/// Append a packed structure and call a function to initialize it in place.
/// Anything not initialized will be zero.
#[inline(always)]

View file

@@ -1,64 +0,0 @@
// NOTE(review): this block is the complete content of the old vl1::constants
// module; the hunk header above ("-1,64 +0,0") shows the file being deleted in
// this commit, with these constants relocated into vl1::protocol and into the
// modules that use them (path, peer, whois, locator, fragmentedpacket).
/// Length of an address in bytes.
pub const ADDRESS_SIZE: usize = 5;
/// Prefix indicating reserved addresses (that can't actually be addresses).
pub const ADDRESS_RESERVED_PREFIX: u8 = 0xff;
/// KBKDF usage label indicating a key used to encrypt the dictionary inside HELLO.
pub const KBKDF_KEY_USAGE_LABEL_HELLO_DICTIONARY_ENCRYPT: u8 = b'H';
/// KBKDF usage label indicating a key used to HMAC packets, which is currently only used for HELLO.
pub const KBKDF_KEY_USAGE_LABEL_PACKET_HMAC: u8 = b'M';
/// KBKDF usage label for the first AES-GMAC-SIV key.
pub const KBKDF_KEY_USAGE_LABEL_AES_GMAC_SIV_K0: u8 = b'0';
/// KBKDF usage label for the second AES-GMAC-SIV key.
pub const KBKDF_KEY_USAGE_LABEL_AES_GMAC_SIV_K1: u8 = b'1';
/// KBKDF usage label for acknowledgement of a shared secret.
pub const KBKDF_KEY_USAGE_LABEL_EPHEMERAL_ACK: u8 = b'A';
/// Size of packet header that lies outside the encryption envelope.
pub const PACKET_HEADER_SIZE: usize = 27;
/// Maximum packet payload size including the verb/flags field.
///
/// This is large enough to carry "jumbo MTU" packets. The exact
/// value is because 10005+27 == 10032 which is divisible by 16. This
/// improves memory layout and alignment when buffers are allocated.
/// This value could technically be increased but it would require a
/// protocol version bump and only new nodes would be able to accept
/// the new size.
pub const PACKET_PAYLOAD_SIZE_MAX: usize = 10005;
/// Minimum packet, which is the header plus a verb.
pub const PACKET_SIZE_MIN: usize = PACKET_HEADER_SIZE + 1;
/// Maximum size of an entire packet.
pub const PACKET_SIZE_MAX: usize = PACKET_HEADER_SIZE + PACKET_PAYLOAD_SIZE_MAX;
/// Maximum number of inbound fragmented packets to handle at once per path.
/// This is a sanity limit to prevent memory exhaustion due to DOS attacks or broken peers.
pub const FRAGMENT_MAX_INBOUND_PACKETS_PER_PATH: usize = 256;
/// Time after which an incomplete fragmented packet expires.
pub const FRAGMENT_EXPIRATION: i64 = 1500;
/// Frequency for WHOIS retries
pub const WHOIS_RETRY_INTERVAL: i64 = 1000;
/// Maximum number of WHOIS retries
pub const WHOIS_RETRY_MAX: u16 = 3;
/// Maximum number of packets to queue up behind a WHOIS.
pub const WHOIS_MAX_WAITING_PACKETS: usize = 64;
/// Maximum number of endpoints allowed in a Locator.
pub const LOCATOR_MAX_ENDPOINTS: usize = 32;
/// Keepalive interval for paths in milliseconds.
pub const PATH_KEEPALIVE_INTERVAL: i64 = 20000;
/// Interval for servicing and background operations on peers.
pub const PEER_SERVICE_INTERVAL: i64 = 30000;

View file

@@ -1,5 +1,5 @@
use crate::vl1::node::PacketBuffer;
use crate::vl1::protocol::FRAGMENT_COUNT_MAX;
use crate::vl1::protocol::*;
/// Packet fragment re-assembler and container.
/// This is only used in the receive path.

View file

@@ -15,7 +15,7 @@ use crate::crypto::secret::Secret;
use crate::error::InvalidFormatError;
use crate::vl1::Address;
use crate::vl1::buffer::Buffer;
use crate::vl1::constants::PACKET_SIZE_MAX;
use crate::vl1::protocol::PACKET_SIZE_MAX;
use concat_arrays::concat_arrays;

View file

@@ -2,7 +2,10 @@ use std::hash::{Hash, Hasher};
use crate::vl1::{Address, Endpoint, Identity};
use crate::vl1::buffer::Buffer;
use crate::vl1::constants::{LOCATOR_MAX_ENDPOINTS, PACKET_SIZE_MAX};
use crate::vl1::protocol::PACKET_SIZE_MAX;
/// Maximum number of endpoints allowed in a Locator.
pub const LOCATOR_MAX_ENDPOINTS: usize = 32;
/// A signed object generated by nodes to inform the network where they may be found.
///

View file

@@ -9,7 +9,6 @@ pub(crate) mod mac;
pub(crate) mod fragmentedpacket;
pub(crate) mod whois;
pub mod constants;
pub mod identity;
pub mod inetaddress;
pub mod endpoint;

View file

@@ -11,11 +11,11 @@ use crate::util::gate::IntervalGate;
use crate::util::pool::{Pool, Pooled};
use crate::vl1::{Address, Endpoint, Identity, Locator};
use crate::vl1::buffer::{Buffer, PooledBufferFactory};
use crate::vl1::constants::*;
use crate::vl1::path::Path;
use crate::vl1::peer::Peer;
use crate::vl1::protocol::*;
use crate::vl1::whois::{WhoisQueue, QueuedPacket};
use crate::vl1::rootset::RootSet;
/// Standard packet buffer type including pool container.
pub type PacketBuffer = Pooled<Buffer<{ PACKET_SIZE_MAX }>, PooledBufferFactory<{ PACKET_SIZE_MAX }>>;
@@ -116,6 +116,7 @@ pub struct Node {
paths: DashMap<Endpoint, Arc<Path>>,
peers: DashMap<Address, Arc<Peer>>,
roots: Mutex<Vec<Arc<Peer>>>,
root_sets: Mutex<Vec<RootSet>>,
whois: WhoisQueue,
buffer_pool: Pool<Buffer<{ PACKET_SIZE_MAX }>, PooledBufferFactory<{ PACKET_SIZE_MAX }>>,
secure_prng: SecureRandom,
@@ -158,6 +159,7 @@ impl Node {
paths: DashMap::new(),
peers: DashMap::new(),
roots: Mutex::new(Vec::new()),
root_sets: Mutex::new(Vec::new()),
whois: WhoisQueue::new(),
buffer_pool: Pool::new(64, PooledBufferFactory),
secure_prng: SecureRandom::get(),
@@ -210,10 +212,7 @@ impl Node {
self.roots.lock().first().map(|p| p.clone())
}
/// Determine if a given peer is a root.
pub(crate) fn is_root(&self, peer: &Peer) -> bool {
let pptr = peer as *const Peer;
self.roots.lock().iter().any(|p| Arc::as_ptr(p) == pptr)
pub(crate) fn for_each_root_set(&self) {
}
/// Run background tasks and return desired delay until next call in milliseconds.
@@ -257,6 +256,7 @@ impl Node {
if dest == self.identity.address() {
let path = self.path(source_endpoint, source_local_socket, source_local_interface);
path.log_receive(time_ticks);
if fragment_header.is_fragment() {
let _ = path.receive_fragment(fragment_header.id, fragment_header.fragment_no(), fragment_header.total_fragments(), data, time_ticks).map(|assembled_packet| {
@@ -278,7 +278,6 @@
} else {
path.receive_other(time_ticks);
let packet_header = data.struct_at::<PacketHeader>(0);
if packet_header.is_ok() {
let packet_header = packet_header.unwrap();
@@ -309,7 +308,7 @@
return;
}
}
let _ = self.peer(dest).map(|peer| peer.forward(ci, time_ticks, data));
let _ = self.peer(dest).map(|peer| peer.forward(ci, time_ticks, data.as_ref()));
}
}

View file

@@ -4,12 +4,14 @@ use std::sync::atomic::{AtomicI64, Ordering};
use parking_lot::Mutex;
use crate::util::U64PassThroughHasher;
use crate::vl1::constants::*;
use crate::vl1::Endpoint;
use crate::vl1::fragmentedpacket::FragmentedPacket;
use crate::vl1::fragmentedpacket::{FragmentedPacket, FRAGMENT_EXPIRATION, FRAGMENT_MAX_INBOUND_PACKETS_PER_PATH};
use crate::vl1::node::{PacketBuffer, VL1CallerInterface};
use crate::vl1::protocol::PacketID;
/// Keepalive interval for paths in milliseconds.
pub(crate) const PATH_KEEPALIVE_INTERVAL: i64 = 20000;
/// A remote endpoint paired with a local socket and a local interface.
/// These are maintained in Node and canonicalized so that all unique paths have
/// one and only one unique path object. That enables statistics to be tracked
@@ -38,13 +40,28 @@ impl Path {
}
}
#[inline(always)]
pub fn endpoint(&self) -> &Endpoint {
&self.endpoint
}
#[inline(always)]
pub fn local_socket(&self) -> i64 {
self.local_socket
}
#[inline(always)]
pub fn local_interface(&self) -> i64 {
self.local_interface
}
#[inline(always)]
pub fn last_send_time_ticks(&self) -> i64 {
self.last_send_time_ticks.load(Ordering::Relaxed)
}
#[inline(always)]
pub fn send_receive_time_ticks(&self) -> i64 {
pub fn last_receive_time_ticks(&self) -> i64 {
self.last_receive_time_ticks.load(Ordering::Relaxed)
}
@@ -52,8 +69,6 @@ impl Path {
/// This returns None if more fragments are needed to assemble the packet.
#[inline(always)]
pub(crate) fn receive_fragment(&self, packet_id: PacketID, fragment_no: u8, fragment_expecting_count: u8, packet: PacketBuffer, time_ticks: i64) -> Option<FragmentedPacket> {
self.last_receive_time_ticks.store(time_ticks, Ordering::Relaxed);
let mut fp = self.fragmented_packets.lock();
// This is mostly a defense against denial of service attacks or broken peers. It will
@@ -78,15 +93,19 @@ impl Path {
}
}
/// Register receipt of "anything" else which right now includes unfragmented packets and keepalives.
#[inline(always)]
pub(crate) fn receive_other(&self, time_ticks: i64) {
pub(crate) fn log_receive(&self, time_ticks: i64) {
self.last_receive_time_ticks.store(time_ticks, Ordering::Relaxed);
}
#[inline(always)]
pub(crate) fn log_send(&self, time_ticks: i64) {
self.last_send_time_ticks.store(time_ticks, Ordering::Relaxed);
}
/// Called every INTERVAL during background tasks.
#[inline(always)]
pub fn on_interval<CI: VL1CallerInterface>(&self, ct: &CI, time_ticks: i64) {
pub(crate) fn on_interval<CI: VL1CallerInterface>(&self, ct: &CI, time_ticks: i64) {
self.fragmented_packets.lock().retain(|packet_id, frag| (time_ticks - frag.ts_ticks) < FRAGMENT_EXPIRATION);
}
}

View file

@@ -1,5 +1,6 @@
use std::convert::TryInto;
use std::mem::MaybeUninit;
use std::ptr::copy_nonoverlapping;
use std::sync::Arc;
use std::sync::atomic::{AtomicI64, AtomicU64, AtomicU8, Ordering};
@@ -20,10 +21,12 @@ use crate::defaults::UDP_DEFAULT_MTU;
use crate::util::pool::{Pool, PoolFactory};
use crate::vl1::{Dictionary, Endpoint, Identity, InetAddress, Path};
use crate::vl1::buffer::Buffer;
use crate::vl1::constants::*;
use crate::vl1::node::*;
use crate::vl1::protocol::*;
/// Interval for servicing and background operations on peers.
pub(crate) const PEER_SERVICE_INTERVAL: i64 = 30000;
struct AesGmacSivPoolFactory(Secret<48>, Secret<48>);
impl PoolFactory<AesGmacSiv> for AesGmacSivPoolFactory {
@@ -106,7 +109,7 @@ pub struct Peer {
total_bytes_forwarded: AtomicU64,
// Counter for assigning packet IV's a.k.a. PacketIDs.
packet_iv_counter: AtomicU64,
packet_id_counter: AtomicU64,
// Remote peer version information.
remote_version: AtomicU64,
@@ -180,21 +183,17 @@ impl Peer {
total_bytes_received: AtomicU64::new(0),
total_bytes_received_indirect: AtomicU64::new(0),
total_bytes_forwarded: AtomicU64::new(0),
packet_iv_counter: AtomicU64::new(next_u64_secure()),
packet_id_counter: AtomicU64::new(next_u64_secure()),
remote_version: AtomicU64::new(0),
remote_protocol_version: AtomicU8::new(0),
}
})
}
/// Get the next packet initialization vector.
///
/// For Salsa20/12 with Poly1305 this is the packet ID. For AES-GMAC-SIV the packet ID is
/// not known until the packet is encrypted, since it's the first 64 bits of the GMAC-SIV
/// tag.
/// Get the next packet ID / IV.
#[inline(always)]
pub(crate) fn next_packet_iv(&self) -> PacketID {
self.packet_iv_counter.fetch_add(1, Ordering::Relaxed)
pub(crate) fn next_packet_id(&self) -> PacketID {
self.packet_id_counter.fetch_add(1, Ordering::Relaxed)
}
/// Receive, decrypt, authenticate, and process an incoming packet from this peer.
@@ -204,8 +203,9 @@ impl Peer {
let _ = packet.as_bytes_starting_at(PACKET_VERB_INDEX).map(|packet_frag0_payload_bytes| {
let mut payload: Buffer<{ PACKET_SIZE_MAX }> = Buffer::new();
let mut forward_secrecy = true; // set to false below if ephemeral fails
let mut packet_id = header.id as u64;
let cipher = header.cipher();
let mut forward_secrecy = true;
let ephemeral_secret = self.ephemeral_secret.lock().clone();
for secret in [ephemeral_secret.as_ref().map_or(&self.static_secret, |s| s.as_ref()), &self.static_secret] {
match cipher {
@@ -254,13 +254,16 @@
for f in fragments.iter() {
let _ = f.as_ref().map(|f| f.as_bytes_starting_at(FRAGMENT_HEADER_SIZE).map(|f| payload.append_and_init_bytes(f.len(), |b| aes.decrypt(f, b))));
}
if aes.decrypt_finish() {
let tag = aes.decrypt_finish();
if tag.is_some() {
// For AES-GMAC-SIV we need to grab the original packet ID from the decrypted tag.
let tag = tag.unwrap();
unsafe { copy_nonoverlapping(tag.as_ptr(), (&mut packet_id as *mut u64).cast(), 8) };
break;
}
}
_ => {
// Unrecognized or unsupported cipher type.
return;
}
}
@@ -272,7 +275,7 @@
} else {
// If ephemeral failed, static secret will be tried. Set forward secrecy to false.
forward_secrecy = false;
let _ = payload.set_size(0);
payload.clear();
}
}
drop(ephemeral_secret);
@@ -281,11 +284,12 @@
// for loop and end up here. Otherwise it returns from the whole function.
self.last_receive_time_ticks.store(time_ticks, Ordering::Relaxed);
let _ = self.total_bytes_received.fetch_add((payload.len() + PACKET_HEADER_SIZE) as u64, Ordering::Relaxed);
self.total_bytes_received.fetch_add((payload.len() + PACKET_HEADER_SIZE) as u64, Ordering::Relaxed);
let _ = payload.u8_at(0).map(|verb| {
// For performance reasons we let VL2 handle packets first. It returns false
// if it didn't handle the packet, in which case it's handled at VL1.
let verb = verb & VERB_MASK;
if !ph.handle_packet(self, source_path, forward_secrecy, verb, &payload) {
match verb {
//VERB_VL1_NOP => {}
@@ -304,68 +308,62 @@ impl Peer {
});
}
/// Get current best path or None if there are no direct paths to this peer.
#[inline(always)]
pub(crate) fn best_path(&self) -> Option<Arc<Path>> {
self.paths.lock().last().map(|p| p.clone())
}
/// Send a packet as one or more UDP fragments.
///
/// Calling this with anything other than a UDP endpoint is invalid.
fn send_udp<CI: VL1CallerInterface>(&self, ci: &CI, endpoint: &Endpoint, local_socket: Option<i64>, local_interface: Option<i64>, packet_id: PacketID, data: &Buffer<{ PACKET_SIZE_MAX }>) -> bool {
debug_assert!(matches!(endpoint, Endpoint::IpUdp(_)));
debug_assert!(data.len() <= PACKET_SIZE_MAX);
let packet_size = data.len();
if packet_size > UDP_DEFAULT_MTU {
let bytes = data.as_bytes();
if !ci.wire_send(endpoint, local_socket, local_interface, &[&bytes[0..UDP_DEFAULT_MTU]], 0) {
return false;
}
let mut pos = UDP_DEFAULT_MTU;
let fragment_count = (((packet_size - UDP_DEFAULT_MTU) as u32) / ((UDP_DEFAULT_MTU - FRAGMENT_HEADER_SIZE) as u32)) + ((((packet_size - UDP_DEFAULT_MTU) as u32) % ((UDP_DEFAULT_MTU - FRAGMENT_HEADER_SIZE) as u32)) != 0) as u32;
debug_assert!(fragment_count <= FRAGMENT_COUNT_MAX as u32);
let mut header = FragmentHeader {
id: packet_id,
dest: bytes[PACKET_DESTINATION_INDEX..PACKET_DESTINATION_INDEX + ADDRESS_SIZE].try_into().unwrap(),
fragment_indicator: FRAGMENT_INDICATOR,
total_and_fragment_no: ((fragment_count + 1) << 4) as u8,
reserved_hops: 0,
};
let mut chunk_size = (packet_size - pos).min(UDP_DEFAULT_MTU - FRAGMENT_HEADER_SIZE);
loop {
header.total_and_fragment_no += 1;
let next_pos = pos + chunk_size;
if !ci.wire_send(endpoint, local_socket, local_interface, &[header.as_bytes(), &bytes[pos..next_pos]], 0) {
fn send_to_endpoint<CI: VL1CallerInterface>(&self, ci: &CI, endpoint: &Endpoint, local_socket: Option<i64>, local_interface: Option<i64>, packet_id: PacketID, packet: &Buffer<{ PACKET_SIZE_MAX }>) -> bool {
debug_assert!(packet.len() <= PACKET_SIZE_MAX);
if matches!(endpoint, Endpoint::IpUdp(_)) {
let packet_size = packet.len();
if packet_size > UDP_DEFAULT_MTU {
let bytes = packet.as_bytes();
if !ci.wire_send(endpoint, local_socket, local_interface, &[&bytes[0..UDP_DEFAULT_MTU]], 0) {
return false;
}
pos = next_pos;
if pos < packet_size {
chunk_size = (packet_size - pos).min(UDP_DEFAULT_MTU - FRAGMENT_HEADER_SIZE);
} else {
break;
let mut pos = UDP_DEFAULT_MTU;
let fragment_count = (((packet_size - UDP_DEFAULT_MTU) as u32) / ((UDP_DEFAULT_MTU - FRAGMENT_HEADER_SIZE) as u32)) + ((((packet_size - UDP_DEFAULT_MTU) as u32) % ((UDP_DEFAULT_MTU - FRAGMENT_HEADER_SIZE) as u32)) != 0) as u32;
debug_assert!(fragment_count <= FRAGMENT_COUNT_MAX as u32);
let mut header = FragmentHeader {
id: packet_id,
dest: bytes[PACKET_DESTINATION_INDEX..PACKET_DESTINATION_INDEX + ADDRESS_SIZE].try_into().unwrap(),
fragment_indicator: FRAGMENT_INDICATOR,
total_and_fragment_no: ((fragment_count + 1) << 4) as u8,
reserved_hops: 0,
};
let mut chunk_size = (packet_size - pos).min(UDP_DEFAULT_MTU - FRAGMENT_HEADER_SIZE);
loop {
header.total_and_fragment_no += 1;
let next_pos = pos + chunk_size;
if !ci.wire_send(endpoint, local_socket, local_interface, &[header.as_bytes(), &bytes[pos..next_pos]], 0) {
return false;
}
pos = next_pos;
if pos < packet_size {
chunk_size = (packet_size - pos).min(UDP_DEFAULT_MTU - FRAGMENT_HEADER_SIZE);
} else {
return true;
}
}
}
return true;
}
return ci.wire_send(endpoint, local_socket, local_interface, &[data.as_bytes()], 0);
return ci.wire_send(endpoint, local_socket, local_interface, &[packet.as_bytes()], 0);
}
/// Send a packet to this peer.
///
/// This will go directly if there is an active path, or otherwise indirectly
/// via a root or some other route.
pub(crate) fn send<CI: VL1CallerInterface>(&self, ci: &CI, time_ticks: i64, data: PacketBuffer) {
self.last_send_time_ticks.store(time_ticks, Ordering::Relaxed);
let _ = self.total_bytes_sent.fetch_add(data.len() as u64, Ordering::Relaxed);
todo!()
pub(crate) fn send<CI: VL1CallerInterface>(&self, ci: &CI, node: &Node, time_ticks: i64, packet_id: PacketID, packet: &Buffer<{ PACKET_SIZE_MAX }>) -> bool {
self.path(node).map_or(false, |path| {
if self.send_to_endpoint(ci, &path.endpoint, Some(path.local_socket), Some(path.local_interface), packet_id, packet) {
self.last_send_time_ticks.store(time_ticks, Ordering::Relaxed);
self.total_bytes_sent.fetch_add(packet.len() as u64, Ordering::Relaxed);
true
} else {
false
}
})
}
/// Forward a packet to this peer.
@@ -373,34 +371,34 @@ impl Peer {
/// This is called when we receive a packet not addressed to this node and
/// want to pass it along.
///
/// This doesn't support fragmenting since fragments are forwarded individually.
/// This doesn't fragment large packets since fragments are forwarded individually.
/// Intermediates don't need to adjust fragmentation.
pub(crate) fn forward<CI: VL1CallerInterface>(&self, ci: &CI, time_ticks: i64, data: PacketBuffer) {
self.last_forward_time_ticks.store(time_ticks, Ordering::Relaxed);
let _ = self.total_bytes_forwarded.fetch_add(data.len() as u64, Ordering::Relaxed);
todo!()
pub(crate) fn forward<CI: VL1CallerInterface>(&self, ci: &CI, time_ticks: i64, packet: &Buffer<{ PACKET_SIZE_MAX }>) -> bool {
self.direct_path().map_or(false, |path| {
if ci.wire_send(&path.endpoint, Some(path.local_socket), Some(path.local_interface), &[packet.as_bytes()], 0) {
self.last_forward_time_ticks.store(time_ticks, Ordering::Relaxed);
self.total_bytes_forwarded.fetch_add(packet.len() as u64, Ordering::Relaxed);
true
} else {
false
}
})
}
/// Send a HELLO to this peer.
///
/// If try_new_endpoint is not None the packet will be sent directly to this endpoint.
/// Otherwise it will be sent via the best direct or indirect path.
pub(crate) fn send_hello<CI: VL1CallerInterface>(&self, ci: &CI, node: &Node, try_new_endpoint: Option<Endpoint>) {
let path = if try_new_endpoint.is_none() {
self.best_path().map_or_else(|| {
node.root().map_or(None, |root| {
root.best_path().map_or(None, |bp| Some(bp))
})
}, |bp| Some(bp))
} else {
None
};
let _ = try_new_endpoint.as_ref().map_or_else(|| Some(&path.as_ref().unwrap().endpoint), |ep| Some(ep)).map(|endpoint| {
///
/// This has its own send logic so it can handle either an explicit endpoint or a
/// known one.
pub(crate) fn send_hello<CI: VL1CallerInterface>(&self, ci: &CI, node: &Node, explicit_endpoint: Option<Endpoint>) -> bool {
let path = if explicit_endpoint.is_none() { self.path(node) } else { None };
explicit_endpoint.as_ref().map_or_else(|| Some(&path.as_ref().unwrap().endpoint), |ep| Some(ep)).map_or(false, |endpoint| {
let mut packet: Buffer<{ PACKET_SIZE_MAX }> = Buffer::new();
let this_peer_is_root = node.is_root(self);
let time_ticks = ci.time_ticks();
let packet_id = self.next_packet_iv();
let packet_id = self.next_packet_id();
debug_assert!(packet.append_and_init_struct(|header: &mut PacketHeader| {
header.id = packet_id;
header.dest = self.identity.address().to_bytes();
@@ -413,7 +411,7 @@ impl Peer {
header.version_major = VERSION_MAJOR;
header.version_minor = VERSION_MINOR;
header.version_revision = (VERSION_REVISION as u16).to_be();
header.timestamp = (ci.time_ticks() as u64).to_be();
header.timestamp = (time_ticks as u64).to_be();
}).is_ok());
debug_assert!(self.identity.marshal(&mut packet, false).is_ok());
@@ -421,10 +419,18 @@ impl Peer {
let aes_ctr_iv_position = packet.len();
debug_assert!(packet.append_and_init_bytes_fixed(|iv: &mut [u8; 18]| {
crate::crypto::random::fill_bytes_secure(&mut iv[0..12]);
todo!()
crate::crypto::random::fill_bytes_secure(&mut iv[0..16]);
iv[12] &= 0x7f; // mask off MSB of counter in iv to play nice with some AES-CTR implementations
// LEGACY: create a 16-bit encrypted field that specifies zero moons. This is ignored by v2
// but causes v1 nodes to be able to parse this packet properly. This is not significant in
// terms of encryption or authentication.
let mut salsa_iv = packet_id.to_ne_bytes();
salsa_iv[7] &= 0xf8;
Salsa::new(&self.static_secret.secret.0[0..32], &salsa_iv, true).unwrap().crypt(&[0_u8, 0_u8], &mut salsa_iv[16..18]);
}).is_ok());
let dictionary_position = packet.len();
let dict_start_position = packet.len();
let mut dict = Dictionary::new();
dict.set_u64(HELLO_DICT_KEY_INSTANCE_ID, node.instance_id);
dict.set_u64(HELLO_DICT_KEY_CLOCK, ci.time_clock() as u64);
@@ -437,7 +443,7 @@ impl Peer {
dict.set_bytes(HELLO_DICT_KEY_EPHEMERAL_C25519, ephemeral_pair.c25519.public_bytes().to_vec());
dict.set_bytes(HELLO_DICT_KEY_EPHEMERAL_P521, ephemeral_pair.p521.public_key_bytes().to_vec());
});
if this_peer_is_root {
if node.is_root(self) {
// If the peer is a root we include some extra information for diagnostic and statistics
// purposes such as the CPU type, bits, and OS info. This is not sent to other peers.
dict.set_str(HELLO_DICT_KEY_SYS_ARCH, std::env::consts::ARCH);
@@ -460,8 +466,8 @@ impl Peer {
debug_assert!(dict.write_to(&mut packet).is_ok());
let mut dict_aes = self.static_secret_hello_dictionary.lock();
dict_aes.init(&packet.as_bytes()[aes_ctr_iv_position..aes_ctr_iv_position + 12]);
dict_aes.crypt_in_place(&mut packet.as_bytes_mut()[dictionary_position..]);
dict_aes.init(&packet.as_bytes()[aes_ctr_iv_position..aes_ctr_iv_position + 16]);
dict_aes.crypt_in_place(&mut packet.as_bytes_mut()[dict_start_position..]);
drop(dict_aes);
debug_assert!(packet.append_bytes_fixed(&SHA384::hmac(self.static_secret_packet_hmac.as_ref(), &packet.as_bytes()[PACKET_HEADER_SIZE + 1..])).is_ok());
@@ -470,45 +476,55 @@ impl Peer {
poly.update(packet.as_bytes_starting_at(PACKET_HEADER_SIZE).unwrap());
packet.as_bytes_mut()[HEADER_MAC_FIELD_INDEX..HEADER_MAC_FIELD_INDEX + 8].copy_from_slice(&poly.finish()[0..8]);
self.send_udp(ci, endpoint, path.as_ref().map(|p| p.local_socket), path.as_ref().map(|p| p.local_interface), packet_id, &packet);
});
self.static_secret.encrypt_count.fetch_add(1, Ordering::Relaxed);
self.last_send_time_ticks.store(time_ticks, Ordering::Relaxed);
self.total_bytes_sent.fetch_add(packet.len() as u64, Ordering::Relaxed);
path.map_or_else(|| {
self.send_to_endpoint(ci, endpoint, None, None, packet_id, &packet)
}, |path| {
path.log_send(time_ticks);
self.send_to_endpoint(ci, endpoint, Some(path.local_socket), Some(path.local_interface), packet_id, &packet)
})
})
}
/// Called every INTERVAL during background tasks.
#[inline(always)]
pub(crate) fn on_interval<CI: VL1CallerInterface>(&self, ct: &CI, time_ticks: i64) {
}
pub(crate) fn on_interval<CI: VL1CallerInterface>(&self, ct: &CI, time_ticks: i64) {}
#[inline(always)]
fn receive_hello<CI: VL1CallerInterface>(&self, ci: &CI, node: &Node, time_ticks: i64, source_path: &Arc<Path>, payload: &Buffer<{ PACKET_SIZE_MAX }>) {
}
fn receive_hello<CI: VL1CallerInterface>(&self, ci: &CI, node: &Node, time_ticks: i64, source_path: &Arc<Path>, payload: &Buffer<{ PACKET_SIZE_MAX }>) {}
#[inline(always)]
fn receive_error<CI: VL1CallerInterface>(&self, ci: &CI, node: &Node, time_ticks: i64, source_path: &Arc<Path>, payload: &Buffer<{ PACKET_SIZE_MAX }>) {
}
fn receive_error<CI: VL1CallerInterface>(&self, ci: &CI, node: &Node, time_ticks: i64, source_path: &Arc<Path>, payload: &Buffer<{ PACKET_SIZE_MAX }>) {}
#[inline(always)]
fn receive_ok<CI: VL1CallerInterface>(&self, ci: &CI, node: &Node, time_ticks: i64, source_path: &Arc<Path>, payload: &Buffer<{ PACKET_SIZE_MAX }>) {
}
fn receive_ok<CI: VL1CallerInterface>(&self, ci: &CI, node: &Node, time_ticks: i64, source_path: &Arc<Path>, payload: &Buffer<{ PACKET_SIZE_MAX }>) {}
#[inline(always)]
fn receive_whois<CI: VL1CallerInterface>(&self, ci: &CI, node: &Node, time_ticks: i64, source_path: &Arc<Path>, payload: &Buffer<{ PACKET_SIZE_MAX }>) {
}
fn receive_whois<CI: VL1CallerInterface>(&self, ci: &CI, node: &Node, time_ticks: i64, source_path: &Arc<Path>, payload: &Buffer<{ PACKET_SIZE_MAX }>) {}
#[inline(always)]
fn receive_rendezvous<CI: VL1CallerInterface>(&self, ci: &CI, node: &Node, time_ticks: i64, source_path: &Arc<Path>, payload: &Buffer<{ PACKET_SIZE_MAX }>) {
}
fn receive_rendezvous<CI: VL1CallerInterface>(&self, ci: &CI, node: &Node, time_ticks: i64, source_path: &Arc<Path>, payload: &Buffer<{ PACKET_SIZE_MAX }>) {}
#[inline(always)]
fn receive_echo<CI: VL1CallerInterface>(&self, ci: &CI, node: &Node, time_ticks: i64, source_path: &Arc<Path>, payload: &Buffer<{ PACKET_SIZE_MAX }>) {
}
fn receive_echo<CI: VL1CallerInterface>(&self, ci: &CI, node: &Node, time_ticks: i64, source_path: &Arc<Path>, payload: &Buffer<{ PACKET_SIZE_MAX }>) {}
#[inline(always)]
fn receive_push_direct_paths<CI: VL1CallerInterface>(&self, ci: &CI, node: &Node, time_ticks: i64, source_path: &Arc<Path>, payload: &Buffer<{ PACKET_SIZE_MAX }>) {
}
fn receive_push_direct_paths<CI: VL1CallerInterface>(&self, ci: &CI, node: &Node, time_ticks: i64, source_path: &Arc<Path>, payload: &Buffer<{ PACKET_SIZE_MAX }>) {}
#[inline(always)]
fn receive_user_message<CI: VL1CallerInterface>(&self, ci: &CI, node: &Node, time_ticks: i64, source_path: &Arc<Path>, payload: &Buffer<{ PACKET_SIZE_MAX }>) {
fn receive_user_message<CI: VL1CallerInterface>(&self, ci: &CI, node: &Node, time_ticks: i64, source_path: &Arc<Path>, payload: &Buffer<{ PACKET_SIZE_MAX }>) {}
/// Get current best path or None if there are no direct paths to this peer.
pub fn direct_path(&self) -> Option<Arc<Path>> {
self.paths.lock().last().map(|p| p.clone())
}
/// Get either the current best direct path or an indirect path.
pub fn path(&self, node: &Node) -> Option<Arc<Path>> {
self.direct_path().map_or_else(|| node.root().map_or(None, |root| root.direct_path().map_or(None, |bp| Some(bp))), |bp| Some(bp))
}
/// Get the remote version of this peer: major, minor, revision, and build.

View file

@@ -2,7 +2,6 @@ use std::mem::MaybeUninit;
use crate::vl1::Address;
use crate::vl1::buffer::RawObject;
use crate::vl1::constants::*;
pub const VERB_VL1_NOP: u8 = 0x00;
pub const VERB_VL1_HELLO: u8 = 0x01;
@@ -14,20 +13,60 @@ pub const VERB_VL1_ECHO: u8 = 0x08;
pub const VERB_VL1_PUSH_DIRECT_PATHS: u8 = 0x10;
pub const VERB_VL1_USER_MESSAGE: u8 = 0x14;
pub(crate) const HELLO_DICT_KEY_INSTANCE_ID: &'static str = "I";
pub(crate) const HELLO_DICT_KEY_CLOCK: &'static str = "C";
pub(crate) const HELLO_DICT_KEY_LOCATOR: &'static str = "L";
pub(crate) const HELLO_DICT_KEY_EPHEMERAL_C25519: &'static str = "E0";
pub(crate) const HELLO_DICT_KEY_EPHEMERAL_P521: &'static str = "E1";
pub(crate) const HELLO_DICT_KEY_EPHEMERAL_ACK: &'static str = "e";
pub(crate) const HELLO_DICT_KEY_HELLO_ORIGIN: &'static str = "@";
pub(crate) const HELLO_DICT_KEY_SYS_ARCH: &'static str = "Sa";
pub(crate) const HELLO_DICT_KEY_SYS_BITS: &'static str = "Sb";
pub(crate) const HELLO_DICT_KEY_OS_NAME: &'static str = "So";
pub(crate) const HELLO_DICT_KEY_OS_VERSION: &'static str = "Sv";
pub(crate) const HELLO_DICT_KEY_OS_VARIANT: &'static str = "St";
pub(crate) const HELLO_DICT_KEY_VENDOR: &'static str = "V";
pub(crate) const HELLO_DICT_KEY_FLAGS: &'static str = "+";
pub const HELLO_DICT_KEY_INSTANCE_ID: &'static str = "I";
pub const HELLO_DICT_KEY_CLOCK: &'static str = "C";
pub const HELLO_DICT_KEY_LOCATOR: &'static str = "L";
pub const HELLO_DICT_KEY_EPHEMERAL_C25519: &'static str = "E0";
pub const HELLO_DICT_KEY_EPHEMERAL_P521: &'static str = "E1";
pub const HELLO_DICT_KEY_EPHEMERAL_ACK: &'static str = "e";
pub const HELLO_DICT_KEY_HELLO_ORIGIN: &'static str = "@";
pub const HELLO_DICT_KEY_SYS_ARCH: &'static str = "Sa";
pub const HELLO_DICT_KEY_SYS_BITS: &'static str = "Sb";
pub const HELLO_DICT_KEY_OS_NAME: &'static str = "So";
pub const HELLO_DICT_KEY_OS_VERSION: &'static str = "Sv";
pub const HELLO_DICT_KEY_OS_VARIANT: &'static str = "St";
pub const HELLO_DICT_KEY_VENDOR: &'static str = "V";
pub const HELLO_DICT_KEY_FLAGS: &'static str = "+";
/// KBKDF usage label indicating a key used to encrypt the dictionary inside HELLO.
pub const KBKDF_KEY_USAGE_LABEL_HELLO_DICTIONARY_ENCRYPT: u8 = b'H';
/// KBKDF usage label indicating a key used to HMAC packets, which is currently only used for HELLO.
pub const KBKDF_KEY_USAGE_LABEL_PACKET_HMAC: u8 = b'M';
/// KBKDF usage label for the first AES-GMAC-SIV key.
pub const KBKDF_KEY_USAGE_LABEL_AES_GMAC_SIV_K0: u8 = b'0';
/// KBKDF usage label for the second AES-GMAC-SIV key.
pub const KBKDF_KEY_USAGE_LABEL_AES_GMAC_SIV_K1: u8 = b'1';
/// KBKDF usage label for acknowledgement of a shared secret.
pub const KBKDF_KEY_USAGE_LABEL_EPHEMERAL_ACK: u8 = b'A';
/// Length of an address in bytes.
pub const ADDRESS_SIZE: usize = 5;
/// Prefix indicating reserved addresses (that can't actually be addresses).
pub const ADDRESS_RESERVED_PREFIX: u8 = 0xff;
/// Size of packet header that lies outside the encryption envelope.
pub const PACKET_HEADER_SIZE: usize = 27;
/// Minimum packet, which is the header plus a verb.
pub const PACKET_SIZE_MIN: usize = PACKET_HEADER_SIZE + 1;
/// Maximum size of an entire packet.
pub const PACKET_SIZE_MAX: usize = PACKET_HEADER_SIZE + PACKET_PAYLOAD_SIZE_MAX;
/// Maximum packet payload size including the verb/flags field.
///
/// This is large enough to carry "jumbo MTU" packets. The exact
/// value is because 10005+27 == 10032 which is divisible by 16. This
/// improves memory layout and alignment when buffers are allocated.
/// This value could technically be increased but it would require a
/// protocol version bump and only new nodes would be able to accept
/// the new size.
pub const PACKET_PAYLOAD_SIZE_MAX: usize = 10005;
/// Index of packet verb after header.
pub const PACKET_VERB_INDEX: usize = 27;
@@ -76,6 +115,13 @@ pub const FRAGMENT_HEADER_SIZE: usize = 16;
/// Maximum allowed number of fragments.
pub const FRAGMENT_COUNT_MAX: usize = 8;
/// Time after which an incomplete fragmented packet expires.
pub const FRAGMENT_EXPIRATION: i64 = 1500;
/// Maximum number of inbound fragmented packets to handle at once per path.
/// This is a sanity limit to prevent memory exhaustion due to DOS attacks or broken peers.
pub const FRAGMENT_MAX_INBOUND_PACKETS_PER_PATH: usize = 256;
/// Index of packet fragment indicator byte to detect fragments.
pub const FRAGMENT_INDICATOR_INDEX: usize = 13;
@@ -100,6 +146,9 @@ pub const PROTOCOL_MAX_HOPS: u8 = 7;
/// Maximum number of hops to allow.
pub const FORWARD_MAX_HOPS: u8 = 3;
/// Maximum difference between an OK in-re packet ID and the current packet ID counter.
pub const OK_PACKET_SEQUENCE_CUTOFF: u64 = 1000;
/// A unique packet identifier, also the cryptographic nonce.
///
/// Packet IDs are stored as u64s for efficiency but they should be treated as
@@ -273,7 +322,6 @@ pub(crate) mod message_component_structs {
mod tests {
use std::mem::size_of;
use crate::vl1::constants::*;
use crate::vl1::protocol::*;
#[test]

View file

@@ -11,7 +11,7 @@ use crate::crypto::secret::Secret;
use crate::error::InvalidFormatError;
use crate::vl1::{Endpoint, Identity};
use crate::vl1::buffer::Buffer;
use crate::vl1::constants::PACKET_SIZE_MAX;
use crate::vl1::protocol::PACKET_SIZE_MAX;
const ROOT_SET_TYPE_LEGACY_PLANET: u8 = 1;
const ROOT_SET_TYPE_LEGACY_MOON: u8 = 127;

View file

@@ -8,6 +8,15 @@ use crate::vl1::constants::*;
use crate::vl1::fragmentedpacket::FragmentedPacket;
use crate::vl1::node::{Node, PacketBuffer, VL1CallerInterface};
/// Frequency for WHOIS retries
pub const WHOIS_RETRY_INTERVAL: i64 = 1000;
/// Maximum number of WHOIS retries
pub const WHOIS_RETRY_MAX: u16 = 3;
/// Maximum number of packets to queue up behind a WHOIS.
pub const WHOIS_MAX_WAITING_PACKETS: usize = 64;
pub(crate) enum QueuedPacket {
Singular(PacketBuffer),
Fragmented(FragmentedPacket)