Rename session protocol to ZSSP, merge p384 builtin code into p384.rs

This commit is contained in:
Adam Ierymenko 2022-08-28 13:32:29 -07:00
parent 2bf9521a09
commit ea5abdc3db
No known key found for this signature in database
GPG key ID: C8877CF2D7A5D7F3
6 changed files with 1270 additions and 1151 deletions

View file

@@ -12,7 +12,7 @@ lazy_static = "^1"
openssl = {version = "^0", features = [], default-features = false}
parking_lot = {version = "^0", features = [], default-features = false}
poly1305 = {version = "0.7.2", features = [], default-features = false}
pqc_kyber = {path = "../third_party/kyber", features = ["kyber512", "reference"], default-features = false}
pqc_kyber = {path = "../third_party/kyber", features = ["kyber1024", "reference"], default-features = false}
rand_core = "0.5.1"
rand_core_062 = {package = "rand_core", version = "0.6.2"}
subtle = "2.4.1"

View file

@@ -5,7 +5,6 @@ pub mod aes_gmac_siv;
pub mod hash;
pub mod hex;
pub mod kbkdf;
pub mod noise;
pub mod p384;
pub mod poly1305;
pub mod random;
@@ -13,7 +12,6 @@ pub mod salsa;
pub mod secret;
pub mod varint;
pub mod x25519;
mod p384_internal;
pub mod zssp;
pub const ZEROES: [u8; 16] = [0_u8; 16];

View file

@@ -1,17 +1,923 @@
// (c) 2020-2022 ZeroTier, Inc. -- currently proprietary pending actual release and licensing. See LICENSE.md.
#![allow(dead_code, mutable_transmutes, non_camel_case_types, non_snake_case, non_upper_case_globals, unused_assignments, unused_mut)]

/* Byte sizes of P-384 key material. 49 = 1 tag byte + 48-byte coordinate
(presumably a SEC1 compressed point encoding -- TODO confirm against the encoder). */
pub const P384_PUBLIC_KEY_SIZE: usize = 49;
pub const P384_SECRET_KEY_SIZE: usize = 48;
/* 96 = two 48-byte scalars concatenated (presumably r || s -- confirm against signer). */
pub const P384_ECDSA_SIGNATURE_SIZE: usize = 96;
pub const P384_ECDH_SHARED_SECRET_SIZE: usize = 48;
/// Version using the slightly faster code in p384_internal.rs
mod internal {
use crate::p384_internal::{ecc_make_key, ecdh_shared_secret, ecdsa_sign, ecdsa_verify};
// This is small and fast but may not be constant time and hasn't been well audited, so we don't
// use it. It's left here though in case it proves useful in the future on embedded systems.
#[cfg(target_feature = "builtin_nist_ecc")]
mod builtin {
use crate::hash::SHA384;
use crate::secret::Secret;
// EASY-ECC by Kenneth MacKay
// https://github.com/esxgx/easy-ecc (no longer there, but search GitHub for forks)
//
// Translated directly from C to Rust using: https://c2rust.com
//
// It inherits its original BSD 2-Clause license, not ZeroTier's license.
/* Minimal stand-ins for the C fixed-width types referenced by the
c2rust-generated code below (avoids a dependency on the real libc crate). */
pub mod libc {
    pub type c_uchar = u8;
    pub type c_ulong = u64;
    pub type c_long = i64;
    pub type c_uint = u32;
    pub type c_int = i32;
    pub type c_ulonglong = u64;
    pub type c_longlong = i64;
}
/* Aliases preserving the original C source's typedef names. */
pub type uint8_t = libc::c_uchar;
pub type uint64_t = libc::c_ulong;
pub type uint = libc::c_uint;
/* Stand-in for C's unsigned __int128 accumulator type. */
pub type uint128_t = u128;
/* A curve point in affine form; x and y are each six 64-bit limbs (48 bytes),
least-significant limb first (see ecc_bytes2native for the limb order). */
pub struct EccPoint {
    pub x: [u64; 6],
    pub y: [u64; 6],
}
/* The NIST P-384 field prime p = 2^384 - 2^128 - 2^96 + 2^32 - 1, as six
little-endian 64-bit limbs. Declared `static mut` only because the helpers
below take `*mut` pointers; the visible code only ever reads it. */
static mut curve_p: [uint64_t; 6] = [
    0xffffffff as libc::c_uint as uint64_t,
    0xffffffff00000000 as libc::c_ulong,
    0xfffffffffffffffe as libc::c_ulong,
    0xffffffffffffffff as libc::c_ulong,
    0xffffffffffffffff as libc::c_ulong,
    0xffffffffffffffff as libc::c_ulong,
];
/* Curve coefficient b of P-384 (per the FIPS 186 / SEC 2 domain parameters),
six little-endian 64-bit limbs. Read-only in the visible code. */
static mut curve_b: [uint64_t; 6] = [
    0x2a85c8edd3ec2aef as libc::c_long as uint64_t,
    0xc656398d8a2ed19d as libc::c_ulong,
    0x314088f5013875a as libc::c_long as uint64_t,
    0x181d9c6efe814112 as libc::c_long as uint64_t,
    0x988e056be3f82d19 as libc::c_ulong,
    0xb3312fa7e23ee7e4 as libc::c_ulong,
];
/* The P-384 base point G (per the FIPS 186 / SEC 2 domain parameters),
coordinates as six little-endian 64-bit limbs each. `static mut` is kept
because the point routines take `*mut` pointers into it. */
static mut curve_G: EccPoint = EccPoint {
    x: [
        0x3a545e3872760ab7,
        0x5502f25dbf55296c,
        0x59f741e082542a38,
        0x6e1d3b628ba79b98,
        0x8eb1c71ef320ad74,
        0xaa87ca22be8b0537,
    ],
    y: [
        0x7a431d7c90ea0e5f,
        0x0a60b1ce1d7e819d,
        0xe9da3113b5f0b8c0,
        0xf8f41dbd289a147c,
        0x5d9e98bf9292dc29,
        0x3617de4a96262c6f,
    ],
};
/* The order n of the P-384 base-point group (per FIPS 186 / SEC 2),
six little-endian 64-bit limbs. Read-only in the visible code. */
static mut curve_n: [uint64_t; 6] = [
    0xecec196accc52973 as libc::c_ulong,
    0x581a0db248b0a77a as libc::c_long as uint64_t,
    0xc7634d81f4372ddf as libc::c_ulong,
    0xffffffffffffffff as libc::c_ulong,
    0xffffffffffffffff as libc::c_ulong,
    0xffffffffffffffff as libc::c_ulong,
];
/* Fills the 48-byte (six-limb) VLI at p_vli with secure random bytes from the
crate's RNG. Unconditionally returns 1, the easy-ecc RNG callback's success
code (the underlying fill cannot report failure). */
#[inline(always)]
unsafe fn getRandomNumber(mut p_vli: *mut uint64_t) -> libc::c_int {
    crate::random::fill_bytes_secure(&mut *std::ptr::slice_from_raw_parts_mut(p_vli.cast(), 48));
    return 1 as libc::c_int;
}
#[inline(always)]
unsafe fn vli_clear(mut p_vli: *mut uint64_t) {
let mut i: uint = 0;
i = 0 as libc::c_int as uint;
while i < (48 as libc::c_int / 8 as libc::c_int) as libc::c_uint {
*p_vli.offset(i as isize) = 0 as libc::c_int as uint64_t;
i = i.wrapping_add(1)
}
}
/* Returns 1 if p_vli == 0, 0 otherwise. Not constant time. */
#[inline(always)]
unsafe fn vli_isZero(p_vli: *mut uint64_t) -> libc::c_int {
    for i in 0..6isize {
        if *p_vli.offset(i) != 0 {
            return 0;
        }
    }
    1
}
/* Returns nonzero if bit p_bit of p_vli is set. */
#[inline(always)]
unsafe fn vli_testBit(mut p_vli: *mut uint64_t, mut p_bit: uint) -> uint64_t {
return *p_vli.offset(p_bit.wrapping_div(64 as libc::c_int as libc::c_uint) as isize) & (1 as libc::c_int as uint64_t) << p_bit.wrapping_rem(64 as libc::c_int as libc::c_uint);
}
/* Counts the number of 64-bit "digits" (limbs) actually used by p_vli. */
#[inline(always)]
unsafe fn vli_numDigits(p_vli: *mut uint64_t) -> uint {
    /* Scan down from the top limb; most values have their high limbs populated,
    so the reverse scan usually terminates immediately. */
    let mut i: libc::c_int = 5;
    while i >= 0 && *p_vli.offset(i as isize) == 0 {
        i -= 1;
    }
    (i + 1) as uint
}
/* Counts the number of bits required to represent p_vli (0 for zero). */
#[inline(always)]
unsafe fn vli_numBits(p_vli: *mut uint64_t) -> uint {
    let digits = vli_numDigits(p_vli);
    if digits == 0 {
        return 0;
    }
    /* Count the bits of the most significant nonzero limb. */
    let mut top = *p_vli.offset((digits - 1) as isize);
    let mut bits: uint = 0;
    while top != 0 {
        top >>= 1;
        bits += 1;
    }
    (digits - 1) * 64 + bits
}
/* Sets p_dest = p_src. */
#[inline(always)]
unsafe fn vli_set(mut p_dest: *mut uint64_t, mut p_src: *mut uint64_t) {
let mut i: uint = 0;
i = 0 as libc::c_int as uint;
while i < (48 as libc::c_int / 8 as libc::c_int) as libc::c_uint {
*p_dest.offset(i as isize) = *p_src.offset(i as isize);
i = i.wrapping_add(1)
}
}
/* Returns the sign of p_left - p_right: 1, -1, or 0. Compares from the most
significant limb down. Not constant time. */
#[inline(always)]
unsafe fn vli_cmp(p_left: *mut uint64_t, p_right: *mut uint64_t) -> libc::c_int {
    for i in (0..6isize).rev() {
        let l = *p_left.offset(i);
        let r = *p_right.offset(i);
        if l > r {
            return 1;
        }
        if l < r {
            return -1;
        }
    }
    0
}
/* Computes p_result = p_in << c, returning carry. Can modify in place (if p_result == p_in). 0 < p_shift < 64. */
#[inline(always)]
unsafe fn vli_lshift(mut p_result: *mut uint64_t, mut p_in: *mut uint64_t, mut p_shift: uint) -> uint64_t {
let mut l_carry: uint64_t = 0 as libc::c_int as uint64_t;
let mut i: uint = 0;
i = 0 as libc::c_int as uint;
while i < (48 as libc::c_int / 8 as libc::c_int) as libc::c_uint {
let mut l_temp: uint64_t = *p_in.offset(i as isize);
*p_result.offset(i as isize) = l_temp << p_shift | l_carry;
l_carry = l_temp >> (64 as libc::c_int as libc::c_uint).wrapping_sub(p_shift);
i = i.wrapping_add(1)
}
return l_carry;
}
/* Computes p_vli = p_vli >> 1. */
#[inline(always)]
unsafe fn vli_rshift1(mut p_vli: *mut uint64_t) {
let mut l_end: *mut uint64_t = p_vli;
let mut l_carry: uint64_t = 0 as libc::c_int as uint64_t;
p_vli = p_vli.offset((48 as libc::c_int / 8 as libc::c_int) as isize);
loop {
let fresh0 = p_vli;
p_vli = p_vli.offset(-1);
if !(fresh0 > l_end) {
break;
}
let mut l_temp: uint64_t = *p_vli;
*p_vli = l_temp >> 1 as libc::c_int | l_carry;
l_carry = l_temp << 63 as libc::c_int
}
}
/* Computes p_result = p_left + p_right, returning carry. Can modify in place. */
#[inline(always)]
unsafe fn vli_add(mut p_result: *mut uint64_t, mut p_left: *mut uint64_t, mut p_right: *mut uint64_t) -> uint64_t {
let mut l_carry: uint64_t = 0 as libc::c_int as uint64_t;
let mut i: uint = 0;
i = 0 as libc::c_int as uint;
while i < (48 as libc::c_int / 8 as libc::c_int) as libc::c_uint {
let mut l_sum: uint64_t = (*p_left.offset(i as isize)).wrapping_add(*p_right.offset(i as isize)).wrapping_add(l_carry);
if l_sum != *p_left.offset(i as isize) {
l_carry = (l_sum < *p_left.offset(i as isize)) as libc::c_int as uint64_t
}
*p_result.offset(i as isize) = l_sum;
i = i.wrapping_add(1)
}
return l_carry;
}
/* Computes p_result = p_left - p_right, returning borrow. Can modify in place. */
#[inline(always)]
unsafe fn vli_sub(mut p_result: *mut uint64_t, mut p_left: *mut uint64_t, mut p_right: *mut uint64_t) -> uint64_t {
let mut l_borrow: uint64_t = 0 as libc::c_int as uint64_t;
let mut i: uint = 0;
i = 0 as libc::c_int as uint;
while i < (48 as libc::c_int / 8 as libc::c_int) as libc::c_uint {
let mut l_diff: uint64_t = (*p_left.offset(i as isize)).wrapping_sub(*p_right.offset(i as isize)).wrapping_sub(l_borrow);
if l_diff != *p_left.offset(i as isize) {
l_borrow = (l_diff > *p_left.offset(i as isize)) as libc::c_int as uint64_t
}
*p_result.offset(i as isize) = l_diff;
i = i.wrapping_add(1)
}
return l_borrow;
}
/* Computes p_result = p_left * p_right: schoolbook product-scanning multiply,
6 limbs x 6 limbs -> 12-limb result. Each output column k is accumulated in
a 192-bit accumulator made of r01 (low 128 bits) and r2 (overflow). */
#[inline(always)]
unsafe fn vli_mult(mut p_result: *mut uint64_t, mut p_left: *mut uint64_t, mut p_right: *mut uint64_t) {
    let mut r01: uint128_t = 0 as libc::c_int as uint128_t;
    let mut r2: uint64_t = 0 as libc::c_int as uint64_t;
    let mut i: uint = 0;
    let mut k: uint = 0;
    /* Compute each digit of p_result in sequence, maintaining the carries. */
    k = 0 as libc::c_int as uint;
    while k < (48 as libc::c_int / 8 as libc::c_int * 2 as libc::c_int - 1 as libc::c_int) as libc::c_uint {
        /* Only limb pairs (i, k - i) with both indices in [0, 6) contribute to column k. */
        let mut l_min: uint = if k < (48 as libc::c_int / 8 as libc::c_int) as libc::c_uint {
            0 as libc::c_int as libc::c_uint
        } else {
            k.wrapping_add(1 as libc::c_int as libc::c_uint).wrapping_sub((48 as libc::c_int / 8 as libc::c_int) as libc::c_uint)
        };
        i = l_min;
        while i <= k && i < (48 as libc::c_int / 8 as libc::c_int) as libc::c_uint {
            let mut l_product: uint128_t = (*p_left.offset(i as isize) as uint128_t).wrapping_mul(*p_right.offset(k.wrapping_sub(i) as isize) as u128);
            r01 = (r01 as u128).wrapping_add(l_product) as uint128_t as uint128_t;
            /* r2 counts overflows of the 128-bit accumulator. */
            r2 = (r2 as libc::c_ulong).wrapping_add((r01 < l_product) as libc::c_int as libc::c_ulong) as uint64_t as uint64_t;
            i = i.wrapping_add(1)
        }
        /* Emit the low 64 bits of the column, then shift the accumulator down one limb. */
        *p_result.offset(k as isize) = r01 as uint64_t;
        r01 = r01 >> 64 as libc::c_int | (r2 as uint128_t) << 64 as libc::c_int;
        r2 = 0 as libc::c_int as uint64_t;
        k = k.wrapping_add(1)
    }
    *p_result.offset((48 as libc::c_int / 8 as libc::c_int * 2 as libc::c_int - 1 as libc::c_int) as isize) = r01 as uint64_t;
}
/* Computes p_result = p_left^2 (12-limb result). Same product-scanning scheme
as vli_mult, but exploits symmetry: each off-diagonal product appears twice,
so it is computed once and doubled. */
#[inline(always)]
unsafe fn vli_square(mut p_result: *mut uint64_t, mut p_left: *mut uint64_t) {
    let mut r01: uint128_t = 0 as libc::c_int as uint128_t;
    let mut r2: uint64_t = 0 as libc::c_int as uint64_t;
    let mut i: uint = 0;
    let mut k: uint = 0;
    k = 0 as libc::c_int as uint;
    while k < (48 as libc::c_int / 8 as libc::c_int * 2 as libc::c_int - 1 as libc::c_int) as libc::c_uint {
        let mut l_min: uint = if k < (48 as libc::c_int / 8 as libc::c_int) as libc::c_uint {
            0 as libc::c_int as libc::c_uint
        } else {
            k.wrapping_add(1 as libc::c_int as libc::c_uint).wrapping_sub((48 as libc::c_int / 8 as libc::c_int) as libc::c_uint)
        };
        i = l_min;
        /* i <= k - i restricts the scan to the lower triangle (including the diagonal). */
        while i <= k && i <= k.wrapping_sub(i) {
            let mut l_product: uint128_t = (*p_left.offset(i as isize) as uint128_t).wrapping_mul(*p_left.offset(k.wrapping_sub(i) as isize) as u128);
            if i < k.wrapping_sub(i) {
                /* Off-diagonal term: double it, saving the top bit that doubling would lose. */
                r2 = (r2 as u128).wrapping_add(l_product >> 127 as libc::c_int) as uint64_t as uint64_t;
                l_product = (l_product as u128).wrapping_mul(2 as libc::c_int as u128) as uint128_t as uint128_t
            }
            r01 = (r01 as u128).wrapping_add(l_product) as uint128_t as uint128_t;
            r2 = (r2 as libc::c_ulong).wrapping_add((r01 < l_product) as libc::c_int as libc::c_ulong) as uint64_t as uint64_t;
            i = i.wrapping_add(1)
        }
        *p_result.offset(k as isize) = r01 as uint64_t;
        r01 = r01 >> 64 as libc::c_int | (r2 as uint128_t) << 64 as libc::c_int;
        r2 = 0 as libc::c_int as uint64_t;
        k = k.wrapping_add(1)
    }
    *p_result.offset((48 as libc::c_int / 8 as libc::c_int * 2 as libc::c_int - 1 as libc::c_int) as isize) = r01 as uint64_t;
}
/* #if SUPPORTS_INT128 */
/* SUPPORTS_INT128 */
/* Computes p_result = (p_left + p_right) % p_mod.
Assumes p_left < p_mod and p_right < p_mod, and p_result != p_mod. */
#[inline(always)]
unsafe fn vli_modAdd(p_result: *mut uint64_t, p_left: *mut uint64_t, p_right: *mut uint64_t, p_mod: *mut uint64_t) {
    let carry = vli_add(p_result, p_left, p_right);
    if carry != 0 || vli_cmp(p_result, p_mod) >= 0 {
        /* Sum overflowed or reached p_mod; since both inputs were reduced,
        a single subtraction of p_mod yields the remainder. */
        vli_sub(p_result, p_result, p_mod);
    }
}
/* Computes p_result = (p_left - p_right) % p_mod.
Assumes p_left < p_mod and p_right < p_mod, and p_result != p_mod. */
#[inline(always)]
unsafe fn vli_modSub(p_result: *mut uint64_t, p_left: *mut uint64_t, p_right: *mut uint64_t, p_mod: *mut uint64_t) {
    let borrow = vli_sub(p_result, p_left, p_right);
    if borrow != 0 {
        /* The subtraction wrapped below zero; adding p_mod back (also with
        wraparound) produces the correct nonnegative representative. */
        vli_add(p_result, p_result, p_mod);
    }
}
//#elif ECC_CURVE == secp384r1
#[inline(always)]
unsafe fn omega_mult(mut p_result: *mut uint64_t, mut p_right: *mut uint64_t) {
let mut l_tmp: [uint64_t; 6] = std::mem::MaybeUninit::uninit().assume_init();
let mut l_carry: uint64_t = 0;
let mut l_diff: uint64_t = 0;
/* Multiply by (2^128 + 2^96 - 2^32 + 1). */
vli_set(p_result, p_right); /* 1 */
l_carry = vli_lshift(l_tmp.as_mut_ptr(), p_right, 32 as libc::c_int as uint); /* 2^96 + 1 */
*p_result.offset((1 as libc::c_int + 48 as libc::c_int / 8 as libc::c_int) as isize) = l_carry.wrapping_add(vli_add(p_result.offset(1 as libc::c_int as isize), p_result.offset(1 as libc::c_int as isize), l_tmp.as_mut_ptr())); /* 2^128 + 2^96 + 1 */
*p_result.offset((2 as libc::c_int + 48 as libc::c_int / 8 as libc::c_int) as isize) = vli_add(p_result.offset(2 as libc::c_int as isize), p_result.offset(2 as libc::c_int as isize), p_right); /* 2^128 + 2^96 - 2^32 + 1 */
l_carry = (l_carry as libc::c_ulong).wrapping_add(vli_sub(p_result, p_result, l_tmp.as_mut_ptr())) as uint64_t as uint64_t;
l_diff = (*p_result.offset((48 as libc::c_int / 8 as libc::c_int) as isize)).wrapping_sub(l_carry);
if l_diff > *p_result.offset((48 as libc::c_int / 8 as libc::c_int) as isize) {
/* Propagate borrow if necessary. */
let mut i: uint = 0;
i = (1 as libc::c_int + 48 as libc::c_int / 8 as libc::c_int) as uint;
loop {
let ref mut fresh1 = *p_result.offset(i as isize);
*fresh1 = (*fresh1).wrapping_sub(1);
if *p_result.offset(i as isize) != -(1 as libc::c_int) as uint64_t {
break;
}
i = i.wrapping_add(1)
}
}
*p_result.offset((48 as libc::c_int / 8 as libc::c_int) as isize) = l_diff;
}
/* Computes p_result = p_product % curve_p, exploiting the special form of the
prime (p = 2^384 - omega). p_product is a 12-limb value and is destroyed.
See PDF "Comparing Elliptic Curve Cryptography and RSA on 8-bit CPUs",
section "Curve-Specific Optimizations". */
#[inline(always)]
unsafe fn vli_mmod_fast(mut p_result: *mut uint64_t, mut p_product: *mut uint64_t) {
    /* Zero-initialized: MaybeUninit::uninit().assume_init() on an integer array
    is undefined behavior in Rust. (Both halves are vli_clear()ed below anyway.) */
    let mut l_tmp: [uint64_t; 12] = [0; 12];
    while vli_isZero(p_product.offset((48 as libc::c_int / 8 as libc::c_int) as isize)) == 0 {
        /* While c1 != 0 */
        let mut l_carry: uint64_t = 0 as libc::c_int as uint64_t; /* tmp = w * c1 */
        let mut i: uint = 0; /* p = c0 */
        vli_clear(l_tmp.as_mut_ptr());
        vli_clear(l_tmp.as_mut_ptr().offset((48 as libc::c_int / 8 as libc::c_int) as isize));
        omega_mult(l_tmp.as_mut_ptr(), p_product.offset((48 as libc::c_int / 8 as libc::c_int) as isize));
        vli_clear(p_product.offset((48 as libc::c_int / 8 as libc::c_int) as isize));
        /* (c1, c0) = c0 + w * c1 */
        i = 0 as libc::c_int as uint;
        while i < (48 as libc::c_int / 8 as libc::c_int + 3 as libc::c_int) as libc::c_uint {
            let mut l_sum: uint64_t = (*p_product.offset(i as isize)).wrapping_add(l_tmp[i as usize]).wrapping_add(l_carry);
            if l_sum != *p_product.offset(i as isize) {
                l_carry = (l_sum < *p_product.offset(i as isize)) as libc::c_int as uint64_t
            }
            *p_product.offset(i as isize) = l_sum;
            i = i.wrapping_add(1)
        }
    }
    /* Final conditional subtractions bring the value below curve_p. */
    while vli_cmp(p_product, curve_p.as_mut_ptr()) > 0 as libc::c_int {
        vli_sub(p_product, p_product, curve_p.as_mut_ptr());
    }
    vli_set(p_result, p_product);
}
//#endif
/* Computes p_result = (p_left * p_right) % curve_p. */
#[inline(always)]
unsafe fn vli_modMult_fast(mut p_result: *mut uint64_t, mut p_left: *mut uint64_t, mut p_right: *mut uint64_t) {
    /* Zero-initialized: assume_init() on an uninitialized integer array is UB in
    Rust; vli_mult writes all 12 limbs before vli_mmod_fast reads them. */
    let mut l_product: [uint64_t; 12] = [0; 12];
    vli_mult(l_product.as_mut_ptr(), p_left, p_right);
    vli_mmod_fast(p_result, l_product.as_mut_ptr());
}
/* Computes p_result = p_left^2 % curve_p. */
#[inline(always)]
unsafe fn vli_modSquare_fast(mut p_result: *mut uint64_t, mut p_left: *mut uint64_t) {
    /* Zero-initialized: assume_init() on an uninitialized integer array is UB in
    Rust; vli_square writes all 12 limbs before vli_mmod_fast reads them. */
    let mut l_product: [uint64_t; 12] = [0; 12];
    vli_square(l_product.as_mut_ptr(), p_left);
    vli_mmod_fast(p_result, l_product.as_mut_ptr());
}
/* Computes p_result = (1 / p_input) % p_mod. All VLIs are the same size.
See "From Euclid's GCD to Montgomery Multiplication to the Great Divide"
https://labs.oracle.com/techrep/2001/smli_tr-2001-95.pdf */
#[inline(always)]
unsafe fn vli_modInv(mut p_result: *mut uint64_t, mut p_input: *mut uint64_t, mut p_mod: *mut uint64_t) {
let mut a: [uint64_t; 6] = std::mem::MaybeUninit::uninit().assume_init();
let mut b: [uint64_t; 6] = std::mem::MaybeUninit::uninit().assume_init();
let mut u: [uint64_t; 6] = std::mem::MaybeUninit::uninit().assume_init();
let mut v: [uint64_t; 6] = std::mem::MaybeUninit::uninit().assume_init();
let mut l_carry: uint64_t = 0;
let mut l_cmpResult: libc::c_int = 0;
if vli_isZero(p_input) != 0 {
vli_clear(p_result);
return;
}
vli_set(a.as_mut_ptr(), p_input);
vli_set(b.as_mut_ptr(), p_mod);
vli_clear(u.as_mut_ptr());
u[0 as libc::c_int as usize] = 1 as libc::c_int as uint64_t;
vli_clear(v.as_mut_ptr());
loop {
l_cmpResult = vli_cmp(a.as_mut_ptr(), b.as_mut_ptr());
if !(l_cmpResult != 0 as libc::c_int) {
break;
}
l_carry = 0 as libc::c_int as uint64_t;
if a[0 as libc::c_int as usize] & 1 as libc::c_int as libc::c_ulong == 0 {
vli_rshift1(a.as_mut_ptr());
if u[0 as libc::c_int as usize] & 1 as libc::c_int as libc::c_ulong != 0 {
l_carry = vli_add(u.as_mut_ptr(), u.as_mut_ptr(), p_mod)
}
vli_rshift1(u.as_mut_ptr());
if l_carry != 0 {
u[(48 as libc::c_int / 8 as libc::c_int - 1 as libc::c_int) as usize] = (u[(48 as libc::c_int / 8 as libc::c_int - 1 as libc::c_int) as usize] as libc::c_ulonglong | 0x8000000000000000 as libc::c_ulonglong) as uint64_t
}
} else if b[0 as libc::c_int as usize] & 1 as libc::c_int as libc::c_ulong == 0 {
vli_rshift1(b.as_mut_ptr());
if v[0 as libc::c_int as usize] & 1 as libc::c_int as libc::c_ulong != 0 {
l_carry = vli_add(v.as_mut_ptr(), v.as_mut_ptr(), p_mod)
}
vli_rshift1(v.as_mut_ptr());
if l_carry != 0 {
v[(48 as libc::c_int / 8 as libc::c_int - 1 as libc::c_int) as usize] = (v[(48 as libc::c_int / 8 as libc::c_int - 1 as libc::c_int) as usize] as libc::c_ulonglong | 0x8000000000000000 as libc::c_ulonglong) as uint64_t
}
} else if l_cmpResult > 0 as libc::c_int {
vli_sub(a.as_mut_ptr(), a.as_mut_ptr(), b.as_mut_ptr());
vli_rshift1(a.as_mut_ptr());
if vli_cmp(u.as_mut_ptr(), v.as_mut_ptr()) < 0 as libc::c_int {
vli_add(u.as_mut_ptr(), u.as_mut_ptr(), p_mod);
}
vli_sub(u.as_mut_ptr(), u.as_mut_ptr(), v.as_mut_ptr());
if u[0 as libc::c_int as usize] & 1 as libc::c_int as libc::c_ulong != 0 {
l_carry = vli_add(u.as_mut_ptr(), u.as_mut_ptr(), p_mod)
}
vli_rshift1(u.as_mut_ptr());
if l_carry != 0 {
u[(48 as libc::c_int / 8 as libc::c_int - 1 as libc::c_int) as usize] = (u[(48 as libc::c_int / 8 as libc::c_int - 1 as libc::c_int) as usize] as libc::c_ulonglong | 0x8000000000000000 as libc::c_ulonglong) as uint64_t
}
} else {
vli_sub(b.as_mut_ptr(), b.as_mut_ptr(), a.as_mut_ptr());
vli_rshift1(b.as_mut_ptr());
if vli_cmp(v.as_mut_ptr(), u.as_mut_ptr()) < 0 as libc::c_int {
vli_add(v.as_mut_ptr(), v.as_mut_ptr(), p_mod);
}
vli_sub(v.as_mut_ptr(), v.as_mut_ptr(), u.as_mut_ptr());
if v[0 as libc::c_int as usize] & 1 as libc::c_int as libc::c_ulong != 0 {
l_carry = vli_add(v.as_mut_ptr(), v.as_mut_ptr(), p_mod)
}
vli_rshift1(v.as_mut_ptr());
if l_carry != 0 {
v[(48 as libc::c_int / 8 as libc::c_int - 1 as libc::c_int) as usize] = (v[(48 as libc::c_int / 8 as libc::c_int - 1 as libc::c_int) as usize] as libc::c_ulonglong | 0x8000000000000000 as libc::c_ulonglong) as uint64_t
}
}
}
vli_set(p_result, u.as_mut_ptr());
}
/* ------ Point operations ------ */
/* Returns 1 if p_point is the point at infinity (encoded here as x == y == 0),
0 otherwise. */
#[inline(always)]
unsafe fn EccPoint_isZero(p_point: *mut EccPoint) -> libc::c_int {
    let x_is_zero = vli_isZero((*p_point).x.as_mut_ptr()) != 0;
    let y_is_zero = vli_isZero((*p_point).y.as_mut_ptr()) != 0;
    (x_is_zero && y_is_zero) as libc::c_int
}
/* Point multiplication algorithm using Montgomery's ladder with co-Z coordinates.
From http://eprint.iacr.org/2011/338.pdf
*/
/* Doubles the Jacobian point (X1, Y1, Z1) in place. */
#[inline(always)]
unsafe fn EccPoint_double_jacobian(mut X1: *mut uint64_t, mut Y1: *mut uint64_t, mut Z1: *mut uint64_t) {
    /* t1 = X, t2 = Y, t3 = Z */
    /* Zero-initialized: MaybeUninit::uninit().assume_init() on integer arrays is
    UB in Rust; both temporaries are written before they are read. */
    let mut t4: [uint64_t; 6] = [0; 6]; /* t4 = y1^2 */
    let mut t5: [uint64_t; 6] = [0; 6]; /* t5 = x1*y1^2 = A */
    if vli_isZero(Z1) != 0 {
        return;
    } /* t4 = y1^4 */
    vli_modSquare_fast(t4.as_mut_ptr(), Y1); /* t2 = y1*z1 = z3 */
    vli_modMult_fast(t5.as_mut_ptr(), X1, t4.as_mut_ptr()); /* t3 = z1^2 */
    vli_modSquare_fast(t4.as_mut_ptr(), t4.as_mut_ptr()); /* t1 = x1 + z1^2 */
    vli_modMult_fast(Y1, Y1, Z1); /* t3 = 2*z1^2 */
    vli_modSquare_fast(Z1, Z1); /* t3 = x1 - z1^2 */
    vli_modAdd(X1, X1, Z1, curve_p.as_mut_ptr()); /* t1 = x1^2 - z1^4 */
    vli_modAdd(Z1, Z1, Z1, curve_p.as_mut_ptr()); /* t3 = 2*(x1^2 - z1^4) */
    vli_modSub(Z1, X1, Z1, curve_p.as_mut_ptr()); /* t1 = 3*(x1^2 - z1^4) */
    vli_modMult_fast(X1, X1, Z1);
    vli_modAdd(Z1, X1, X1, curve_p.as_mut_ptr());
    vli_modAdd(X1, X1, Z1, curve_p.as_mut_ptr());
    if vli_testBit(X1, 0 as libc::c_int as uint) != 0 {
        /* Odd: divide by 2 modulo p by adding p first and keeping the carry bit. */
        let mut l_carry: uint64_t = vli_add(X1, X1, curve_p.as_mut_ptr());
        vli_rshift1(X1);
        let ref mut fresh2 = *X1.offset((48 as libc::c_int / 8 as libc::c_int - 1 as libc::c_int) as isize);
        *fresh2 |= l_carry << 63 as libc::c_int
    } else {
        vli_rshift1(X1);
    }
    /* t1 = 3/2*(x1^2 - z1^4) = B */
    vli_modSquare_fast(Z1, X1); /* t3 = B^2 */
    vli_modSub(Z1, Z1, t5.as_mut_ptr(), curve_p.as_mut_ptr()); /* t3 = B^2 - A */
    vli_modSub(Z1, Z1, t5.as_mut_ptr(), curve_p.as_mut_ptr()); /* t3 = B^2 - 2A = x3 */
    vli_modSub(t5.as_mut_ptr(), t5.as_mut_ptr(), Z1, curve_p.as_mut_ptr()); /* t5 = A - x3 */
    vli_modMult_fast(X1, X1, t5.as_mut_ptr()); /* t1 = B * (A - x3) */
    vli_modSub(t4.as_mut_ptr(), X1, t4.as_mut_ptr(), curve_p.as_mut_ptr()); /* t4 = B * (A - x3) - y1^4 = y3 */
    vli_set(X1, Z1);
    vli_set(Z1, Y1);
    vli_set(Y1, t4.as_mut_ptr());
}
/* Modify (x1, y1) => (x1 * z^2, y1 * z^3) */
#[inline(always)]
unsafe fn apply_z(mut X1: *mut uint64_t, mut Y1: *mut uint64_t, mut Z: *mut uint64_t) {
    /* Zero-initialized: assume_init() on an uninitialized integer array is UB;
    t1 is written by vli_modSquare_fast before first read. */
    let mut t1: [uint64_t; 6] = [0; 6]; /* z^2 */
    vli_modSquare_fast(t1.as_mut_ptr(), Z); /* x1 * z^2 */
    vli_modMult_fast(X1, X1, t1.as_mut_ptr()); /* z^3 */
    vli_modMult_fast(t1.as_mut_ptr(), t1.as_mut_ptr(), Z);
    vli_modMult_fast(Y1, Y1, t1.as_mut_ptr());
    /* y1 * z^3 */
}
/* P = (x1, y1) => 2P, (x2, y2) => P' */
#[inline(always)]
unsafe fn XYcZ_initial_double(mut X1: *mut uint64_t, mut Y1: *mut uint64_t, mut X2: *mut uint64_t, mut Y2: *mut uint64_t, mut p_initialZ: *mut uint64_t) {
    /* Zero-initialized: assume_init() on an uninitialized integer array is UB;
    z is vli_clear()ed / vli_set() below before use. */
    let mut z: [uint64_t; 6] = [0; 6];
    vli_set(X2, X1);
    vli_set(Y2, Y1);
    /* Default initial Z is 1; an explicit p_initialZ (used for coordinate
    randomization by the caller) overrides it. */
    vli_clear(z.as_mut_ptr());
    z[0 as libc::c_int as usize] = 1 as libc::c_int as uint64_t;
    if !p_initialZ.is_null() {
        vli_set(z.as_mut_ptr(), p_initialZ);
    }
    apply_z(X1, Y1, z.as_mut_ptr());
    EccPoint_double_jacobian(X1, Y1, z.as_mut_ptr());
    apply_z(X2, Y2, z.as_mut_ptr());
}
/* Input P = (x1, y1, Z), Q = (x2, y2, Z)
Output P' = (x1', y1', Z3), P + Q = (x3, y3, Z3)
or P => P', Q => P + Q
*/
#[inline(always)]
unsafe fn XYcZ_add(mut X1: *mut uint64_t, mut Y1: *mut uint64_t, mut X2: *mut uint64_t, mut Y2: *mut uint64_t) {
    /* t1 = X1, t2 = Y1, t3 = X2, t4 = Y2 */
    /* Zero-initialized: assume_init() on an uninitialized integer array is UB;
    t5 is written by the first vli_modSub before it is read. */
    let mut t5: [uint64_t; 6] = [0; 6]; /* t5 = x2 - x1 */
    vli_modSub(t5.as_mut_ptr(), X2, X1, curve_p.as_mut_ptr()); /* t5 = (x2 - x1)^2 = A */
    vli_modSquare_fast(t5.as_mut_ptr(), t5.as_mut_ptr()); /* t1 = x1*A = B */
    vli_modMult_fast(X1, X1, t5.as_mut_ptr()); /* t3 = x2*A = C */
    vli_modMult_fast(X2, X2, t5.as_mut_ptr()); /* t4 = y2 - y1 */
    vli_modSub(Y2, Y2, Y1, curve_p.as_mut_ptr()); /* t5 = (y2 - y1)^2 = D */
    vli_modSquare_fast(t5.as_mut_ptr(), Y2); /* t5 = D - B */
    vli_modSub(t5.as_mut_ptr(), t5.as_mut_ptr(), X1, curve_p.as_mut_ptr()); /* t5 = D - B - C = x3 */
    vli_modSub(t5.as_mut_ptr(), t5.as_mut_ptr(), X2, curve_p.as_mut_ptr()); /* t3 = C - B */
    vli_modSub(X2, X2, X1, curve_p.as_mut_ptr()); /* t2 = y1*(C - B) */
    vli_modMult_fast(Y1, Y1, X2); /* t3 = B - x3 */
    vli_modSub(X2, X1, t5.as_mut_ptr(), curve_p.as_mut_ptr()); /* t4 = (y2 - y1)*(B - x3) */
    vli_modMult_fast(Y2, Y2, X2); /* t4 = y3 */
    vli_modSub(Y2, Y2, Y1, curve_p.as_mut_ptr());
    vli_set(X2, t5.as_mut_ptr());
}
/* Input P = (x1, y1, Z), Q = (x2, y2, Z)
Output P + Q = (x3, y3, Z3), P - Q = (x3', y3', Z3)
or P => P - Q, Q => P + Q
*/
#[inline(always)]
unsafe fn XYcZ_addC(mut X1: *mut uint64_t, mut Y1: *mut uint64_t, mut X2: *mut uint64_t, mut Y2: *mut uint64_t) {
    /* t1 = X1, t2 = Y1, t3 = X2, t4 = Y2 */
    /* Zero-initialized: assume_init() on uninitialized integer arrays is UB;
    all three temporaries are written below before being read. */
    let mut t5: [uint64_t; 6] = [0; 6]; /* t5 = x2 - x1 */
    let mut t6: [uint64_t; 6] = [0; 6]; /* t5 = (x2 - x1)^2 = A */
    let mut t7: [uint64_t; 6] = [0; 6]; /* t1 = x1*A = B */
    vli_modSub(t5.as_mut_ptr(), X2, X1, curve_p.as_mut_ptr()); /* t3 = x2*A = C */
    vli_modSquare_fast(t5.as_mut_ptr(), t5.as_mut_ptr()); /* t4 = y2 + y1 */
    vli_modMult_fast(X1, X1, t5.as_mut_ptr()); /* t4 = y2 - y1 */
    vli_modMult_fast(X2, X2, t5.as_mut_ptr()); /* t6 = C - B */
    vli_modAdd(t5.as_mut_ptr(), Y2, Y1, curve_p.as_mut_ptr()); /* t2 = y1 * (C - B) */
    vli_modSub(Y2, Y2, Y1, curve_p.as_mut_ptr()); /* t6 = B + C */
    vli_modSub(t6.as_mut_ptr(), X2, X1, curve_p.as_mut_ptr()); /* t3 = (y2 - y1)^2 */
    vli_modMult_fast(Y1, Y1, t6.as_mut_ptr()); /* t3 = x3 */
    vli_modAdd(t6.as_mut_ptr(), X1, X2, curve_p.as_mut_ptr()); /* t7 = B - x3 */
    vli_modSquare_fast(X2, Y2); /* t4 = (y2 - y1)*(B - x3) */
    vli_modSub(X2, X2, t6.as_mut_ptr(), curve_p.as_mut_ptr()); /* t4 = y3 */
    vli_modSub(t7.as_mut_ptr(), X1, X2, curve_p.as_mut_ptr()); /* t7 = (y2 + y1)^2 = F */
    vli_modMult_fast(Y2, Y2, t7.as_mut_ptr()); /* t7 = x3' */
    vli_modSub(Y2, Y2, Y1, curve_p.as_mut_ptr()); /* t6 = x3' - B */
    vli_modSquare_fast(t7.as_mut_ptr(), t5.as_mut_ptr()); /* t6 = (y2 + y1)*(x3' - B) */
    vli_modSub(t7.as_mut_ptr(), t7.as_mut_ptr(), t6.as_mut_ptr(), curve_p.as_mut_ptr()); /* t2 = y3' */
    vli_modSub(t6.as_mut_ptr(), t7.as_mut_ptr(), X1, curve_p.as_mut_ptr());
    vli_modMult_fast(t6.as_mut_ptr(), t6.as_mut_ptr(), t5.as_mut_ptr());
    vli_modSub(Y1, t6.as_mut_ptr(), Y1, curve_p.as_mut_ptr());
    vli_set(X1, t7.as_mut_ptr());
}
#[inline(always)]
unsafe fn EccPoint_mult(mut p_result: *mut EccPoint, mut p_point: *mut EccPoint, mut p_scalar: *mut uint64_t, mut p_initialZ: *mut uint64_t) {
/* R0 and R1 */
let mut Rx: [[uint64_t; 6]; 2] = std::mem::MaybeUninit::uninit().assume_init();
let mut Ry: [[uint64_t; 6]; 2] = std::mem::MaybeUninit::uninit().assume_init();
let mut z: [uint64_t; 6] = std::mem::MaybeUninit::uninit().assume_init();
let mut i: libc::c_int = 0;
let mut nb: libc::c_int = 0;
vli_set(Rx[1 as libc::c_int as usize].as_mut_ptr(), (*p_point).x.as_mut_ptr());
vli_set(Ry[1 as libc::c_int as usize].as_mut_ptr(), (*p_point).y.as_mut_ptr());
XYcZ_initial_double(
Rx[1 as libc::c_int as usize].as_mut_ptr(),
Ry[1 as libc::c_int as usize].as_mut_ptr(),
Rx[0 as libc::c_int as usize].as_mut_ptr(),
Ry[0 as libc::c_int as usize].as_mut_ptr(),
p_initialZ,
);
i = vli_numBits(p_scalar).wrapping_sub(2 as libc::c_int as libc::c_uint) as libc::c_int;
while i > 0 as libc::c_int {
nb = (vli_testBit(p_scalar, i as uint) == 0) as libc::c_int;
XYcZ_addC(Rx[(1 as libc::c_int - nb) as usize].as_mut_ptr(), Ry[(1 as libc::c_int - nb) as usize].as_mut_ptr(), Rx[nb as usize].as_mut_ptr(), Ry[nb as usize].as_mut_ptr());
XYcZ_add(Rx[nb as usize].as_mut_ptr(), Ry[nb as usize].as_mut_ptr(), Rx[(1 as libc::c_int - nb) as usize].as_mut_ptr(), Ry[(1 as libc::c_int - nb) as usize].as_mut_ptr());
i -= 1
}
nb = (vli_testBit(p_scalar, 0 as libc::c_int as uint) == 0) as libc::c_int;
XYcZ_addC(Rx[(1 as libc::c_int - nb) as usize].as_mut_ptr(), Ry[(1 as libc::c_int - nb) as usize].as_mut_ptr(), Rx[nb as usize].as_mut_ptr(), Ry[nb as usize].as_mut_ptr());
/* Find final 1/Z value. */
vli_modSub(z.as_mut_ptr(), Rx[1 as libc::c_int as usize].as_mut_ptr(), Rx[0 as libc::c_int as usize].as_mut_ptr(), curve_p.as_mut_ptr()); /* X1 - X0 */
vli_modMult_fast(z.as_mut_ptr(), z.as_mut_ptr(), Ry[(1 as libc::c_int - nb) as usize].as_mut_ptr()); /* Yb * (X1 - X0) */
vli_modMult_fast(z.as_mut_ptr(), z.as_mut_ptr(), (*p_point).x.as_mut_ptr()); /* xP * Yb * (X1 - X0) */
vli_modInv(z.as_mut_ptr(), z.as_mut_ptr(), curve_p.as_mut_ptr()); /* 1 / (xP * Yb * (X1 - X0)) */
vli_modMult_fast(z.as_mut_ptr(), z.as_mut_ptr(), (*p_point).y.as_mut_ptr()); /* yP / (xP * Yb * (X1 - X0)) */
vli_modMult_fast(z.as_mut_ptr(), z.as_mut_ptr(), Rx[(1 as libc::c_int - nb) as usize].as_mut_ptr()); /* Xb * yP / (xP * Yb * (X1 - X0)) */
/* End 1/Z calculation */
XYcZ_add(Rx[nb as usize].as_mut_ptr(), Ry[nb as usize].as_mut_ptr(), Rx[(1 as libc::c_int - nb) as usize].as_mut_ptr(), Ry[(1 as libc::c_int - nb) as usize].as_mut_ptr());
apply_z(Rx[0 as libc::c_int as usize].as_mut_ptr(), Ry[0 as libc::c_int as usize].as_mut_ptr(), z.as_mut_ptr());
vli_set((*p_result).x.as_mut_ptr(), Rx[0 as libc::c_int as usize].as_mut_ptr());
vli_set((*p_result).y.as_mut_ptr(), Ry[0 as libc::c_int as usize].as_mut_ptr());
}
#[inline(always)]
unsafe fn ecc_bytes2native(mut p_native: *mut uint64_t, mut p_bytes: *const uint8_t) {
let mut i: libc::c_uint = 0;
i = 0 as libc::c_int as libc::c_uint;
while i < (48 as libc::c_int / 8 as libc::c_int) as libc::c_uint {
let mut p_digit: *const uint8_t = p_bytes.offset((8 as libc::c_int as libc::c_uint).wrapping_mul(((48 as libc::c_int / 8 as libc::c_int - 1 as libc::c_int) as libc::c_uint).wrapping_sub(i)) as isize);
*p_native.offset(i as isize) = (*p_digit.offset(0 as libc::c_int as isize) as uint64_t) << 56 as libc::c_int
| (*p_digit.offset(1 as libc::c_int as isize) as uint64_t) << 48 as libc::c_int
| (*p_digit.offset(2 as libc::c_int as isize) as uint64_t) << 40 as libc::c_int
| (*p_digit.offset(3 as libc::c_int as isize) as uint64_t) << 32 as libc::c_int
| (*p_digit.offset(4 as libc::c_int as isize) as uint64_t) << 24 as libc::c_int
| (*p_digit.offset(5 as libc::c_int as isize) as uint64_t) << 16 as libc::c_int
| (*p_digit.offset(6 as libc::c_int as isize) as uint64_t) << 8 as libc::c_int
| *p_digit.offset(7 as libc::c_int as isize) as uint64_t;
i = i.wrapping_add(1)
}
}
#[inline(always)]
unsafe fn ecc_native2bytes(mut p_bytes: *mut uint8_t, mut p_native: *const uint64_t) {
let mut i: libc::c_uint = 0;
i = 0 as libc::c_int as libc::c_uint;
while i < (48 as libc::c_int / 8 as libc::c_int) as libc::c_uint {
let mut p_digit: *mut uint8_t = p_bytes.offset((8 as libc::c_int as libc::c_uint).wrapping_mul(((48 as libc::c_int / 8 as libc::c_int - 1 as libc::c_int) as libc::c_uint).wrapping_sub(i)) as isize);
*p_digit.offset(0 as libc::c_int as isize) = (*p_native.offset(i as isize) >> 56 as libc::c_int) as uint8_t;
*p_digit.offset(1 as libc::c_int as isize) = (*p_native.offset(i as isize) >> 48 as libc::c_int) as uint8_t;
*p_digit.offset(2 as libc::c_int as isize) = (*p_native.offset(i as isize) >> 40 as libc::c_int) as uint8_t;
*p_digit.offset(3 as libc::c_int as isize) = (*p_native.offset(i as isize) >> 32 as libc::c_int) as uint8_t;
*p_digit.offset(4 as libc::c_int as isize) = (*p_native.offset(i as isize) >> 24 as libc::c_int) as uint8_t;
*p_digit.offset(5 as libc::c_int as isize) = (*p_native.offset(i as isize) >> 16 as libc::c_int) as uint8_t;
*p_digit.offset(6 as libc::c_int as isize) = (*p_native.offset(i as isize) >> 8 as libc::c_int) as uint8_t;
*p_digit.offset(7 as libc::c_int as isize) = *p_native.offset(i as isize) as uint8_t;
i = i.wrapping_add(1)
}
}
/* Compute a = sqrt(a) (mod curve_p), in place. */
#[inline(always)]
unsafe fn mod_sqrt(mut a: *mut uint64_t) {
    let mut i: libc::c_uint = 0;
    let mut p1: [uint64_t; 6] = [1 as libc::c_int as uint64_t, 0, 0, 0, 0, 0];
    let mut l_result: [uint64_t; 6] = [1 as libc::c_int as uint64_t, 0, 0, 0, 0, 0];
    /* Since curve_p == 3 (mod 4) for all supported curves, we can
    compute sqrt(a) = a^((curve_p + 1) / 4) (mod curve_p). */
    vli_add(p1.as_mut_ptr(), curve_p.as_mut_ptr(), p1.as_mut_ptr()); /* p1 = curve_p + 1 */
    /* Square-and-multiply exponentiation, scanning exponent bits from the top
    down to bit 2; stopping above the low two bits implicitly divides the
    exponent (curve_p + 1) by 4. */
    i = vli_numBits(p1.as_mut_ptr()).wrapping_sub(1 as libc::c_int as libc::c_uint);
    while i > 1 as libc::c_int as libc::c_uint {
        vli_modSquare_fast(l_result.as_mut_ptr(), l_result.as_mut_ptr()); /* result = result^2 */
        if vli_testBit(p1.as_mut_ptr(), i) != 0 {
            vli_modMult_fast(l_result.as_mut_ptr(), l_result.as_mut_ptr(), a);
            /* result = result * a when exponent bit i is set */
        }
        i = i.wrapping_sub(1)
    }
    vli_set(a, l_result.as_mut_ptr());
}
/* Recover a full point from SEC1 compressed form (one prefix byte 0x02/0x03
followed by the 48-byte x coordinate). y is a square root of
x^3 - 3x + b (mod p); the prefix's low bit selects which of the two roots. */
#[inline(always)]
unsafe fn ecc_point_decompress(mut p_point: *mut EccPoint, mut p_compressed: *const uint8_t) {
    let mut _3: [uint64_t; 6] = [3 as libc::c_int as uint64_t, 0, 0, 0, 0, 0];
    ecc_bytes2native((*p_point).x.as_mut_ptr(), p_compressed.offset(1 as libc::c_int as isize));
    vli_modSquare_fast((*p_point).y.as_mut_ptr(), (*p_point).x.as_mut_ptr()); /* y = x^2 */
    vli_modSub((*p_point).y.as_mut_ptr(), (*p_point).y.as_mut_ptr(), _3.as_mut_ptr(), curve_p.as_mut_ptr()); /* y = x^2 - 3 */
    vli_modMult_fast((*p_point).y.as_mut_ptr(), (*p_point).y.as_mut_ptr(), (*p_point).x.as_mut_ptr()); /* y = x^3 - 3x */
    vli_modAdd((*p_point).y.as_mut_ptr(), (*p_point).y.as_mut_ptr(), curve_b.as_mut_ptr(), curve_p.as_mut_ptr()); /* y = x^3 - 3x + b */
    mod_sqrt((*p_point).y.as_mut_ptr());
    /* If the computed root's parity does not match the prefix bit, take the other root, p - y. */
    if (*p_point).y[0 as libc::c_int as usize] & 0x1 as libc::c_int as libc::c_ulong != (*p_compressed.offset(0 as libc::c_int as isize) as libc::c_int & 0x1 as libc::c_int) as libc::c_ulong {
        vli_sub((*p_point).y.as_mut_ptr(), curve_p.as_mut_ptr(), (*p_point).y.as_mut_ptr());
    };
}
pub unsafe fn ecc_make_key(mut p_publicKey: *mut uint8_t, mut p_privateKey: *mut uint8_t) -> libc::c_int {
let mut l_private: [uint64_t; 6] = std::mem::MaybeUninit::uninit().assume_init();
let mut l_public: EccPoint = std::mem::MaybeUninit::uninit().assume_init();
let mut l_tries: libc::c_uint = 0 as libc::c_int as libc::c_uint;
loop {
if getRandomNumber(l_private.as_mut_ptr()) == 0 || {
let fresh3 = l_tries;
l_tries = l_tries.wrapping_add(1);
(fresh3) >= 1024 as libc::c_int as libc::c_uint
} {
return 0 as libc::c_int;
}
if !(vli_isZero(l_private.as_mut_ptr()) != 0) {
/* Make sure the private key is in the range [1, n-1].
For the supported curves, n is always large enough that we only need to subtract once at most. */
if vli_cmp(curve_n.as_mut_ptr(), l_private.as_mut_ptr()) != 1 as libc::c_int {
vli_sub(l_private.as_mut_ptr(), l_private.as_mut_ptr(), curve_n.as_mut_ptr());
}
EccPoint_mult(&mut l_public, &mut curve_G, l_private.as_mut_ptr(), 0 as *mut uint64_t);
}
if !(EccPoint_isZero(&mut l_public) != 0) {
break;
}
}
ecc_native2bytes(p_privateKey, l_private.as_mut_ptr() as *const uint64_t);
ecc_native2bytes(p_publicKey.offset(1 as libc::c_int as isize), l_public.x.as_mut_ptr() as *const uint64_t);
*p_publicKey.offset(0 as libc::c_int as isize) = (2 as libc::c_int as libc::c_ulong).wrapping_add(l_public.y[0 as libc::c_int as usize] & 0x1 as libc::c_int as libc::c_ulong) as uint8_t;
return 1 as libc::c_int;
}
/// Compute an ECDH shared secret: writes the x coordinate of
/// (private scalar) * (decompressed public point) to `p_secret` (48 bytes).
/// Returns 1 on success, 0 on RNG failure or if the product is the point at
/// infinity. The random value is handed to EccPoint_mult — presumably for
/// projective-coordinate randomization; confirm against EccPoint_mult.
pub unsafe fn ecdh_shared_secret(mut p_publicKey: *const uint8_t, mut p_privateKey: *const uint8_t, mut p_secret: *mut uint8_t) -> libc::c_int {
    // Zero-initialize: MaybeUninit::uninit().assume_init() on plain integer
    // arrays/structs is undefined behavior in Rust.
    let mut l_public: EccPoint = EccPoint { x: [0; 6], y: [0; 6] };
    let mut l_private: [uint64_t; 6] = [0; 6];
    let mut l_random: [uint64_t; 6] = [0; 6];
    if getRandomNumber(l_random.as_mut_ptr()) == 0 {
        return 0 as libc::c_int;
    }
    ecc_point_decompress(&mut l_public, p_publicKey);
    ecc_bytes2native(l_private.as_mut_ptr(), p_privateKey);
    let mut l_product: EccPoint = EccPoint { x: [0; 6], y: [0; 6] };
    EccPoint_mult(&mut l_product, &mut l_public, l_private.as_mut_ptr(), l_random.as_mut_ptr());
    ecc_native2bytes(p_secret, l_product.x.as_mut_ptr() as *const uint64_t);
    return (EccPoint_isZero(&mut l_product) == 0) as libc::c_int;
}
/* -------- ECDSA code -------- */
/* Computes p_result = (p_left * p_right) % p_mod.
Generic (not curve-specific) reduction: multiply to a 768-bit product, then
repeatedly subtract shifted multiples of p_mod. Used for arithmetic modulo
the group order n in ECDSA. */
#[inline(always)]
unsafe fn vli_modMult(mut p_result: *mut uint64_t, mut p_left: *mut uint64_t, mut p_right: *mut uint64_t, mut p_mod: *mut uint64_t) {
    // Zero-initialize: MaybeUninit::uninit().assume_init() on integer arrays
    // is undefined behavior in Rust.
    let mut l_product: [uint64_t; 12] = [0; 12];
    let mut l_modMultiple: [uint64_t; 12] = [0; 12];
    let mut l_digitShift: uint = 0;
    let mut l_bitShift: uint = 0;
    let mut l_productBits: uint = 0;
    let mut l_modBits: uint = vli_numBits(p_mod);
    vli_mult(l_product.as_mut_ptr(), p_left, p_right);
    l_productBits = vli_numBits(l_product.as_mut_ptr().offset((48 as libc::c_int / 8 as libc::c_int) as isize));
    if l_productBits != 0 {
        l_productBits = (l_productBits as libc::c_uint).wrapping_add((48 as libc::c_int / 8 as libc::c_int * 64 as libc::c_int) as libc::c_uint) as uint as uint
    } else {
        l_productBits = vli_numBits(l_product.as_mut_ptr())
    }
    if l_productBits < l_modBits {
        /* l_product < p_mod. */
        vli_set(p_result, l_product.as_mut_ptr());
        return;
    }
    /* Shift p_mod by (l_leftBits - l_modBits). This multiplies p_mod by the largest
    power of two possible while still resulting in a number less than p_left. */
    vli_clear(l_modMultiple.as_mut_ptr());
    vli_clear(l_modMultiple.as_mut_ptr().offset((48 as libc::c_int / 8 as libc::c_int) as isize));
    l_digitShift = l_productBits.wrapping_sub(l_modBits).wrapping_div(64 as libc::c_int as libc::c_uint);
    l_bitShift = l_productBits.wrapping_sub(l_modBits).wrapping_rem(64 as libc::c_int as libc::c_uint);
    if l_bitShift != 0 {
        l_modMultiple[l_digitShift.wrapping_add((48 as libc::c_int / 8 as libc::c_int) as libc::c_uint) as usize] = vli_lshift(l_modMultiple.as_mut_ptr().offset(l_digitShift as isize), p_mod, l_bitShift)
    } else {
        vli_set(l_modMultiple.as_mut_ptr().offset(l_digitShift as isize), p_mod);
    }
    /* Subtract all multiples of p_mod to get the remainder. */
    vli_clear(p_result); /* Use p_result as a temp var to store 1 (for subtraction) */
    *p_result.offset(0 as libc::c_int as isize) = 1 as libc::c_int as uint64_t;
    while l_productBits > (48 as libc::c_int / 8 as libc::c_int * 64 as libc::c_int) as libc::c_uint || vli_cmp(l_modMultiple.as_mut_ptr(), p_mod) >= 0 as libc::c_int {
        let mut l_cmp: libc::c_int = vli_cmp(l_modMultiple.as_mut_ptr().offset((48 as libc::c_int / 8 as libc::c_int) as isize), l_product.as_mut_ptr().offset((48 as libc::c_int / 8 as libc::c_int) as isize));
        if l_cmp < 0 as libc::c_int || l_cmp == 0 as libc::c_int && vli_cmp(l_modMultiple.as_mut_ptr(), l_product.as_mut_ptr()) <= 0 as libc::c_int {
            if vli_sub(l_product.as_mut_ptr(), l_product.as_mut_ptr(), l_modMultiple.as_mut_ptr()) != 0 {
                /* borrow */
                vli_sub(l_product.as_mut_ptr().offset((48 as libc::c_int / 8 as libc::c_int) as isize), l_product.as_mut_ptr().offset((48 as libc::c_int / 8 as libc::c_int) as isize), p_result);
            }
            vli_sub(
                l_product.as_mut_ptr().offset((48 as libc::c_int / 8 as libc::c_int) as isize),
                l_product.as_mut_ptr().offset((48 as libc::c_int / 8 as libc::c_int) as isize),
                l_modMultiple.as_mut_ptr().offset((48 as libc::c_int / 8 as libc::c_int) as isize),
            );
        }
        /* Halve the shifted modulus (128-limb-pair right shift with carry between halves). */
        let mut l_carry: uint64_t = (l_modMultiple[(48 as libc::c_int / 8 as libc::c_int) as usize] & 0x1 as libc::c_int as libc::c_ulong) << 63 as libc::c_int;
        vli_rshift1(l_modMultiple.as_mut_ptr().offset((48 as libc::c_int / 8 as libc::c_int) as isize));
        vli_rshift1(l_modMultiple.as_mut_ptr());
        l_modMultiple[(48 as libc::c_int / 8 as libc::c_int - 1 as libc::c_int) as usize] |= l_carry;
        l_productBits = l_productBits.wrapping_sub(1)
    }
    vli_set(p_result, l_product.as_mut_ptr());
}
/* Return the larger of two unsigned values. */
#[inline(always)]
unsafe fn umax(a: uint, b: uint) -> uint {
    if a > b {
        a
    } else {
        b
    }
}
pub unsafe fn ecdsa_sign(mut p_privateKey: *const uint8_t, mut p_hash: *const uint8_t, mut p_signature: *mut uint8_t) -> libc::c_int {
let mut k: [uint64_t; 6] = std::mem::MaybeUninit::uninit().assume_init();
let mut l_tmp: [uint64_t; 6] = std::mem::MaybeUninit::uninit().assume_init();
let mut l_s: [uint64_t; 6] = std::mem::MaybeUninit::uninit().assume_init();
let mut p: EccPoint = std::mem::MaybeUninit::uninit().assume_init();
let mut l_tries: libc::c_uint = 0 as libc::c_int as libc::c_uint;
loop {
if getRandomNumber(k.as_mut_ptr()) == 0 || {
let fresh4 = l_tries;
l_tries = l_tries.wrapping_add(1);
(fresh4) >= 1024 as libc::c_int as libc::c_uint
} {
return 0 as libc::c_int;
}
if !(vli_isZero(k.as_mut_ptr()) != 0) {
if vli_cmp(curve_n.as_mut_ptr(), k.as_mut_ptr()) != 1 as libc::c_int {
vli_sub(k.as_mut_ptr(), k.as_mut_ptr(), curve_n.as_mut_ptr());
}
/* tmp = k * G */
EccPoint_mult(&mut p, &mut curve_G, k.as_mut_ptr(), 0 as *mut uint64_t);
/* r = x1 (mod n) */
if vli_cmp(curve_n.as_mut_ptr(), p.x.as_mut_ptr()) != 1 as libc::c_int {
vli_sub(p.x.as_mut_ptr(), p.x.as_mut_ptr(), curve_n.as_mut_ptr());
/* s = r*d */
}
} /* s = e + r*d */
if !(vli_isZero(p.x.as_mut_ptr()) != 0) {
break; /* k = 1 / k */
}
} /* s = (e + r*d) / k */
ecc_native2bytes(p_signature, p.x.as_mut_ptr() as *const uint64_t);
ecc_bytes2native(l_tmp.as_mut_ptr(), p_privateKey);
vli_modMult(l_s.as_mut_ptr(), p.x.as_mut_ptr(), l_tmp.as_mut_ptr(), curve_n.as_mut_ptr());
ecc_bytes2native(l_tmp.as_mut_ptr(), p_hash);
vli_modAdd(l_s.as_mut_ptr(), l_tmp.as_mut_ptr(), l_s.as_mut_ptr(), curve_n.as_mut_ptr());
vli_modInv(k.as_mut_ptr(), k.as_mut_ptr(), curve_n.as_mut_ptr());
vli_modMult(l_s.as_mut_ptr(), l_s.as_mut_ptr(), k.as_mut_ptr(), curve_n.as_mut_ptr());
ecc_native2bytes(p_signature.offset(48 as libc::c_int as isize), l_s.as_mut_ptr() as *const uint64_t);
return 1 as libc::c_int;
}
pub unsafe fn ecdsa_verify(mut p_publicKey: *const uint8_t, mut p_hash: *const uint8_t, mut p_signature: *const uint8_t) -> libc::c_int {
let mut u1: [uint64_t; 6] = std::mem::MaybeUninit::uninit().assume_init();
let mut u2: [uint64_t; 6] = std::mem::MaybeUninit::uninit().assume_init();
let mut z: [uint64_t; 6] = std::mem::MaybeUninit::uninit().assume_init();
let mut l_public: EccPoint = std::mem::MaybeUninit::uninit().assume_init();
let mut l_sum: EccPoint = std::mem::MaybeUninit::uninit().assume_init();
let mut rx: [uint64_t; 6] = std::mem::MaybeUninit::uninit().assume_init();
let mut ry: [uint64_t; 6] = std::mem::MaybeUninit::uninit().assume_init();
let mut tx: [uint64_t; 6] = std::mem::MaybeUninit::uninit().assume_init();
let mut ty: [uint64_t; 6] = std::mem::MaybeUninit::uninit().assume_init();
let mut tz: [uint64_t; 6] = std::mem::MaybeUninit::uninit().assume_init();
let mut l_r: [uint64_t; 6] = std::mem::MaybeUninit::uninit().assume_init();
let mut l_s: [uint64_t; 6] = std::mem::MaybeUninit::uninit().assume_init();
ecc_point_decompress(&mut l_public, p_publicKey);
ecc_bytes2native(l_r.as_mut_ptr(), p_signature);
ecc_bytes2native(l_s.as_mut_ptr(), p_signature.offset(48 as libc::c_int as isize));
if vli_isZero(l_r.as_mut_ptr()) != 0 || vli_isZero(l_s.as_mut_ptr()) != 0 {
/* r, s must not be 0. */
return 0 as libc::c_int;
}
if vli_cmp(curve_n.as_mut_ptr(), l_r.as_mut_ptr()) != 1 as libc::c_int || vli_cmp(curve_n.as_mut_ptr(), l_s.as_mut_ptr()) != 1 as libc::c_int {
/* r, s must be < n. */
return 0 as libc::c_int;
}
/* Calculate u1 and u2. */
vli_modInv(z.as_mut_ptr(), l_s.as_mut_ptr(), curve_n.as_mut_ptr()); /* Z = s^-1 */
ecc_bytes2native(u1.as_mut_ptr(), p_hash); /* u1 = e/s */
vli_modMult(u1.as_mut_ptr(), u1.as_mut_ptr(), z.as_mut_ptr(), curve_n.as_mut_ptr()); /* u2 = r/s */
vli_modMult(u2.as_mut_ptr(), l_r.as_mut_ptr(), z.as_mut_ptr(), curve_n.as_mut_ptr());
/* Calculate l_sum = G + Q. */
vli_set(l_sum.x.as_mut_ptr(), l_public.x.as_mut_ptr()); /* Z = x2 - x1 */
vli_set(l_sum.y.as_mut_ptr(), l_public.y.as_mut_ptr()); /* Z = 1/Z */
vli_set(tx.as_mut_ptr(), curve_G.x.as_mut_ptr());
vli_set(ty.as_mut_ptr(), curve_G.y.as_mut_ptr());
vli_modSub(z.as_mut_ptr(), l_sum.x.as_mut_ptr(), tx.as_mut_ptr(), curve_p.as_mut_ptr());
XYcZ_add(tx.as_mut_ptr(), ty.as_mut_ptr(), l_sum.x.as_mut_ptr(), l_sum.y.as_mut_ptr());
vli_modInv(z.as_mut_ptr(), z.as_mut_ptr(), curve_p.as_mut_ptr());
apply_z(l_sum.x.as_mut_ptr(), l_sum.y.as_mut_ptr(), z.as_mut_ptr());
/* Use Shamir's trick to calculate u1*G + u2*Q */
let mut l_points: [*mut EccPoint; 4] = [0 as *mut EccPoint, &mut curve_G, &mut l_public, &mut l_sum]; /* Z = x2 - x1 */
let mut l_numBits: uint = umax(vli_numBits(u1.as_mut_ptr()), vli_numBits(u2.as_mut_ptr())); /* Z = 1/Z */
let mut l_point: *mut EccPoint =
l_points[((vli_testBit(u1.as_mut_ptr(), l_numBits.wrapping_sub(1 as libc::c_int as libc::c_uint)) != 0) as libc::c_int | ((vli_testBit(u2.as_mut_ptr(), l_numBits.wrapping_sub(1 as libc::c_int as libc::c_uint)) != 0) as libc::c_int) << 1 as libc::c_int) as usize];
vli_set(rx.as_mut_ptr(), (*l_point).x.as_mut_ptr());
vli_set(ry.as_mut_ptr(), (*l_point).y.as_mut_ptr());
vli_clear(z.as_mut_ptr());
z[0 as libc::c_int as usize] = 1 as libc::c_int as uint64_t;
let mut i: libc::c_int = 0;
i = l_numBits.wrapping_sub(2 as libc::c_int as libc::c_uint) as libc::c_int;
while i >= 0 as libc::c_int {
EccPoint_double_jacobian(rx.as_mut_ptr(), ry.as_mut_ptr(), z.as_mut_ptr());
let mut l_index: libc::c_int = (vli_testBit(u1.as_mut_ptr(), i as uint) != 0) as libc::c_int | ((vli_testBit(u2.as_mut_ptr(), i as uint) != 0) as libc::c_int) << 1 as libc::c_int;
let mut l_point_0: *mut EccPoint = l_points[l_index as usize];
if !l_point_0.is_null() {
vli_set(tx.as_mut_ptr(), (*l_point_0).x.as_mut_ptr());
vli_set(ty.as_mut_ptr(), (*l_point_0).y.as_mut_ptr());
apply_z(tx.as_mut_ptr(), ty.as_mut_ptr(), z.as_mut_ptr());
vli_modSub(tz.as_mut_ptr(), rx.as_mut_ptr(), tx.as_mut_ptr(), curve_p.as_mut_ptr());
XYcZ_add(tx.as_mut_ptr(), ty.as_mut_ptr(), rx.as_mut_ptr(), ry.as_mut_ptr());
vli_modMult_fast(z.as_mut_ptr(), z.as_mut_ptr(), tz.as_mut_ptr());
}
i -= 1
}
vli_modInv(z.as_mut_ptr(), z.as_mut_ptr(), curve_p.as_mut_ptr());
apply_z(rx.as_mut_ptr(), ry.as_mut_ptr(), z.as_mut_ptr());
/* v = x1 (mod n) */
if vli_cmp(curve_n.as_mut_ptr(), rx.as_mut_ptr()) != 1 as libc::c_int {
vli_sub(rx.as_mut_ptr(), rx.as_mut_ptr(), curve_n.as_mut_ptr());
}
/* Accept only if v == r. */
return (vli_cmp(rx.as_mut_ptr(), l_r.as_mut_ptr()) == 0 as libc::c_int) as libc::c_int;
}
/// A NIST P-384 public key in SEC1 compressed form: one parity prefix byte
/// (0x02/0x03) followed by the 48-byte x coordinate (49 bytes total,
/// matching P384_PUBLIC_KEY_SIZE).
#[derive(Clone, PartialEq, Eq)]
pub struct P384PublicKey([u8; 49]);
@ -94,8 +1000,8 @@ mod internal {
impl P384KeyPair {}
}
/*
// Version using OpenSSL's ECC
#[cfg(not(target_feature = "builtin_nist_ecc"))]
mod openssl_based {
use std::convert::TryInto;
use std::os::raw::{c_int, c_ulong, c_void};
@ -298,10 +1204,12 @@ mod openssl_based {
unsafe impl Sync for P384KeyPair {}
}
*/
pub use internal::*;
//pub use openssl_based::*;
#[cfg(target_feature = "builtin_nist_ecc")]
pub use builtin::*;
#[cfg(not(target_feature = "builtin_nist_ecc"))]
pub use openssl_based::*;
#[cfg(test)]
mod tests {

View file

@ -1,903 +0,0 @@
// This is EASY-ECC by Kenneth MacKay
// https://github.com/esxgx/easy-ecc
//
// It inherits the BSD 2-Clause license, not ZeroTier's license.
//
// Translated to Rust using: https://c2rust.com
#![allow(dead_code, mutable_transmutes, non_camel_case_types, non_snake_case, non_upper_case_globals, unused_assignments, unused_mut)]
/// Minimal stand-ins for the C scalar type aliases referenced by the c2rust
/// output, so the translated code compiles without the external `libc` crate.
/// NOTE(review): widths are fixed here (c_ulong/c_long = 64-bit) rather than
/// platform-dependent — confirm this is intentional for all targets.
pub mod libc {
    pub type c_uchar = u8;
    pub type c_ulong = u64;
    pub type c_long = i64;
    pub type c_uint = u32;
    pub type c_int = i32;
    pub type c_ulonglong = u64;
    pub type c_longlong = i64;
}
// C fixed-width type names used throughout the translated code.
pub type uint8_t = libc::c_uchar;
pub type uint64_t = libc::c_ulong;
pub type uint = libc::c_uint;
// 128-bit accumulator used by vli_mult / vli_square.
pub type uint128_t = u128;
// An affine P-384 point; x and y are each six 64-bit limbs, limb 0 least significant.
pub struct EccPoint {
    pub x: [u64; 6],
    pub y: [u64; 6],
}
/* secp384r1 (NIST P-384) domain parameters, stored as six little-endian
64-bit limbs (limb 0 = least significant).
NOTE(review): these are `static mut` only because c2rust translates C globals
that way and the vli_* helpers take *mut pointers; they appear to be used
read-only — confirm nothing writes through curve_G in EccPoint_mult. */
/* The field prime p. */
static mut curve_p: [uint64_t; 6] = [
    0xffffffff as libc::c_uint as uint64_t,
    0xffffffff00000000 as libc::c_ulong,
    0xfffffffffffffffe as libc::c_ulong,
    0xffffffffffffffff as libc::c_ulong,
    0xffffffffffffffff as libc::c_ulong,
    0xffffffffffffffff as libc::c_ulong,
];
/* The curve coefficient b in y^2 = x^3 - 3x + b. */
static mut curve_b: [uint64_t; 6] = [
    0x2a85c8edd3ec2aef as libc::c_long as uint64_t,
    0xc656398d8a2ed19d as libc::c_ulong,
    0x314088f5013875a as libc::c_long as uint64_t,
    0x181d9c6efe814112 as libc::c_long as uint64_t,
    0x988e056be3f82d19 as libc::c_ulong,
    0xb3312fa7e23ee7e4 as libc::c_ulong,
];
/* The base (generator) point G. */
static mut curve_G: EccPoint = {
    let mut init = EccPoint {
        x: [
            0x3a545e3872760ab7 as libc::c_long as uint64_t,
            0x5502f25dbf55296c as libc::c_long as uint64_t,
            0x59f741e082542a38 as libc::c_long as uint64_t,
            0x6e1d3b628ba79b98 as libc::c_long as uint64_t,
            0x8eb1c71ef320ad74 as libc::c_ulong,
            0xaa87ca22be8b0537 as libc::c_ulong,
        ],
        y: [
            0x7a431d7c90ea0e5f as libc::c_long as uint64_t,
            0xa60b1ce1d7e819d as libc::c_long as uint64_t,
            0xe9da3113b5f0b8c0 as libc::c_ulong,
            0xf8f41dbd289a147c as libc::c_ulong,
            0x5d9e98bf9292dc29 as libc::c_long as uint64_t,
            0x3617de4a96262c6f as libc::c_long as uint64_t,
        ],
    };
    init
};
/* The group order n (order of G). */
static mut curve_n: [uint64_t; 6] = [
    0xecec196accc52973 as libc::c_ulong,
    0x581a0db248b0a77a as libc::c_long as uint64_t,
    0xc7634d81f4372ddf as libc::c_ulong,
    0xffffffffffffffff as libc::c_ulong,
    0xffffffffffffffff as libc::c_ulong,
    0xffffffffffffffff as libc::c_ulong,
];
/* Fill the six 64-bit limbs (48 bytes) at p_vli with cryptographically secure
random bytes via crate::random. Always returns 1; the C-style success code is
kept for API compatibility with the original EASY-ECC. */
#[inline(always)]
unsafe fn getRandomNumber(mut p_vli: *mut uint64_t) -> libc::c_int {
    crate::random::fill_bytes_secure(&mut *std::ptr::slice_from_raw_parts_mut(p_vli.cast(), 48));
    return 1 as libc::c_int;
}
#[inline(always)]
unsafe fn vli_clear(mut p_vli: *mut uint64_t) {
let mut i: uint = 0;
i = 0 as libc::c_int as uint;
while i < (48 as libc::c_int / 8 as libc::c_int) as libc::c_uint {
*p_vli.offset(i as isize) = 0 as libc::c_int as uint64_t;
i = i.wrapping_add(1)
}
}
/* Returns 1 if p_vli == 0, 0 otherwise. */
#[inline(always)]
unsafe fn vli_isZero(p_vli: *mut uint64_t) -> libc::c_int {
    for i in 0..6isize {
        if *p_vli.offset(i) != 0 {
            return 0;
        }
    }
    1
}
/* Returns nonzero if bit p_bit of p_vli is set. */
#[inline(always)]
unsafe fn vli_testBit(mut p_vli: *mut uint64_t, mut p_bit: uint) -> uint64_t {
return *p_vli.offset(p_bit.wrapping_div(64 as libc::c_int as libc::c_uint) as isize) & (1 as libc::c_int as uint64_t) << p_bit.wrapping_rem(64 as libc::c_int as libc::c_uint);
}
/* Counts the number of 64-bit "digits" in p_vli. */
#[inline(always)]
unsafe fn vli_numDigits(p_vli: *mut uint64_t) -> uint {
    /* Scan from the most significant limb downward; most values have their
    top limb nonzero, so this usually stops immediately. */
    let mut n: libc::c_int = 6;
    while n > 0 && *p_vli.offset((n - 1) as isize) == 0 {
        n -= 1;
    }
    n as uint
}
/* Counts the number of bits required for p_vli. */
#[inline(always)]
unsafe fn vli_numBits(p_vli: *mut uint64_t) -> uint {
    let n_digits = vli_numDigits(p_vli);
    if n_digits == 0 {
        return 0;
    }
    /* Bit length = 64 * (full limbs below the top one) + bits in the top limb. */
    let mut top = *p_vli.offset(n_digits as isize - 1);
    let mut bits: uint = 0;
    while top != 0 {
        top >>= 1;
        bits += 1;
    }
    (n_digits - 1) * 64 + bits
}
/* Sets p_dest = p_src. */
#[inline(always)]
unsafe fn vli_set(mut p_dest: *mut uint64_t, mut p_src: *mut uint64_t) {
let mut i: uint = 0;
i = 0 as libc::c_int as uint;
while i < (48 as libc::c_int / 8 as libc::c_int) as libc::c_uint {
*p_dest.offset(i as isize) = *p_src.offset(i as isize);
i = i.wrapping_add(1)
}
}
/* Returns sign of p_left - p_right: 1, -1, or 0. */
#[inline(always)]
unsafe fn vli_cmp(p_left: *mut uint64_t, p_right: *mut uint64_t) -> libc::c_int {
    /* Compare from the most significant limb down; first difference decides. */
    for i in (0..6isize).rev() {
        let l = *p_left.offset(i);
        let r = *p_right.offset(i);
        if l != r {
            return if l > r { 1 } else { -1 };
        }
    }
    0
}
/* Computes p_result = p_in << c, returning carry. Can modify in place (if p_result == p_in). 0 < p_shift < 64. */
#[inline(always)]
unsafe fn vli_lshift(mut p_result: *mut uint64_t, mut p_in: *mut uint64_t, mut p_shift: uint) -> uint64_t {
let mut l_carry: uint64_t = 0 as libc::c_int as uint64_t;
let mut i: uint = 0;
i = 0 as libc::c_int as uint;
while i < (48 as libc::c_int / 8 as libc::c_int) as libc::c_uint {
let mut l_temp: uint64_t = *p_in.offset(i as isize);
*p_result.offset(i as isize) = l_temp << p_shift | l_carry;
l_carry = l_temp >> (64 as libc::c_int as libc::c_uint).wrapping_sub(p_shift);
i = i.wrapping_add(1)
}
return l_carry;
}
/* Computes p_vli = p_vli >> 1. */
#[inline(always)]
unsafe fn vli_rshift1(mut p_vli: *mut uint64_t) {
let mut l_end: *mut uint64_t = p_vli;
let mut l_carry: uint64_t = 0 as libc::c_int as uint64_t;
p_vli = p_vli.offset((48 as libc::c_int / 8 as libc::c_int) as isize);
loop {
let fresh0 = p_vli;
p_vli = p_vli.offset(-1);
if !(fresh0 > l_end) {
break;
}
let mut l_temp: uint64_t = *p_vli;
*p_vli = l_temp >> 1 as libc::c_int | l_carry;
l_carry = l_temp << 63 as libc::c_int
}
}
/* Computes p_result = p_left + p_right, returning carry. Can modify in place. */
#[inline(always)]
unsafe fn vli_add(mut p_result: *mut uint64_t, mut p_left: *mut uint64_t, mut p_right: *mut uint64_t) -> uint64_t {
let mut l_carry: uint64_t = 0 as libc::c_int as uint64_t;
let mut i: uint = 0;
i = 0 as libc::c_int as uint;
while i < (48 as libc::c_int / 8 as libc::c_int) as libc::c_uint {
let mut l_sum: uint64_t = (*p_left.offset(i as isize)).wrapping_add(*p_right.offset(i as isize)).wrapping_add(l_carry);
if l_sum != *p_left.offset(i as isize) {
l_carry = (l_sum < *p_left.offset(i as isize)) as libc::c_int as uint64_t
}
*p_result.offset(i as isize) = l_sum;
i = i.wrapping_add(1)
}
return l_carry;
}
/* Computes p_result = p_left - p_right, returning borrow. Can modify in place. */
#[inline(always)]
unsafe fn vli_sub(mut p_result: *mut uint64_t, mut p_left: *mut uint64_t, mut p_right: *mut uint64_t) -> uint64_t {
let mut l_borrow: uint64_t = 0 as libc::c_int as uint64_t;
let mut i: uint = 0;
i = 0 as libc::c_int as uint;
while i < (48 as libc::c_int / 8 as libc::c_int) as libc::c_uint {
let mut l_diff: uint64_t = (*p_left.offset(i as isize)).wrapping_sub(*p_right.offset(i as isize)).wrapping_sub(l_borrow);
if l_diff != *p_left.offset(i as isize) {
l_borrow = (l_diff > *p_left.offset(i as isize)) as libc::c_int as uint64_t
}
*p_result.offset(i as isize) = l_diff;
i = i.wrapping_add(1)
}
return l_borrow;
}
/* Computes p_result = p_left * p_right.
Schoolbook multiplication evaluated column by column: for each output digit k,
all partial products p_left[i] * p_right[k-i] are summed into a 128-bit
accumulator (r01) with a third 64-bit carry limb (r2). p_result must have room
for 12 limbs (the full 768-bit product). */
#[inline(always)]
unsafe fn vli_mult(mut p_result: *mut uint64_t, mut p_left: *mut uint64_t, mut p_right: *mut uint64_t) {
    let mut r01: uint128_t = 0 as libc::c_int as uint128_t; /* low 128 bits of the column accumulator */
    let mut r2: uint64_t = 0 as libc::c_int as uint64_t; /* overflow beyond 128 bits */
    let mut i: uint = 0;
    let mut k: uint = 0;
    /* Compute each digit of p_result in sequence, maintaining the carries. */
    k = 0 as libc::c_int as uint;
    while k < (48 as libc::c_int / 8 as libc::c_int * 2 as libc::c_int - 1 as libc::c_int) as libc::c_uint {
        /* Only indices i with both i and k-i in [0, 6) contribute to column k. */
        let mut l_min: uint = if k < (48 as libc::c_int / 8 as libc::c_int) as libc::c_uint {
            0 as libc::c_int as libc::c_uint
        } else {
            k.wrapping_add(1 as libc::c_int as libc::c_uint).wrapping_sub((48 as libc::c_int / 8 as libc::c_int) as libc::c_uint)
        };
        i = l_min;
        while i <= k && i < (48 as libc::c_int / 8 as libc::c_int) as libc::c_uint {
            let mut l_product: uint128_t = (*p_left.offset(i as isize) as uint128_t).wrapping_mul(*p_right.offset(k.wrapping_sub(i) as isize) as u128);
            r01 = (r01 as u128).wrapping_add(l_product) as uint128_t as uint128_t;
            /* r01 < l_product after the add means the 128-bit sum wrapped. */
            r2 = (r2 as libc::c_ulong).wrapping_add((r01 < l_product) as libc::c_int as libc::c_ulong) as uint64_t as uint64_t;
            i = i.wrapping_add(1)
        }
        /* Emit the low 64 bits of the column; shift the rest down for column k+1. */
        *p_result.offset(k as isize) = r01 as uint64_t;
        r01 = r01 >> 64 as libc::c_int | (r2 as uint128_t) << 64 as libc::c_int;
        r2 = 0 as libc::c_int as uint64_t;
        k = k.wrapping_add(1)
    }
    *p_result.offset((48 as libc::c_int / 8 as libc::c_int * 2 as libc::c_int - 1 as libc::c_int) as isize) = r01 as uint64_t;
}
/* Computes p_result = p_left^2 (12-limb, 768-bit result).
Same column scheme as vli_mult, but each off-diagonal product appears twice
(a[i]*a[j] and a[j]*a[i]), so it is computed once and doubled. */
#[inline(always)]
unsafe fn vli_square(mut p_result: *mut uint64_t, mut p_left: *mut uint64_t) {
    let mut r01: uint128_t = 0 as libc::c_int as uint128_t;
    let mut r2: uint64_t = 0 as libc::c_int as uint64_t;
    let mut i: uint = 0;
    let mut k: uint = 0;
    k = 0 as libc::c_int as uint;
    while k < (48 as libc::c_int / 8 as libc::c_int * 2 as libc::c_int - 1 as libc::c_int) as libc::c_uint {
        let mut l_min: uint = if k < (48 as libc::c_int / 8 as libc::c_int) as libc::c_uint {
            0 as libc::c_int as libc::c_uint
        } else {
            k.wrapping_add(1 as libc::c_int as libc::c_uint).wrapping_sub((48 as libc::c_int / 8 as libc::c_int) as libc::c_uint)
        };
        i = l_min;
        /* i <= k - i limits the loop to the lower triangle plus the diagonal. */
        while i <= k && i <= k.wrapping_sub(i) {
            let mut l_product: uint128_t = (*p_left.offset(i as isize) as uint128_t).wrapping_mul(*p_left.offset(k.wrapping_sub(i) as isize) as u128);
            if i < k.wrapping_sub(i) {
                /* Off-diagonal term: double it, banking the top bit that the
                doubling would push out of 128 bits into r2. */
                r2 = (r2 as u128).wrapping_add(l_product >> 127 as libc::c_int) as uint64_t as uint64_t;
                l_product = (l_product as u128).wrapping_mul(2 as libc::c_int as u128) as uint128_t as uint128_t
            }
            r01 = (r01 as u128).wrapping_add(l_product) as uint128_t as uint128_t;
            r2 = (r2 as libc::c_ulong).wrapping_add((r01 < l_product) as libc::c_int as libc::c_ulong) as uint64_t as uint64_t;
            i = i.wrapping_add(1)
        }
        *p_result.offset(k as isize) = r01 as uint64_t;
        r01 = r01 >> 64 as libc::c_int | (r2 as uint128_t) << 64 as libc::c_int;
        r2 = 0 as libc::c_int as uint64_t;
        k = k.wrapping_add(1)
    }
    *p_result.offset((48 as libc::c_int / 8 as libc::c_int * 2 as libc::c_int - 1 as libc::c_int) as isize) = r01 as uint64_t;
}
/* #if SUPPORTS_INT128 */
/* SUPPORTS_INT128 */
/* Computes p_result = (p_left + p_right) % p_mod.
Assumes that p_left < p_mod and p_right < p_mod, p_result != p_mod. */
#[inline(always)]
unsafe fn vli_modAdd(p_result: *mut uint64_t, p_left: *mut uint64_t, p_right: *mut uint64_t, p_mod: *mut uint64_t) {
    let overflowed = vli_add(p_result, p_left, p_right) != 0;
    /* A carry out of the top limb, or a sum >= p_mod, needs exactly one
    reduction step because both inputs were already < p_mod. */
    if overflowed || vli_cmp(p_result, p_mod) >= 0 {
        vli_sub(p_result, p_result, p_mod);
    }
}
/* Computes p_result = (p_left - p_right) % p_mod.
Assumes that p_left < p_mod and p_right < p_mod, p_result != p_mod. */
#[inline(always)]
unsafe fn vli_modSub(p_result: *mut uint64_t, p_left: *mut uint64_t, p_right: *mut uint64_t, p_mod: *mut uint64_t) {
    /* On borrow the raw difference wrapped below zero; since -x mod m == m - x,
    adding p_mod back (with wraparound) yields the correct residue. */
    if vli_sub(p_result, p_left, p_right) != 0 {
        vli_add(p_result, p_result, p_mod);
    }
}
//#elif ECC_CURVE == secp384r1
#[inline(always)]
unsafe fn omega_mult(mut p_result: *mut uint64_t, mut p_right: *mut uint64_t) {
let mut l_tmp: [uint64_t; 6] = std::mem::MaybeUninit::uninit().assume_init();
let mut l_carry: uint64_t = 0;
let mut l_diff: uint64_t = 0;
/* Multiply by (2^128 + 2^96 - 2^32 + 1). */
vli_set(p_result, p_right); /* 1 */
l_carry = vli_lshift(l_tmp.as_mut_ptr(), p_right, 32 as libc::c_int as uint); /* 2^96 + 1 */
*p_result.offset((1 as libc::c_int + 48 as libc::c_int / 8 as libc::c_int) as isize) = l_carry.wrapping_add(vli_add(p_result.offset(1 as libc::c_int as isize), p_result.offset(1 as libc::c_int as isize), l_tmp.as_mut_ptr())); /* 2^128 + 2^96 + 1 */
*p_result.offset((2 as libc::c_int + 48 as libc::c_int / 8 as libc::c_int) as isize) = vli_add(p_result.offset(2 as libc::c_int as isize), p_result.offset(2 as libc::c_int as isize), p_right); /* 2^128 + 2^96 - 2^32 + 1 */
l_carry = (l_carry as libc::c_ulong).wrapping_add(vli_sub(p_result, p_result, l_tmp.as_mut_ptr())) as uint64_t as uint64_t;
l_diff = (*p_result.offset((48 as libc::c_int / 8 as libc::c_int) as isize)).wrapping_sub(l_carry);
if l_diff > *p_result.offset((48 as libc::c_int / 8 as libc::c_int) as isize) {
/* Propagate borrow if necessary. */
let mut i: uint = 0;
i = (1 as libc::c_int + 48 as libc::c_int / 8 as libc::c_int) as uint;
loop {
let ref mut fresh1 = *p_result.offset(i as isize);
*fresh1 = (*fresh1).wrapping_sub(1);
if *p_result.offset(i as isize) != -(1 as libc::c_int) as uint64_t {
break;
}
i = i.wrapping_add(1)
}
}
*p_result.offset((48 as libc::c_int / 8 as libc::c_int) as isize) = l_diff;
}
/* Computes p_result = p_product % curve_p (p_product is 12 limbs).
Repeatedly folds the high half c1 into the low half via c0 + omega * c1
(omega from omega_mult), then finishes with conditional subtractions.
See PDF "Comparing Elliptic Curve Cryptography and RSA on 8-bit CPUs",
section "Curve-Specific Optimizations". */
#[inline(always)]
unsafe fn vli_mmod_fast(mut p_result: *mut uint64_t, mut p_product: *mut uint64_t) {
    // Zero-initialize: MaybeUninit::uninit().assume_init() on an integer
    // array is undefined behavior in Rust.
    let mut l_tmp: [uint64_t; 12] = [0; 12];
    while vli_isZero(p_product.offset((48 as libc::c_int / 8 as libc::c_int) as isize)) == 0 {
        /* While c1 != 0 */
        let mut l_carry: uint64_t = 0 as libc::c_int as uint64_t;
        let mut i: uint = 0;
        vli_clear(l_tmp.as_mut_ptr());
        vli_clear(l_tmp.as_mut_ptr().offset((48 as libc::c_int / 8 as libc::c_int) as isize));
        omega_mult(l_tmp.as_mut_ptr(), p_product.offset((48 as libc::c_int / 8 as libc::c_int) as isize)); /* tmp = omega * c1 */
        vli_clear(p_product.offset((48 as libc::c_int / 8 as libc::c_int) as isize)); /* p_product = c0 */
        /* (c1, c0) = c0 + omega * c1, added across 9 limbs with manual carry. */
        i = 0 as libc::c_int as uint;
        while i < (48 as libc::c_int / 8 as libc::c_int + 3 as libc::c_int) as libc::c_uint {
            let mut l_sum: uint64_t = (*p_product.offset(i as isize)).wrapping_add(l_tmp[i as usize]).wrapping_add(l_carry);
            if l_sum != *p_product.offset(i as isize) {
                l_carry = (l_sum < *p_product.offset(i as isize)) as libc::c_int as uint64_t
            }
            *p_product.offset(i as isize) = l_sum;
            i = i.wrapping_add(1)
        }
    }
    while vli_cmp(p_product, curve_p.as_mut_ptr()) > 0 as libc::c_int {
        vli_sub(p_product, p_product, curve_p.as_mut_ptr());
    }
    vli_set(p_result, p_product);
}
//#endif
/* Computes p_result = (p_left * p_right) % curve_p. */
#[inline(always)]
unsafe fn vli_modMult_fast(mut p_result: *mut uint64_t, mut p_left: *mut uint64_t, mut p_right: *mut uint64_t) {
    // Zero-initialize: MaybeUninit::uninit().assume_init() on an integer
    // array is undefined behavior in Rust (vli_mult overwrites all 12 limbs,
    // but the uninit value itself is UB the moment it is produced).
    let mut l_product: [uint64_t; 12] = [0; 12];
    vli_mult(l_product.as_mut_ptr(), p_left, p_right);
    vli_mmod_fast(p_result, l_product.as_mut_ptr());
}
/* Computes p_result = p_left^2 % curve_p. */
#[inline(always)]
unsafe fn vli_modSquare_fast(mut p_result: *mut uint64_t, mut p_left: *mut uint64_t) {
    // Zero-initialize: MaybeUninit::uninit().assume_init() on an integer
    // array is undefined behavior in Rust.
    let mut l_product: [uint64_t; 12] = [0; 12];
    vli_square(l_product.as_mut_ptr(), p_left);
    vli_mmod_fast(p_result, l_product.as_mut_ptr());
}
/* Computes p_result = (1 / p_input) % p_mod. All VLIs are the same size.
See "From Euclid's GCD to Montgomery Multiplication to the Great Divide"
https://labs.oracle.com/techrep/2001/smli_tr-2001-95.pdf */
#[inline(always)]
unsafe fn vli_modInv(mut p_result: *mut uint64_t, mut p_input: *mut uint64_t, mut p_mod: *mut uint64_t) {
let mut a: [uint64_t; 6] = std::mem::MaybeUninit::uninit().assume_init();
let mut b: [uint64_t; 6] = std::mem::MaybeUninit::uninit().assume_init();
let mut u: [uint64_t; 6] = std::mem::MaybeUninit::uninit().assume_init();
let mut v: [uint64_t; 6] = std::mem::MaybeUninit::uninit().assume_init();
let mut l_carry: uint64_t = 0;
let mut l_cmpResult: libc::c_int = 0;
if vli_isZero(p_input) != 0 {
vli_clear(p_result);
return;
}
vli_set(a.as_mut_ptr(), p_input);
vli_set(b.as_mut_ptr(), p_mod);
vli_clear(u.as_mut_ptr());
u[0 as libc::c_int as usize] = 1 as libc::c_int as uint64_t;
vli_clear(v.as_mut_ptr());
loop {
l_cmpResult = vli_cmp(a.as_mut_ptr(), b.as_mut_ptr());
if !(l_cmpResult != 0 as libc::c_int) {
break;
}
l_carry = 0 as libc::c_int as uint64_t;
if a[0 as libc::c_int as usize] & 1 as libc::c_int as libc::c_ulong == 0 {
vli_rshift1(a.as_mut_ptr());
if u[0 as libc::c_int as usize] & 1 as libc::c_int as libc::c_ulong != 0 {
l_carry = vli_add(u.as_mut_ptr(), u.as_mut_ptr(), p_mod)
}
vli_rshift1(u.as_mut_ptr());
if l_carry != 0 {
u[(48 as libc::c_int / 8 as libc::c_int - 1 as libc::c_int) as usize] = (u[(48 as libc::c_int / 8 as libc::c_int - 1 as libc::c_int) as usize] as libc::c_ulonglong | 0x8000000000000000 as libc::c_ulonglong) as uint64_t
}
} else if b[0 as libc::c_int as usize] & 1 as libc::c_int as libc::c_ulong == 0 {
vli_rshift1(b.as_mut_ptr());
if v[0 as libc::c_int as usize] & 1 as libc::c_int as libc::c_ulong != 0 {
l_carry = vli_add(v.as_mut_ptr(), v.as_mut_ptr(), p_mod)
}
vli_rshift1(v.as_mut_ptr());
if l_carry != 0 {
v[(48 as libc::c_int / 8 as libc::c_int - 1 as libc::c_int) as usize] = (v[(48 as libc::c_int / 8 as libc::c_int - 1 as libc::c_int) as usize] as libc::c_ulonglong | 0x8000000000000000 as libc::c_ulonglong) as uint64_t
}
} else if l_cmpResult > 0 as libc::c_int {
vli_sub(a.as_mut_ptr(), a.as_mut_ptr(), b.as_mut_ptr());
vli_rshift1(a.as_mut_ptr());
if vli_cmp(u.as_mut_ptr(), v.as_mut_ptr()) < 0 as libc::c_int {
vli_add(u.as_mut_ptr(), u.as_mut_ptr(), p_mod);
}
vli_sub(u.as_mut_ptr(), u.as_mut_ptr(), v.as_mut_ptr());
if u[0 as libc::c_int as usize] & 1 as libc::c_int as libc::c_ulong != 0 {
l_carry = vli_add(u.as_mut_ptr(), u.as_mut_ptr(), p_mod)
}
vli_rshift1(u.as_mut_ptr());
if l_carry != 0 {
u[(48 as libc::c_int / 8 as libc::c_int - 1 as libc::c_int) as usize] = (u[(48 as libc::c_int / 8 as libc::c_int - 1 as libc::c_int) as usize] as libc::c_ulonglong | 0x8000000000000000 as libc::c_ulonglong) as uint64_t
}
} else {
vli_sub(b.as_mut_ptr(), b.as_mut_ptr(), a.as_mut_ptr());
vli_rshift1(b.as_mut_ptr());
if vli_cmp(v.as_mut_ptr(), u.as_mut_ptr()) < 0 as libc::c_int {
vli_add(v.as_mut_ptr(), v.as_mut_ptr(), p_mod);
}
vli_sub(v.as_mut_ptr(), v.as_mut_ptr(), u.as_mut_ptr());
if v[0 as libc::c_int as usize] & 1 as libc::c_int as libc::c_ulong != 0 {
l_carry = vli_add(v.as_mut_ptr(), v.as_mut_ptr(), p_mod)
}
vli_rshift1(v.as_mut_ptr());
if l_carry != 0 {
v[(48 as libc::c_int / 8 as libc::c_int - 1 as libc::c_int) as usize] = (v[(48 as libc::c_int / 8 as libc::c_int - 1 as libc::c_int) as usize] as libc::c_ulonglong | 0x8000000000000000 as libc::c_ulonglong) as uint64_t
}
}
}
vli_set(p_result, u.as_mut_ptr());
}
/* ------ Point operations ------ */
/* Returns 1 if p_point is the point at infinity (both coordinates zero), 0 otherwise. */
#[inline(always)]
unsafe fn EccPoint_isZero(mut p_point: *mut EccPoint) -> libc::c_int {
    let x_zero = vli_isZero((*p_point).x.as_mut_ptr()) != 0;
    let y_zero = vli_isZero((*p_point).y.as_mut_ptr()) != 0;
    (x_zero && y_zero) as libc::c_int
}
/* Point multiplication algorithm using Montgomery's ladder with co-Z coordinates.
From http://eprint.iacr.org/2011/338.pdf
*/
/* Double in place */
#[inline(always)]
unsafe fn EccPoint_double_jacobian(mut X1: *mut uint64_t, mut Y1: *mut uint64_t, mut Z1: *mut uint64_t) {
    /* t1 = X, t2 = Y, t3 = Z */
    // Zero-init instead of MaybeUninit::uninit().assume_init() (UB for integer arrays);
    // both temporaries are written by vli_modSquare_fast/vli_modMult_fast before any read.
    let mut t4: [uint64_t; 6] = [0; 6]; /* t4 = y1^2 */
    let mut t5: [uint64_t; 6] = [0; 6]; /* t5 = x1*y1^2 = A */
    /* Doubling the point at infinity is a no-op. */
    if vli_isZero(Z1) != 0 {
        return;
    } /* t4 = y1^4 */
    vli_modSquare_fast(t4.as_mut_ptr(), Y1); /* t2 = y1*z1 = z3 */
    vli_modMult_fast(t5.as_mut_ptr(), X1, t4.as_mut_ptr()); /* t3 = z1^2 */
    vli_modSquare_fast(t4.as_mut_ptr(), t4.as_mut_ptr()); /* t1 = x1 + z1^2 */
    vli_modMult_fast(Y1, Y1, Z1); /* t3 = 2*z1^2 */
    vli_modSquare_fast(Z1, Z1); /* t3 = x1 - z1^2 */
    vli_modAdd(X1, X1, Z1, curve_p.as_mut_ptr()); /* t1 = x1^2 - z1^4 */
    vli_modAdd(Z1, Z1, Z1, curve_p.as_mut_ptr()); /* t3 = 2*(x1^2 - z1^4) */
    vli_modSub(Z1, X1, Z1, curve_p.as_mut_ptr()); /* t1 = 3*(x1^2 - z1^4) */
    vli_modMult_fast(X1, X1, Z1);
    vli_modAdd(Z1, X1, X1, curve_p.as_mut_ptr());
    vli_modAdd(X1, X1, Z1, curve_p.as_mut_ptr());
    /* Halve X1 mod p: if odd, add p first so the right shift stays exact. */
    if vli_testBit(X1, 0 as libc::c_int as uint) != 0 {
        let mut l_carry: uint64_t = vli_add(X1, X1, curve_p.as_mut_ptr());
        vli_rshift1(X1);
        let ref mut fresh2 = *X1.offset((48 as libc::c_int / 8 as libc::c_int - 1 as libc::c_int) as isize);
        *fresh2 |= l_carry << 63 as libc::c_int
    } else {
        vli_rshift1(X1);
    }
    /* t1 = 3/2*(x1^2 - z1^4) = B */
    vli_modSquare_fast(Z1, X1); /* t3 = B^2 */
    vli_modSub(Z1, Z1, t5.as_mut_ptr(), curve_p.as_mut_ptr()); /* t3 = B^2 - A */
    vli_modSub(Z1, Z1, t5.as_mut_ptr(), curve_p.as_mut_ptr()); /* t3 = B^2 - 2A = x3 */
    vli_modSub(t5.as_mut_ptr(), t5.as_mut_ptr(), Z1, curve_p.as_mut_ptr()); /* t5 = A - x3 */
    vli_modMult_fast(X1, X1, t5.as_mut_ptr()); /* t1 = B * (A - x3) */
    vli_modSub(t4.as_mut_ptr(), X1, t4.as_mut_ptr(), curve_p.as_mut_ptr()); /* t4 = B * (A - x3) - y1^4 = y3 */
    /* Rotate results into the output registers. */
    vli_set(X1, Z1);
    vli_set(Z1, Y1);
    vli_set(Y1, t4.as_mut_ptr());
}
/* Modify (x1, y1) => (x1 * z^2, y1 * z^3) */
#[inline(always)]
unsafe fn apply_z(mut X1: *mut uint64_t, mut Y1: *mut uint64_t, mut Z: *mut uint64_t) {
    // Zero-init instead of MaybeUninit::uninit().assume_init() (UB for integer arrays);
    // t1 is written by vli_modSquare_fast before it is ever read.
    let mut t1: [uint64_t; 6] = [0; 6]; /* z^2 */
    vli_modSquare_fast(t1.as_mut_ptr(), Z); /* x1 * z^2 */
    vli_modMult_fast(X1, X1, t1.as_mut_ptr()); /* z^3 */
    vli_modMult_fast(t1.as_mut_ptr(), t1.as_mut_ptr(), Z);
    vli_modMult_fast(Y1, Y1, t1.as_mut_ptr());
    /* y1 * z^3 */
}
/* P = (x1, y1) => 2P, (x2, y2) => P' */
#[inline(always)]
unsafe fn XYcZ_initial_double(mut X1: *mut uint64_t, mut Y1: *mut uint64_t, mut X2: *mut uint64_t, mut Y2: *mut uint64_t, mut p_initialZ: *mut uint64_t) {
    // Zero-init instead of MaybeUninit::uninit().assume_init() (UB for integer arrays);
    // z is fully assigned (vli_clear + z[0] = 1, or vli_set) before use.
    let mut z: [uint64_t; 6] = [0; 6];
    vli_set(X2, X1);
    vli_set(Y2, Y1);
    /* Default Z = 1 unless an initial Z (for coordinate randomization) is supplied. */
    vli_clear(z.as_mut_ptr());
    z[0 as libc::c_int as usize] = 1 as libc::c_int as uint64_t;
    if !p_initialZ.is_null() {
        vli_set(z.as_mut_ptr(), p_initialZ);
    }
    apply_z(X1, Y1, z.as_mut_ptr());
    EccPoint_double_jacobian(X1, Y1, z.as_mut_ptr());
    apply_z(X2, Y2, z.as_mut_ptr());
}
/* Input P = (x1, y1, Z), Q = (x2, y2, Z)
Output P' = (x1', y1', Z3), P + Q = (x3, y3, Z3)
or P => P', Q => P + Q
*/
#[inline(always)]
unsafe fn XYcZ_add(mut X1: *mut uint64_t, mut Y1: *mut uint64_t, mut X2: *mut uint64_t, mut Y2: *mut uint64_t) {
    /* t1 = X1, t2 = Y1, t3 = X2, t4 = Y2 */
    // Zero-init instead of MaybeUninit::uninit().assume_init() (UB for integer arrays);
    // t5 is written by the first vli_modSub before any read.
    let mut t5: [uint64_t; 6] = [0; 6]; /* t5 = x2 - x1 */
    vli_modSub(t5.as_mut_ptr(), X2, X1, curve_p.as_mut_ptr()); /* t5 = (x2 - x1)^2 = A */
    vli_modSquare_fast(t5.as_mut_ptr(), t5.as_mut_ptr()); /* t1 = x1*A = B */
    vli_modMult_fast(X1, X1, t5.as_mut_ptr()); /* t3 = x2*A = C */
    vli_modMult_fast(X2, X2, t5.as_mut_ptr()); /* t4 = y2 - y1 */
    vli_modSub(Y2, Y2, Y1, curve_p.as_mut_ptr()); /* t5 = (y2 - y1)^2 = D */
    vli_modSquare_fast(t5.as_mut_ptr(), Y2); /* t5 = D - B */
    vli_modSub(t5.as_mut_ptr(), t5.as_mut_ptr(), X1, curve_p.as_mut_ptr()); /* t5 = D - B - C = x3 */
    vli_modSub(t5.as_mut_ptr(), t5.as_mut_ptr(), X2, curve_p.as_mut_ptr()); /* t3 = C - B */
    vli_modSub(X2, X2, X1, curve_p.as_mut_ptr()); /* t2 = y1*(C - B) */
    vli_modMult_fast(Y1, Y1, X2); /* t3 = B - x3 */
    vli_modSub(X2, X1, t5.as_mut_ptr(), curve_p.as_mut_ptr()); /* t4 = (y2 - y1)*(B - x3) */
    vli_modMult_fast(Y2, Y2, X2); /* t4 = y3 */
    vli_modSub(Y2, Y2, Y1, curve_p.as_mut_ptr());
    vli_set(X2, t5.as_mut_ptr());
}
/* Input P = (x1, y1, Z), Q = (x2, y2, Z)
Output P + Q = (x3, y3, Z3), P - Q = (x3', y3', Z3)
or P => P - Q, Q => P + Q
*/
#[inline(always)]
unsafe fn XYcZ_addC(mut X1: *mut uint64_t, mut Y1: *mut uint64_t, mut X2: *mut uint64_t, mut Y2: *mut uint64_t) {
    /* t1 = X1, t2 = Y1, t3 = X2, t4 = Y2 */
    // Zero-init instead of MaybeUninit::uninit().assume_init() (UB for integer arrays);
    // each temporary is written (vli_modSub) before it is read.
    let mut t5: [uint64_t; 6] = [0; 6]; /* t5 = x2 - x1 */
    let mut t6: [uint64_t; 6] = [0; 6]; /* t5 = (x2 - x1)^2 = A */
    let mut t7: [uint64_t; 6] = [0; 6]; /* t1 = x1*A = B */
    vli_modSub(t5.as_mut_ptr(), X2, X1, curve_p.as_mut_ptr()); /* t3 = x2*A = C */
    vli_modSquare_fast(t5.as_mut_ptr(), t5.as_mut_ptr()); /* t4 = y2 + y1 */
    vli_modMult_fast(X1, X1, t5.as_mut_ptr()); /* t4 = y2 - y1 */
    vli_modMult_fast(X2, X2, t5.as_mut_ptr()); /* t6 = C - B */
    vli_modAdd(t5.as_mut_ptr(), Y2, Y1, curve_p.as_mut_ptr()); /* t2 = y1 * (C - B) */
    vli_modSub(Y2, Y2, Y1, curve_p.as_mut_ptr()); /* t6 = B + C */
    vli_modSub(t6.as_mut_ptr(), X2, X1, curve_p.as_mut_ptr()); /* t3 = (y2 - y1)^2 */
    vli_modMult_fast(Y1, Y1, t6.as_mut_ptr()); /* t3 = x3 */
    vli_modAdd(t6.as_mut_ptr(), X1, X2, curve_p.as_mut_ptr()); /* t7 = B - x3 */
    vli_modSquare_fast(X2, Y2); /* t4 = (y2 - y1)*(B - x3) */
    vli_modSub(X2, X2, t6.as_mut_ptr(), curve_p.as_mut_ptr()); /* t4 = y3 */
    vli_modSub(t7.as_mut_ptr(), X1, X2, curve_p.as_mut_ptr()); /* t7 = (y2 + y1)^2 = F */
    vli_modMult_fast(Y2, Y2, t7.as_mut_ptr()); /* t7 = x3' */
    vli_modSub(Y2, Y2, Y1, curve_p.as_mut_ptr()); /* t6 = x3' - B */
    vli_modSquare_fast(t7.as_mut_ptr(), t5.as_mut_ptr()); /* t6 = (y2 + y1)*(x3' - B) */
    vli_modSub(t7.as_mut_ptr(), t7.as_mut_ptr(), t6.as_mut_ptr(), curve_p.as_mut_ptr()); /* t2 = y3' */
    vli_modSub(t6.as_mut_ptr(), t7.as_mut_ptr(), X1, curve_p.as_mut_ptr());
    vli_modMult_fast(t6.as_mut_ptr(), t6.as_mut_ptr(), t5.as_mut_ptr());
    vli_modSub(Y1, t6.as_mut_ptr(), Y1, curve_p.as_mut_ptr());
    vli_set(X1, t7.as_mut_ptr());
}
#[inline(always)]
unsafe fn EccPoint_mult(mut p_result: *mut EccPoint, mut p_point: *mut EccPoint, mut p_scalar: *mut uint64_t, mut p_initialZ: *mut uint64_t) {
/* R0 and R1 */
let mut Rx: [[uint64_t; 6]; 2] = std::mem::MaybeUninit::uninit().assume_init();
let mut Ry: [[uint64_t; 6]; 2] = std::mem::MaybeUninit::uninit().assume_init();
let mut z: [uint64_t; 6] = std::mem::MaybeUninit::uninit().assume_init();
let mut i: libc::c_int = 0;
let mut nb: libc::c_int = 0;
vli_set(Rx[1 as libc::c_int as usize].as_mut_ptr(), (*p_point).x.as_mut_ptr());
vli_set(Ry[1 as libc::c_int as usize].as_mut_ptr(), (*p_point).y.as_mut_ptr());
XYcZ_initial_double(
Rx[1 as libc::c_int as usize].as_mut_ptr(),
Ry[1 as libc::c_int as usize].as_mut_ptr(),
Rx[0 as libc::c_int as usize].as_mut_ptr(),
Ry[0 as libc::c_int as usize].as_mut_ptr(),
p_initialZ,
);
i = vli_numBits(p_scalar).wrapping_sub(2 as libc::c_int as libc::c_uint) as libc::c_int;
while i > 0 as libc::c_int {
nb = (vli_testBit(p_scalar, i as uint) == 0) as libc::c_int;
XYcZ_addC(Rx[(1 as libc::c_int - nb) as usize].as_mut_ptr(), Ry[(1 as libc::c_int - nb) as usize].as_mut_ptr(), Rx[nb as usize].as_mut_ptr(), Ry[nb as usize].as_mut_ptr());
XYcZ_add(Rx[nb as usize].as_mut_ptr(), Ry[nb as usize].as_mut_ptr(), Rx[(1 as libc::c_int - nb) as usize].as_mut_ptr(), Ry[(1 as libc::c_int - nb) as usize].as_mut_ptr());
i -= 1
}
nb = (vli_testBit(p_scalar, 0 as libc::c_int as uint) == 0) as libc::c_int;
XYcZ_addC(Rx[(1 as libc::c_int - nb) as usize].as_mut_ptr(), Ry[(1 as libc::c_int - nb) as usize].as_mut_ptr(), Rx[nb as usize].as_mut_ptr(), Ry[nb as usize].as_mut_ptr());
/* Find final 1/Z value. */
vli_modSub(z.as_mut_ptr(), Rx[1 as libc::c_int as usize].as_mut_ptr(), Rx[0 as libc::c_int as usize].as_mut_ptr(), curve_p.as_mut_ptr()); /* X1 - X0 */
vli_modMult_fast(z.as_mut_ptr(), z.as_mut_ptr(), Ry[(1 as libc::c_int - nb) as usize].as_mut_ptr()); /* Yb * (X1 - X0) */
vli_modMult_fast(z.as_mut_ptr(), z.as_mut_ptr(), (*p_point).x.as_mut_ptr()); /* xP * Yb * (X1 - X0) */
vli_modInv(z.as_mut_ptr(), z.as_mut_ptr(), curve_p.as_mut_ptr()); /* 1 / (xP * Yb * (X1 - X0)) */
vli_modMult_fast(z.as_mut_ptr(), z.as_mut_ptr(), (*p_point).y.as_mut_ptr()); /* yP / (xP * Yb * (X1 - X0)) */
vli_modMult_fast(z.as_mut_ptr(), z.as_mut_ptr(), Rx[(1 as libc::c_int - nb) as usize].as_mut_ptr()); /* Xb * yP / (xP * Yb * (X1 - X0)) */
/* End 1/Z calculation */
XYcZ_add(Rx[nb as usize].as_mut_ptr(), Ry[nb as usize].as_mut_ptr(), Rx[(1 as libc::c_int - nb) as usize].as_mut_ptr(), Ry[(1 as libc::c_int - nb) as usize].as_mut_ptr());
apply_z(Rx[0 as libc::c_int as usize].as_mut_ptr(), Ry[0 as libc::c_int as usize].as_mut_ptr(), z.as_mut_ptr());
vli_set((*p_result).x.as_mut_ptr(), Rx[0 as libc::c_int as usize].as_mut_ptr());
vli_set((*p_result).y.as_mut_ptr(), Ry[0 as libc::c_int as usize].as_mut_ptr());
}
#[inline(always)]
unsafe fn ecc_bytes2native(mut p_native: *mut uint64_t, mut p_bytes: *const uint8_t) {
let mut i: libc::c_uint = 0;
i = 0 as libc::c_int as libc::c_uint;
while i < (48 as libc::c_int / 8 as libc::c_int) as libc::c_uint {
let mut p_digit: *const uint8_t = p_bytes.offset((8 as libc::c_int as libc::c_uint).wrapping_mul(((48 as libc::c_int / 8 as libc::c_int - 1 as libc::c_int) as libc::c_uint).wrapping_sub(i)) as isize);
*p_native.offset(i as isize) = (*p_digit.offset(0 as libc::c_int as isize) as uint64_t) << 56 as libc::c_int
| (*p_digit.offset(1 as libc::c_int as isize) as uint64_t) << 48 as libc::c_int
| (*p_digit.offset(2 as libc::c_int as isize) as uint64_t) << 40 as libc::c_int
| (*p_digit.offset(3 as libc::c_int as isize) as uint64_t) << 32 as libc::c_int
| (*p_digit.offset(4 as libc::c_int as isize) as uint64_t) << 24 as libc::c_int
| (*p_digit.offset(5 as libc::c_int as isize) as uint64_t) << 16 as libc::c_int
| (*p_digit.offset(6 as libc::c_int as isize) as uint64_t) << 8 as libc::c_int
| *p_digit.offset(7 as libc::c_int as isize) as uint64_t;
i = i.wrapping_add(1)
}
}
#[inline(always)]
unsafe fn ecc_native2bytes(mut p_bytes: *mut uint8_t, mut p_native: *const uint64_t) {
let mut i: libc::c_uint = 0;
i = 0 as libc::c_int as libc::c_uint;
while i < (48 as libc::c_int / 8 as libc::c_int) as libc::c_uint {
let mut p_digit: *mut uint8_t = p_bytes.offset((8 as libc::c_int as libc::c_uint).wrapping_mul(((48 as libc::c_int / 8 as libc::c_int - 1 as libc::c_int) as libc::c_uint).wrapping_sub(i)) as isize);
*p_digit.offset(0 as libc::c_int as isize) = (*p_native.offset(i as isize) >> 56 as libc::c_int) as uint8_t;
*p_digit.offset(1 as libc::c_int as isize) = (*p_native.offset(i as isize) >> 48 as libc::c_int) as uint8_t;
*p_digit.offset(2 as libc::c_int as isize) = (*p_native.offset(i as isize) >> 40 as libc::c_int) as uint8_t;
*p_digit.offset(3 as libc::c_int as isize) = (*p_native.offset(i as isize) >> 32 as libc::c_int) as uint8_t;
*p_digit.offset(4 as libc::c_int as isize) = (*p_native.offset(i as isize) >> 24 as libc::c_int) as uint8_t;
*p_digit.offset(5 as libc::c_int as isize) = (*p_native.offset(i as isize) >> 16 as libc::c_int) as uint8_t;
*p_digit.offset(6 as libc::c_int as isize) = (*p_native.offset(i as isize) >> 8 as libc::c_int) as uint8_t;
*p_digit.offset(7 as libc::c_int as isize) = *p_native.offset(i as isize) as uint8_t;
i = i.wrapping_add(1)
}
}
/* Compute a = sqrt(a) (mod curve_p), in place. */
#[inline(always)]
unsafe fn mod_sqrt(mut a: *mut uint64_t) {
    let mut i: libc::c_uint = 0;
    let mut p1: [uint64_t; 6] = [1 as libc::c_int as uint64_t, 0, 0, 0, 0, 0];
    let mut l_result: [uint64_t; 6] = [1 as libc::c_int as uint64_t, 0, 0, 0, 0, 0];
    /* Since curve_p == 3 (mod 4) for all supported curves, we can
    compute sqrt(a) = a^((curve_p + 1) / 4) (mod curve_p). */
    vli_add(p1.as_mut_ptr(), curve_p.as_mut_ptr(), p1.as_mut_ptr()); /* p1 = curve_p + 1 */
    /* Square-and-multiply over the bits of p1; stopping at i > 1 skips the two
       lowest bits, which implicitly divides the exponent by 4. */
    i = vli_numBits(p1.as_mut_ptr()).wrapping_sub(1 as libc::c_int as libc::c_uint);
    while i > 1 as libc::c_int as libc::c_uint {
        vli_modSquare_fast(l_result.as_mut_ptr(), l_result.as_mut_ptr()); /* result = result^2 */
        if vli_testBit(p1.as_mut_ptr(), i) != 0 {
            vli_modMult_fast(l_result.as_mut_ptr(), l_result.as_mut_ptr(), a); /* result *= a when exponent bit set */
        }
        i = i.wrapping_sub(1)
    }
    vli_set(a, l_result.as_mut_ptr());
}
/* Decompress a compressed public key (parity prefix byte 0x02/0x03 followed by
   the 48-byte big-endian x coordinate) into an affine point by solving the
   curve equation y^2 = x^3 - 3x + b (mod curve_p). */
#[inline(always)]
unsafe fn ecc_point_decompress(mut p_point: *mut EccPoint, mut p_compressed: *const uint8_t) {
    let mut _3: [uint64_t; 6] = [3 as libc::c_int as uint64_t, 0, 0, 0, 0, 0]; /* -a = 3 (curve uses a = -3) */
    ecc_bytes2native((*p_point).x.as_mut_ptr(), p_compressed.offset(1 as libc::c_int as isize)); /* skip the prefix byte */
    vli_modSquare_fast((*p_point).y.as_mut_ptr(), (*p_point).x.as_mut_ptr()); /* y = x^2 */
    vli_modSub((*p_point).y.as_mut_ptr(), (*p_point).y.as_mut_ptr(), _3.as_mut_ptr(), curve_p.as_mut_ptr()); /* y = x^2 - 3 */
    vli_modMult_fast((*p_point).y.as_mut_ptr(), (*p_point).y.as_mut_ptr(), (*p_point).x.as_mut_ptr()); /* y = x^3 - 3x */
    vli_modAdd((*p_point).y.as_mut_ptr(), (*p_point).y.as_mut_ptr(), curve_b.as_mut_ptr(), curve_p.as_mut_ptr()); /* y = x^3 - 3x + b */
    mod_sqrt((*p_point).y.as_mut_ptr()); /* y = sqrt(x^3 - 3x + b) */
    /* Select the root whose parity matches the prefix (0x02 = even y, 0x03 = odd y). */
    if (*p_point).y[0 as libc::c_int as usize] & 0x1 as libc::c_int as libc::c_ulong != (*p_compressed.offset(0 as libc::c_int as isize) as libc::c_int & 0x1 as libc::c_int) as libc::c_ulong {
        vli_sub((*p_point).y.as_mut_ptr(), curve_p.as_mut_ptr(), (*p_point).y.as_mut_ptr());
    };
}
pub unsafe fn ecc_make_key(mut p_publicKey: *mut uint8_t, mut p_privateKey: *mut uint8_t) -> libc::c_int {
let mut l_private: [uint64_t; 6] = std::mem::MaybeUninit::uninit().assume_init();
let mut l_public: EccPoint = std::mem::MaybeUninit::uninit().assume_init();
let mut l_tries: libc::c_uint = 0 as libc::c_int as libc::c_uint;
loop {
if getRandomNumber(l_private.as_mut_ptr()) == 0 || {
let fresh3 = l_tries;
l_tries = l_tries.wrapping_add(1);
(fresh3) >= 1024 as libc::c_int as libc::c_uint
} {
return 0 as libc::c_int;
}
if !(vli_isZero(l_private.as_mut_ptr()) != 0) {
/* Make sure the private key is in the range [1, n-1].
For the supported curves, n is always large enough that we only need to subtract once at most. */
if vli_cmp(curve_n.as_mut_ptr(), l_private.as_mut_ptr()) != 1 as libc::c_int {
vli_sub(l_private.as_mut_ptr(), l_private.as_mut_ptr(), curve_n.as_mut_ptr());
}
EccPoint_mult(&mut l_public, &mut curve_G, l_private.as_mut_ptr(), 0 as *mut uint64_t);
}
if !(EccPoint_isZero(&mut l_public) != 0) {
break;
}
}
ecc_native2bytes(p_privateKey, l_private.as_mut_ptr() as *const uint64_t);
ecc_native2bytes(p_publicKey.offset(1 as libc::c_int as isize), l_public.x.as_mut_ptr() as *const uint64_t);
*p_publicKey.offset(0 as libc::c_int as isize) = (2 as libc::c_int as libc::c_ulong).wrapping_add(l_public.y[0 as libc::c_int as usize] & 0x1 as libc::c_int as libc::c_ulong) as uint8_t;
return 1 as libc::c_int;
}
/* Compute the ECDH shared secret (x coordinate of privateKey * publicKey) into
   p_secret (48 bytes). The random initial Z blinds the ladder's coordinates.
   Returns 1 on success, 0 if the RNG fails or the product is the point at infinity. */
pub unsafe fn ecdh_shared_secret(mut p_publicKey: *const uint8_t, mut p_privateKey: *const uint8_t, mut p_secret: *mut uint8_t) -> libc::c_int {
    // Zero-initialized instead of MaybeUninit::uninit().assume_init() (UB for integer
    // arrays/structs); every field is overwritten before being read.
    let mut l_public: EccPoint = EccPoint { x: [0; 6], y: [0; 6] };
    let mut l_private: [uint64_t; 6] = [0; 6];
    let mut l_random: [uint64_t; 6] = [0; 6];
    if getRandomNumber(l_random.as_mut_ptr()) == 0 {
        return 0 as libc::c_int;
    }
    ecc_point_decompress(&mut l_public, p_publicKey);
    ecc_bytes2native(l_private.as_mut_ptr(), p_privateKey);
    let mut l_product: EccPoint = EccPoint { x: [0; 6], y: [0; 6] };
    EccPoint_mult(&mut l_product, &mut l_public, l_private.as_mut_ptr(), l_random.as_mut_ptr());
    ecc_native2bytes(p_secret, l_product.x.as_mut_ptr() as *const uint64_t);
    return (EccPoint_isZero(&mut l_product) == 0) as libc::c_int;
}
/* -------- ECDSA code -------- */
/* Computes p_result = (p_left * p_right) % p_mod. */
#[inline(always)]
unsafe fn vli_modMult(mut p_result: *mut uint64_t, mut p_left: *mut uint64_t, mut p_right: *mut uint64_t, mut p_mod: *mut uint64_t) {
    // Zero-initialized instead of MaybeUninit::uninit().assume_init() (UB for integer
    // arrays); l_product is filled by vli_mult and l_modMultiple by vli_clear before read.
    let mut l_product: [uint64_t; 12] = [0; 12];
    let mut l_modMultiple: [uint64_t; 12] = [0; 12];
    let mut l_digitShift: uint = 0;
    let mut l_bitShift: uint = 0;
    let mut l_productBits: uint = 0;
    let mut l_modBits: uint = vli_numBits(p_mod);
    vli_mult(l_product.as_mut_ptr(), p_left, p_right);
    l_productBits = vli_numBits(l_product.as_mut_ptr().offset((48 as libc::c_int / 8 as libc::c_int) as isize));
    if l_productBits != 0 {
        l_productBits = (l_productBits as libc::c_uint).wrapping_add((48 as libc::c_int / 8 as libc::c_int * 64 as libc::c_int) as libc::c_uint) as uint as uint
    } else {
        l_productBits = vli_numBits(l_product.as_mut_ptr())
    }
    if l_productBits < l_modBits {
        /* l_product < p_mod. */
        vli_set(p_result, l_product.as_mut_ptr());
        return;
    }
    /* Shift p_mod by (l_leftBits - l_modBits). This multiplies p_mod by the largest
    power of two possible while still resulting in a number less than p_left. */
    vli_clear(l_modMultiple.as_mut_ptr());
    vli_clear(l_modMultiple.as_mut_ptr().offset((48 as libc::c_int / 8 as libc::c_int) as isize));
    l_digitShift = l_productBits.wrapping_sub(l_modBits).wrapping_div(64 as libc::c_int as libc::c_uint);
    l_bitShift = l_productBits.wrapping_sub(l_modBits).wrapping_rem(64 as libc::c_int as libc::c_uint);
    if l_bitShift != 0 {
        l_modMultiple[l_digitShift.wrapping_add((48 as libc::c_int / 8 as libc::c_int) as libc::c_uint) as usize] = vli_lshift(l_modMultiple.as_mut_ptr().offset(l_digitShift as isize), p_mod, l_bitShift)
    } else {
        vli_set(l_modMultiple.as_mut_ptr().offset(l_digitShift as isize), p_mod);
    }
    /* Subtract all multiples of p_mod to get the remainder. */
    vli_clear(p_result); /* Use p_result as a temp var to store 1 (for subtraction) */
    *p_result.offset(0 as libc::c_int as isize) = 1 as libc::c_int as uint64_t;
    while l_productBits > (48 as libc::c_int / 8 as libc::c_int * 64 as libc::c_int) as libc::c_uint || vli_cmp(l_modMultiple.as_mut_ptr(), p_mod) >= 0 as libc::c_int {
        let mut l_cmp: libc::c_int = vli_cmp(l_modMultiple.as_mut_ptr().offset((48 as libc::c_int / 8 as libc::c_int) as isize), l_product.as_mut_ptr().offset((48 as libc::c_int / 8 as libc::c_int) as isize));
        if l_cmp < 0 as libc::c_int || l_cmp == 0 as libc::c_int && vli_cmp(l_modMultiple.as_mut_ptr(), l_product.as_mut_ptr()) <= 0 as libc::c_int {
            if vli_sub(l_product.as_mut_ptr(), l_product.as_mut_ptr(), l_modMultiple.as_mut_ptr()) != 0 {
                /* borrow */
                vli_sub(l_product.as_mut_ptr().offset((48 as libc::c_int / 8 as libc::c_int) as isize), l_product.as_mut_ptr().offset((48 as libc::c_int / 8 as libc::c_int) as isize), p_result);
            }
            vli_sub(
                l_product.as_mut_ptr().offset((48 as libc::c_int / 8 as libc::c_int) as isize),
                l_product.as_mut_ptr().offset((48 as libc::c_int / 8 as libc::c_int) as isize),
                l_modMultiple.as_mut_ptr().offset((48 as libc::c_int / 8 as libc::c_int) as isize),
            );
        }
        /* Halve the mod multiple (192-bit right shift across both halves). */
        let mut l_carry: uint64_t = (l_modMultiple[(48 as libc::c_int / 8 as libc::c_int) as usize] & 0x1 as libc::c_int as libc::c_ulong) << 63 as libc::c_int;
        vli_rshift1(l_modMultiple.as_mut_ptr().offset((48 as libc::c_int / 8 as libc::c_int) as isize));
        vli_rshift1(l_modMultiple.as_mut_ptr());
        l_modMultiple[(48 as libc::c_int / 8 as libc::c_int - 1 as libc::c_int) as usize] |= l_carry;
        l_productBits = l_productBits.wrapping_sub(1)
    }
    vli_set(p_result, l_product.as_mut_ptr());
}
/* Returns the larger of the two unsigned values. */
#[inline(always)]
unsafe fn umax(mut a: uint, mut b: uint) -> uint {
    if a > b {
        a
    } else {
        b
    }
}
pub unsafe fn ecdsa_sign(mut p_privateKey: *const uint8_t, mut p_hash: *const uint8_t, mut p_signature: *mut uint8_t) -> libc::c_int {
let mut k: [uint64_t; 6] = std::mem::MaybeUninit::uninit().assume_init();
let mut l_tmp: [uint64_t; 6] = std::mem::MaybeUninit::uninit().assume_init();
let mut l_s: [uint64_t; 6] = std::mem::MaybeUninit::uninit().assume_init();
let mut p: EccPoint = std::mem::MaybeUninit::uninit().assume_init();
let mut l_tries: libc::c_uint = 0 as libc::c_int as libc::c_uint;
loop {
if getRandomNumber(k.as_mut_ptr()) == 0 || {
let fresh4 = l_tries;
l_tries = l_tries.wrapping_add(1);
(fresh4) >= 1024 as libc::c_int as libc::c_uint
} {
return 0 as libc::c_int;
}
if !(vli_isZero(k.as_mut_ptr()) != 0) {
if vli_cmp(curve_n.as_mut_ptr(), k.as_mut_ptr()) != 1 as libc::c_int {
vli_sub(k.as_mut_ptr(), k.as_mut_ptr(), curve_n.as_mut_ptr());
}
/* tmp = k * G */
EccPoint_mult(&mut p, &mut curve_G, k.as_mut_ptr(), 0 as *mut uint64_t);
/* r = x1 (mod n) */
if vli_cmp(curve_n.as_mut_ptr(), p.x.as_mut_ptr()) != 1 as libc::c_int {
vli_sub(p.x.as_mut_ptr(), p.x.as_mut_ptr(), curve_n.as_mut_ptr());
/* s = r*d */
}
} /* s = e + r*d */
if !(vli_isZero(p.x.as_mut_ptr()) != 0) {
break; /* k = 1 / k */
}
} /* s = (e + r*d) / k */
ecc_native2bytes(p_signature, p.x.as_mut_ptr() as *const uint64_t);
ecc_bytes2native(l_tmp.as_mut_ptr(), p_privateKey);
vli_modMult(l_s.as_mut_ptr(), p.x.as_mut_ptr(), l_tmp.as_mut_ptr(), curve_n.as_mut_ptr());
ecc_bytes2native(l_tmp.as_mut_ptr(), p_hash);
vli_modAdd(l_s.as_mut_ptr(), l_tmp.as_mut_ptr(), l_s.as_mut_ptr(), curve_n.as_mut_ptr());
vli_modInv(k.as_mut_ptr(), k.as_mut_ptr(), curve_n.as_mut_ptr());
vli_modMult(l_s.as_mut_ptr(), l_s.as_mut_ptr(), k.as_mut_ptr(), curve_n.as_mut_ptr());
ecc_native2bytes(p_signature.offset(48 as libc::c_int as isize), l_s.as_mut_ptr() as *const uint64_t);
return 1 as libc::c_int;
}
pub unsafe fn ecdsa_verify(mut p_publicKey: *const uint8_t, mut p_hash: *const uint8_t, mut p_signature: *const uint8_t) -> libc::c_int {
let mut u1: [uint64_t; 6] = std::mem::MaybeUninit::uninit().assume_init();
let mut u2: [uint64_t; 6] = std::mem::MaybeUninit::uninit().assume_init();
let mut z: [uint64_t; 6] = std::mem::MaybeUninit::uninit().assume_init();
let mut l_public: EccPoint = std::mem::MaybeUninit::uninit().assume_init();
let mut l_sum: EccPoint = std::mem::MaybeUninit::uninit().assume_init();
let mut rx: [uint64_t; 6] = std::mem::MaybeUninit::uninit().assume_init();
let mut ry: [uint64_t; 6] = std::mem::MaybeUninit::uninit().assume_init();
let mut tx: [uint64_t; 6] = std::mem::MaybeUninit::uninit().assume_init();
let mut ty: [uint64_t; 6] = std::mem::MaybeUninit::uninit().assume_init();
let mut tz: [uint64_t; 6] = std::mem::MaybeUninit::uninit().assume_init();
let mut l_r: [uint64_t; 6] = std::mem::MaybeUninit::uninit().assume_init();
let mut l_s: [uint64_t; 6] = std::mem::MaybeUninit::uninit().assume_init();
ecc_point_decompress(&mut l_public, p_publicKey);
ecc_bytes2native(l_r.as_mut_ptr(), p_signature);
ecc_bytes2native(l_s.as_mut_ptr(), p_signature.offset(48 as libc::c_int as isize));
if vli_isZero(l_r.as_mut_ptr()) != 0 || vli_isZero(l_s.as_mut_ptr()) != 0 {
/* r, s must not be 0. */
return 0 as libc::c_int;
}
if vli_cmp(curve_n.as_mut_ptr(), l_r.as_mut_ptr()) != 1 as libc::c_int || vli_cmp(curve_n.as_mut_ptr(), l_s.as_mut_ptr()) != 1 as libc::c_int {
/* r, s must be < n. */
return 0 as libc::c_int;
}
/* Calculate u1 and u2. */
vli_modInv(z.as_mut_ptr(), l_s.as_mut_ptr(), curve_n.as_mut_ptr()); /* Z = s^-1 */
ecc_bytes2native(u1.as_mut_ptr(), p_hash); /* u1 = e/s */
vli_modMult(u1.as_mut_ptr(), u1.as_mut_ptr(), z.as_mut_ptr(), curve_n.as_mut_ptr()); /* u2 = r/s */
vli_modMult(u2.as_mut_ptr(), l_r.as_mut_ptr(), z.as_mut_ptr(), curve_n.as_mut_ptr());
/* Calculate l_sum = G + Q. */
vli_set(l_sum.x.as_mut_ptr(), l_public.x.as_mut_ptr()); /* Z = x2 - x1 */
vli_set(l_sum.y.as_mut_ptr(), l_public.y.as_mut_ptr()); /* Z = 1/Z */
vli_set(tx.as_mut_ptr(), curve_G.x.as_mut_ptr());
vli_set(ty.as_mut_ptr(), curve_G.y.as_mut_ptr());
vli_modSub(z.as_mut_ptr(), l_sum.x.as_mut_ptr(), tx.as_mut_ptr(), curve_p.as_mut_ptr());
XYcZ_add(tx.as_mut_ptr(), ty.as_mut_ptr(), l_sum.x.as_mut_ptr(), l_sum.y.as_mut_ptr());
vli_modInv(z.as_mut_ptr(), z.as_mut_ptr(), curve_p.as_mut_ptr());
apply_z(l_sum.x.as_mut_ptr(), l_sum.y.as_mut_ptr(), z.as_mut_ptr());
/* Use Shamir's trick to calculate u1*G + u2*Q */
let mut l_points: [*mut EccPoint; 4] = [0 as *mut EccPoint, &mut curve_G, &mut l_public, &mut l_sum]; /* Z = x2 - x1 */
let mut l_numBits: uint = umax(vli_numBits(u1.as_mut_ptr()), vli_numBits(u2.as_mut_ptr())); /* Z = 1/Z */
let mut l_point: *mut EccPoint = l_points[((vli_testBit(u1.as_mut_ptr(), l_numBits.wrapping_sub(1 as libc::c_int as libc::c_uint)) != 0) as libc::c_int | ((vli_testBit(u2.as_mut_ptr(), l_numBits.wrapping_sub(1 as libc::c_int as libc::c_uint)) != 0) as libc::c_int) << 1 as libc::c_int) as usize];
vli_set(rx.as_mut_ptr(), (*l_point).x.as_mut_ptr());
vli_set(ry.as_mut_ptr(), (*l_point).y.as_mut_ptr());
vli_clear(z.as_mut_ptr());
z[0 as libc::c_int as usize] = 1 as libc::c_int as uint64_t;
let mut i: libc::c_int = 0;
i = l_numBits.wrapping_sub(2 as libc::c_int as libc::c_uint) as libc::c_int;
while i >= 0 as libc::c_int {
EccPoint_double_jacobian(rx.as_mut_ptr(), ry.as_mut_ptr(), z.as_mut_ptr());
let mut l_index: libc::c_int = (vli_testBit(u1.as_mut_ptr(), i as uint) != 0) as libc::c_int | ((vli_testBit(u2.as_mut_ptr(), i as uint) != 0) as libc::c_int) << 1 as libc::c_int;
let mut l_point_0: *mut EccPoint = l_points[l_index as usize];
if !l_point_0.is_null() {
vli_set(tx.as_mut_ptr(), (*l_point_0).x.as_mut_ptr());
vli_set(ty.as_mut_ptr(), (*l_point_0).y.as_mut_ptr());
apply_z(tx.as_mut_ptr(), ty.as_mut_ptr(), z.as_mut_ptr());
vli_modSub(tz.as_mut_ptr(), rx.as_mut_ptr(), tx.as_mut_ptr(), curve_p.as_mut_ptr());
XYcZ_add(tx.as_mut_ptr(), ty.as_mut_ptr(), rx.as_mut_ptr(), ry.as_mut_ptr());
vli_modMult_fast(z.as_mut_ptr(), z.as_mut_ptr(), tz.as_mut_ptr());
}
i -= 1
}
vli_modInv(z.as_mut_ptr(), z.as_mut_ptr(), curve_p.as_mut_ptr());
apply_z(rx.as_mut_ptr(), ry.as_mut_ptr(), z.as_mut_ptr());
/* v = x1 (mod n) */
if vli_cmp(curve_n.as_mut_ptr(), rx.as_mut_ptr()) != 1 as libc::c_int {
vli_sub(rx.as_mut_ptr(), rx.as_mut_ptr(), curve_n.as_mut_ptr());
}
/* Accept only if v == r. */
return (vli_cmp(rx.as_mut_ptr(), l_r.as_mut_ptr()) == 0 as libc::c_int) as libc::c_int;
}

View file

@ -1,6 +1,8 @@
// (c) 2020-2022 ZeroTier, Inc. -- currently proprietary pending actual release and licensing. See LICENSE.md.
use std::io::Write;
use std::num::NonZeroU64;
use std::ops::Deref;
use std::sync::atomic::{AtomicU64, Ordering};
use crate::aes::{Aes, AesGcm};
@ -8,56 +10,11 @@ use crate::hash::{hmac_sha384, hmac_sha512, SHA384, SHA512};
use crate::p384::{P384KeyPair, P384PublicKey, P384_PUBLIC_KEY_SIZE};
use crate::random;
use crate::secret::Secret;
use crate::varint;
use parking_lot::{Mutex, RwLock, RwLockUpgradableReadGuard};
/*
ZeroTier V2 Noise(-like?) Session Protocol
This protocol implements the Noise_IK key exchange pattern using NIST P-384 ECDH, AES-GCM,
and SHA512. So yes, Virginia, it's a FIPS-compliant Noise implementation. NIST P-384 is
not listed in official Noise documentation though, so consider it "Noise-like" if you
prefer.
See also: http://noiseprotocol.org/noise.html
Secondary hybrid exchange using Kyber512, the recently approved post-quantum KEX algorithm,
is also supported but is optional. When it is enabled the additional shared secret is
mixed into the final Noise_IK secret with HMAC/HKDF. This provides an exchange at least as
strong as the stronger of the two algorithms (ECDH and Kyber) since hashing anything with
a secret yields a secret.
Kyber theoretically provides data forward secrecy into the post-quantum era if and when it
arrives. It might also reassure those paranoid about NIST elliptic curves a little, though
we tend to accept the arguments of Koblitz and Menezes against the curves being backdoored.
These arguments are explained at the end of this post:
https://blog.cryptographyengineering.com/2015/10/22/a-riddle-wrapped-in-curve/
Kyber is used as long as both sides set the "jedi" parameter to true. It should be used
by default but can be disabled on tiny and slow devices or systems that talk to vast
numbers of endpoints and don't want the extra overhead.
Lastly, this protocol includes obfuscation using a hash of the recipient's public identity
as a key. AES is used to encrypt the first block of each packet (or the first few blocks
for key exchange packets), making packets appear as pure noise to anyone who does not know
the identity of the recipient.
Obfuscation renders ZeroTier traffic uncategorizable to those who do not know the identity
of a packet's recipient, helping to defend against bulk de-anonymization. It also makes it
easy for recipient nodes to silently discard packets from senders that do not know them,
maintaining invisibility from naive network scanners.
Obfuscation doesn't play any meaningful role in data privacy or authentication. It can be
ignored when analyzing the "real" security of the protocol.
*/
/// Minimum packet size / minimum size for work buffers.
pub const MIN_BUFFER_SIZE: usize = 1400;
/// Minimum possible packet size.
/// Minimum possible packet size. Packets smaller than this are rejected.
pub const MIN_PACKET_SIZE: usize = HEADER_SIZE + 1 + AES_GCM_TAG_SIZE;
/// Start attempting to rekey after a key has been used to send packets this many times.
@ -75,19 +32,22 @@ const REKEY_AFTER_TIME_MS: i64 = 1000 * 60 * 60; // 1 hour
/// Maximum random jitter to add to rekey-after time.
const REKEY_AFTER_TIME_MS_MAX_JITTER: u32 = 1000 * 60 * 5;
/// Don't send or process inbound offers more often than this.
const OFFER_RATE_LIMIT_MS: i64 = 1000;
/// Rate limit for sending new offers to attempt to re-key.
const OFFER_RATE_LIMIT_MS: i64 = 2000;
/// Version 1: NIST P-384 forward secrecy and authentication with optional Kyber1024 forward secrecy (but not authentication)
const SESSION_PROTOCOL_VERSION: u8 = 1;
const PACKET_TYPE_DATA: u8 = 0;
const PACKET_TYPE_NOP: u8 = 1;
const PACKET_TYPE_KEY_OFFER: u8 = 2; // "alice"
const PACKET_TYPE_KEY_COUNTER_OFFER: u8 = 3; // "bob"
/// Secondary (hybrid) ephemeral key disabled.
const E1_TYPE_NONE: u8 = 0;
const GET_PACKET_TYPE_BIT_MASK: u8 = 0x1f;
const GET_PROTOCOL_VERSION_SHIFT_RIGHT: u32 = 5;
/// Secondary (hybrid) ephemeral key is Kyber512
const E1_TYPE_KYBER512: u8 = 1;
const E1_TYPE_NONE: u8 = 0;
const E1_TYPE_KYBER1024: u8 = 1;
const HEADER_SIZE: usize = 11;
const AES_GCM_TAG_SIZE: usize = 16;
@ -99,9 +59,9 @@ const SESSION_ID_SIZE: usize = 6;
/// It doesn't matter very much what this is, but it's good for it to be unique.
const KEY_DERIVATION_CHAIN_STARTING_SALT: [u8; 64] = [
// macOS command line to generate:
// echo -n 'Noise_IKpsk2_NISTP384+hybrid_AESGCM_SHA512' | shasum -a 512 | cut -d ' ' -f 1 | xxd -r -p | xxd -i
0xc7, 0x66, 0xf3, 0x71, 0xc8, 0xbc, 0xc3, 0x19, 0xc6, 0xf0, 0x2a, 0x6e, 0x5c, 0x4b, 0x3c, 0xc0, 0x83, 0x29, 0x09, 0x09, 0x14, 0x4a, 0xf0, 0xde, 0xea, 0x3d, 0xbd, 0x00, 0x4c, 0x9e, 0x01, 0xa0, 0x6e, 0xb6, 0x9b, 0x56, 0x47, 0x97, 0x86, 0x1d, 0x4e, 0x94, 0xc5, 0xdd, 0xde, 0x4a, 0x1c, 0xc3, 0x4e,
0xcc, 0x8b, 0x09, 0x3b, 0xb3, 0xc3, 0xb0, 0x03, 0xd7, 0xdf, 0x22, 0x49, 0x3f, 0xa5, 0x01,
// echo -n 'ZSSP_Noise_IKpsk2_NISTP384_?KYBER1024_AESGCM_SHA512' | shasum -a 512 | cut -d ' ' -f 1 | xxd -r -p | xxd -i
0x35, 0x6a, 0x75, 0xc0, 0xbf, 0xbe, 0xc3, 0x59, 0x70, 0x94, 0x50, 0x69, 0x4c, 0xa2, 0x08, 0x40, 0xc7, 0xdf, 0x67, 0xa8, 0x68, 0x52, 0x6e, 0xd5, 0xdd, 0x77, 0xec, 0x59, 0x6f, 0x8e, 0xa1, 0x99,
0xb4, 0x32, 0x85, 0xaf, 0x7f, 0x0d, 0xa9, 0x6c, 0x01, 0xfb, 0x72, 0x46, 0xc0, 0x09, 0x58, 0xb8, 0xe0, 0xa8, 0xcf, 0xb1, 0x58, 0x04, 0x6e, 0x32, 0xba, 0xa8, 0xb8, 0xf9, 0x0a, 0xa4, 0xbf, 0x36,
];
const KBKDF_KEY_USAGE_LABEL_HMAC: u8 = b'M';
@ -132,12 +92,26 @@ pub enum Error {
/// Packet ignored by rate limiter.
RateLimited,
/// Other end sent a protocol version we don't support.
UnknownProtocolVersion(u8),
/// An internal error occurred.
OtherError(Box<dyn std::error::Error>),
}
impl From<std::io::Error> for Error {
#[cold]
#[inline(never)]
fn from(e: std::io::Error) -> Self {
Self::OtherError(Box::new(e))
}
}
impl std::fmt::Display for Error {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Self::UnknownLocalSessionId(_) => f.write_str("UnknownLocalSessionId"),
Self::UnknownLocalSessionId(id) => f.write_str(format!("UnknownLocalSessionId({})", id.0.get()).as_str()),
Self::InvalidPacket => f.write_str("InvalidPacket"),
Self::InvalidParameter => f.write_str("InvalidParameter"),
Self::FailedAuthentication => f.write_str("FailedAuthentication"),
@ -145,6 +119,8 @@ impl std::fmt::Display for Error {
Self::MaxKeyLifetimeExceeded => f.write_str("MaxKeyLifetimeExceeded"),
Self::SessionNotEstablished => f.write_str("SessionNotEstablished"),
Self::RateLimited => f.write_str("RateLimited"),
Self::UnknownProtocolVersion(v) => f.write_str(format!("UnknownProtocolVersion({})", v).as_str()),
Self::OtherError(e) => f.write_str(format!("OtherError({})", e.to_string()).as_str()),
}
}
}
@ -152,7 +128,6 @@ impl std::fmt::Display for Error {
impl std::error::Error for Error {}
impl std::fmt::Debug for Error {
#[inline(always)]
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
std::fmt::Display::fmt(self, f)
}
@ -168,7 +143,7 @@ impl Obfuscator {
}
}
pub enum ReceiveResult<'a, O> {
pub enum ReceiveResult<'a, H: SessionHost> {
/// Packet is valid and contained a data payload.
OkData(&'a [u8], u32),
@ -176,7 +151,7 @@ pub enum ReceiveResult<'a, O> {
OkSendReply(&'a [u8]),
/// Packet is valid and a new session was created, also includes a reply to be sent back.
OkNewSession(Session<O>, &'a [u8]),
OkNewSession(Session<H>, &'a [u8]),
/// Packet is valid, no action needs to be taken.
Ok,
@ -212,8 +187,8 @@ impl SessionId {
}
#[inline(always)]
pub fn copy_to(&self, b: &mut [u8]) {
b[..6].copy_from_slice(&self.0.get().to_le_bytes()[..6])
pub fn to_bytes(&self) -> [u8; SESSION_ID_SIZE] {
self.0.get().to_le_bytes()[0..6].try_into().unwrap()
}
}
@ -237,10 +212,37 @@ impl From<SessionId> for u64 {
}
}
/// ZeroTier Noise encrypted channel session.
pub struct Session<O> {
/// Local session ID
/// Trait implemented by the application embedding ZSSP.
///
/// The host supplies key material, buffers, and session lookup/acceptance policy;
/// the protocol engine calls back into it during packet processing.
pub trait SessionHost: Sized {
    /// Writable packet buffer type; must expose its contents as a byte slice and support `Write`.
    type Buffer: AsRef<[u8]> + AsMut<[u8]> + Write;
    /// Arbitrary object attached to each session by the host (e.g. peer state).
    type AssociatedObject: Sized;
    /// Reference type returned by session_lookup(), dereferencing to a Session.
    type SessionRef: Deref<Target = Session<Self>>;
    /// Get a reference to this host's static public key blob.
    fn get_local_s_public(&self) -> &[u8];
    /// Get a reference to this host's static NIST P-384 key pair.
    fn get_local_s_keypair_p384(&self) -> &P384KeyPair;
    /// Get an empty writable buffer, or None if none are available and the operation in progress should fail.
    fn get_buffer(&self) -> Option<Self::Buffer>;
    /// Extract a NIST P-384 ECC public key from a static public key blob.
    ///
    /// Returns None if the blob does not contain a valid P-384 key.
    fn extract_p384_static(static_public: &[u8]) -> Option<P384PublicKey>;
    /// Look up a local session by local ID, or None if no such session exists.
    fn session_lookup(&self, local_session_id: SessionId) -> Option<Self::SessionRef>;
    /// Check whether a new session should be accepted.
    ///
    /// On success a tuple of local session ID, static secret, and associated object is returned. The
    /// static secret is whatever results from agreement between the local and remote static public
    /// keys.
    fn accept_new_session(&self, remote_static_public: &[u8], remote_metadata: &[u8]) -> Option<(SessionId, Secret<64>, Self::AssociatedObject)>;
}
pub struct Session<H: SessionHost> {
pub id: SessionId,
pub associated_object: H::AssociatedObject,
send_counter: Counter,
remote_s_public_hash: [u8; 48],
@ -249,9 +251,6 @@ pub struct Session<O> {
outgoing_obfuscator: Obfuscator,
state: RwLock<MutableState>,
remote_s_public_p384: [u8; P384_PUBLIC_KEY_SIZE],
/// Arbitrary object associated with this session
pub associated_object: O,
}
struct MutableState {
@ -260,28 +259,40 @@ struct MutableState {
offer: Option<EphemeralOffer>,
}
impl<O> Session<O> {
/// Create a new session and return this plus an outgoing packet to send to the other end.
pub fn new<'a, const MAX_PACKET_SIZE: usize, const STATIC_PUBLIC_SIZE: usize>(
buffer: &'a mut [u8; MAX_PACKET_SIZE],
impl<H: SessionHost> Session<H> {
pub fn new<SendFunction: FnMut(&[u8])>(
host: &H,
send: SendFunction,
local_session_id: SessionId,
local_s_public: &[u8; STATIC_PUBLIC_SIZE],
local_s_keypair_p384: &P384KeyPair,
remote_s_public: &[u8; STATIC_PUBLIC_SIZE],
remote_s_public_p384: &P384PublicKey,
remote_s_public: &[u8],
offer_metadata: &[u8],
psk: &Secret<64>,
associated_object: O,
associated_object: H::AssociatedObject,
mtu: usize,
current_time: i64,
jedi: bool,
) -> Result<(Self, &'a [u8]), Error> {
debug_assert!(MAX_PACKET_SIZE >= MIN_BUFFER_SIZE);
let counter = Counter::new();
if let Some(ss) = local_s_keypair_p384.agree(remote_s_public_p384) {
let outgoing_obfuscator = Obfuscator::new(remote_s_public);
if let Some((offer, psize)) = EphemeralOffer::create_alice_offer(buffer, counter.next(), local_session_id, None, local_s_public, remote_s_public_p384, &ss, &outgoing_obfuscator, current_time, jedi) {
return Ok((
Session::<O> {
) -> Result<Self, Error> {
if let Some(remote_s_public_p384) = H::extract_p384_static(remote_s_public) {
if let Some(ss) = host.get_local_s_keypair_p384().agree(&remote_s_public_p384) {
let outgoing_obfuscator = Obfuscator::new(remote_s_public);
let counter = Counter::new();
if let Ok(offer) = EphemeralOffer::create_alice_offer(
send,
counter.next(),
local_session_id,
None,
host.get_local_s_public(),
offer_metadata,
&remote_s_public_p384,
&ss,
&outgoing_obfuscator,
mtu,
current_time,
jedi,
) {
return Ok(Self {
id: local_session_id,
associated_object,
send_counter: counter,
remote_s_public_hash: SHA384::hash(remote_s_public),
psk: psk.clone(),
@ -293,41 +304,70 @@ impl<O> Session<O> {
offer: Some(offer),
}),
remote_s_public_p384: remote_s_public_p384.as_bytes().clone(),
associated_object,
},
&buffer[..psize],
));
});
}
}
}
return Err(Error::InvalidParameter);
}
/// Check whether this session should initiate a re-key, returning a packet to send if true.
///
/// This must be checked often enough to ensure that the hard key usage limit is not reached, which in the
/// usual UDP use case means once every ~3TiB of traffic.
pub fn rekey_check<'a, const MAX_PACKET_SIZE: usize, const STATIC_PUBLIC_SIZE: usize>(&self, buffer: &'a mut [u8; MAX_PACKET_SIZE], local_s_public: &[u8; STATIC_PUBLIC_SIZE], current_time: i64, force: bool, jedi: bool) -> Option<&'a [u8]> {
pub fn rekey_check<SendFunction: FnMut(&[u8])>(&self, host: &H, send: SendFunction, offer_metadata: &[u8], mtu: usize, current_time: i64, force: bool, jedi: bool) {
let state = self.state.upgradable_read();
if let Some(key) = state.keys[0].as_ref() {
if force || (key.lifetime.should_rekey(self.send_counter.current(), current_time) && state.offer.as_ref().map_or(true, |o| (current_time - o.creation_time) > OFFER_RATE_LIMIT_MS)) {
if let Some(remote_s_public_p384) = P384PublicKey::from_bytes(&self.remote_s_public_p384) {
if let Some((offer, psize)) = EphemeralOffer::create_alice_offer(buffer, self.send_counter.next(), self.id, state.remote_session_id, local_s_public, &remote_s_public_p384, &self.ss, &self.outgoing_obfuscator, current_time, jedi) {
let mut state = RwLockUpgradableReadGuard::upgrade(state);
let _ = state.offer.replace(offer);
return Some(&buffer[..psize]);
if let Ok(offer) = EphemeralOffer::create_alice_offer(
send,
self.send_counter.next(),
self.id,
state.remote_session_id,
host.get_local_s_public(),
offer_metadata,
&remote_s_public_p384,
&self.ss,
&self.outgoing_obfuscator,
mtu,
current_time,
jedi,
) {
let _ = RwLockUpgradableReadGuard::upgrade(state).offer.replace(offer);
}
}
}
}
return None;
}
/// Send a data packet to the other side, returning packet to send.
pub fn send<'a, const MAX_PACKET_SIZE: usize>(&self, buffer: &'a mut [u8; MAX_PACKET_SIZE], data: &[u8]) -> Result<&'a [u8], Error> {
/*
debug_assert!(packet_type == PACKET_TYPE_DATA || packet_type == PACKET_TYPE_NOP);
buffer[0] = packet_type;
buffer[1..7].copy_from_slice(&remote_session_id.to_le_bytes()[..SESSION_ID_SIZE]);
buffer[7..11].copy_from_slice(&counter.to_bytes());
let payload_end = HEADER_SIZE + data.len();
let tag_end = payload_end + AES_GCM_TAG_SIZE;
if tag_end < MAX_PACKET_SIZE {
let mut c = key.get_send_cipher(counter)?;
buffer[11..16].fill(0);
c.init(&buffer[..16]);
c.crypt(data, &mut buffer[HEADER_SIZE..payload_end]);
buffer[payload_end..tag_end].copy_from_slice(&c.finish());
key.return_send_cipher(c);
outgoing_obfuscator.0.encrypt_block_in_place(&mut buffer[..16]);
Ok(tag_end)
} else {
unlikely_branch();
Err(Error::InvalidParameter)
}
*/
/*
pub fn send(&self, data: &[u8]) -> Result<H::Buffer, Error> {
let state = self.state.read();
if let Some(key) = state.keys[0].as_ref() {
if let Some(remote_session_id) = state.remote_session_id {
let data_len = assemble_and_armor_DATA(buffer, data, PACKET_TYPE_DATA, u64::from(remote_session_id), self.send_counter.next(), &key, &self.outgoing_obfuscator)?;
//let data_len = assemble_and_armor_DATA(buffer, data, PACKET_TYPE_DATA, u64::from(remote_session_id), self.send_counter.next(), &key, &self.outgoing_obfuscator)?;
Ok(&buffer[..data_len])
} else {
unlikely_branch();
@ -338,7 +378,9 @@ impl<O> Session<O> {
Err(Error::SessionNotEstablished)
}
}
*/
/*
/// Receive a packet from the network and take the appropriate action.
///
/// Check ReceiveResult to see if it includes data or a reply packet.
@ -361,16 +403,19 @@ impl<O> Session<O> {
current_time: i64,
jedi: bool,
) -> Result<ReceiveResult<'a, O>, Error> {
debug_assert!(MAX_PACKET_SIZE >= MIN_BUFFER_SIZE);
debug_assert!(MAX_PACKET_SIZE >= (64 + STATIC_PUBLIC_SIZE + P384_PUBLIC_KEY_SIZE + pqc_kyber::KYBER_PUBLICKEYBYTES));
if incoming_packet.len() > MAX_PACKET_SIZE || incoming_packet.len() <= MIN_PACKET_SIZE {
unlikely_branch();
return Err(Error::InvalidPacket);
}
incoming_obfuscator.0.decrypt_block(&incoming_packet[..16], &mut buffer[..16]);
let packet_type = buffer[0];
let local_session_id = SessionId::new_from_bytes(&buffer[1..7]);
let mut packet_type = buffer[0];
let continued = (packet_type & PACKET_FLAG_CONTINUED) != 0;
packet_type &= PACKET_TYPE_MASK;
let local_session_id = SessionId::new_from_bytes(&buffer[1..7]);
let session = local_session_id.and_then(|sid| session_lookup(sid));
debug_assert_eq!(PACKET_TYPE_DATA, 0);
@ -423,9 +468,13 @@ impl<O> Session<O> {
}
if incoming_packet.len() > (HEADER_SIZE + P384_PUBLIC_KEY_SIZE + AES_GCM_TAG_SIZE + HMAC_SIZE) {
incoming_obfuscator.0.decrypt_block(&incoming_packet[16..32], &mut buffer[16..32]);
incoming_obfuscator.0.decrypt_block(&incoming_packet[32..48], &mut buffer[32..48]);
incoming_obfuscator.0.decrypt_block(&incoming_packet[48..64], &mut buffer[48..64]);
for i in (16..64).step_by(16) {
let j = i + 16;
incoming_obfuscator.0.decrypt_block(&incoming_packet[i..j], &mut buffer[i..j]);
for k in i..j {
buffer[k] ^= incoming_packet[k - 16];
}
}
buffer[64..incoming_packet.len()].copy_from_slice(&incoming_packet[64..]);
} else {
return Err(Error::InvalidPacket);
@ -491,6 +540,7 @@ impl<O> Session<O> {
if let Some((local_session_id, psk, associated_object)) = new_session_auth(&alice_s_public) {
Some(Session::<O> {
id: local_session_id,
associated_object,
send_counter: Counter::new(),
remote_s_public_hash: SHA384::hash(&alice_s_public),
psk,
@ -502,7 +552,6 @@ impl<O> Session<O> {
offer: None,
}),
remote_s_public_p384: alice_s_public_p384.as_bytes().clone(),
associated_object,
})
} else {
return Err(Error::NewSessionRejected);
@ -557,9 +606,13 @@ impl<O> Session<O> {
// Bob now has final key state for this exchange. Yay! Now reply to Alice so she can construct it.
session.outgoing_obfuscator.0.encrypt_block_in_place(&mut buffer[0..16]);
session.outgoing_obfuscator.0.encrypt_block_in_place(&mut buffer[16..32]);
session.outgoing_obfuscator.0.encrypt_block_in_place(&mut buffer[32..48]);
session.outgoing_obfuscator.0.encrypt_block_in_place(&mut buffer[48..64]);
for i in (16..64).step_by(16) {
let j = i + 16;
for k in i..j {
buffer[k] ^= buffer[k - 16];
}
session.outgoing_obfuscator.0.encrypt_block_in_place(&mut buffer[i..j]);
}
return new_session.map_or_else(|| Ok(ReceiveResult::OkSendReply(&buffer[..reply_size])), |ns| Ok(ReceiveResult::OkNewSession(ns, &buffer[..reply_size])));
}
@ -642,6 +695,7 @@ impl<O> Session<O> {
}
}
}
*/
}
#[repr(transparent)]
@ -666,8 +720,8 @@ impl Counter {
/// A value of the outgoing packet counter.
///
/// The counter is internally 64-bit so we can more easily track usage limits without
/// confusing modular difference stuff. The counter as seen externally and placed in
/// packets is the least significant 32 bits.
/// confusing logic to handle 32-bit wrapping. The least significant 32 bits are the
/// actual counter put in the packet.
#[repr(transparent)]
#[derive(Copy, Clone)]
struct CounterValue(u64);
@ -677,6 +731,11 @@ impl CounterValue {
/// Wire form of the counter: the least significant 32 bits, little-endian.
pub fn to_bytes(&self) -> [u8; 4] {
    (self.0 as u32).to_le_bytes()
}
/// Least significant byte of the counter, used when building continuation
/// fragment headers (see send_fragmented).
#[inline(always)]
pub fn lsb(&self) -> u8 {
    self.0 as u8
}
}
struct KeyLifetime {
@ -714,60 +773,186 @@ struct EphemeralOffer {
}
impl EphemeralOffer {
fn create_alice_offer<const MAX_PACKET_SIZE: usize, const STATIC_PUBLIC_SIZE: usize>(
buffer: &mut [u8; MAX_PACKET_SIZE],
fn create_alice_offer<SendFunction: FnMut(&[u8])>(
mut send: SendFunction,
counter: CounterValue,
alice_session_id: SessionId,
bob_session_id: Option<SessionId>,
alice_s_public: &[u8; STATIC_PUBLIC_SIZE],
alice_s_public: &[u8],
alice_metadata: &[u8],
bob_s_public_p384: &P384PublicKey,
ss: &Secret<48>,
outgoing_obfuscator: &Obfuscator, // bobfuscator?
outgoing_obfuscator: &Obfuscator,
mtu: usize,
current_time: i64,
jedi: bool,
) -> Option<(EphemeralOffer, usize)> {
debug_assert!(MAX_PACKET_SIZE >= MIN_BUFFER_SIZE);
) -> Result<EphemeralOffer, Error> {
let alice_e0_keypair = P384KeyPair::generate();
let e0s = alice_e0_keypair.agree(bob_s_public_p384)?;
let e0s = alice_e0_keypair.agree(bob_s_public_p384);
if e0s.is_none() {
return Err(Error::InvalidPacket);
}
let alice_e1_keypair = if jedi {
Some(pqc_kyber::keypair(&mut random::SecureRandom::get()))
} else {
None
};
let key = Secret(hmac_sha512(&hmac_sha512(&KEY_DERIVATION_CHAIN_STARTING_SALT, alice_e0_keypair.public_key_bytes()), e0s.as_bytes()));
let bob_session_id_bytes = bob_session_id.map_or(0_u64, |i| i.into()).to_le_bytes();
let mut packet_size = assemble_KEY_OFFER(buffer, counter, bob_session_id, alice_e0_keypair.public_key(), alice_session_id, alice_s_public, alice_e1_keypair.as_ref().map(|s| &s.public));
const PACKET_BUF_SIZE: usize = 3072;
let mut packet_buf = [0_u8; PACKET_BUF_SIZE];
debug_assert!(packet_size <= MAX_PACKET_SIZE);
let mut c = AesGcm::new(kbkdf512(key.as_bytes(), KBKDF_KEY_USAGE_LABEL_AES_GCM_ALICE_TO_BOB).first_n::<32>(), true);
c.init(&get_aes_gcm_nonce(buffer));
c.crypt_in_place(&mut buffer[(HEADER_SIZE + P384_PUBLIC_KEY_SIZE)..packet_size]);
let c = c.finish();
buffer[packet_size..packet_size + AES_GCM_TAG_SIZE].copy_from_slice(&c);
packet_size += AES_GCM_TAG_SIZE;
let mut packet_len = {
let mut p = &mut packet_buf[..];
p.write_all(&[PACKET_TYPE_KEY_OFFER])?;
p.write_all(&bob_session_id_bytes[..SESSION_ID_SIZE])?;
p.write_all(&counter.to_bytes())?;
p.write_all(alice_e0_keypair.public_key_bytes())?;
p.write_all(&alice_session_id.0.get().to_le_bytes()[..SESSION_ID_SIZE])?;
varint::write(&mut p, alice_s_public.len() as u64)?;
p.write_all(alice_s_public)?;
varint::write(&mut p, alice_metadata.len() as u64)?;
p.write_all(alice_metadata)?;
if let Some(e1kp) = alice_e1_keypair {
p.write_all(&[E1_TYPE_KYBER1024])?;
p.write_all(&e1kp.public)?;
} else {
p.write_all(&[E1_TYPE_NONE])?;
}
PACKET_BUF_SIZE - p.len()
};
if packet_len > mtu {
packet_buf[0] |= PACKET_FLAG_CONTINUED;
}
let key = Secret(hmac_sha512(
&hmac_sha512(&KEY_DERIVATION_CHAIN_STARTING_SALT, alice_e0_keypair.public_key_bytes()),
e0s.unwrap().as_bytes(),
));
let gcm_tag = {
let mut c = AesGcm::new(kbkdf512(key.as_bytes(), KBKDF_KEY_USAGE_LABEL_AES_GCM_ALICE_TO_BOB).first_n::<32>(), true);
c.init(&get_aes_gcm_nonce(&packet_buf));
c.crypt_in_place(&mut packet_buf[(HEADER_SIZE + P384_PUBLIC_KEY_SIZE)..packet_len]);
c.finish()
};
packet_buf[packet_len..(packet_len + AES_GCM_TAG_SIZE)].copy_from_slice(&gcm_tag);
packet_len += AES_GCM_TAG_SIZE;
let key = Secret(hmac_sha512(key.as_bytes(), ss.as_bytes()));
let hmac = hmac_sha384(kbkdf512(key.as_bytes(), KBKDF_KEY_USAGE_LABEL_HMAC).first_n::<48>(), &buffer[..packet_size]);
buffer[packet_size..packet_size + HMAC_SIZE].copy_from_slice(&hmac);
packet_size += HMAC_SIZE;
let hmac = hmac_sha384(kbkdf512(key.as_bytes(), KBKDF_KEY_USAGE_LABEL_HMAC).first_n::<48>(), &packet_buf[..packet_len]);
packet_buf[packet_len..(packet_len + HMAC_SIZE)].copy_from_slice(&hmac);
packet_len += HMAC_SIZE;
outgoing_obfuscator.0.encrypt_block_in_place(&mut buffer[0..16]);
outgoing_obfuscator.0.encrypt_block_in_place(&mut buffer[16..32]);
outgoing_obfuscator.0.encrypt_block_in_place(&mut buffer[32..48]);
outgoing_obfuscator.0.encrypt_block_in_place(&mut buffer[48..64]);
cbc_obfuscate_first_64(outgoing_obfuscator, &mut packet_buf);
if packet_len > mtu {
send_fragmented(send, &mut packet_buf[..packet_len], counter, mtu, &bob_session_id_bytes[..SESSION_ID_SIZE], outgoing_obfuscator)?;
} else {
send(&packet_buf[..packet_len]);
}
Some((
EphemeralOffer {
creation_time: current_time,
key,
alice_e0_keypair,
alice_e1_keypair,
},
packet_size,
))
Ok(EphemeralOffer {
creation_time: current_time,
key,
alice_e0_keypair,
alice_e1_keypair,
})
}
}
/// Send a packet that must be fragmented.
///
/// The packet MUST have its CONTINUED flag set in its header. This isn't used
/// for unfragmented packets. Those are just sent directly.
///
/// The packet should be obfuscated as normal. This handles obfuscation of
/// fragments after the head. The contents of 'packet' are partly overwritten:
/// fragmentation is done in place by writing each continuation header over the
/// tail of the region that was already sent, so no extra buffer is needed.
fn send_fragmented<SendFunction: FnMut(&[u8])>(
    mut send: SendFunction,
    packet: &mut [u8],
    counter: CounterValue,
    mtu: usize,
    remote_session_id_bytes: &[u8],
    outgoing_obfuscator: &Obfuscator,
) -> std::io::Result<()> {
    let packet_len = packet.len();
    debug_assert!(packet_len >= MIN_PACKET_SIZE);
    debug_assert!(mtu > MIN_PACKET_SIZE);
    // Spread the payload evenly over ceil(packet_len / (mtu - HEADER_SIZE)) fragments
    // rather than sending all-max fragments followed by one tiny runt.
    let frag_len_max = ((packet_len as f64) / ((packet_len as f64) / ((mtu - HEADER_SIZE) as f64)).ceil()).ceil() as usize;
    debug_assert!(frag_len_max > 0);
    let mut frag_len = packet_len.min(frag_len_max);
    debug_assert!(frag_len > 0);
    // Head fragment: already has its real header and obfuscation, send as-is.
    send(&packet[..frag_len]);
    // Save the last two bytes of the head fragment before they may be overwritten
    // by a continuation header below; they are echoed into every continuation
    // header (presumably so the receiver can match fragments to their head packet
    // — TODO confirm against the reassembly side).
    let frag0_tail = [packet[frag_len - 2], packet[frag_len - 1]];
    let mut next_frag_start = frag_len;
    let mut frag_no = 1_u8;
    while next_frag_start < packet_len {
        debug_assert!(next_frag_start > HEADER_SIZE);
        frag_len = (packet_len - next_frag_start).min(frag_len_max);
        debug_assert!(frag_len > MIN_PACKET_SIZE);
        let frag_end = next_frag_start + frag_len;
        debug_assert!(frag_end <= packet_len);
        // Rewind by HEADER_SIZE so the continuation header occupies bytes that
        // were already transmitted as part of the previous fragment; safe because
        // that fragment has already been handed to send().
        next_frag_start -= HEADER_SIZE;
        let mut frag_hdr = &mut packet[next_frag_start..];
        // Type byte: last fragment clears the CONTINUED flag, earlier ones keep it set.
        frag_hdr.write_all(&[if frag_end == packet_len {
            PACKET_TYPE_CONTINUATION
        } else {
            PACKET_TYPE_CONTINUATION | PACKET_FLAG_CONTINUED
        }])?;
        frag_hdr.write_all(&remote_session_id_bytes)?;
        // Counter LSB plus monotonically increasing fragment number.
        frag_hdr.write_all(&[counter.lsb(), frag_no])?;
        frag_no += 1;
        frag_hdr.write_all(&frag0_tail)?;
        // Obfuscate the first 16 bytes of the fragment (header + start of payload),
        // matching what is done for whole packets.
        outgoing_obfuscator.0.encrypt_block_in_place(&mut packet[next_frag_start..(next_frag_start + 16)]);
        send(&packet[next_frag_start..frag_end]);
        next_frag_start = frag_end;
    }
    Ok(())
}
/// Obfuscate the first 64 bytes of a packet in place (used for key exchange packets).
///
/// The first 16-byte block is encrypted directly; each following block is first
/// XORed with the previous (already-encrypted) block and then encrypted,
/// chaining the four blocks CBC-style.
fn cbc_obfuscate_first_64(ob: &Obfuscator, data: &mut [u8]) {
    ob.0.encrypt_block_in_place(&mut data[0..16]);
    for start in (16..64).step_by(16) {
        let end = start + 16;
        // Chain with the previous ciphertext block before encrypting.
        for idx in start..end {
            data[idx] ^= data[idx - 16];
        }
        ob.0.encrypt_block_in_place(&mut data[start..end]);
    }
}
/// Deobfuscate bytes 16..64 of a key exchange packet, writing into `output`.
///
/// The first 16 bytes of a packet are always deobfuscated up front by the caller,
/// so this assumes block 0 is already handled and undoes the CBC-style chaining
/// applied by cbc_obfuscate_first_64 for the remaining three blocks: decrypt each
/// block, then XOR with the previous *input* (ciphertext) block.
fn cbc_debofuscate_16_to_64(ob: &Obfuscator, input: &[u8], output: &mut [u8]) {
    for start in (16..64).step_by(16) {
        let end = start + 16;
        ob.0.decrypt_block(&input[start..end], &mut output[start..end]);
        // Undo the chaining using the preceding ciphertext block from the input.
        for idx in start..end {
            output[idx] ^= input[idx - 16];
        }
    }
}
@ -812,7 +997,6 @@ impl SessionKey {
if !self.lifetime.expired(counter) {
Ok(self.send_cipher_pool.lock().pop().unwrap_or_else(|| Box::new(AesGcm::new(self.send_key.as_bytes(), true))))
} else {
unlikely_branch();
Err(Error::MaxKeyLifetimeExceeded)
}
}
@ -833,84 +1017,7 @@ impl SessionKey {
}
}
/// Assemble, encrypt, and obfuscate a DATA or NOP packet into `buffer`.
///
/// Layout: [0] packet type | [1..7] remote session ID (LE, 6 bytes) | [7..11] counter
/// (LE, low 32 bits) | [11..] AES-GCM ciphertext of `data` | 16-byte GCM tag.
/// Returns the total packet size on success, Error::InvalidParameter if the
/// payload plus tag would overflow the buffer, or the error from
/// get_send_cipher (e.g. MaxKeyLifetimeExceeded).
#[allow(non_snake_case)]
fn assemble_and_armor_DATA<const MAX_PACKET_SIZE: usize>(buffer: &mut [u8; MAX_PACKET_SIZE], data: &[u8], packet_type: u8, remote_session_id: u64, counter: CounterValue, key: &SessionKey, outgoing_obfuscator: &Obfuscator) -> Result<usize, Error> {
    debug_assert!(packet_type == PACKET_TYPE_DATA || packet_type == PACKET_TYPE_NOP);
    buffer[0] = packet_type;
    buffer[1..7].copy_from_slice(&remote_session_id.to_le_bytes()[..SESSION_ID_SIZE]);
    buffer[7..11].copy_from_slice(&counter.to_bytes());
    let payload_end = HEADER_SIZE + data.len();
    let tag_end = payload_end + AES_GCM_TAG_SIZE;
    // NOTE(review): a payload where tag_end == MAX_PACKET_SIZE would still fit;
    // `<` rejects it — confirm whether the one-byte margin is intentional.
    if tag_end < MAX_PACKET_SIZE {
        let mut c = key.get_send_cipher(counter)?;
        // Zero-pad the header to 16 bytes; the padded header doubles as the GCM nonce.
        buffer[11..16].fill(0);
        c.init(&buffer[..16]);
        c.crypt(data, &mut buffer[HEADER_SIZE..payload_end]);
        buffer[payload_end..tag_end].copy_from_slice(&c.finish());
        key.return_send_cipher(c);
        // Obfuscate only the first block; the rest is already AES-GCM ciphertext.
        outgoing_obfuscator.0.encrypt_block_in_place(&mut buffer[..16]);
        Ok(tag_end)
    } else {
        unlikely_branch();
        Err(Error::InvalidParameter)
    }
}
/// Write a randomly sized run of zero padding at the start of `b` and return
/// the remainder of the slice.
///
/// The padding length is chosen so that AES_GCM_TAG_SIZE + HMAC_SIZE bytes always
/// remain available for the trailing tag and HMAC. If the slice is too small to
/// leave that room, it is returned unchanged with no padding written.
fn append_random_padding(b: &mut [u8]) -> &mut [u8] {
    let reserved = AES_GCM_TAG_SIZE + HMAC_SIZE;
    if b.len() <= reserved {
        return b;
    }
    let pad_len = (random::next_u32_secure() as usize) % (b.len() - reserved);
    b[..pad_len].fill(0);
    &mut b[pad_len..]
}
/// Assemble an (unencrypted, unobfuscated) KEY_OFFER packet into `buffer`,
/// returning its size.
///
/// Wire layout after the 11-byte header: Alice's ephemeral P-384 public key |
/// Alice's session ID (6 bytes) | Alice's static public key blob | one E1 type
/// byte, followed by the Kyber public key if the hybrid exchange is enabled |
/// two reserved zero bytes | random-length zero padding.
/// Encryption, the GCM tag, the HMAC, and obfuscation are applied by the caller.
#[allow(non_snake_case)]
fn assemble_KEY_OFFER<const MAX_PACKET_SIZE: usize, const STATIC_PUBLIC_SIZE: usize>(
    buffer: &mut [u8; MAX_PACKET_SIZE],
    counter: CounterValue,
    bob_session_id: Option<SessionId>,
    alice_e0_public: &P384PublicKey,
    alice_session_id: SessionId,
    alice_s_public: &[u8; STATIC_PUBLIC_SIZE],
    alice_e1_public: Option<&[u8; pqc_kyber::KYBER_PUBLICKEYBYTES]>,
) -> usize {
    buffer[0] = PACKET_TYPE_KEY_OFFER;
    // Bob's session ID if known, or zero for an initial offer to a new peer.
    buffer[1..7].copy_from_slice(&bob_session_id.map_or(0_u64, |i| i.into()).to_le_bytes()[..SESSION_ID_SIZE]);
    buffer[7..11].copy_from_slice(&counter.to_bytes());
    // `b` walks forward through the remaining buffer as fields are appended.
    let mut b = &mut buffer[HEADER_SIZE..];
    b[..P384_PUBLIC_KEY_SIZE].copy_from_slice(alice_e0_public.as_bytes());
    b = &mut b[P384_PUBLIC_KEY_SIZE..];
    alice_session_id.copy_to(b);
    b = &mut b[SESSION_ID_SIZE..];
    b[..STATIC_PUBLIC_SIZE].copy_from_slice(alice_s_public);
    b = &mut b[STATIC_PUBLIC_SIZE..];
    if let Some(k) = alice_e1_public {
        b[0] = E1_TYPE_KYBER512;
        b[1..1 + pqc_kyber::KYBER_PUBLICKEYBYTES].copy_from_slice(k);
        b = &mut b[1 + pqc_kyber::KYBER_PUBLICKEYBYTES..];
    } else {
        b[0] = E1_TYPE_NONE;
        b = &mut b[1..];
    }
    b[0] = 0;
    b[1] = 0; // reserved for future use
    b = &mut b[2..];
    // Pad with a random number of zeroes to obscure the true payload size.
    b = append_random_padding(b);
    MAX_PACKET_SIZE - b.len()
}
/*
#[allow(non_snake_case)]
fn parse_KEY_OFFER_after_header<const STATIC_PUBLIC_SIZE: usize>(mut b: &[u8]) -> Result<(SessionId, [u8; STATIC_PUBLIC_SIZE], Option<[u8; pqc_kyber::KYBER_PUBLICKEYBYTES]>), Error> {
if b.len() >= SESSION_ID_SIZE {
@ -922,7 +1029,7 @@ fn parse_KEY_OFFER_after_header<const STATIC_PUBLIC_SIZE: usize>(mut b: &[u8]) -
if b.len() >= 1 {
let e1_type = b[0];
b = &b[1..];
let alice_e1_public = if e1_type == E1_TYPE_KYBER512 {
let alice_e1_public = if e1_type == E1_TYPE_KYBER1024 {
if b.len() >= pqc_kyber::KYBER_PUBLICKEYBYTES {
let k: [u8; pqc_kyber::KYBER_PUBLICKEYBYTES] = b[..pqc_kyber::KYBER_PUBLICKEYBYTES].try_into().unwrap();
b = &b[pqc_kyber::KYBER_PUBLICKEYBYTES..];
@ -943,7 +1050,14 @@ fn parse_KEY_OFFER_after_header<const STATIC_PUBLIC_SIZE: usize>(mut b: &[u8]) -
}
#[allow(non_snake_case)]
fn assemble_KEY_COUNTER_OFFER<const MAX_PACKET_SIZE: usize>(buffer: &mut [u8; MAX_PACKET_SIZE], counter: CounterValue, alice_session_id: SessionId, bob_e0_public: &P384PublicKey, bob_session_id: SessionId, bob_e1_public: Option<&[u8; pqc_kyber::KYBER_CIPHERTEXTBYTES]>) -> usize {
fn assemble_KEY_COUNTER_OFFER<const MAX_PACKET_SIZE: usize>(
buffer: &mut [u8; MAX_PACKET_SIZE],
counter: CounterValue,
alice_session_id: SessionId,
bob_e0_public: &P384PublicKey,
bob_session_id: SessionId,
bob_e1_public: Option<&[u8; pqc_kyber::KYBER_CIPHERTEXTBYTES]>,
) -> usize {
buffer[0] = PACKET_TYPE_KEY_COUNTER_OFFER;
alice_session_id.copy_to(&mut buffer[1..7]);
buffer[7..11].copy_from_slice(&counter.to_bytes());
@ -956,7 +1070,7 @@ fn assemble_KEY_COUNTER_OFFER<const MAX_PACKET_SIZE: usize>(buffer: &mut [u8; MA
b = &mut b[SESSION_ID_SIZE..];
if let Some(k) = bob_e1_public {
b[0] = E1_TYPE_KYBER512;
b[0] = E1_TYPE_KYBER1024;
b[1..1 + pqc_kyber::KYBER_CIPHERTEXTBYTES].copy_from_slice(k);
b = &mut b[1 + pqc_kyber::KYBER_CIPHERTEXTBYTES..];
} else {
@ -968,8 +1082,6 @@ fn assemble_KEY_COUNTER_OFFER<const MAX_PACKET_SIZE: usize>(buffer: &mut [u8; MA
b[1] = 0; // reserved for future use
b = &mut b[2..];
b = append_random_padding(b);
MAX_PACKET_SIZE - b.len()
}
@ -981,7 +1093,7 @@ fn parse_KEY_COUNTER_OFFER_after_header(mut b: &[u8]) -> Result<(SessionId, Opti
if b.len() >= 1 {
let e1_type = b[0];
b = &b[1..];
let bob_e1_public = if e1_type == E1_TYPE_KYBER512 {
let bob_e1_public = if e1_type == E1_TYPE_KYBER1024 {
if b.len() >= pqc_kyber::KYBER_CIPHERTEXTBYTES {
let k: [u8; pqc_kyber::KYBER_CIPHERTEXTBYTES] = b[..pqc_kyber::KYBER_CIPHERTEXTBYTES].try_into().unwrap();
b = &b[pqc_kyber::KYBER_CIPHERTEXTBYTES..];
@ -1000,6 +1112,9 @@ fn parse_KEY_COUNTER_OFFER_after_header(mut b: &[u8]) -> Result<(SessionId, Opti
return Err(Error::InvalidPacket);
}
*/
/// HMAC-SHA512 key derivation function modeled on: https://csrc.nist.gov/publications/detail/sp/800-108/final (page 12)
fn kbkdf512(key: &[u8], label: u8) -> Secret<64> {
    // Fixed KBKDF input block: this appears to encode counter || "ZT" + label ||
    // separator/context zeroes || output length in bits (0x0200 = 512) per the
    // SP 800-108 counter-mode layout — confirm against the spec if changing.
    let fixed_input = [0, 0, 0, 0, b'Z', b'T', label, 0, 0, 0, 0, 0x02, 0x00];
    Secret(hmac_sha512(key, &fixed_input))
}
@ -1011,10 +1126,10 @@ fn get_aes_gcm_nonce(deobfuscated_packet: &[u8]) -> [u8; 16] {
tmp
}
// Empty, never-inlined function. Calling it inside a branch marks that path as
// cold to the optimizer, improving code layout for the common (hot) path.
#[cold]
#[inline(never)]
extern "C" fn unlikely_branch() {}

// Placeholder test module; the previous tests (commented out below) need to be
// updated for the new host/send-function based API.
#[cfg(test)]
mod tests {}
/*
#[cfg(test)]
mod tests {
use std::rc::Rc;
@ -1178,3 +1293,4 @@ mod tests {
}
}
}
*/

View file

@ -1,5 +1,5 @@
#unstable_features = true
max_width = 300
max_width = 200
#use_small_heuristics = "Max"
edition = "2021"
empty_item_single_line = true