Various cleanup...

Adam Ierymenko 2021-01-12 15:23:35 -05:00
parent d6969c41f0
commit 053452b4a0
GPG key ID: C8877CF2D7A5D7F3
2 changed files with 177 additions and 337 deletions

View file

@@ -15,7 +15,7 @@ namespace {
 #define ECC_CURVE secp384r1
 #define ECC_BYTES ECC_CURVE
 #define NUM_ECC_DIGITS (ECC_BYTES/8)
-#define MAX_TRIES 1024
+#define ECC_CREATE_KEY_MAX_ATTEMPTS 1024
 #ifdef ZT_HAVE_UINT128
 #define SUPPORTS_INT128 1
@@ -45,144 +45,100 @@ const uint64_t curve_b[NUM_ECC_DIGITS] = CONCAT(Curve_B_, ECC_CURVE);
 const EccPoint curve_G = CONCAT(Curve_G_, ECC_CURVE);
 const uint64_t curve_n[NUM_ECC_DIGITS] = CONCAT(Curve_N_, ECC_CURVE);

-// Use ZeroTier's secure PRNG
 ZT_INLINE int getRandomNumber(uint64_t *p_vli)
 {
-    Utils::getSecureRandom(p_vli,ECC_BYTES);
+    Utils::getSecureRandom(p_vli, ECC_BYTES);
     return 1;
 }

 ZT_INLINE void vli_clear(uint64_t *p_vli)
-{
-    uint i;
-    for(i=0; i<NUM_ECC_DIGITS; ++i)
-    {
-        p_vli[i] = 0;
-    }
-}
+{ Utils::zero< sizeof(uint64_t) * NUM_ECC_DIGITS >(p_vli); }

-/* Returns 1 if p_vli == 0, 0 otherwise. */
 ZT_INLINE int vli_isZero(const uint64_t *p_vli)
 {
     uint i;
-    for(i = 0; i < NUM_ECC_DIGITS; ++i)
-    {
-        if(p_vli[i])
-        {
+    for (i = 0; i < NUM_ECC_DIGITS; ++i) {
+        if (p_vli[i])
             return 0;
-        }
     }
     return 1;
 }

-/* Returns nonzero if bit p_bit of p_vli is set. */
-ZT_INLINE uint64_t vli_testBit(const uint64_t *p_vli,uint p_bit)
-{
-    return (p_vli[p_bit/64] & ((uint64_t)1 << (p_bit % 64)));
-}
+ZT_INLINE uint64_t vli_testBit(const uint64_t *p_vli, uint p_bit)
+{ return (p_vli[p_bit / 64] & ((uint64_t)1 << (p_bit % 64))); }

-/* Counts the number of 64-bit "digits" in p_vli. */
 ZT_INLINE uint vli_numDigits(const uint64_t *p_vli)
 {
     int i;
-    /* Search from the end until we find a non-zero digit.
-    We do it in reverse because we expect that most digits will be nonzero. */
-    for(i = NUM_ECC_DIGITS - 1; i >= 0 && p_vli[i] == 0; --i)
-    {
-    }
-    return (i + 1);
+    for (i = NUM_ECC_DIGITS - 1; i >= 0 && p_vli[i] == 0; --i) {}
+    return (uint)(i + 1);
 }

-/* Counts the number of bits required for p_vli. */
 ZT_INLINE uint vli_numBits(const uint64_t *p_vli)
 {
     uint i;
     uint64_t l_digit;
     uint l_numDigits = vli_numDigits(p_vli);
-    if(l_numDigits == 0)
-    {
+    if (l_numDigits == 0) {
         return 0;
     }
     l_digit = p_vli[l_numDigits - 1];
-    for(i=0; l_digit; ++i)
-    {
+    for (i = 0; l_digit; ++i) {
         l_digit >>= 1;
     }
     return ((l_numDigits - 1) * 64 + i);
 }

-/* Sets p_dest = p_src. */
-ZT_INLINE void vli_set(uint64_t *p_dest,const uint64_t *p_src)
+ZT_INLINE void vli_set(uint64_t *p_dest, const uint64_t *p_src)
 {
-    uint i;
-    for(i=0; i<NUM_ECC_DIGITS; ++i)
-    {
+    for (uint i = 0; i < NUM_ECC_DIGITS; ++i)
         p_dest[i] = p_src[i];
-    }
 }

-/* Returns sign of p_left - p_right. */
-ZT_INLINE int vli_cmp(const uint64_t *p_left,const uint64_t *p_right)
+ZT_INLINE int vli_cmp(const uint64_t *p_left, const uint64_t *p_right)
 {
-    int i;
-    for(i = NUM_ECC_DIGITS-1; i >= 0; --i)
-    {
-        if(p_left[i] > p_right[i])
-        {
+    for (int i = NUM_ECC_DIGITS - 1; i >= 0; --i) {
+        if (p_left[i] > p_right[i]) {
             return 1;
-        }
-        else if(p_left[i] < p_right[i])
-        {
+        } else if (p_left[i] < p_right[i]) {
             return -1;
         }
     }
     return 0;
 }

-/* Computes p_result = p_in << c, returning carry. Can modify in place (if p_result == p_in). 0 < p_shift < 64. */
-ZT_INLINE uint64_t vli_lshift(uint64_t *p_result,const uint64_t *p_in,uint p_shift)
+ZT_INLINE uint64_t vli_lshift(uint64_t *p_result, const uint64_t *p_in, uint p_shift)
 {
     uint64_t l_carry = 0;
-    uint i;
-    for(i = 0; i < NUM_ECC_DIGITS; ++i)
-    {
+    for (uint i = 0; i < NUM_ECC_DIGITS; ++i) {
         uint64_t l_temp = p_in[i];
         p_result[i] = (l_temp << p_shift) | l_carry;
         l_carry = l_temp >> (64 - p_shift);
     }
     return l_carry;
 }

-/* Computes p_vli = p_vli >> 1. */
 ZT_INLINE void vli_rshift1(uint64_t *p_vli)
 {
     uint64_t *l_end = p_vli;
     uint64_t l_carry = 0;
     p_vli += NUM_ECC_DIGITS;
-    while(p_vli-- > l_end)
-    {
+    while (p_vli-- > l_end) {
         uint64_t l_temp = *p_vli;
         *p_vli = (l_temp >> 1) | l_carry;
         l_carry = l_temp << 63;
     }
 }

-/* Computes p_result = p_left + p_right, returning carry. Can modify in place. */
-ZT_INLINE uint64_t vli_add(uint64_t *p_result,const uint64_t *p_left,const uint64_t *p_right)
+ZT_INLINE uint64_t vli_add(uint64_t *p_result, const uint64_t *p_left, const uint64_t *p_right)
 {
     uint64_t l_carry = 0;
-    uint i;
-    for(i=0; i<NUM_ECC_DIGITS; ++i)
-    {
+    for (uint i = 0; i < NUM_ECC_DIGITS; ++i) {
         uint64_t l_sum = p_left[i] + p_right[i] + l_carry;
-        if(l_sum != p_left[i])
-        {
+        if (l_sum != p_left[i]) {
             l_carry = (l_sum < p_left[i]);
         }
         p_result[i] = l_sum;
@@ -190,16 +146,12 @@ ZT_INLINE uint64_t vli_add(uint64_t *p_result,const uint64_t *p_left,const uint64_t *p_right)
     return l_carry;
 }

-/* Computes p_result = p_left - p_right, returning borrow. Can modify in place. */
-ZT_INLINE uint64_t vli_sub(uint64_t *p_result,const uint64_t *p_left,const uint64_t *p_right)
+ZT_INLINE uint64_t vli_sub(uint64_t *p_result, const uint64_t *p_left, const uint64_t *p_right)
 {
     uint64_t l_borrow = 0;
-    uint i;
-    for(i=0; i<NUM_ECC_DIGITS; ++i)
-    {
+    for (uint i = 0; i < NUM_ECC_DIGITS; ++i) {
         uint64_t l_diff = p_left[i] - p_right[i] - l_borrow;
-        if(l_diff != p_left[i])
-        {
+        if (l_diff != p_left[i]) {
             l_borrow = (l_diff > p_left[i]);
         }
         p_result[i] = l_diff;
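A note on the carry/borrow rule used by vli_add() and vli_sub() above: when l_sum == p_left[i], the addend plus pending carry wrapped to exactly 0 or 2^64, so the previous carry value is already correct and may be left untouched; the borrow case is symmetric. A minimal stand-alone sketch (hypothetical two-word helper, for illustration only):

#include <cassert>
#include <cstdint>

// Two-digit version of vli_add()'s carry logic. If sum == a[i], the branch
// safely skips the update: b[i] + carry wrapped to exactly 0 or 2^64, and in
// either case the old carry is still the right carry-out.
static uint64_t add2(uint64_t r[2], const uint64_t a[2], const uint64_t b[2]) {
    uint64_t carry = 0;
    for (int i = 0; i < 2; ++i) {
        uint64_t sum = a[i] + b[i] + carry;
        if (sum != a[i])
            carry = (sum < a[i]); // wrapped => a carry was produced
        r[i] = sum;
    }
    return carry;
}

int main() {
    const uint64_t a[2] = { ~0ULL, 0 }, b[2] = { 1, 0 };
    uint64_t r[2];
    assert(add2(r, a, b) == 0 && r[0] == 0 && r[1] == 1); // (2^64 - 1) + 1 = 2^64
}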
@@ -209,7 +161,6 @@ ZT_INLINE uint64_t vli_sub(uint64_t *p_result,const uint64_t *p_left,const uint64_t *p_right)
 #if SUPPORTS_INT128 == 1

-/* Computes p_result = p_left * p_right. */
 void vli_mult(uint64_t *p_result, const uint64_t *p_left, const uint64_t *p_right)
 {
     uint128_t r01 = 0;
@@ -217,13 +168,10 @@ void vli_mult(uint64_t *p_result, const uint64_t *p_left, const uint64_t *p_right)
     uint i, k;

-    /* Compute each digit of p_result in sequence, maintaining the carries. */
-    for(k=0; k < NUM_ECC_DIGITS*2 - 1; ++k)
-    {
+    for (k = 0; k < NUM_ECC_DIGITS * 2 - 1; ++k) {
         uint l_min = (k < NUM_ECC_DIGITS ? 0 : (k + 1) - NUM_ECC_DIGITS);
-        for(i=l_min; i<=k && i<NUM_ECC_DIGITS; ++i)
-        {
-            uint128_t l_product = (uint128_t)p_left[i] * p_right[k-i];
+        for (i = l_min; i <= k && i < NUM_ECC_DIGITS; ++i) {
+            uint128_t l_product = (uint128_t)p_left[i] * p_right[k - i];
             r01 += l_product;
             r2 += (r01 < l_product);
         }
@@ -232,24 +180,20 @@ void vli_mult(uint64_t *p_result, const uint64_t *p_left, const uint64_t *p_right)
         r2 = 0;
     }

-    p_result[NUM_ECC_DIGITS*2 - 1] = (uint64_t)r01;
+    p_result[NUM_ECC_DIGITS * 2 - 1] = (uint64_t)r01;
 }

-/* Computes p_result = p_left^2. */
 void vli_square(uint64_t *p_result, const uint64_t *p_left)
 {
     uint128_t r01 = 0;
     uint64_t r2 = 0;
     uint i, k;

-    for(k=0; k < NUM_ECC_DIGITS*2 - 1; ++k)
-    {
+    for (k = 0; k < NUM_ECC_DIGITS * 2 - 1; ++k) {
         uint l_min = (k < NUM_ECC_DIGITS ? 0 : (k + 1) - NUM_ECC_DIGITS);
-        for(i=l_min; i<=k && i<=k-i; ++i)
-        {
-            uint128_t l_product = (uint128_t)p_left[i] * p_left[k-i];
-            if(i < k-i)
-            {
+        for (i = l_min; i <= k && i <= k - i; ++i) {
+            uint128_t l_product = (uint128_t)p_left[i] * p_left[k - i];
+            if (i < k - i) {
                 r2 += l_product >> 127;
                 l_product *= 2;
             }
@@ -261,7 +205,7 @@ void vli_square(uint64_t *p_result, const uint64_t *p_left)
         r2 = 0;
     }

-    p_result[NUM_ECC_DIGITS*2 - 1] = (uint64_t)r01;
+    p_result[NUM_ECC_DIGITS * 2 - 1] = (uint64_t)r01;
 }

 #else /* #if SUPPORTS_INT128 */
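For context on the vli_mult()/vli_square() hunks above: each output digit k is a column sum of products p_left[i] * p_right[k - i], kept in a 192-bit accumulator, with r01 as the low 128 bits and r2 catching overflow out of the top. The same column loop at base-2^8 scale, small enough to check against a native 32-bit product (all names here are illustrative, not from the file):

#include <cassert>
#include <cstdint>

// Base-2^8 analog of vli_mult(): a 16-bit r01 plays the role of the uint128_t
// accumulator and r2 the overflow digit; after each column, the low digit is
// emitted and the accumulator shifts down one digit.
int main() {
    const uint8_t a[2] = { 0xFF, 0xFF }, b[2] = { 0xFF, 0xFF }; // 0xFFFF * 0xFFFF
    uint8_t out[4];
    uint16_t r01 = 0;
    uint8_t r2 = 0;
    for (int k = 0; k < 3; ++k) {
        int lo = (k < 2) ? 0 : k - 1; // same l_min formula with NUM_ECC_DIGITS = 2
        for (int i = lo; i <= k && i < 2; ++i) {
            uint16_t prod = (uint16_t)a[i] * b[k - i];
            r01 += prod;
            r2 += (r01 < prod); // column sum overflowed 16 bits
        }
        out[k] = (uint8_t)r01;
        r01 = (uint16_t)((r01 >> 8) | ((uint16_t)r2 << 8));
        r2 = 0;
    }
    out[3] = (uint8_t)r01;
    assert((out[0] | (out[1] << 8) | (out[2] << 16) | ((uint32_t)out[3] << 24))
           == 0xFFFFu * 0xFFFFu); // 0xFFFE0001
}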
@@ -282,8 +226,7 @@ uint128_t mul_64_64(uint64_t p_left, uint64_t p_right)
     m2 += (m0 >> 32);
     m2 += m1;
-    if(m2 < m1)
-    { // overflow
+    if (m2 < m1) { // overflow
         m3 += 0x100000000ull;
     }
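The `m2 < m1` test above is the carry out of the middle 32-bit column: when the sum of cross products wraps, the lost 2^64 sits 32 bits above the high word's origin, hence the 0x100000000 (2^32) added to m3. A self-contained sketch of the same decomposition, checked against a native 128-bit multiply (assumption: a GCC/Clang-style __int128 is available):

#include <cassert>
#include <cstdint>

// Portable 64x64 -> 128 multiply in the style of mul_64_64().
int main() {
    uint64_t a = 0xDEADBEEFCAFEBABEull, b = 0xFEEDFACE01234567ull;
    uint64_t a0 = (uint32_t)a, a1 = a >> 32, b0 = (uint32_t)b, b1 = b >> 32;
    uint64_t m0 = a0 * b0, m1 = a0 * b1, m2 = a1 * b0, m3 = a1 * b1;
    m2 += (m0 >> 32);
    m2 += m1;
    if (m2 < m1)              // carry out of the middle column...
        m3 += 0x100000000ull; // ...lands 2^32 higher, in the top word
    uint64_t lo = (m0 & 0xFFFFFFFFull) | (m2 << 32);
    uint64_t hi = m3 + (m2 >> 32);
    unsigned __int128 ref = (unsigned __int128)a * b;
    assert(lo == (uint64_t)ref && hi == (uint64_t)(ref >> 64));
}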
@@ -309,12 +252,10 @@ void vli_mult(uint64_t *p_result, uint64_t *p_left, const uint64_t *p_right)
     uint i, k;

     /* Compute each digit of p_result in sequence, maintaining the carries. */
-    for(k=0; k < NUM_ECC_DIGITS*2 - 1; ++k)
-    {
+    for (k = 0; k < NUM_ECC_DIGITS * 2 - 1; ++k) {
         uint l_min = (k < NUM_ECC_DIGITS ? 0 : (k + 1) - NUM_ECC_DIGITS);
-        for(i=l_min; i<=k && i<NUM_ECC_DIGITS; ++i)
-        {
-            uint128_t l_product = mul_64_64(p_left[i], p_right[k-i]);
+        for (i = l_min; i <= k && i < NUM_ECC_DIGITS; ++i) {
+            uint128_t l_product = mul_64_64(p_left[i], p_right[k - i]);
             r01 = add_128_128(r01, l_product);
             r2 += (r01.m_high < l_product.m_high);
         }
@@ -324,7 +265,7 @@ void vli_mult(uint64_t *p_result, uint64_t *p_left, const uint64_t *p_right)
         r2 = 0;
     }

-    p_result[NUM_ECC_DIGITS*2 - 1] = r01.m_low;
+    p_result[NUM_ECC_DIGITS * 2 - 1] = r01.m_low;
 }

 void vli_square(uint64_t *p_result, uint64_t *p_left)
@@ -333,14 +274,11 @@ void vli_square(uint64_t *p_result, uint64_t *p_left)
     uint64_t r2 = 0;
     uint i, k;

-    for(k=0; k < NUM_ECC_DIGITS*2 - 1; ++k)
-    {
+    for (k = 0; k < NUM_ECC_DIGITS * 2 - 1; ++k) {
         uint l_min = (k < NUM_ECC_DIGITS ? 0 : (k + 1) - NUM_ECC_DIGITS);
-        for(i=l_min; i<=k && i<=k-i; ++i)
-        {
-            uint128_t l_product = mul_64_64(p_left[i], p_left[k-i]);
-            if(i < k-i)
-            {
+        for (i = l_min; i <= k && i <= k - i; ++i) {
+            uint128_t l_product = mul_64_64(p_left[i], p_left[k - i]);
+            if (i < k - i) {
                 r2 += l_product.m_high >> 63;
                 l_product.m_high = (l_product.m_high << 1) | (l_product.m_low >> 63);
                 l_product.m_low <<= 1;
@@ -354,30 +292,23 @@ void vli_square(uint64_t *p_result, uint64_t *p_left)
         r2 = 0;
     }

-    p_result[NUM_ECC_DIGITS*2 - 1] = r01.m_low;
+    p_result[NUM_ECC_DIGITS * 2 - 1] = r01.m_low;
 }

 #endif /* SUPPORTS_INT128 */

-/* Computes p_result = (p_left + p_right) % p_mod.
-Assumes that p_left < p_mod and p_right < p_mod, p_result != p_mod. */
 void vli_modAdd(uint64_t *p_result, uint64_t *p_left, const uint64_t *p_right, const uint64_t *p_mod)
 {
     uint64_t l_carry = vli_add(p_result, p_left, p_right);
-    if(l_carry || vli_cmp(p_result, p_mod) >= 0)
-    { /* p_result > p_mod (p_result = p_mod + remainder), so subtract p_mod to get remainder. */
+    if (l_carry || vli_cmp(p_result, p_mod) >= 0) {
+        /* p_result > p_mod (p_result = p_mod + remainder), so subtract p_mod to get remainder. */
         vli_sub(p_result, p_result, p_mod);
     }
 }

-/* Computes p_result = (p_left - p_right) % p_mod.
-Assumes that p_left < p_mod and p_right < p_mod, p_result != p_mod. */
 void vli_modSub(uint64_t *p_result, uint64_t *p_left, const uint64_t *p_right, const uint64_t *p_mod)
 {
     uint64_t l_borrow = vli_sub(p_result, p_left, p_right);
-    if(l_borrow)
-    { /* In this case, p_result == -diff == (max int) - diff.
+    if (l_borrow) {
+        /* In this case, p_result == -diff == (max int) - diff.
         Since -x % d == d - x, we can get the correct result from p_result + p_mod (with overflow). */
         vli_add(p_result, p_result, p_mod);
     }
 }
@@ -387,21 +318,17 @@ void omega_mult(uint64_t *p_result, uint64_t *p_right)
     uint64_t l_tmp[NUM_ECC_DIGITS];
     uint64_t l_carry, l_diff;

-    /* Multiply by (2^128 + 2^96 - 2^32 + 1). */
-    vli_set(p_result, p_right); /* 1 */
+    vli_set(p_result, p_right);
     l_carry = vli_lshift(l_tmp, p_right, 32);
-    p_result[1 + NUM_ECC_DIGITS] = l_carry + vli_add(p_result + 1, p_result + 1, l_tmp); /* 2^96 + 1 */
-    p_result[2 + NUM_ECC_DIGITS] = vli_add(p_result + 2, p_result + 2, p_right); /* 2^128 + 2^96 + 1 */
-    l_carry += vli_sub(p_result, p_result, l_tmp); /* 2^128 + 2^96 - 2^32 + 1 */
+    p_result[1 + NUM_ECC_DIGITS] = l_carry + vli_add(p_result + 1, p_result + 1, l_tmp);
+    p_result[2 + NUM_ECC_DIGITS] = vli_add(p_result + 2, p_result + 2, p_right);
+    l_carry += vli_sub(p_result, p_result, l_tmp);
     l_diff = p_result[NUM_ECC_DIGITS] - l_carry;
-    if(l_diff > p_result[NUM_ECC_DIGITS])
-    { /* Propagate borrow if necessary. */
+    if (l_diff > p_result[NUM_ECC_DIGITS]) {
+        /* Propagate borrow if necessary. */
         uint i;
-        for(i = 1 + NUM_ECC_DIGITS; ; ++i)
-        {
+        for (i = 1 + NUM_ECC_DIGITS;; ++i) {
             --p_result[i];
-            if(p_result[i] != (uint64_t)-1)
-            {
+            if (p_result[i] != (uint64_t)-1) {
                 break;
             }
         }
@@ -409,59 +336,50 @@ void omega_mult(uint64_t *p_result, uint64_t *p_right)
     p_result[NUM_ECC_DIGITS] = l_diff;
 }

-/* Computes p_result = p_product % curve_p
-see PDF "Comparing Elliptic Curve Cryptography and RSA on 8-bit CPUs"
-section "Curve-Specific Optimizations" */
 void vli_mmod_fast(uint64_t *p_result, uint64_t *p_product)
 {
-    uint64_t l_tmp[2*NUM_ECC_DIGITS];
+    uint64_t l_tmp[2 * NUM_ECC_DIGITS];

-    while(!vli_isZero(p_product + NUM_ECC_DIGITS)) /* While c1 != 0 */
-    {
+    while (!vli_isZero(p_product + NUM_ECC_DIGITS)) {
         uint64_t l_carry = 0;
         uint i;

         vli_clear(l_tmp);
         vli_clear(l_tmp + NUM_ECC_DIGITS);
-        omega_mult(l_tmp, p_product + NUM_ECC_DIGITS); /* tmp = w * c1 */
-        vli_clear(p_product + NUM_ECC_DIGITS); /* p = c0 */
+        omega_mult(l_tmp, p_product + NUM_ECC_DIGITS);
+        vli_clear(p_product + NUM_ECC_DIGITS);

-        /* (c1, c0) = c0 + w * c1 */
-        for(i=0; i<NUM_ECC_DIGITS+3; ++i)
-        {
+        for (i = 0; i < NUM_ECC_DIGITS + 3; ++i) {
             uint64_t l_sum = p_product[i] + l_tmp[i] + l_carry;
-            if(l_sum != p_product[i])
-            {
+            if (l_sum != p_product[i]) {
                 l_carry = (l_sum < p_product[i]);
             }
             p_product[i] = l_sum;
         }
     }

-    while(vli_cmp(p_product, curve_p) > 0)
-    {
+    while (vli_cmp(p_product, curve_p) > 0) {
         vli_sub(p_product, p_product, curve_p);
     }
     vli_set(p_result, p_product);
 }

-/* Computes p_result = (p_left * p_right) % curve_p. */
-ZT_INLINE void vli_modMult_fast(uint64_t *p_result,uint64_t *p_left,const uint64_t *p_right)
+ZT_INLINE void vli_modMult_fast(uint64_t *p_result, uint64_t *p_left, const uint64_t *p_right)
 {
     uint64_t l_product[2 * NUM_ECC_DIGITS];
     vli_mult(l_product, p_left, p_right);
     vli_mmod_fast(p_result, l_product);
 }

-/* Computes p_result = p_left^2 % curve_p. */
-ZT_INLINE void vli_modSquare_fast(uint64_t *p_result,uint64_t *p_left)
+ZT_INLINE void vli_modSquare_fast(uint64_t *p_result, uint64_t *p_left)
 {
     uint64_t l_product[2 * NUM_ECC_DIGITS];
     vli_square(l_product, p_left);
     vli_mmod_fast(p_result, l_product);
 }

-#define EVEN(vli) (!(vli[0] & 1))
+#define vli_isEven(vli) (!(vli[0] & 1))

 /* Computes p_result = (1 / p_input) % p_mod. All VLIs are the same size.
 See "From Euclid's GCD to Montgomery Multiplication to the Great Divide"
 https://labs.oracle.com/techrep/2001/smli_tr-2001-95.pdf */
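For context on omega_mult() and vli_mmod_fast() above: the P-384 prime is p = 2^384 − 2^128 − 2^96 + 2^32 − 1, so 2^384 ≡ 2^128 + 2^96 − 2^32 + 1 (mod p). A double-width value c1·2^384 + c0 therefore folds to c0 + ω·c1 with ω the right-hand side above, which is exactly the shifted add/add/subtract sequence in omega_mult(); a couple of folding passes plus final subtractions of p finish the reduction. The same idea at toy scale, with p = 2^8 − 5 so that 2^8 ≡ 5 (mod p):

#include <cassert>
#include <cstdint>

// Miniature vli_mmod_fast(): fold hi*2^8 + lo into lo + 5*hi until the high
// part is gone, then subtract p at most a few times. P-384 uses
// omega = 2^128 + 2^96 - 2^32 + 1 in place of the 5 here.
int main() {
    const uint32_t p = 251; // 2^8 - 5
    uint32_t x = 60000;     // some double-width input
    const uint32_t want = x % p;
    while (x >> 8)
        x = (x & 0xFF) + 5 * (x >> 8);
    while (x >= p)
        x -= p;
    assert(x == want); // 60000 mod 251 == 11
}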
@@ -471,8 +389,7 @@ void vli_modInv(uint64_t *p_result, uint64_t *p_input, const uint64_t *p_mod)
     uint64_t l_carry;
     int l_cmpResult;

-    if(vli_isZero(p_input))
-    {
+    if (vli_isZero(p_input)) {
         vli_clear(p_result);
         return;
     }
@@ -483,71 +400,53 @@ void vli_modInv(uint64_t *p_result, uint64_t *p_input, const uint64_t *p_mod)
     u[0] = 1;
     vli_clear(v);

-    while((l_cmpResult = vli_cmp(a, b)) != 0)
-    {
+    while ((l_cmpResult = vli_cmp(a, b)) != 0) {
         l_carry = 0;
-        if(EVEN(a))
-        {
+        if (vli_isEven(a)) {
             vli_rshift1(a);
-            if(!EVEN(u))
-            {
+            if (!vli_isEven(u)) {
                 l_carry = vli_add(u, u, p_mod);
             }
             vli_rshift1(u);
-            if(l_carry)
-            {
-                u[NUM_ECC_DIGITS-1] |= 0x8000000000000000ull;
+            if (l_carry) {
+                u[NUM_ECC_DIGITS - 1] |= 0x8000000000000000ull;
             }
-        }
-        else if(EVEN(b))
-        {
+        } else if (vli_isEven(b)) {
             vli_rshift1(b);
-            if(!EVEN(v))
-            {
+            if (!vli_isEven(v)) {
                 l_carry = vli_add(v, v, p_mod);
             }
             vli_rshift1(v);
-            if(l_carry)
-            {
-                v[NUM_ECC_DIGITS-1] |= 0x8000000000000000ull;
+            if (l_carry) {
+                v[NUM_ECC_DIGITS - 1] |= 0x8000000000000000ull;
             }
-        }
-        else if(l_cmpResult > 0)
-        {
+        } else if (l_cmpResult > 0) {
             vli_sub(a, a, b);
             vli_rshift1(a);
-            if(vli_cmp(u, v) < 0)
-            {
+            if (vli_cmp(u, v) < 0) {
                 vli_add(u, u, p_mod);
             }
             vli_sub(u, u, v);
-            if(!EVEN(u))
-            {
+            if (!vli_isEven(u)) {
                 l_carry = vli_add(u, u, p_mod);
             }
             vli_rshift1(u);
-            if(l_carry)
-            {
-                u[NUM_ECC_DIGITS-1] |= 0x8000000000000000ull;
+            if (l_carry) {
+                u[NUM_ECC_DIGITS - 1] |= 0x8000000000000000ull;
             }
-        }
-        else
-        {
+        } else {
             vli_sub(b, b, a);
             vli_rshift1(b);
-            if(vli_cmp(v, u) < 0)
-            {
+            if (vli_cmp(v, u) < 0) {
                 vli_add(v, v, p_mod);
             }
             vli_sub(v, v, u);
-            if(!EVEN(v))
-            {
+            if (!vli_isEven(v)) {
                 l_carry = vli_add(v, v, p_mod);
             }
             vli_rshift1(v);
-            if(l_carry)
-            {
-                v[NUM_ECC_DIGITS-1] |= 0x8000000000000000ull;
+            if (l_carry) {
+                v[NUM_ECC_DIGITS - 1] |= 0x8000000000000000ull;
             }
         }
     }
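Reading aid for the loop above (the binary extended Euclidean algorithm from the cited SunLabs report): with initial a = p_input, b = p_mod, u = 1, v = 0, each branch preserves the invariants u·p_input ≡ a (mod p_mod) and v·p_input ≡ b (mod p_mod) while halving a or b. Halving u or v when it is odd first adds p_mod so the shift is exact; l_carry records the overflow out of the top word and the 0x8000000000000000 OR restores it after the shift, i.e. a multiword (x + p_mod) >> 1. When the loop exits with a == b == gcd == 1, the first invariant gives u ≡ p_input⁻¹ (mod p_mod).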
@@ -555,27 +454,16 @@ void vli_modInv(uint64_t *p_result, uint64_t *p_input, const uint64_t *p_mod)
     vli_set(p_result, u);
 }

-/* ------ Point operations ------ */
-
-/* Returns 1 if p_point is the point at infinity, 0 otherwise. */
 ZT_INLINE int EccPoint_isZero(EccPoint *p_point)
-{
-    return (vli_isZero(p_point->x) && vli_isZero(p_point->y));
-}
+{ return (vli_isZero(p_point->x) && vli_isZero(p_point->y)); }

-/* Point multiplication algorithm using Montgomery's ladder with co-Z coordinates.
-From http://eprint.iacr.org/2011/338.pdf
-*/
-
-/* Double in place */
 void EccPoint_double_jacobian(uint64_t *X1, uint64_t *Y1, uint64_t *Z1)
 {
     /* t1 = X, t2 = Y, t3 = Z */
     uint64_t t4[NUM_ECC_DIGITS];
     uint64_t t5[NUM_ECC_DIGITS];

-    if(vli_isZero(Z1))
-    {
+    if (vli_isZero(Z1)) {
         return;
     }
@@ -592,14 +480,11 @@ void EccPoint_double_jacobian(uint64_t *X1, uint64_t *Y1, uint64_t *Z1)
     vli_modAdd(Z1, X1, X1, curve_p); /* t3 = 2*(x1^2 - z1^4) */
     vli_modAdd(X1, X1, Z1, curve_p); /* t1 = 3*(x1^2 - z1^4) */
-    if(vli_testBit(X1, 0))
-    {
+    if (vli_testBit(X1, 0)) {
         uint64_t l_carry = vli_add(X1, X1, curve_p);
         vli_rshift1(X1);
-        X1[NUM_ECC_DIGITS-1] |= l_carry << 63U;
-    }
-    else
-    {
+        X1[NUM_ECC_DIGITS - 1] |= l_carry << 63U;
+    } else {
         vli_rshift1(X1);
     }
     /* t1 = 3/2*(x1^2 - z1^4) = B */
@@ -616,7 +501,6 @@ void EccPoint_double_jacobian(uint64_t *X1, uint64_t *Y1, uint64_t *Z1)
     vli_set(Y1, t4);
 }

-/* Modify (x1, y1) => (x1 * z^2, y1 * z^3) */
 void apply_z(uint64_t *X1, uint64_t *Y1, uint64_t *Z)
 {
     uint64_t t1[NUM_ECC_DIGITS];
@@ -627,7 +511,6 @@ void apply_z(uint64_t *X1, uint64_t *Y1, uint64_t *Z)
     vli_modMult_fast(Y1, Y1, t1); /* y1 * z^3 */
 }

-/* P = (x1, y1) => 2P, (x2, y2) => P' */
 void XYcZ_initial_double(uint64_t *X1, uint64_t *Y1, uint64_t *X2, uint64_t *Y2, uint64_t *p_initialZ)
 {
     uint64_t z[NUM_ECC_DIGITS];
@@ -637,8 +520,7 @@ void XYcZ_initial_double(uint64_t *X1, uint64_t *Y1, uint64_t *X2, uint64_t *Y2, uint64_t *p_initialZ)
     vli_clear(z);
     z[0] = 1;
-    if(p_initialZ)
-    {
+    if (p_initialZ) {
         vli_set(z, p_initialZ);
     }
@@ -649,10 +531,6 @@ void XYcZ_initial_double(uint64_t *X1, uint64_t *Y1, uint64_t *X2, uint64_t *Y2, uint64_t *p_initialZ)
     apply_z(X2, Y2, z);
 }

-/* Input P = (x1, y1, Z), Q = (x2, y2, Z)
-Output P' = (x1', y1', Z3), P + Q = (x3, y3, Z3)
-or P => P', Q => P + Q
-*/
 void XYcZ_add(uint64_t *X1, uint64_t *Y1, uint64_t *X2, uint64_t *Y2)
 {
     /* t1 = X1, t2 = Y1, t3 = X2, t4 = Y2 */
@@ -676,10 +554,6 @@ void XYcZ_add(uint64_t *X1, uint64_t *Y1, uint64_t *X2, uint64_t *Y2)
     vli_set(X2, t5);
 }

-/* Input P = (x1, y1, Z), Q = (x2, y2, Z)
-Output P + Q = (x3, y3, Z3), P - Q = (x3', y3', Z3)
-or P => P - Q, Q => P + Q
-*/
 void XYcZ_addC(uint64_t *X1, uint64_t *Y1, uint64_t *X2, uint64_t *Y2)
 {
     /* t1 = X1, t2 = Y1, t3 = X2, t4 = Y2 */
@@ -727,26 +601,25 @@ void EccPoint_mult(EccPoint *p_result, const EccPoint *p_point, uint64_t *p_scalar, uint64_t *p_initialZ)
     XYcZ_initial_double(Rx[1], Ry[1], Rx[0], Ry[0], p_initialZ);

-    for(i = (int)vli_numBits(p_scalar) - 2; i > 0; --i)
-    {
+    for (i = (int)vli_numBits(p_scalar) - 2; i > 0; --i) {
         nb = !vli_testBit(p_scalar, i);
-        XYcZ_addC(Rx[1-nb], Ry[1-nb], Rx[nb], Ry[nb]);
-        XYcZ_add(Rx[nb], Ry[nb], Rx[1-nb], Ry[1-nb]);
+        XYcZ_addC(Rx[1 - nb], Ry[1 - nb], Rx[nb], Ry[nb]);
+        XYcZ_add(Rx[nb], Ry[nb], Rx[1 - nb], Ry[1 - nb]);
     }

     nb = !vli_testBit(p_scalar, 0);
-    XYcZ_addC(Rx[1-nb], Ry[1-nb], Rx[nb], Ry[nb]);
+    XYcZ_addC(Rx[1 - nb], Ry[1 - nb], Rx[nb], Ry[nb]);

     /* Find final 1/Z value. */
     vli_modSub(z, Rx[1], Rx[0], curve_p); /* X1 - X0 */
-    vli_modMult_fast(z, z, Ry[1-nb]); /* Yb * (X1 - X0) */
+    vli_modMult_fast(z, z, Ry[1 - nb]); /* Yb * (X1 - X0) */
     vli_modMult_fast(z, z, p_point->x); /* xP * Yb * (X1 - X0) */
     vli_modInv(z, z, curve_p); /* 1 / (xP * Yb * (X1 - X0)) */
     vli_modMult_fast(z, z, p_point->y); /* yP / (xP * Yb * (X1 - X0)) */
-    vli_modMult_fast(z, z, Rx[1-nb]); /* Xb * yP / (xP * Yb * (X1 - X0)) */
+    vli_modMult_fast(z, z, Rx[1 - nb]); /* Xb * yP / (xP * Yb * (X1 - X0)) */
     /* End 1/Z calculation */

-    XYcZ_add(Rx[nb], Ry[nb], Rx[1-nb], Ry[1-nb]);
+    XYcZ_add(Rx[nb], Ry[nb], Rx[1 - nb], Ry[1 - nb]);

     apply_z(Rx[0], Ry[0], z);
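EccPoint_mult() is the Montgomery ladder with co-Z coordinates from eprint 2011/338 (the reference lived in a comment this cleanup removed): the pair (Rx[0], Ry[0]) / (Rx[1], Ry[1]) advances every iteration with the same two operations, and only the roles swap with the key bit, so the operation sequence does not depend on the scalar. The same control flow in a simpler group (modular exponentiation), offered purely as an analogy with made-up toy values:

#include <cassert>
#include <cstdint>

// Montgomery-ladder control flow: r0 and r1 always satisfy r1 = r0 * g, and
// every bit triggers the same two multiplications, in bit-dependent order.
int main() {
    const uint64_t m = 1000003, g = 12345, e = 0xB77F;
    uint64_t r0 = 1, r1 = g % m;
    for (int i = 15; i >= 0; --i) {
        unsigned b = (e >> i) & 1;
        if (b) { r0 = (r0 * r1) % m; r1 = (r1 * r1) % m; }
        else   { r1 = (r0 * r1) % m; r0 = (r0 * r0) % m; }
    }
    // Reference: plain square-and-multiply.
    uint64_t ref = 1, base = g % m, k = e;
    while (k) { if (k & 1) ref = (ref * base) % m; base = (base * base) % m; k >>= 1; }
    assert(r0 == ref);
}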
@@ -754,22 +627,18 @@ void EccPoint_mult(EccPoint *p_result, const EccPoint *p_point, uint64_t *p_scalar, uint64_t *p_initialZ)
     vli_set(p_result->y, Ry[0]);
 }

-ZT_INLINE void ecc_bytes2native(uint64_t p_native[NUM_ECC_DIGITS],const uint8_t p_bytes[ECC_BYTES])
+ZT_INLINE void ecc_bytes2native(uint64_t p_native[NUM_ECC_DIGITS], const uint8_t p_bytes[ECC_BYTES])
 {
-    unsigned i;
-    for(i=0; i<NUM_ECC_DIGITS; ++i)
-    {
+    for (uint i = 0; i < NUM_ECC_DIGITS; ++i) {
         const uint8_t *p_digit = p_bytes + 8 * (NUM_ECC_DIGITS - 1 - i);
         p_native[i] = ((uint64_t)p_digit[0] << 56) | ((uint64_t)p_digit[1] << 48) | ((uint64_t)p_digit[2] << 40) | ((uint64_t)p_digit[3] << 32) |
             ((uint64_t)p_digit[4] << 24) | ((uint64_t)p_digit[5] << 16) | ((uint64_t)p_digit[6] << 8) | (uint64_t)p_digit[7];
     }
 }

-ZT_INLINE void ecc_native2bytes(uint8_t p_bytes[ECC_BYTES],const uint64_t p_native[NUM_ECC_DIGITS])
+ZT_INLINE void ecc_native2bytes(uint8_t p_bytes[ECC_BYTES], const uint64_t p_native[NUM_ECC_DIGITS])
 {
-    unsigned i;
-    for(i=0; i<NUM_ECC_DIGITS; ++i)
-    {
+    for (uint i = 0; i < NUM_ECC_DIGITS; ++i) {
         uint8_t *p_digit = p_bytes + 8 * (NUM_ECC_DIGITS - 1 - i);
         p_digit[0] = p_native[i] >> 56;
         p_digit[1] = p_native[i] >> 48;
@@ -782,31 +651,25 @@ ZT_INLINE void ecc_native2bytes(uint8_t p_bytes[ECC_BYTES],const uint64_t p_native[NUM_ECC_DIGITS])
     }
 }

-/* Compute a = sqrt(a) (mod curve_p). */
 void mod_sqrt(uint64_t a[NUM_ECC_DIGITS])
 {
-    unsigned i;
     uint64_t p1[NUM_ECC_DIGITS] = {1};
     uint64_t l_result[NUM_ECC_DIGITS] = {1};

-    /* Since curve_p == 3 (mod 4) for all supported curves, we can
-    compute sqrt(a) = a^((curve_p + 1) / 4) (mod curve_p). */
     vli_add(p1, curve_p, p1); /* p1 = curve_p + 1 */
-    for(i = vli_numBits(p1) - 1; i > 1; --i)
-    {
+    for (uint i = vli_numBits(p1) - 1; i > 1; --i) {
         vli_modSquare_fast(l_result, l_result);
-        if(vli_testBit(p1, i))
-        {
+        if (vli_testBit(p1, i)) {
             vli_modMult_fast(l_result, l_result, a);
         }
     }

     vli_set(a, l_result);
 }

-void ecc_point_decompress(EccPoint *p_point, const uint8_t p_compressed[ECC_BYTES+1])
+void ecc_point_decompress(EccPoint *p_point, const uint8_t p_compressed[ECC_BYTES + 1])
 {
     uint64_t _3[NUM_ECC_DIGITS] = {3}; /* -a = 3 */
-    ecc_bytes2native(p_point->x, p_compressed+1);
+    ecc_bytes2native(p_point->x, p_compressed + 1);

     vli_modSquare_fast(p_point->y, p_point->x); /* y = x^2 */
     vli_modSub(p_point->y, p_point->y, _3, curve_p); /* y = x^2 - 3 */
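mod_sqrt() relies on the identity documented by the comment the cleanup removed: for p ≡ 3 (mod 4), sqrt(a) = a^((p+1)/4) (mod p) whenever a is a quadratic residue, and the loop is square-and-multiply over the bits of p + 1, stopping above bit 1 (the implicit division by 4). A worked instance at toy scale:

#include <cassert>
#include <cstdint>

// p = 11 ≡ 3 (mod 4) and a = 5 = 4^2 mod 11, so the exponent is (11+1)/4 = 3.
int main() {
    const uint32_t p = 11, a = 5;
    uint32_t r = (a * a % p) * a % p; // a^3 mod 11 = 4
    assert(r * r % p == a);           // 4^2 = 16 ≡ 5 (mod 11)
}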
@@ -815,38 +678,31 @@ void ecc_point_decompress(EccPoint *p_point, const uint8_t p_compressed[ECC_BYTES+1])
     mod_sqrt(p_point->y);

-    if((p_point->y[0] & 0x01) != (p_compressed[0] & 0x01))
-    {
+    if ((p_point->y[0] & 0x01) != (p_compressed[0] & 0x01)) {
         vli_sub(p_point->y, curve_p, p_point->y);
     }
 }

-ZT_INLINE int ecc_make_key(uint8_t p_publicKey[ECC_BYTES + 1],uint8_t p_privateKey[ECC_BYTES])
+ZT_INLINE int ecc_make_key(uint8_t p_publicKey[ECC_BYTES + 1], uint8_t p_privateKey[ECC_BYTES])
 {
     uint64_t l_private[NUM_ECC_DIGITS];
     EccPoint l_public;
     unsigned l_tries = 0;

-    do
-    {
-        if(!getRandomNumber(l_private) || (l_tries++ >= MAX_TRIES))
-        {
+    do {
+        if (!getRandomNumber(l_private) || (l_tries++ >= ECC_CREATE_KEY_MAX_ATTEMPTS)) {
             return 0;
         }
-        if(vli_isZero(l_private))
-        {
+        if (vli_isZero(l_private)) {
             continue;
         }

-        /* Make sure the private key is in the range [1, n-1].
-        For the supported curves, n is always large enough that we only need to subtract once at most. */
-        if(vli_cmp(curve_n, l_private) != 1)
-        {
+        if (vli_cmp(curve_n, l_private) != 1) {
             vli_sub(l_private, l_private, curve_n);
         }

         EccPoint_mult(&l_public, &curve_G, l_private, NULL);
-    } while(EccPoint_isZero(&l_public));
+    } while (EccPoint_isZero(&l_public));

     ecc_native2bytes(p_privateKey, l_private);
     ecc_native2bytes(p_publicKey + 1, l_public.x);
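Two pieces of context that lived in the removed comments: ecc_point_decompress() solves the short-Weierstrass equation y² = x³ − 3x + b for y (the _3 constant is −a, since the curve parameter is a = −3) and replaces the recovered root with p − y when its parity disagrees with the low bit of the compressed point's leading byte; and ecc_make_key() keeps the private scalar in [1, n−1], where a single subtraction of n suffices because, per the old comment, n is large enough for the supported curves that at most one subtraction is ever needed.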
@@ -854,14 +710,13 @@ ZT_INLINE int ecc_make_key(uint8_t p_publicKey[ECC_BYTES + 1],uint8_t p_privateKey[ECC_BYTES])
     return 1;
 }

-ZT_INLINE int ecdh_shared_secret(const uint8_t p_publicKey[ECC_BYTES + 1],const uint8_t p_privateKey[ECC_BYTES],uint8_t p_secret[ECC_BYTES])
+ZT_INLINE int ecdh_shared_secret(const uint8_t p_publicKey[ECC_BYTES + 1], const uint8_t p_privateKey[ECC_BYTES], uint8_t p_secret[ECC_BYTES])
 {
     EccPoint l_public;
     uint64_t l_private[NUM_ECC_DIGITS];
     uint64_t l_random[NUM_ECC_DIGITS];

-    if(!getRandomNumber(l_random))
-    {
+    if (!getRandomNumber(l_random)) {
         return 0;
     }
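For the reader: ecdh_shared_secret() outputs the x-coordinate of d_ours · Q_theirs, and both sides agree because d_A·(d_B·G) = (d_A·d_B)·G = d_B·(d_A·G). The freshly drawn l_random is passed to EccPoint_mult() as the initial projective Z coordinate, randomizing the ladder's internal representation (a standard blinding measure) without changing the affine result.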
@@ -876,8 +731,6 @@ ZT_INLINE int ecdh_shared_secret(const uint8_t p_publicKey[ECC_BYTES + 1],const uint8_t p_privateKey[ECC_BYTES],uint8_t p_secret[ECC_BYTES])
     return !EccPoint_isZero(&l_product);
 }

-/* -------- ECDSA code -------- */
-
 /* Computes p_result = (p_left * p_right) % p_mod. */
 void vli_modMult(uint64_t *p_result, uint64_t *p_left, uint64_t *p_right, const uint64_t *p_mod)
 {
@@ -889,17 +742,13 @@ void vli_modMult(uint64_t *p_result, uint64_t *p_left, uint64_t *p_right, const uint64_t *p_mod)
     vli_mult(l_product, p_left, p_right);
     l_productBits = vli_numBits(l_product + NUM_ECC_DIGITS);
-    if(l_productBits)
-    {
+    if (l_productBits) {
         l_productBits += NUM_ECC_DIGITS * 64;
-    }
-    else
-    {
+    } else {
         l_productBits = vli_numBits(l_product);
     }

-    if(l_productBits < l_modBits)
-    { /* l_product < p_mod. */
+    if (l_productBits < l_modBits) { /* l_product < p_mod. */
         vli_set(p_result, l_product);
         return;
     }
@@ -910,25 +759,19 @@ void vli_modMult(uint64_t *p_result, uint64_t *p_left, uint64_t *p_right, const uint64_t *p_mod)
     vli_clear(l_modMultiple + NUM_ECC_DIGITS);
     l_digitShift = (l_productBits - l_modBits) / 64;
     l_bitShift = (l_productBits - l_modBits) % 64;
-    if(l_bitShift)
-    {
+    if (l_bitShift) {
         l_modMultiple[l_digitShift + NUM_ECC_DIGITS] = vli_lshift(l_modMultiple + l_digitShift, p_mod, l_bitShift);
-    }
-    else
-    {
+    } else {
         vli_set(l_modMultiple + l_digitShift, p_mod);
     }

     /* Subtract all multiples of p_mod to get the remainder. */
     vli_clear(p_result);
     p_result[0] = 1; /* Use p_result as a temp var to store 1 (for subtraction) */
-    while(l_productBits > NUM_ECC_DIGITS * 64 || vli_cmp(l_modMultiple, p_mod) >= 0)
-    {
+    while (l_productBits > NUM_ECC_DIGITS * 64 || vli_cmp(l_modMultiple, p_mod) >= 0) {
         int l_cmp = vli_cmp(l_modMultiple + NUM_ECC_DIGITS, l_product + NUM_ECC_DIGITS);
-        if(l_cmp < 0 || (l_cmp == 0 && vli_cmp(l_modMultiple, l_product) <= 0))
-        {
-            if(vli_sub(l_product, l_product, l_modMultiple))
-            { /* borrow */
+        if (l_cmp < 0 || (l_cmp == 0 && vli_cmp(l_modMultiple, l_product) <= 0)) {
+            if (vli_sub(l_product, l_product, l_modMultiple)) { /* borrow */
                 vli_sub(l_product + NUM_ECC_DIGITS, l_product + NUM_ECC_DIGITS, p_result);
             }
             vli_sub(l_product + NUM_ECC_DIGITS, l_product + NUM_ECC_DIGITS, l_modMultiple + NUM_ECC_DIGITS);
@@ -936,19 +779,19 @@ void vli_modMult(uint64_t *p_result, uint64_t *p_left, uint64_t *p_right, const uint64_t *p_mod)
         uint64_t l_carry = (l_modMultiple[NUM_ECC_DIGITS] & 0x01) << 63;
         vli_rshift1(l_modMultiple + NUM_ECC_DIGITS);
         vli_rshift1(l_modMultiple);
-        l_modMultiple[NUM_ECC_DIGITS-1] |= l_carry;
+        l_modMultiple[NUM_ECC_DIGITS - 1] |= l_carry;

         --l_productBits;
     }
     vli_set(p_result, l_product);
 }

-ZT_INLINE uint umax(uint a,uint b)
+ZT_INLINE uint umax(uint a, uint b)
 {
     return (a > b ? a : b);
 }

-ZT_INLINE int ecdsa_sign(const uint8_t p_privateKey[ECC_BYTES],const uint8_t p_hash[ECC_BYTES],uint8_t p_signature[ECC_BYTES * 2])
+ZT_INLINE int ecdsa_sign(const uint8_t p_privateKey[ECC_BYTES], const uint8_t p_hash[ECC_BYTES], uint8_t p_signature[ECC_BYTES * 2])
 {
     uint64_t k[NUM_ECC_DIGITS];
     uint64_t l_tmp[NUM_ECC_DIGITS];
@@ -956,19 +799,15 @@ ZT_INLINE int ecdsa_sign(const uint8_t p_privateKey[ECC_BYTES],const uint8_t p_hash[ECC_BYTES],uint8_t p_signature[ECC_BYTES * 2])
     EccPoint p;
     unsigned l_tries = 0;

-    do
-    {
-        if(!getRandomNumber(k) || (l_tries++ >= MAX_TRIES))
-        {
+    do {
+        if (!getRandomNumber(k) || (l_tries++ >= ECC_CREATE_KEY_MAX_ATTEMPTS)) {
             return 0;
         }
-        if(vli_isZero(k))
-        {
+        if (vli_isZero(k)) {
             continue;
         }

-        if(vli_cmp(curve_n, k) != 1)
-        {
+        if (vli_cmp(curve_n, k) != 1) {
             vli_sub(k, k, curve_n);
         }
@@ -976,11 +815,10 @@ ZT_INLINE int ecdsa_sign(const uint8_t p_privateKey[ECC_BYTES],const uint8_t p_hash[ECC_BYTES],uint8_t p_signature[ECC_BYTES * 2])
         EccPoint_mult(&p, &curve_G, k, NULL);

         /* r = x1 (mod n) */
-        if(vli_cmp(curve_n, p.x) != 1)
-        {
+        if (vli_cmp(curve_n, p.x) != 1) {
             vli_sub(p.x, p.x, curve_n);
         }
-    } while(vli_isZero(p.x));
+    } while (vli_isZero(p.x));

     ecc_native2bytes(p_signature, p.x);
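For reference while reading the signing loop: ECDSA over a hash e with private key d draws a fresh nonce k, sets r = (k·G).x mod n and s = k⁻¹(e + r·d) mod n, rejecting the rare k that yields r = 0 (hence the do/while). Verification computes w = s⁻¹ mod n, u1 = e·w, u2 = r·w and accepts iff (u1·G + u2·Q).x ≡ r (mod n), which is what the ecdsa_verify() hunks below implement.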
@@ -995,7 +833,7 @@ ZT_INLINE int ecdsa_sign(const uint8_t p_privateKey[ECC_BYTES],const uint8_t p_hash[ECC_BYTES],uint8_t p_signature[ECC_BYTES * 2])
     return 1;
 }

-ZT_INLINE int ecdsa_verify(const uint8_t p_publicKey[ECC_BYTES + 1],const uint8_t p_hash[ECC_BYTES],const uint8_t p_signature[ECC_BYTES * 2])
+ZT_INLINE int ecdsa_verify(const uint8_t p_publicKey[ECC_BYTES + 1], const uint8_t p_hash[ECC_BYTES], const uint8_t p_signature[ECC_BYTES * 2])
 {
     uint64_t u1[NUM_ECC_DIGITS], u2[NUM_ECC_DIGITS];
     uint64_t z[NUM_ECC_DIGITS];
@@ -1012,13 +850,11 @@ ZT_INLINE int ecdsa_verify(const uint8_t p_publicKey[ECC_BYTES + 1],const uint8_t p_hash[ECC_BYTES],const uint8_t p_signature[ECC_BYTES * 2])
     ecc_bytes2native(l_r, p_signature);
     ecc_bytes2native(l_s, p_signature + ECC_BYTES);

-    if(vli_isZero(l_r) || vli_isZero(l_s))
-    { /* r, s must not be 0. */
+    if (vli_isZero(l_r) || vli_isZero(l_s)) { /* r, s must not be 0. */
         return 0;
     }

-    if(vli_cmp(curve_n, l_r) != 1 || vli_cmp(curve_n, l_s) != 1)
-    { /* r, s must be < n. */
+    if (vli_cmp(curve_n, l_r) != 1 || vli_cmp(curve_n, l_s) != 1) { /* r, s must be < n. */
         return 0;
     }
@@ -1042,21 +878,19 @@ ZT_INLINE int ecdsa_verify(const uint8_t p_publicKey[ECC_BYTES + 1],const uint8_t p_hash[ECC_BYTES],const uint8_t p_signature[ECC_BYTES * 2])
     const EccPoint *l_points[4] = {NULL, &curve_G, &l_public, &l_sum};
     uint l_numBits = umax(vli_numBits(u1), vli_numBits(u2));

-    const EccPoint *l_point = l_points[(!!vli_testBit(u1, l_numBits-1)) | ((!!vli_testBit(u2, l_numBits-1)) << 1)];
+    const EccPoint *l_point = l_points[(!!vli_testBit(u1, l_numBits - 1)) | ((!!vli_testBit(u2, l_numBits - 1)) << 1)];
     vli_set(rx, l_point->x);
     vli_set(ry, l_point->y);
     vli_clear(z);
     z[0] = 1;

     int i;
-    for(i = l_numBits - 2; i >= 0; --i)
-    {
+    for (i = l_numBits - 2; i >= 0; --i) {
         EccPoint_double_jacobian(rx, ry, z);

         int l_index = (!!vli_testBit(u1, i)) | ((!!vli_testBit(u2, i)) << 1);
         const EccPoint *l_point = l_points[l_index];
-        if(l_point)
-        {
+        if (l_point) {
             vli_set(tx, l_point->x);
             vli_set(ty, l_point->y);
             apply_z(tx, ty, z);
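The verifier evaluates u1·G + u2·Q in a single double-and-add pass (Shamir's trick): the precomputed table {∞, G, Q, G + Q} is indexed by the pair of current bits of u1 and u2 via bit(u1, i) | (bit(u2, i) << 1), so the loop costs one doubling per bit instead of running two separate scalar multiplications.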
@@ -1070,8 +904,7 @@ ZT_INLINE int ecdsa_verify(const uint8_t p_publicKey[ECC_BYTES + 1],const uint8_t p_hash[ECC_BYTES],const uint8_t p_signature[ECC_BYTES * 2])
     apply_z(rx, ry, z);

     /* v = x1 (mod n) */
-    if(vli_cmp(curve_n, rx) != 1)
-    {
+    if (vli_cmp(curve_n, rx) != 1) {
         vli_sub(rx, rx, curve_n);
     }
@@ -1081,30 +914,30 @@ ZT_INLINE int ecdsa_verify(const uint8_t p_publicKey[ECC_BYTES + 1],const uint8_t p_hash[ECC_BYTES],const uint8_t p_signature[ECC_BYTES * 2])
 } // anonymous namespace

-void ECC384GenerateKey(uint8_t pub[ZT_ECC384_PUBLIC_KEY_SIZE],uint8_t priv[ZT_ECC384_PRIVATE_KEY_SIZE])
+void ECC384GenerateKey(uint8_t pub[ZT_ECC384_PUBLIC_KEY_SIZE], uint8_t priv[ZT_ECC384_PRIVATE_KEY_SIZE])
 {
-    if (!ecc_make_key(pub,priv)) {
-        fprintf(stderr,"FATAL: ecdsa_make_key() failed!" ZT_EOL_S);
+    if (!ecc_make_key(pub, priv)) {
+        fprintf(stderr, "FATAL: ecdsa_make_key() failed!" ZT_EOL_S);
         abort();
     }
 }

-void ECC384ECDSASign(const uint8_t priv[ZT_ECC384_PRIVATE_KEY_SIZE],const uint8_t hash[ZT_ECC384_SIGNATURE_HASH_SIZE],uint8_t sig[ZT_ECC384_SIGNATURE_SIZE])
+void ECC384ECDSASign(const uint8_t priv[ZT_ECC384_PRIVATE_KEY_SIZE], const uint8_t hash[ZT_ECC384_SIGNATURE_HASH_SIZE], uint8_t sig[ZT_ECC384_SIGNATURE_SIZE])
 {
-    if (!ecdsa_sign(priv,hash,sig)) {
-        fprintf(stderr,"FATAL: ecdsa_sign() failed!" ZT_EOL_S);
+    if (!ecdsa_sign(priv, hash, sig)) {
+        fprintf(stderr, "FATAL: ecdsa_sign() failed!" ZT_EOL_S);
         abort();
     }
 }

-bool ECC384ECDSAVerify(const uint8_t pub[ZT_ECC384_PUBLIC_KEY_SIZE],const uint8_t hash[ZT_ECC384_SIGNATURE_HASH_SIZE],const uint8_t sig[ZT_ECC384_SIGNATURE_SIZE])
+bool ECC384ECDSAVerify(const uint8_t pub[ZT_ECC384_PUBLIC_KEY_SIZE], const uint8_t hash[ZT_ECC384_SIGNATURE_HASH_SIZE], const uint8_t sig[ZT_ECC384_SIGNATURE_SIZE])
 {
-    return (ecdsa_verify(pub,hash,sig) != 0);
+    return (ecdsa_verify(pub, hash, sig) != 0);
 }

-bool ECC384ECDH(const uint8_t theirPub[ZT_ECC384_PUBLIC_KEY_SIZE],const uint8_t ourPriv[ZT_ECC384_PRIVATE_KEY_SIZE],uint8_t secret[ZT_ECC384_SHARED_SECRET_SIZE])
+bool ECC384ECDH(const uint8_t theirPub[ZT_ECC384_PUBLIC_KEY_SIZE], const uint8_t ourPriv[ZT_ECC384_PRIVATE_KEY_SIZE], uint8_t secret[ZT_ECC384_SHARED_SECRET_SIZE])
 {
-    return (ecdh_shared_secret(theirPub,ourPriv,secret) != 0);
+    return (ecdh_shared_secret(theirPub, ourPriv, secret) != 0);
 }

 } // namespace ZeroTier
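A hypothetical caller sketch for the four public functions above. The ZT_ECC384_* constants and prototypes are assumed to come from this file's header (named ECC384.hpp here, an assumption), and the zeroed hash buffer stands in for a real SHA-384 digest:

#include <cstdint>
#include <cstring>
#include "ECC384.hpp" // assumed header name

using namespace ZeroTier;

void example()
{
    uint8_t pubA[ZT_ECC384_PUBLIC_KEY_SIZE], privA[ZT_ECC384_PRIVATE_KEY_SIZE];
    uint8_t pubB[ZT_ECC384_PUBLIC_KEY_SIZE], privB[ZT_ECC384_PRIVATE_KEY_SIZE];
    ECC384GenerateKey(pubA, privA);
    ECC384GenerateKey(pubB, privB);

    // Sign and verify a (placeholder) 48-byte message hash.
    uint8_t hash[ZT_ECC384_SIGNATURE_HASH_SIZE] = { 0 };
    uint8_t sig[ZT_ECC384_SIGNATURE_SIZE];
    ECC384ECDSASign(privA, hash, sig);
    bool ok = ECC384ECDSAVerify(pubA, hash, sig);

    // Both sides derive the same raw shared secret.
    uint8_t s1[ZT_ECC384_SHARED_SECRET_SIZE], s2[ZT_ECC384_SHARED_SECRET_SIZE];
    ECC384ECDH(pubB, privA, s1);
    ECC384ECDH(pubA, privB, s2);
    bool same = (std::memcmp(s1, s2, sizeof(s1)) == 0);
    (void)ok; (void)same;
}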

View file

@@ -27,6 +27,8 @@ namespace ZeroTier {
 namespace {

+//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
 // This is the memory-intensive hash function used to compute v0 identities from v0 public keys.
 #define ZT_V0_IDENTITY_GEN_MEMORY 2097152
@@ -80,12 +82,15 @@ struct identityV0ProofOfWorkCriteria
     char *genmem;
 };

+//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
 #define ZT_IDENTITY_V1_POW_MEMORY_SIZE 131072

 struct p_CompareLittleEndian
 {
 #if __BYTE_ORDER == __BIG_ENDIAN
-    ZT_INLINE bool operator()(const uint64_t a,const uint64_t b) const noexcept { return Utils::swapBytes(a) < Utils::swapBytes(b); }
+    ZT_INLINE bool operator()(const uint64_t a,const uint64_t b) const noexcept
+    { return Utils::swapBytes(a) < Utils::swapBytes(b); }
 #else
     ZT_INLINE bool operator()(const uint64_t a, const uint64_t b) const noexcept
     { return a < b; }
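What p_CompareLittleEndian buys: sorting 64-bit words by their little-endian numeric value yields the same order on any host, which matters for a proof-of-work result that must be reproducible across machines. A hedged sketch of the same idea, with __builtin_bswap64 and the GCC/Clang byte-order macros standing in for ZeroTier's Utils::swapBytes and the <endian.h> macros the real code uses:

#include <algorithm>
#include <cstdint>

// On big-endian hosts, a word read from memory must be byte-swapped to get
// the value its little-endian encoding represents; on little-endian hosts it
// already is that value.
static uint64_t toLE(uint64_t v) {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
    return __builtin_bswap64(v);
#else
    return v;
#endif
}

void sortLE(uint64_t *w, unsigned n) {
    std::sort(w, w + n, [](uint64_t a, uint64_t b) { return toLE(a) < toLE(b); });
}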
@@ -136,6 +141,8 @@ bool identityV1ProofOfWorkCriteria(const void *in, const unsigned int len, uint64_t *w)
     return (Utils::ntoh(w[0]) % 1000U) == 0;
 }

+//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
 } // anonymous namespace

 const Identity Identity::NIL;