Path: blob/main/contrib/bearssl/src/ec/ec_p256_m64.c
/*
 * Copyright (c) 2018 Thomas Pornin <[email protected]>
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "inner.h"

#if BR_INT128 || BR_UMUL128

#if BR_UMUL128
#include <intrin.h>
#endif

static const unsigned char P256_G[] = {
	0x04, 0x6B, 0x17, 0xD1, 0xF2, 0xE1, 0x2C, 0x42, 0x47, 0xF8,
	0xBC, 0xE6, 0xE5, 0x63, 0xA4, 0x40, 0xF2, 0x77, 0x03, 0x7D,
	0x81, 0x2D, 0xEB, 0x33, 0xA0, 0xF4, 0xA1, 0x39, 0x45, 0xD8,
	0x98, 0xC2, 0x96, 0x4F, 0xE3, 0x42, 0xE2, 0xFE, 0x1A, 0x7F,
	0x9B, 0x8E, 0xE7, 0xEB, 0x4A, 0x7C, 0x0F, 0x9E, 0x16, 0x2B,
	0xCE, 0x33, 0x57, 0x6B, 0x31, 0x5E, 0xCE, 0xCB, 0xB6, 0x40,
	0x68, 0x37, 0xBF, 0x51, 0xF5
};

static const unsigned char P256_N[] = {
	0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF,
	0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xBC, 0xE6, 0xFA, 0xAD,
	0xA7, 0x17, 0x9E, 0x84, 0xF3, 0xB9, 0xCA, 0xC2, 0xFC, 0x63,
	0x25, 0x51
};

static const unsigned char *
api_generator(int curve, size_t *len)
{
	(void)curve;
	*len = sizeof P256_G;
	return P256_G;
}

static const unsigned char *
api_order(int curve, size_t *len)
{
	(void)curve;
	*len = sizeof P256_N;
	return P256_N;
}

static size_t
api_xoff(int curve, size_t *len)
{
	(void)curve;
	*len = 32;
	return 1;
}

/*
 * A field element is encoded as four 64-bit integers, in basis 2^64.
 * Values may reach up to 2^256-1. Montgomery multiplication is used.
 */

/* R = 2^256 mod p */
static const uint64_t F256_R[] = {
	0x0000000000000001, 0xFFFFFFFF00000000,
	0xFFFFFFFFFFFFFFFF, 0x00000000FFFFFFFE
};

/* Curve equation is y^2 = x^3 - 3*x + B. This constant is B*R mod p
   (Montgomery representation of B). */
static const uint64_t P256_B_MONTY[] = {
	0xD89CDF6229C4BDDF, 0xACF005CD78843090,
	0xE5A220ABF7212ED6, 0xDC30061D04874834
};

/*
 * Addition in the field.
 */
static inline void
f256_add(uint64_t *d, const uint64_t *a, const uint64_t *b)
{
#if BR_INT128
	unsigned __int128 w;
	uint64_t t;

	/*
	 * Do the addition, with an extra carry in t.
	 */
	w = (unsigned __int128)a[0] + b[0];
	d[0] = (uint64_t)w;
	w = (unsigned __int128)a[1] + b[1] + (w >> 64);
	d[1] = (uint64_t)w;
	w = (unsigned __int128)a[2] + b[2] + (w >> 64);
	d[2] = (uint64_t)w;
	w = (unsigned __int128)a[3] + b[3] + (w >> 64);
	d[3] = (uint64_t)w;
	t = (uint64_t)(w >> 64);

	/*
	 * Fold carry t, using: 2^256 = 2^224 - 2^192 - 2^96 + 1 mod p.
	 */
	w = (unsigned __int128)d[0] + t;
	d[0] = (uint64_t)w;
	w = (unsigned __int128)d[1] + (w >> 64) - (t << 32);
	d[1] = (uint64_t)w;
	/* Here, carry "w >> 64" can only be 0 or -1 */
	w = (unsigned __int128)d[2] - ((w >> 64) & 1);
	d[2] = (uint64_t)w;
	/* Again, carry is 0 or -1. But there can be carry only if t = 1,
	   in which case the addition of (t << 32) - t is positive. */
	w = (unsigned __int128)d[3] - ((w >> 64) & 1) + (t << 32) - t;
	d[3] = (uint64_t)w;
	t = (uint64_t)(w >> 64);

	/*
	 * There can be an extra carry here, which we must fold again.
	 */
	w = (unsigned __int128)d[0] + t;
	d[0] = (uint64_t)w;
	w = (unsigned __int128)d[1] + (w >> 64) - (t << 32);
	d[1] = (uint64_t)w;
	w = (unsigned __int128)d[2] - ((w >> 64) & 1);
	d[2] = (uint64_t)w;
	d[3] += (t << 32) - t - (uint64_t)((w >> 64) & 1);

#elif BR_UMUL128

	unsigned char cc;
	uint64_t t;

	cc = _addcarry_u64(0, a[0], b[0], &d[0]);
	cc = _addcarry_u64(cc, a[1], b[1], &d[1]);
	cc = _addcarry_u64(cc, a[2], b[2], &d[2]);
	cc = _addcarry_u64(cc, a[3], b[3], &d[3]);

	/*
	 * If there is a carry, then we want to subtract p, which we
	 * do by adding 2^256 - p.
	 */
	t = cc;
	cc = _addcarry_u64(cc, d[0], 0, &d[0]);
	cc = _addcarry_u64(cc, d[1], -(t << 32), &d[1]);
	cc = _addcarry_u64(cc, d[2], -t, &d[2]);
	cc = _addcarry_u64(cc, d[3], (t << 32) - (t << 1), &d[3]);

	/*
	 * We have to do it again if there still is a carry.
	 */
	t = cc;
	cc = _addcarry_u64(cc, d[0], 0, &d[0]);
	cc = _addcarry_u64(cc, d[1], -(t << 32), &d[1]);
	cc = _addcarry_u64(cc, d[2], -t, &d[2]);
	(void)_addcarry_u64(cc, d[3], (t << 32) - (t << 1), &d[3]);

#endif
}

/*
 * Subtraction in the field.
 */
static inline void
f256_sub(uint64_t *d, const uint64_t *a, const uint64_t *b)
{
#if BR_INT128

	unsigned __int128 w;
	uint64_t t;

	w = (unsigned __int128)a[0] - b[0];
	d[0] = (uint64_t)w;
	w = (unsigned __int128)a[1] - b[1] - ((w >> 64) & 1);
	d[1] = (uint64_t)w;
	w = (unsigned __int128)a[2] - b[2] - ((w >> 64) & 1);
	d[2] = (uint64_t)w;
	w = (unsigned __int128)a[3] - b[3] - ((w >> 64) & 1);
	d[3] = (uint64_t)w;
	t = (uint64_t)(w >> 64) & 1;

	/*
	 * If there is a borrow (t = 1), then we must add the modulus
	 * p = 2^256 - 2^224 + 2^192 + 2^96 - 1.
	 */
	w = (unsigned __int128)d[0] - t;
	d[0] = (uint64_t)w;
	w = (unsigned __int128)d[1] + (t << 32) - ((w >> 64) & 1);
	d[1] = (uint64_t)w;
	/* Here, carry "w >> 64" can only be 0 or +1 */
	w = (unsigned __int128)d[2] + (w >> 64);
	d[2] = (uint64_t)w;
	/* Again, carry is 0 or +1 */
	w = (unsigned __int128)d[3] + (w >> 64) - (t << 32) + t;
	d[3] = (uint64_t)w;
	t = (uint64_t)(w >> 64) & 1;

	/*
	 * There may be again a borrow, in which case we must add the
	 * modulus again.
	 */
	w = (unsigned __int128)d[0] - t;
	d[0] = (uint64_t)w;
	w = (unsigned __int128)d[1] + (t << 32) - ((w >> 64) & 1);
	d[1] = (uint64_t)w;
	w = (unsigned __int128)d[2] + (w >> 64);
	d[2] = (uint64_t)w;
	d[3] += (uint64_t)(w >> 64) - (t << 32) + t;

#elif BR_UMUL128

	unsigned char cc;
	uint64_t t;

	cc = _subborrow_u64(0, a[0], b[0], &d[0]);
	cc = _subborrow_u64(cc, a[1], b[1], &d[1]);
	cc = _subborrow_u64(cc, a[2], b[2], &d[2]);
	cc = _subborrow_u64(cc, a[3], b[3], &d[3]);

	/*
	 * If there is a borrow, then we need to add p. We (virtually)
	 * add 2^256, then subtract 2^256 - p.
	 */
	t = cc;
	cc = _subborrow_u64(0, d[0], t, &d[0]);
	cc = _subborrow_u64(cc, d[1], -(t << 32), &d[1]);
	cc = _subborrow_u64(cc, d[2], -t, &d[2]);
	cc = _subborrow_u64(cc, d[3], (t << 32) - (t << 1), &d[3]);

	/*
	 * If there still is a borrow, then we need to add p again.
	 */
	t = cc;
	cc = _subborrow_u64(0, d[0], t, &d[0]);
	cc = _subborrow_u64(cc, d[1], -(t << 32), &d[1]);
	cc = _subborrow_u64(cc, d[2], -t, &d[2]);
	(void)_subborrow_u64(cc, d[3], (t << 32) - (t << 1), &d[3]);

#endif
}
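
#if 0
/*
 * Illustrative sketch, not part of the original BearSSL sources: a
 * minimal sanity check for f256_add() and f256_sub(), assuming a test
 * harness able to call static functions from this file. For an operand
 * strictly below p, adding then subtracting R = 2^256 mod p stays below
 * 2^256 at every step, so no carry/borrow folding occurs and the round
 * trip is bit-exact.
 */
static int
f256_addsub_sketch(void)
{
	uint64_t d[4];

	f256_add(d, P256_B_MONTY, F256_R);   /* d = b + R, no carry */
	f256_sub(d, d, F256_R);              /* back to b, no borrow */
	return memcmp(d, P256_B_MONTY, sizeof d) == 0;
}
#endif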

/*
 * Montgomery multiplication in the field.
 */
static void
f256_montymul(uint64_t *d, const uint64_t *a, const uint64_t *b)
{
#if BR_INT128

	uint64_t x, f, t0, t1, t2, t3, t4;
	unsigned __int128 z, ff;
	int i;

	/*
	 * When computing d <- d + a[u]*b, we also add f*p such
	 * that d + a[u]*b + f*p is a multiple of 2^64. Since
	 * p = -1 mod 2^64, we can compute f = d[0] + a[u]*b[0] mod 2^64.
	 */

	/*
	 * Step 1: t <- (a[0]*b + f*p) / 2^64
	 * We have f = a[0]*b[0] mod 2^64. Since p = -1 mod 2^64, this
	 * ensures that (a[0]*b + f*p) is a multiple of 2^64.
	 *
	 * We also have: f*p = f*2^256 - f*2^224 + f*2^192 + f*2^96 - f.
	 */
	x = a[0];
	z = (unsigned __int128)b[0] * x;
	f = (uint64_t)z;
	z = (unsigned __int128)b[1] * x + (z >> 64) + (uint64_t)(f << 32);
	t0 = (uint64_t)z;
	z = (unsigned __int128)b[2] * x + (z >> 64) + (uint64_t)(f >> 32);
	t1 = (uint64_t)z;
	z = (unsigned __int128)b[3] * x + (z >> 64) + f;
	t2 = (uint64_t)z;
	t3 = (uint64_t)(z >> 64);
	ff = ((unsigned __int128)f << 64) - ((unsigned __int128)f << 32);
	z = (unsigned __int128)t2 + (uint64_t)ff;
	t2 = (uint64_t)z;
	z = (unsigned __int128)t3 + (z >> 64) + (ff >> 64);
	t3 = (uint64_t)z;
	t4 = (uint64_t)(z >> 64);

	/*
	 * Steps 2 to 4: t <- (t + a[i]*b + f*p) / 2^64
	 */
	for (i = 1; i < 4; i ++) {
		x = a[i];

		/* t <- (t + x*b - f) / 2^64 */
		z = (unsigned __int128)b[0] * x + t0;
		f = (uint64_t)z;
		z = (unsigned __int128)b[1] * x + t1 + (z >> 64);
		t0 = (uint64_t)z;
		z = (unsigned __int128)b[2] * x + t2 + (z >> 64);
		t1 = (uint64_t)z;
		z = (unsigned __int128)b[3] * x + t3 + (z >> 64);
		t2 = (uint64_t)z;
		z = t4 + (z >> 64);
		t3 = (uint64_t)z;
		t4 = (uint64_t)(z >> 64);

		/* t <- t + f*2^32, carry in the upper half of z */
		z = (unsigned __int128)t0 + (uint64_t)(f << 32);
		t0 = (uint64_t)z;
		z = (z >> 64) + (unsigned __int128)t1 + (uint64_t)(f >> 32);
		t1 = (uint64_t)z;

		/* t <- t + f*2^192 - f*2^160 + f*2^128 */
		ff = ((unsigned __int128)f << 64)
			- ((unsigned __int128)f << 32) + f;
		z = (z >> 64) + (unsigned __int128)t2 + (uint64_t)ff;
		t2 = (uint64_t)z;
		z = (unsigned __int128)t3 + (z >> 64) + (ff >> 64);
		t3 = (uint64_t)z;
		t4 += (uint64_t)(z >> 64);
	}

	/*
	 * At that point, we have computed t = (a*b + F*p) / 2^256, where
	 * F is a 256-bit integer whose limbs are the "f" coefficients
	 * in the steps above. We have:
	 *   a <= 2^256-1
	 *   b <= 2^256-1
	 *   F <= 2^256-1
	 * Hence:
	 *   a*b + F*p <= (2^256-1)*(2^256-1) + p*(2^256-1)
	 *   a*b + F*p <= 2^256*(2^256 - 2 + p) + 1 - p
	 * Therefore:
	 *   t < 2^256 + p - 2
	 * Since p < 2^256, it follows that:
	 *   t4 can be only 0 or 1
	 *   t - p < 2^256
	 * We can therefore subtract p from t, conditionally on t4, to
	 * get a nonnegative result that fits on 256 bits.
	 */
	z = (unsigned __int128)t0 + t4;
	t0 = (uint64_t)z;
	z = (unsigned __int128)t1 - (t4 << 32) + (z >> 64);
	t1 = (uint64_t)z;
	z = (unsigned __int128)t2 - (z >> 127);
	t2 = (uint64_t)z;
	t3 = t3 - (uint64_t)(z >> 127) - t4 + (t4 << 32);

	d[0] = t0;
	d[1] = t1;
	d[2] = t2;
	d[3] = t3;

#elif BR_UMUL128

	uint64_t x, f, t0, t1, t2, t3, t4;
	uint64_t zl, zh, ffl, ffh;
	unsigned char k, m;
	int i;

	/*
	 * When computing d <- d + a[u]*b, we also add f*p such
	 * that d + a[u]*b + f*p is a multiple of 2^64. Since
	 * p = -1 mod 2^64, we can compute f = d[0] + a[u]*b[0] mod 2^64.
	 */

	/*
	 * Step 1: t <- (a[0]*b + f*p) / 2^64
	 * We have f = a[0]*b[0] mod 2^64. Since p = -1 mod 2^64, this
	 * ensures that (a[0]*b + f*p) is a multiple of 2^64.
	 *
	 * We also have: f*p = f*2^256 - f*2^224 + f*2^192 + f*2^96 - f.
	 */
	x = a[0];

	zl = _umul128(b[0], x, &zh);
	f = zl;
	t0 = zh;

	zl = _umul128(b[1], x, &zh);
	k = _addcarry_u64(0, zl, t0, &zl);
	(void)_addcarry_u64(k, zh, 0, &zh);
	k = _addcarry_u64(0, zl, f << 32, &zl);
	(void)_addcarry_u64(k, zh, 0, &zh);
	t0 = zl;
	t1 = zh;

	zl = _umul128(b[2], x, &zh);
	k = _addcarry_u64(0, zl, t1, &zl);
	(void)_addcarry_u64(k, zh, 0, &zh);
	k = _addcarry_u64(0, zl, f >> 32, &zl);
	(void)_addcarry_u64(k, zh, 0, &zh);
	t1 = zl;
	t2 = zh;

	zl = _umul128(b[3], x, &zh);
	k = _addcarry_u64(0, zl, t2, &zl);
	(void)_addcarry_u64(k, zh, 0, &zh);
	k = _addcarry_u64(0, zl, f, &zl);
	(void)_addcarry_u64(k, zh, 0, &zh);
	t2 = zl;
	t3 = zh;

	t4 = _addcarry_u64(0, t3, f, &t3);
	k = _subborrow_u64(0, t2, f << 32, &t2);
	k = _subborrow_u64(k, t3, f >> 32, &t3);
	(void)_subborrow_u64(k, t4, 0, &t4);

	/*
	 * Steps 2 to 4: t <- (t + a[i]*b + f*p) / 2^64
	 */
	for (i = 1; i < 4; i ++) {
		x = a[i];
		/* f = t0 + x * b[0]; -- computed below */

		/* t <- (t + x*b - f) / 2^64 */
		zl = _umul128(b[0], x, &zh);
		k = _addcarry_u64(0, zl, t0, &f);
		(void)_addcarry_u64(k, zh, 0, &t0);

		zl = _umul128(b[1], x, &zh);
		k = _addcarry_u64(0, zl, t0, &zl);
		(void)_addcarry_u64(k, zh, 0, &zh);
		k = _addcarry_u64(0, zl, t1, &t0);
		(void)_addcarry_u64(k, zh, 0, &t1);

		zl = _umul128(b[2], x, &zh);
		k = _addcarry_u64(0, zl, t1, &zl);
		(void)_addcarry_u64(k, zh, 0, &zh);
		k = _addcarry_u64(0, zl, t2, &t1);
		(void)_addcarry_u64(k, zh, 0, &t2);

		zl = _umul128(b[3], x, &zh);
		k = _addcarry_u64(0, zl, t2, &zl);
		(void)_addcarry_u64(k, zh, 0, &zh);
		k = _addcarry_u64(0, zl, t3, &t2);
		(void)_addcarry_u64(k, zh, 0, &t3);

		t4 = _addcarry_u64(0, t3, t4, &t3);

		/* t <- t + f*2^32, carry in k */
		k = _addcarry_u64(0, t0, f << 32, &t0);
		k = _addcarry_u64(k, t1, f >> 32, &t1);

		/* t <- t + f*2^192 - f*2^160 + f*2^128 */
		m = _subborrow_u64(0, f, f << 32, &ffl);
		(void)_subborrow_u64(m, f, f >> 32, &ffh);
		k = _addcarry_u64(k, t2, ffl, &t2);
		k = _addcarry_u64(k, t3, ffh, &t3);
		(void)_addcarry_u64(k, t4, 0, &t4);
	}

	/*
	 * At that point, we have computed t = (a*b + F*p) / 2^256, where
	 * F is a 256-bit integer whose limbs are the "f" coefficients
	 * in the steps above. We have:
	 *   a <= 2^256-1
	 *   b <= 2^256-1
	 *   F <= 2^256-1
	 * Hence:
	 *   a*b + F*p <= (2^256-1)*(2^256-1) + p*(2^256-1)
	 *   a*b + F*p <= 2^256*(2^256 - 2 + p) + 1 - p
	 * Therefore:
	 *   t < 2^256 + p - 2
	 * Since p < 2^256, it follows that:
	 *   t4 can be only 0 or 1
	 *   t - p < 2^256
	 * We can therefore subtract p from t, conditionally on t4, to
	 * get a nonnegative result that fits on 256 bits.
	 */
	k = _addcarry_u64(0, t0, t4, &t0);
	k = _addcarry_u64(k, t1, -(t4 << 32), &t1);
	k = _addcarry_u64(k, t2, -t4, &t2);
	(void)_addcarry_u64(k, t3, (t4 << 32) - (t4 << 1), &t3);

	d[0] = t0;
	d[1] = t1;
	d[2] = t2;
	d[3] = t3;

#endif
}

/*
 * Montgomery squaring in the field; currently a basic wrapper around
 * multiplication (inline, should be optimized away).
 * TODO: see if some extra speed can be gained here.
 */
static inline void
f256_montysquare(uint64_t *d, const uint64_t *a)
{
	f256_montymul(d, a, a);
}

/*
 * Convert to Montgomery representation.
 */
static void
f256_tomonty(uint64_t *d, const uint64_t *a)
{
	/*
	 * R2 = 2^512 mod p.
	 * If R = 2^256 mod p, then R2 = R^2 mod p; and the Montgomery
	 * multiplication of a by R2 is: a*R2/R = a*R mod p, i.e. the
	 * conversion to Montgomery representation.
	 */
	static const uint64_t R2[] = {
		0x0000000000000003,
		0xFFFFFFFBFFFFFFFF,
		0xFFFFFFFFFFFFFFFE,
		0x00000004FFFFFFFD
	};

	f256_montymul(d, a, R2);
}

/*
 * Convert from Montgomery representation.
 */
static void
f256_frommonty(uint64_t *d, const uint64_t *a)
{
	/*
	 * Montgomery multiplication by 1 is division by 2^256 modulo p.
	 */
	static const uint64_t one[] = { 1, 0, 0, 0 };

	f256_montymul(d, a, one);
}

/*
 * Inversion in the field. If the source value is 0 modulo p, then this
 * returns 0 or p. This function uses Montgomery representation.
 */
static void
f256_invert(uint64_t *d, const uint64_t *a)
{
	/*
	 * We compute a^(p-2) mod p. The exponent pattern (from high to
	 * low) is:
	 *  - 32 bits of value 1
	 *  - 31 bits of value 0
	 *  - 1 bit of value 1
	 *  - 96 bits of value 0
	 *  - 94 bits of value 1
	 *  - 1 bit of value 0
	 *  - 1 bit of value 1
	 * To speed up the square-and-multiply algorithm, we precompute
	 * a^(2^31-1).
	 */

	uint64_t r[4], t[4];
	int i;

	memcpy(t, a, sizeof t);
	for (i = 0; i < 30; i ++) {
		f256_montysquare(t, t);
		f256_montymul(t, t, a);
	}

	memcpy(r, t, sizeof t);
	for (i = 224; i >= 0; i --) {
		f256_montysquare(r, r);
		switch (i) {
		case 0:
		case 2:
		case 192:
		case 224:
			f256_montymul(r, r, a);
			break;
		case 3:
		case 34:
		case 65:
			f256_montymul(r, r, t);
			break;
		}
	}
	memcpy(d, r, sizeof r);
}

/*
 * Finalize reduction.
 * Input value fits on 256 bits. This function subtracts p if and only
 * if the input is greater than or equal to p.
 */
static inline void
f256_final_reduce(uint64_t *a)
{
#if BR_INT128

	uint64_t t0, t1, t2, t3, cc;
	unsigned __int128 z;

	/*
	 * We add 2^224 - 2^192 - 2^96 + 1 to a. If there is no carry,
	 * then a < p; otherwise, the addition result we computed is
	 * the value we must return.
	 */
	z = (unsigned __int128)a[0] + 1;
	t0 = (uint64_t)z;
	z = (unsigned __int128)a[1] + (z >> 64) - ((uint64_t)1 << 32);
	t1 = (uint64_t)z;
	z = (unsigned __int128)a[2] - (z >> 127);
	t2 = (uint64_t)z;
	z = (unsigned __int128)a[3] - (z >> 127) + 0xFFFFFFFF;
	t3 = (uint64_t)z;
	cc = -(uint64_t)(z >> 64);

	a[0] ^= cc & (a[0] ^ t0);
	a[1] ^= cc & (a[1] ^ t1);
	a[2] ^= cc & (a[2] ^ t2);
	a[3] ^= cc & (a[3] ^ t3);

#elif BR_UMUL128

	uint64_t t0, t1, t2, t3, m;
	unsigned char k;

	k = _addcarry_u64(0, a[0], (uint64_t)1, &t0);
	k = _addcarry_u64(k, a[1], -((uint64_t)1 << 32), &t1);
	k = _addcarry_u64(k, a[2], -(uint64_t)1, &t2);
	k = _addcarry_u64(k, a[3], ((uint64_t)1 << 32) - 2, &t3);
	m = -(uint64_t)k;

	a[0] ^= m & (a[0] ^ t0);
	a[1] ^= m & (a[1] ^ t1);
	a[2] ^= m & (a[2] ^ t2);
	a[3] ^= m & (a[3] ^ t3);

#endif
}
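
#if 0
/*
 * Illustrative sketch, not part of the original BearSSL sources: a
 * quick consistency check for f256_invert(), assuming a test harness
 * able to call static functions from this file. For any non-zero x in
 * Montgomery representation, multiplying x by its inverse must yield
 * the Montgomery representation of 1, which is F256_R.
 */
static int
f256_invert_sketch(void)
{
	uint64_t ix[4], t[4], r[4];

	f256_invert(ix, P256_B_MONTY);       /* ix = 1/b (Montgomery) */
	f256_montymul(t, P256_B_MONTY, ix);  /* t = b*(1/b) = 1 (Montgomery) */
	f256_final_reduce(t);
	memcpy(r, F256_R, sizeof r);
	f256_final_reduce(r);
	return memcmp(t, r, sizeof t) == 0;
}
#endif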

/*
 * Points in affine and Jacobian coordinates.
 *
 *  - In affine coordinates, the point-at-infinity cannot be encoded.
 *  - Jacobian coordinates (X,Y,Z) correspond to affine (X/Z^2,Y/Z^3);
 *    if Z = 0 then this is the point-at-infinity.
 */
typedef struct {
	uint64_t x[4];
	uint64_t y[4];
} p256_affine;

typedef struct {
	uint64_t x[4];
	uint64_t y[4];
	uint64_t z[4];
} p256_jacobian;

/*
 * Decode a point. The returned point is in Jacobian coordinates, but
 * with z = 1. If the encoding is invalid, or encodes a point which is
 * not on the curve, or encodes the point at infinity, then this function
 * returns 0. Otherwise, 1 is returned.
 *
 * The buffer is assumed to have length exactly 65 bytes.
 */
static uint32_t
point_decode(p256_jacobian *P, const unsigned char *buf)
{
	uint64_t x[4], y[4], t[4], x3[4], tt;
	uint32_t r;

	/*
	 * Header byte shall be 0x04.
	 */
	r = EQ(buf[0], 0x04);

	/*
	 * Decode X and Y coordinates, and convert them into
	 * Montgomery representation.
	 */
	x[3] = br_dec64be(buf +  1);
	x[2] = br_dec64be(buf +  9);
	x[1] = br_dec64be(buf + 17);
	x[0] = br_dec64be(buf + 25);
	y[3] = br_dec64be(buf + 33);
	y[2] = br_dec64be(buf + 41);
	y[1] = br_dec64be(buf + 49);
	y[0] = br_dec64be(buf + 57);
	f256_tomonty(x, x);
	f256_tomonty(y, y);

	/*
	 * Verify y^2 = x^3 + A*x + B. In curve P-256, A = -3.
	 * Note that the Montgomery representation of 0 is 0. We must
	 * take care to apply the final reduction to make sure we have
	 * 0 and not p.
	 */
	f256_montysquare(t, y);
	f256_montysquare(x3, x);
	f256_montymul(x3, x3, x);
	f256_sub(t, t, x3);
	f256_add(t, t, x);
	f256_add(t, t, x);
	f256_add(t, t, x);
	f256_sub(t, t, P256_B_MONTY);
	f256_final_reduce(t);
	tt = t[0] | t[1] | t[2] | t[3];
	r &= EQ((uint32_t)(tt | (tt >> 32)), 0);

	/*
	 * Return the point in Jacobian coordinates (and Montgomery
	 * representation).
	 */
	memcpy(P->x, x, sizeof x);
	memcpy(P->y, y, sizeof y);
	memcpy(P->z, F256_R, sizeof F256_R);
	return r;
}

/*
 * Final conversion for a point:
 *  - The point is converted back to affine coordinates.
 *  - Final reduction is performed.
 *  - The point is encoded into the provided buffer.
 *
 * If the point is the point-at-infinity, all operations are performed,
 * but the buffer contents are indeterminate, and 0 is returned. Otherwise,
 * the encoded point is written in the buffer, and 1 is returned.
 */
static uint32_t
point_encode(unsigned char *buf, const p256_jacobian *P)
{
	uint64_t t1[4], t2[4], z;

	/* Set t1 = 1/z^2 and t2 = 1/z^3. */
	f256_invert(t2, P->z);
	f256_montysquare(t1, t2);
	f256_montymul(t2, t2, t1);

	/* Compute affine coordinates x (in t1) and y (in t2). */
	f256_montymul(t1, P->x, t1);
	f256_montymul(t2, P->y, t2);

	/* Convert back from Montgomery representation, and finalize
	   reductions. */
	f256_frommonty(t1, t1);
	f256_frommonty(t2, t2);
	f256_final_reduce(t1);
	f256_final_reduce(t2);

	/* Encode. */
	buf[0] = 0x04;
	br_enc64be(buf +  1, t1[3]);
	br_enc64be(buf +  9, t1[2]);
	br_enc64be(buf + 17, t1[1]);
	br_enc64be(buf + 25, t1[0]);
	br_enc64be(buf + 33, t2[3]);
	br_enc64be(buf + 41, t2[2]);
	br_enc64be(buf + 49, t2[1]);
	br_enc64be(buf + 57, t2[0]);

	/* Return success if and only if P->z != 0. */
	z = P->z[0] | P->z[1] | P->z[2] | P->z[3];
	return NEQ((uint32_t)(z | z >> 32), 0);
}

/*
 * Point doubling in Jacobian coordinates: point P is doubled.
 * Note: if the source point is the point-at-infinity, then the result is
 * still the point-at-infinity, which is correct. Moreover, if the three
 * coordinates were zero, then they still are zero in the returned value.
 *
 * (Note: this is true even without the final reduction: if the three
 * coordinates are encoded as four words of value zero each, then the
 * result will also have all-zero coordinate encodings, not the alternate
 * encoding as the integer p.)
 */
static void
p256_double(p256_jacobian *P)
{
	/*
	 * Doubling formulas are:
	 *
	 *   s = 4*x*y^2
	 *   m = 3*(x + z^2)*(x - z^2)
	 *   x' = m^2 - 2*s
	 *   y' = m*(s - x') - 8*y^4
	 *   z' = 2*y*z
	 *
	 * These formulas work for all points, including points of order 2
	 * and points at infinity:
	 *   - If y = 0 then z' = 0. But there is no such point in P-256
	 *     anyway.
	 *   - If z = 0 then z' = 0.
	 */
	uint64_t t1[4], t2[4], t3[4], t4[4];

	/*
	 * Compute z^2 in t1.
	 */
	f256_montysquare(t1, P->z);

	/*
	 * Compute x+z^2 in t2 and x-z^2 in t1.
	 */
	f256_add(t2, P->x, t1);
	f256_sub(t1, P->x, t1);

	/*
	 * Compute 3*(x+z^2)*(x-z^2) in t1.
	 */
	f256_montymul(t3, t1, t2);
	f256_add(t1, t3, t3);
	f256_add(t1, t3, t1);

	/*
	 * Compute 4*x*y^2 (in t2) and 2*y^2 (in t3).
	 */
	f256_montysquare(t3, P->y);
	f256_add(t3, t3, t3);
	f256_montymul(t2, P->x, t3);
	f256_add(t2, t2, t2);

	/*
	 * Compute x' = m^2 - 2*s.
	 */
	f256_montysquare(P->x, t1);
	f256_sub(P->x, P->x, t2);
	f256_sub(P->x, P->x, t2);

	/*
	 * Compute z' = 2*y*z.
	 */
	f256_montymul(t4, P->y, P->z);
	f256_add(P->z, t4, t4);

	/*
	 * Compute y' = m*(s - x') - 8*y^4. Note that we already have
	 * 2*y^2 in t3.
	 */
	f256_sub(t2, t2, P->x);
	f256_montymul(P->y, t1, t2);
	f256_montysquare(t4, t3);
	f256_add(t4, t4, t4);
	f256_sub(P->y, P->y, t4);
}

/*
 * Point addition (Jacobian coordinates): P1 is replaced with P1+P2.
 * This function computes the wrong result in the following cases:
 *
 *   - If P1 == 0 but P2 != 0
 *   - If P1 != 0 but P2 == 0
 *   - If P1 == P2
 *
 * In all three cases, P1 is set to the point at infinity.
 *
 * Returned value is 0 if one of the following occurs:
 *
 *   - P1 and P2 have the same Y coordinate.
 *   - P1 == 0 and P2 == 0.
 *   - The Y coordinate of one of the points is 0 and the other point is
 *     the point at infinity.
 *
 * The third case cannot actually happen with valid points, since a point
 * with Y == 0 is a point of order 2, and there is no point of order 2 on
 * curve P-256.
 *
 * Therefore, assuming that P1 != 0 and P2 != 0 on input, then the caller
 * can apply the following:
 *
 *   - If the result is not the point at infinity, then it is correct.
 *   - Otherwise, if the returned value is 1, then this is a case of
 *     P1+P2 == 0, so the result is indeed the point at infinity.
 *   - Otherwise, P1 == P2, so a "double" operation should have been
 *     performed.
 *
 * Note that you can get a returned value of 0 with a correct result,
 * e.g. if P1 and P2 have the same Y coordinate, but distinct X coordinates.
 */
static uint32_t
p256_add(p256_jacobian *P1, const p256_jacobian *P2)
{
	/*
	 * Addition formulas are:
	 *
	 *   u1 = x1 * z2^2
	 *   u2 = x2 * z1^2
	 *   s1 = y1 * z2^3
	 *   s2 = y2 * z1^3
	 *   h = u2 - u1
	 *   r = s2 - s1
	 *   x3 = r^2 - h^3 - 2 * u1 * h^2
	 *   y3 = r * (u1 * h^2 - x3) - s1 * h^3
	 *   z3 = h * z1 * z2
	 */
	uint64_t t1[4], t2[4], t3[4], t4[4], t5[4], t6[4], t7[4], tt;
	uint32_t ret;

	/*
	 * Compute u1 = x1*z2^2 (in t1) and s1 = y1*z2^3 (in t3).
	 */
	f256_montysquare(t3, P2->z);
	f256_montymul(t1, P1->x, t3);
	f256_montymul(t4, P2->z, t3);
	f256_montymul(t3, P1->y, t4);

	/*
	 * Compute u2 = x2*z1^2 (in t2) and s2 = y2*z1^3 (in t4).
	 */
	f256_montysquare(t4, P1->z);
	f256_montymul(t2, P2->x, t4);
	f256_montymul(t5, P1->z, t4);
	f256_montymul(t4, P2->y, t5);

	/*
	 * Compute h = u2 - u1 (in t2) and r = s2 - s1 (in t4).
	 * We need to test whether r is zero, so we will do an extra
	 * reduction.
	 */
	f256_sub(t2, t2, t1);
	f256_sub(t4, t4, t3);
	f256_final_reduce(t4);
	tt = t4[0] | t4[1] | t4[2] | t4[3];
	ret = (uint32_t)(tt | (tt >> 32));
	ret = (ret | -ret) >> 31;

	/*
	 * Compute u1*h^2 (in t6) and h^3 (in t5).
	 */
	f256_montysquare(t7, t2);
	f256_montymul(t6, t1, t7);
	f256_montymul(t5, t7, t2);

	/*
	 * Compute x3 = r^2 - h^3 - 2*u1*h^2.
	 */
	f256_montysquare(P1->x, t4);
	f256_sub(P1->x, P1->x, t5);
	f256_sub(P1->x, P1->x, t6);
	f256_sub(P1->x, P1->x, t6);

	/*
	 * Compute y3 = r*(u1*h^2 - x3) - s1*h^3.
	 */
	f256_sub(t6, t6, P1->x);
	f256_montymul(P1->y, t4, t6);
	f256_montymul(t1, t5, t3);
	f256_sub(P1->y, P1->y, t1);

	/*
	 * Compute z3 = h*z1*z2.
	 */
	f256_montymul(t1, P1->z, P2->z);
	f256_montymul(P1->z, t1, t2);

	return ret;
}

/*
 * Point addition (mixed coordinates): P1 is replaced with P1+P2.
 * This is a specialised function for the case when P2 is a non-zero point
 * in affine coordinates.
 *
 * This function computes the wrong result in the following cases:
 *
 *   - If P1 == 0
 *   - If P1 == P2
 *
 * In both cases, P1 is set to the point at infinity.
 *
 * Returned value is 0 if one of the following occurs:
 *
 *   - P1 and P2 have the same Y (affine) coordinate.
 *   - The Y coordinate of P2 is 0 and P1 is the point at infinity.
 *
 * The second case cannot actually happen with valid points, since a point
 * with Y == 0 is a point of order 2, and there is no point of order 2 on
 * curve P-256.
 *
 * Therefore, assuming that P1 != 0 on input, then the caller
 * can apply the following:
 *
 *   - If the result is not the point at infinity, then it is correct.
 *   - Otherwise, if the returned value is 1, then this is a case of
 *     P1+P2 == 0, so the result is indeed the point at infinity.
 *   - Otherwise, P1 == P2, so a "double" operation should have been
 *     performed.
 *
 * Again, a value of 0 may be returned in some cases where the addition
 * result is correct.
 */
static uint32_t
p256_add_mixed(p256_jacobian *P1, const p256_affine *P2)
{
	/*
	 * Addition formulas are:
	 *
	 *   u1 = x1
	 *   u2 = x2 * z1^2
	 *   s1 = y1
	 *   s2 = y2 * z1^3
	 *   h = u2 - u1
	 *   r = s2 - s1
	 *   x3 = r^2 - h^3 - 2 * u1 * h^2
	 *   y3 = r * (u1 * h^2 - x3) - s1 * h^3
	 *   z3 = h * z1
	 */
	uint64_t t1[4], t2[4], t3[4], t4[4], t5[4], t6[4], t7[4], tt;
	uint32_t ret;

	/*
	 * Compute u1 = x1 (in t1) and s1 = y1 (in t3).
	 */
	memcpy(t1, P1->x, sizeof t1);
	memcpy(t3, P1->y, sizeof t3);

	/*
	 * Compute u2 = x2*z1^2 (in t2) and s2 = y2*z1^3 (in t4).
	 */
	f256_montysquare(t4, P1->z);
	f256_montymul(t2, P2->x, t4);
	f256_montymul(t5, P1->z, t4);
	f256_montymul(t4, P2->y, t5);

	/*
	 * Compute h = u2 - u1 (in t2) and r = s2 - s1 (in t4).
	 * We need to test whether r is zero, so we will do an extra
	 * reduction.
	 */
	f256_sub(t2, t2, t1);
	f256_sub(t4, t4, t3);
	f256_final_reduce(t4);
	tt = t4[0] | t4[1] | t4[2] | t4[3];
	ret = (uint32_t)(tt | (tt >> 32));
	ret = (ret | -ret) >> 31;

	/*
	 * Compute u1*h^2 (in t6) and h^3 (in t5).
	 */
	f256_montysquare(t7, t2);
	f256_montymul(t6, t1, t7);
	f256_montymul(t5, t7, t2);

	/*
	 * Compute x3 = r^2 - h^3 - 2*u1*h^2.
	 */
	f256_montysquare(P1->x, t4);
	f256_sub(P1->x, P1->x, t5);
	f256_sub(P1->x, P1->x, t6);
	f256_sub(P1->x, P1->x, t6);

	/*
	 * Compute y3 = r*(u1*h^2 - x3) - s1*h^3.
	 */
	f256_sub(t6, t6, P1->x);
	f256_montymul(P1->y, t4, t6);
	f256_montymul(t1, t5, t3);
	f256_sub(P1->y, P1->y, t1);

	/*
	 * Compute z3 = h*z1.
	 */
	f256_montymul(P1->z, P1->z, t2);

	return ret;
}

#if 0
/* unused */
/*
 * Point addition (mixed coordinates, complete): P1 is replaced with P1+P2.
 * This is a specialised function for the case when P2 is a non-zero point
 * in affine coordinates.
 *
 * This function returns the correct result in all cases.
 */
static void
p256_add_complete_mixed(p256_jacobian *P1, const p256_affine *P2)
{
	/*
	 * Addition formulas, in the general case, are:
	 *
	 *   u1 = x1
	 *   u2 = x2 * z1^2
	 *   s1 = y1
	 *   s2 = y2 * z1^3
	 *   h = u2 - u1
	 *   r = s2 - s1
	 *   x3 = r^2 - h^3 - 2 * u1 * h^2
	 *   y3 = r * (u1 * h^2 - x3) - s1 * h^3
	 *   z3 = h * z1
	 *
	 * These formulas mishandle the two following cases:
	 *
	 *   - If P1 is the point-at-infinity (z1 = 0), then z3 is
	 *     incorrectly set to 0.
	 *
	 *   - If P1 = P2, then u1 = u2 and s1 = s2, and x3, y3 and z3
	 *     are all set to 0.
	 *
	 * However, if P1 + P2 = 0, then u1 = u2 but s1 != s2, and then
	 * we correctly get z3 = 0 (the point-at-infinity).
	 *
	 * To fix the case P1 = 0, we perform at the end a copy of P2
	 * over P1, conditional to z1 = 0.
	 *
	 * For P1 = P2: in that case, both h and r are set to 0, and
	 * we get x3, y3 and z3 equal to 0. We can test for that
	 * occurrence to make a mask which will be all-one if P1 = P2,
	 * or all-zero otherwise; then we can compute the double of P2
	 * and add it, combined with the mask, to (x3,y3,z3).
	 *
	 * Using the doubling formulas in p256_double() on (x2,y2),
	 * simplifying since P2 is affine (i.e. z2 = 1, implicitly),
	 * we get:
	 *   s = 4*x2*y2^2
	 *   m = 3*(x2 + 1)*(x2 - 1)
	 *   x' = m^2 - 2*s
	 *   y' = m*(s - x') - 8*y2^4
	 *   z' = 2*y2
	 * which requires only 6 multiplications. Added to the 11
	 * multiplications of the normal mixed addition in Jacobian
	 * coordinates, we get a cost of 17 multiplications in total.
	 */
	uint64_t t1[4], t2[4], t3[4], t4[4], t5[4], t6[4], t7[4], tt, zz;
	int i;

	/*
	 * Set zz to -1 if P1 is the point at infinity, 0 otherwise.
	 */
	zz = P1->z[0] | P1->z[1] | P1->z[2] | P1->z[3];
	zz = ((zz | -zz) >> 63) - (uint64_t)1;

	/*
	 * Compute u1 = x1 (in t1) and s1 = y1 (in t3).
	 */
	memcpy(t1, P1->x, sizeof t1);
	memcpy(t3, P1->y, sizeof t3);

	/*
	 * Compute u2 = x2*z1^2 (in t2) and s2 = y2*z1^3 (in t4).
	 */
	f256_montysquare(t4, P1->z);
	f256_montymul(t2, P2->x, t4);
	f256_montymul(t5, P1->z, t4);
	f256_montymul(t4, P2->y, t5);

	/*
	 * Compute h = u2 - u1 (in t2) and r = s2 - s1 (in t4).
	 */
	f256_sub(t2, t2, t1);
	f256_sub(t4, t4, t3);

	/*
	 * If both h = 0 and r = 0, then P1 = P2, and we want to set
	 * the mask tt to -1; otherwise, the mask will be 0.
	 */
	f256_final_reduce(t2);
	f256_final_reduce(t4);
	tt = t2[0] | t2[1] | t2[2] | t2[3] | t4[0] | t4[1] | t4[2] | t4[3];
	tt = ((tt | -tt) >> 63) - (uint64_t)1;

	/*
	 * Compute u1*h^2 (in t6) and h^3 (in t5).
	 */
	f256_montysquare(t7, t2);
	f256_montymul(t6, t1, t7);
	f256_montymul(t5, t7, t2);

	/*
	 * Compute x3 = r^2 - h^3 - 2*u1*h^2.
	 */
	f256_montysquare(P1->x, t4);
	f256_sub(P1->x, P1->x, t5);
	f256_sub(P1->x, P1->x, t6);
	f256_sub(P1->x, P1->x, t6);

	/*
	 * Compute y3 = r*(u1*h^2 - x3) - s1*h^3.
	 */
	f256_sub(t6, t6, P1->x);
	f256_montymul(P1->y, t4, t6);
	f256_montymul(t1, t5, t3);
	f256_sub(P1->y, P1->y, t1);

	/*
	 * Compute z3 = h*z1.
	 */
	f256_montymul(P1->z, P1->z, t2);

	/*
	 * The "double" result, in case P1 = P2.
	 */

	/*
	 * Compute z' = 2*y2 (in t1).
	 */
	f256_add(t1, P2->y, P2->y);

	/*
	 * Compute 2*(y2^2) (in t2) and s = 4*x2*(y2^2) (in t3).
	 */
	f256_montysquare(t2, P2->y);
	f256_add(t2, t2, t2);
	f256_add(t3, t2, t2);
	f256_montymul(t3, P2->x, t3);

	/*
	 * Compute m = 3*(x2^2 - 1) (in t4).
	 */
	f256_montysquare(t4, P2->x);
	f256_sub(t4, t4, F256_R);
	f256_add(t5, t4, t4);
	f256_add(t4, t4, t5);

	/*
	 * Compute x' = m^2 - 2*s (in t5).
	 */
	f256_montysquare(t5, t4);
	f256_sub(t5, t5, t3);
	f256_sub(t5, t5, t3);

	/*
	 * Compute y' = m*(s - x') - 8*y2^4 (in t6).
	 */
	f256_sub(t6, t3, t5);
	f256_montymul(t6, t6, t4);
	f256_montysquare(t7, t2);
	f256_sub(t6, t6, t7);
	f256_sub(t6, t6, t7);

	/*
	 * We now have the alternate (doubling) coordinates in (t5,t6,t1).
	 * We combine them with (x3,y3,z3).
	 */
	for (i = 0; i < 4; i ++) {
		P1->x[i] |= tt & t5[i];
		P1->y[i] |= tt & t6[i];
		P1->z[i] |= tt & t1[i];
	}

	/*
	 * If P1 = 0, then we get z3 = 0 (which is invalid); if z1 is 0,
	 * then we want to replace the result with a copy of P2. The
	 * test on z1 was done at the start, in the zz mask.
	 */
	for (i = 0; i < 4; i ++) {
		P1->x[i] ^= zz & (P1->x[i] ^ P2->x[i]);
		P1->y[i] ^= zz & (P1->y[i] ^ P2->y[i]);
		P1->z[i] ^= zz & (P1->z[i] ^ F256_R[i]);
	}
}
#endif

/*
 * Inner function for computing a point multiplication. A window is
 * provided, with points 1*P to 15*P in affine coordinates.
 *
 * Assumptions:
 *  - All provided points are valid points on the curve.
 *  - Multiplier is non-zero, and smaller than the curve order.
 *  - Everything is in Montgomery representation.
 */
static void
point_mul_inner(p256_jacobian *R, const p256_affine *W,
	const unsigned char *k, size_t klen)
{
	p256_jacobian Q;
	uint32_t qz;

	memset(&Q, 0, sizeof Q);
	qz = 1;
	while (klen -- > 0) {
		int i;
		unsigned bk;

		bk = *k ++;
		for (i = 0; i < 2; i ++) {
			uint32_t bits;
			uint32_t bnz;
			p256_affine T;
			p256_jacobian U;
			uint32_t n;
			int j;
			uint64_t m;

			p256_double(&Q);
			p256_double(&Q);
			p256_double(&Q);
			p256_double(&Q);
			bits = (bk >> 4) & 0x0F;
			bnz = NEQ(bits, 0);

			/*
			 * Lookup point in window. If the bits are 0,
			 * we get something invalid, which is not a
			 * problem because we will use it only if the
			 * bits are non-zero.
			 */
			memset(&T, 0, sizeof T);
			for (n = 0; n < 15; n ++) {
				m = -(uint64_t)EQ(bits, n + 1);
				T.x[0] |= m & W[n].x[0];
				T.x[1] |= m & W[n].x[1];
				T.x[2] |= m & W[n].x[2];
				T.x[3] |= m & W[n].x[3];
				T.y[0] |= m & W[n].y[0];
				T.y[1] |= m & W[n].y[1];
				T.y[2] |= m & W[n].y[2];
				T.y[3] |= m & W[n].y[3];
			}

			U = Q;
			p256_add_mixed(&U, &T);

			/*
			 * If qz is still 1, then Q was all-zeros, and this
			 * is conserved through p256_double().
			 */
			m = -(uint64_t)(bnz & qz);
			for (j = 0; j < 4; j ++) {
				Q.x[j] |= m & T.x[j];
				Q.y[j] |= m & T.y[j];
				Q.z[j] |= m & F256_R[j];
			}
			CCOPY(bnz & ~qz, &Q, &U, sizeof Q);
			qz &= ~bnz;
			bk <<= 4;
		}
	}
	*R = Q;
}
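
#if 0
/*
 * Illustrative sketch, not part of the original BearSSL sources: the
 * constant-time window lookup used above, shown on plain 64-bit words.
 * Every table entry is read, and the selected one is accumulated with
 * a mask that is all-one only for the matching index, so the memory
 * access pattern does not depend on the secret index.
 */
static uint64_t
ct_window_lookup_sketch(const uint64_t *tab, size_t num, uint32_t idx)
{
	uint64_t r;
	size_t n;

	r = 0;
	for (n = 0; n < num; n ++) {
		uint64_t m;

		m = -(uint64_t)EQ(idx, (uint32_t)n);
		r |= m & tab[n];
	}
	return r;
}
#endif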

/*
 * Convert a window from Jacobian to affine coordinates. A single
 * field inversion is used. This function works for windows up to
 * 32 elements.
 *
 * The destination array (aff[]) and the source array (jac[]) may
 * overlap, provided that the start of aff[] is not after the start of
 * jac[]. Even if the arrays do _not_ overlap, the source array is
 * modified.
 */
static void
window_to_affine(p256_affine *aff, p256_jacobian *jac, int num)
{
	/*
	 * Convert the window points to affine coordinates. We use the
	 * following trick to mutualize the inversion computation: if
	 * we have z1, z2, z3, and z4, and want to invert all of them,
	 * we compute u = 1/(z1*z2*z3*z4), and then we have:
	 *   1/z1 = u*z2*z3*z4
	 *   1/z2 = u*z1*z3*z4
	 *   1/z3 = u*z1*z2*z4
	 *   1/z4 = u*z1*z2*z3
	 *
	 * The partial products are computed recursively:
	 *
	 *  - on input (z_1,z_2), return (z_2,z_1) and z_1*z_2
	 *  - on input (z_1,z_2,... z_n):
	 *     recurse on (z_1,z_2,... z_(n/2)) -> r1 and m1
	 *     recurse on (z_(n/2+1),z_(n/2+2)... z_n) -> r2 and m2
	 *     multiply elements of r1 by m2 -> s1
	 *     multiply elements of r2 by m1 -> s2
	 *     return r1||r2 and m1*m2
	 *
	 * In the example below, we suppose that we have 14 elements.
	 * Let z1, z2,... zE be the 14 values to invert (index noted in
	 * hexadecimal, starting at 1).
	 *
	 *  - Depth 1:
	 *     swap(z1, z2); z12 = z1*z2
	 *     swap(z3, z4); z34 = z3*z4
	 *     swap(z5, z6); z56 = z5*z6
	 *     swap(z7, z8); z78 = z7*z8
	 *     swap(z9, zA); z9A = z9*zA
	 *     swap(zB, zC); zBC = zB*zC
	 *     swap(zD, zE); zDE = zD*zE
	 *
	 *  - Depth 2:
	 *     z1 <- z1*z34, z2 <- z2*z34, z3 <- z3*z12, z4 <- z4*z12
	 *     z1234 = z12*z34
	 *     z5 <- z5*z78, z6 <- z6*z78, z7 <- z7*z56, z8 <- z8*z56
	 *     z5678 = z56*z78
	 *     z9 <- z9*zBC, zA <- zA*zBC, zB <- zB*z9A, zC <- zC*z9A
	 *     z9ABC = z9A*zBC
	 *
	 *  - Depth 3:
	 *     z1 <- z1*z5678, z2 <- z2*z5678, z3 <- z3*z5678, z4 <- z4*z5678
	 *     z5 <- z5*z1234, z6 <- z6*z1234, z7 <- z7*z1234, z8 <- z8*z1234
	 *     z12345678 = z1234*z5678
	 *     z9 <- z9*zDE, zA <- zA*zDE, zB <- zB*zDE, zC <- zC*zDE
	 *     zD <- zD*z9ABC, zE <- zE*z9ABC
	 *     z9ABCDE = z9ABC*zDE
	 *
	 *  - Depth 4:
	 *     multiply z1..z8 by z9ABCDE
	 *     multiply z9..zE by z12345678
	 *     final z = z12345678*z9ABCDE
	 */

	uint64_t z[16][4];
	int i, k, s;
#define zt   (z[15])
#define zu   (z[14])
#define zv   (z[13])

	/*
	 * First recursion step (pairwise swapping and multiplication).
	 * If there is an odd number of elements, then we "invent" an
	 * extra one with coordinate Z = 1 (in Montgomery representation).
	 */
	for (i = 0; (i + 1) < num; i += 2) {
		memcpy(zt, jac[i].z, sizeof zt);
		memcpy(jac[i].z, jac[i + 1].z, sizeof zt);
		memcpy(jac[i + 1].z, zt, sizeof zt);
		f256_montymul(z[i >> 1], jac[i].z, jac[i + 1].z);
	}
	if ((num & 1) != 0) {
		memcpy(z[num >> 1], jac[num - 1].z, sizeof zt);
		memcpy(jac[num - 1].z, F256_R, sizeof F256_R);
	}

	/*
	 * Perform further recursion steps. At the entry of each step,
	 * the process has been done for groups of 's' points. The
	 * integer k is the log2 of s.
	 */
	for (k = 1, s = 2; s < num; k ++, s <<= 1) {
		int n;

		for (i = 0; i < num; i ++) {
			f256_montymul(jac[i].z, jac[i].z, z[(i >> k) ^ 1]);
		}
		n = (num + s - 1) >> k;
		for (i = 0; i < (n >> 1); i ++) {
			f256_montymul(z[i], z[i << 1], z[(i << 1) + 1]);
		}
		if ((n & 1) != 0) {
			memmove(z[n >> 1], z[n], sizeof zt);
		}
	}

	/*
	 * Invert the final result, and convert all points.
	 */
	f256_invert(zt, z[0]);
	for (i = 0; i < num; i ++) {
		f256_montymul(zv, jac[i].z, zt);
		f256_montysquare(zu, zv);
		f256_montymul(zv, zv, zu);
		f256_montymul(aff[i].x, jac[i].x, zu);
		f256_montymul(aff[i].y, jac[i].y, zv);
	}
}

/*
 * Multiply the provided point by an integer.
 * Assumptions:
 *  - Source point is a valid curve point.
 *  - Source point is not the point-at-infinity.
 *  - Integer is not 0, and is lower than the curve order.
 * If these conditions are not met, then the result is indeterminate
 * (but the process is still constant-time).
 */
static void
p256_mul(p256_jacobian *P, const unsigned char *k, size_t klen)
{
	union {
		p256_affine aff[15];
		p256_jacobian jac[15];
	} window;
	int i;

	/*
	 * Compute window, in Jacobian coordinates.
	 */
	window.jac[0] = *P;
	for (i = 2; i < 16; i ++) {
		window.jac[i - 1] = window.jac[(i >> 1) - 1];
		if ((i & 1) == 0) {
			p256_double(&window.jac[i - 1]);
		} else {
			p256_add(&window.jac[i - 1], &window.jac[i >> 1]);
		}
	}

	/*
	 * Convert the window points to affine coordinates. Point
	 * window[0] is the source point, already in affine coordinates.
	 */
	window_to_affine(window.aff, window.jac, 15);

	/*
	 * Perform point multiplication.
	 */
	point_mul_inner(P, window.aff, k, klen);
}

/*
 * Precomputed window for the conventional generator: P256_Gwin[n]
 * contains (n+1)*G (affine coordinates, in Montgomery representation).
 */
static const p256_affine P256_Gwin[] = {
	{
		{ 0x79E730D418A9143C, 0x75BA95FC5FEDB601,
		  0x79FB732B77622510, 0x18905F76A53755C6 },
		{ 0xDDF25357CE95560A, 0x8B4AB8E4BA19E45C,
		  0xD2E88688DD21F325, 0x8571FF1825885D85 }
	},
	{
		{ 0x850046D410DDD64D, 0xAA6AE3C1A433827D,
		  0x732205038D1490D9, 0xF6BB32E43DCF3A3B },
		{ 0x2F3648D361BEE1A5, 0x152CD7CBEB236FF8,
		  0x19A8FB0E92042DBE, 0x78C577510A5B8A3B }
	},
	{
		{ 0xFFAC3F904EEBC127, 0xB027F84A087D81FB,
		  0x66AD77DD87CBBC98, 0x26936A3FB6FF747E },
		{ 0xB04C5C1FC983A7EB, 0x583E47AD0861FE1A,
		  0x788208311A2EE98E, 0xD5F06A29E587CC07 }
	},
	{
		{ 0x74B0B50D46918DCC, 0x4650A6EDC623C173,
		  0x0CDAACACE8100AF2, 0x577362F541B0176B },
		{ 0x2D96F24CE4CBABA6, 0x17628471FAD6F447,
		  0x6B6C36DEE5DDD22E, 0x84B14C394C5AB863 }
	},
	{
		{ 0xBE1B8AAEC45C61F5, 0x90EC649A94B9537D,
		  0x941CB5AAD076C20C, 0xC9079605890523C8 },
		{ 0xEB309B4AE7BA4F10, 0x73C568EFE5EB882B,
		  0x3540A9877E7A1F68, 0x73A076BB2DD1E916 }
	},
	{
		{ 0x403947373E77664A, 0x55AE744F346CEE3E,
		  0xD50A961A5B17A3AD, 0x13074B5954213673 },
		{ 0x93D36220D377E44B, 0x299C2B53ADFF14B5,
		  0xF424D44CEF639F11, 0xA4C9916D4A07F75F }
	},
	{
		{ 0x0746354EA0173B4F, 0x2BD20213D23C00F7,
		  0xF43EAAB50C23BB08, 0x13BA5119C3123E03 },
		{ 0x2847D0303F5B9D4D, 0x6742F2F25DA67BDD,
		  0xEF933BDC77C94195, 0xEAEDD9156E240867 }
	},
	{
		{ 0x27F14CD19499A78F, 0x462AB5C56F9B3455,
		  0x8F90F02AF02CFC6B, 0xB763891EB265230D },
		{ 0xF59DA3A9532D4977, 0x21E3327DCF9EBA15,
		  0x123C7B84BE60BBF0, 0x56EC12F27706DF76 }
	},
	{
		{ 0x75C96E8F264E20E8, 0xABE6BFED59A7A841,
		  0x2CC09C0444C8EB00, 0xE05B3080F0C4E16B },
		{ 0x1EB7777AA45F3314, 0x56AF7BEDCE5D45E3,
		  0x2B6E019A88B12F1A, 0x086659CDFD835F9B }
	},
	{
		{ 0x2C18DBD19DC21EC8, 0x98F9868A0FCF8139,
		  0x737D2CD648250B49, 0xCC61C94724B3428F },
		{ 0x0C2B407880DD9E76, 0xC43A8991383FBE08,
		  0x5F7D2D65779BE5D2, 0x78719A54EB3B4AB5 }
	},
	{
		{ 0xEA7D260A6245E404, 0x9DE407956E7FDFE0,
		  0x1FF3A4158DAC1AB5, 0x3E7090F1649C9073 },
		{ 0x1A7685612B944E88, 0x250F939EE57F61C8,
		  0x0C0DAA891EAD643D, 0x68930023E125B88E }
	},
	{
		{ 0x04B71AA7D2697768, 0xABDEDEF5CA345A33,
		  0x2409D29DEE37385E, 0x4EE1DF77CB83E156 },
		{ 0x0CAC12D91CBB5B43, 0x170ED2F6CA895637,
		  0x28228CFA8ADE6D66, 0x7FF57C9553238ACA }
	},
	{
		{ 0xCCC425634B2ED709, 0x0E356769856FD30D,
		  0xBCBCD43F559E9811, 0x738477AC5395B759 },
		{ 0x35752B90C00EE17F, 0x68748390742ED2E3,
		  0x7CD06422BD1F5BC1, 0xFBC08769C9E7B797 }
	},
	{
		{ 0xA242A35BB0CF664A, 0x126E48F77F9707E3,
		  0x1717BF54C6832660, 0xFAAE7332FD12C72E },
		{ 0x27B52DB7995D586B, 0xBE29569E832237C2,
		  0xE8E4193E2A65E7DB, 0x152706DC2EAA1BBB }
	},
	{
		{ 0x72BCD8B7BC60055B, 0x03CC23EE56E27E4B,
		  0xEE337424E4819370, 0xE2AA0E430AD3DA09 },
		{ 0x40B8524F6383C45D, 0xD766355442A41B25,
		  0x64EFA6DE778A4797, 0x2042170A7079ADF4 }
	}
};

/*
 * Multiply the conventional generator of the curve by the provided
 * integer. Return is written in *P.
 *
 * Assumptions:
 *  - Integer is not 0, and is lower than the curve order.
 * If this condition is not met, then the result is indeterminate
 * (but the process is still constant-time).
 */
static void
p256_mulgen(p256_jacobian *P, const unsigned char *k, size_t klen)
{
	point_mul_inner(P, P256_Gwin, k, klen);
}

/*
 * Return 1 if all of the following hold:
 *  - klen <= 32
 *  - k != 0
 *  - k is lower than the curve order
 * Otherwise, return 0.
 *
 * Constant-time behaviour: only klen may be observable.
 */
static uint32_t
check_scalar(const unsigned char *k, size_t klen)
{
	uint32_t z;
	int32_t c;
	size_t u;

	if (klen > 32) {
		return 0;
	}
	z = 0;
	for (u = 0; u < klen; u ++) {
		z |= k[u];
	}
	if (klen == 32) {
		c = 0;
		for (u = 0; u < klen; u ++) {
			c |= -(int32_t)EQ0(c) & CMP(k[u], P256_N[u]);
		}
	} else {
		c = -1;
	}
	return NEQ(z, 0) & LT0(c);
}

static uint32_t
api_mul(unsigned char *G, size_t Glen,
	const unsigned char *k, size_t klen, int curve)
{
	uint32_t r;
	p256_jacobian P;

	(void)curve;
	if (Glen != 65) {
		return 0;
	}
	r = check_scalar(k, klen);
	r &= point_decode(&P, G);
	p256_mul(&P, k, klen);
	r &= point_encode(G, &P);
	return r;
}

static size_t
api_mulgen(unsigned char *R,
	const unsigned char *k, size_t klen, int curve)
{
	p256_jacobian P;

	(void)curve;
	p256_mulgen(&P, k, klen);
	point_encode(R, &P);
	return 65;
}

static uint32_t
api_muladd(unsigned char *A, const unsigned char *B, size_t len,
	const unsigned char *x, size_t xlen,
	const unsigned char *y, size_t ylen, int curve)
{
	/*
	 * We might want to use Shamir's trick here: make a composite
	 * window of u*P+v*Q points, to merge the two doubling-ladders
	 * into one. This, however, has some complications:
	 *
	 *  - During the computation, we may hit the point-at-infinity.
	 *    Thus, we would need p256_add_complete_mixed() (complete
	 *    formulas for point addition), with a higher cost (17 muls
	 *    instead of 11).
	 *
	 *  - A 4-bit window would be too large, since it would involve
	 *    16*16-1 = 255 points. For the same window size as in the
	 *    p256_mul() case, we would need to reduce the window size
	 *    to 2 bits, and thus perform twice as many non-doubling
	 *    point additions.
	 *
	 *  - The window may itself contain the point-at-infinity, and
	 *    thus cannot in all generality be made of affine points.
	 *    Instead, we would need to make it a window of points in
	 *    Jacobian coordinates. Even p256_add_complete_mixed() would
	 *    be inappropriate.
	 *
	 * For these reasons, the code below performs two separate
	 * point multiplications, then computes the final point addition
	 * (which is both a "normal" addition, and a doubling, to handle
	 * all cases).
	 */

	p256_jacobian P, Q;
	uint32_t r, t, s;
	uint64_t z;

	(void)curve;
	if (len != 65) {
		return 0;
	}
	r = point_decode(&P, A);
	p256_mul(&P, x, xlen);
	if (B == NULL) {
		p256_mulgen(&Q, y, ylen);
	} else {
		r &= point_decode(&Q, B);
		p256_mul(&Q, y, ylen);
	}

	/*
	 * The final addition may fail in case both points are equal.
	 */
	t = p256_add(&P, &Q);
	f256_final_reduce(P.z);
	z = P.z[0] | P.z[1] | P.z[2] | P.z[3];
	s = EQ((uint32_t)(z | (z >> 32)), 0);
	p256_double(&Q);

	/*
	 * If s is 1 then either P+Q = 0 (t = 1) or P = Q (t = 0). So we
	 * have the following:
	 *
	 *   s = 0, t = 0   return P (normal addition)
	 *   s = 0, t = 1   return P (normal addition)
	 *   s = 1, t = 0   return Q (a 'double' case)
	 *   s = 1, t = 1   report an error (P+Q = 0)
	 */
	CCOPY(s & ~t, &P, &Q, sizeof Q);
	point_encode(A, &P);
	r &= ~(s & t);
	return r;
}

/* see bearssl_ec.h */
const br_ec_impl br_ec_p256_m64 = {
	(uint32_t)0x00800000,
	&api_generator,
	&api_order,
	&api_xoff,
	&api_mul,
	&api_mulgen,
	&api_muladd
};

/* see bearssl_ec.h */
const br_ec_impl *
br_ec_p256_m64_get(void)
{
	return &br_ec_p256_m64;
}

#else

/* see bearssl_ec.h */
const br_ec_impl *
br_ec_p256_m64_get(void)
{
	return 0;
}

#endif
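
#if 0
/*
 * Illustrative sketch, not part of the original BearSSL sources: how a
 * caller might use this implementation through the generic br_ec_impl
 * interface declared in bearssl_ec.h. The scalar below is an arbitrary
 * example; any non-zero value lower than the curve order is acceptable.
 */
#include <stdio.h>

static void
example_p256_mulgen(void)
{
	const br_ec_impl *ec;
	unsigned char P[65];
	static const unsigned char k[32] = {
		0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
		0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10,
		0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18,
		0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F, 0x20
	};
	size_t len;

	ec = br_ec_p256_m64_get();
	if (ec == NULL) {
		/* No 64-bit multiply intrinsics on this platform. */
		return;
	}
	len = ec->mulgen(P, k, sizeof k, BR_EC_secp256r1);
	printf("encoded generator multiple: %zu bytes, header 0x%02X\n",
		len, P[0]);
}
#endif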