#ifndef SECP256K1_SCALAR_REPR_IMPL_H
#define SECP256K1_SCALAR_REPR_IMPL_H

/* Limbs of the secp256k1 order. */
#define SECP256K1_N_0 ((uint64_t)0xBFD25E8CD0364141ULL)
#define SECP256K1_N_1 ((uint64_t)0xBAAEDCE6AF48A03BULL)
#define SECP256K1_N_2 ((uint64_t)0xFFFFFFFFFFFFFFFEULL)
#define SECP256K1_N_3 ((uint64_t)0xFFFFFFFFFFFFFFFFULL)

/* Limbs of 2^256 minus the secp256k1 order. */
#define SECP256K1_N_C_0 (~SECP256K1_N_0 + 1)
#define SECP256K1_N_C_1 (~SECP256K1_N_1)
#define SECP256K1_N_C_2 (1)

/* Limbs of half the secp256k1 order. */
#define SECP256K1_N_H_0 ((uint64_t)0xDFE92F46681B20A0ULL)
#define SECP256K1_N_H_1 ((uint64_t)0x5D576E7357A4501DULL)
#define SECP256K1_N_H_2 ((uint64_t)0xFFFFFFFFFFFFFFFFULL)
#define SECP256K1_N_H_3 ((uint64_t)0x7FFFFFFFFFFFFFFFULL)

SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits(const secp256k1_scalar *a, unsigned int offset, unsigned int count) {
    /* All requested bits must lie within a single 64-bit limb. */
    return (a->d[offset >> 6] >> (offset & 0x3F)) & ((((uint64_t)1) << count) - 1);
}

SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits_var(const secp256k1_scalar *a, unsigned int offset, unsigned int count) {
    if ((offset + count - 1) >> 6 == offset >> 6) {
        return secp256k1_scalar_get_bits(a, offset, count);
    } else {
        /* The requested bits straddle two limbs. */
        return ((a->d[offset >> 6] >> (offset & 0x3F)) | (a->d[(offset >> 6) + 1] << (64 - (offset & 0x3F)))) & ((((uint64_t)1) << count) - 1);
    }
}

SECP256K1_INLINE static int secp256k1_scalar_reduce(secp256k1_scalar *r, unsigned int overflow) {
    uint128_t t;
    /* Conditionally add 2^256 - n (the order complement) to fold r back below the order. */
    t = (uint128_t)r->d[0] + overflow * SECP256K1_N_C_0;
    r->d[0] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
    t += (uint128_t)r->d[1] + overflow * SECP256K1_N_C_1;
    r->d[1] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
    t += (uint128_t)r->d[2] + overflow * SECP256K1_N_C_2;
    r->d[2] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
    t += (uint64_t)r->d[3];
    r->d[3] = t & 0xFFFFFFFFFFFFFFFFULL;
    return overflow;
}

static int secp256k1_scalar_add(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) {
    int overflow;
    uint128_t t = (uint128_t)a->d[0] + b->d[0];
    r->d[0] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
    t += (uint128_t)a->d[1] + b->d[1];
    r->d[1] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
    t += (uint128_t)a->d[2] + b->d[2];
    r->d[2] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
    t += (uint128_t)a->d[3] + b->d[3];
    r->d[3] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
    overflow = t + secp256k1_scalar_check_overflow(r);
    secp256k1_scalar_reduce(r, overflow);
    return overflow;
}

static void secp256k1_scalar_cadd_bit(secp256k1_scalar *r, unsigned int bit, int flag) {
    uint128_t t;
    bit += ((uint32_t) flag - 1) & 0x100;  /* forcing (bit >> 6) > 3 makes this a noop */
    t = (uint128_t)r->d[0] + (((uint64_t)((bit >> 6) == 0)) << (bit & 0x3F));
    r->d[0] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
    t += (uint128_t)r->d[1] + (((uint64_t)((bit >> 6) == 1)) << (bit & 0x3F));
    r->d[1] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
    t += (uint128_t)r->d[2] + (((uint64_t)((bit >> 6) == 2)) << (bit & 0x3F));
    r->d[2] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
    t += (uint128_t)r->d[3] + (((uint64_t)((bit >> 6) == 3)) << (bit & 0x3F));
    r->d[3] = t & 0xFFFFFFFFFFFFFFFFULL;
}

static void secp256k1_scalar_set_b32(secp256k1_scalar *r, const unsigned char *b32, int *overflow) {
    int over;
    r->d[0] = (uint64_t)b32[31] | (uint64_t)b32[30] << 8 | (uint64_t)b32[29] << 16 | (uint64_t)b32[28] << 24 | (uint64_t)b32[27] << 32 | (uint64_t)b32[26] << 40 | (uint64_t)b32[25] << 48 | (uint64_t)b32[24] << 56;
    r->d[1] = (uint64_t)b32[23] | (uint64_t)b32[22] << 8 | (uint64_t)b32[21] << 16 | (uint64_t)b32[20] << 24 | (uint64_t)b32[19] << 32 | (uint64_t)b32[18] << 40 | (uint64_t)b32[17] << 48 | (uint64_t)b32[16] << 56;
    r->d[2] = (uint64_t)b32[15] | (uint64_t)b32[14] << 8 | (uint64_t)b32[13] << 16 | (uint64_t)b32[12] << 24 | (uint64_t)b32[11] << 32 | (uint64_t)b32[10] << 40 | (uint64_t)b32[9] << 48 | (uint64_t)b32[8] << 56;
    r->d[3] = (uint64_t)b32[7] | (uint64_t)b32[6] << 8 | (uint64_t)b32[5] << 16 | (uint64_t)b32[4] << 24 | (uint64_t)b32[3] << 32 | (uint64_t)b32[2] << 40 | (uint64_t)b32[1] << 48 | (uint64_t)b32[0] << 56;
    over = secp256k1_scalar_reduce(r, secp256k1_scalar_check_overflow(r));
    if (overflow) {
        *overflow = over;
    }
}

static void secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar* a) {
    bin[0] = a->d[3] >> 56; bin[1] = a->d[3] >> 48; bin[2] = a->d[3] >> 40; bin[3] = a->d[3] >> 32; bin[4] = a->d[3] >> 24; bin[5] = a->d[3] >> 16; bin[6] = a->d[3] >> 8; bin[7] = a->d[3];
    bin[8] = a->d[2] >> 56; bin[9] = a->d[2] >> 48; bin[10] = a->d[2] >> 40; bin[11] = a->d[2] >> 32; bin[12] = a->d[2] >> 24; bin[13] = a->d[2] >> 16; bin[14] = a->d[2] >> 8; bin[15] = a->d[2];
    bin[16] = a->d[1] >> 56; bin[17] = a->d[1] >> 48; bin[18] = a->d[1] >> 40; bin[19] = a->d[1] >> 32; bin[20] = a->d[1] >> 24; bin[21] = a->d[1] >> 16; bin[22] = a->d[1] >> 8; bin[23] = a->d[1];
    bin[24] = a->d[0] >> 56; bin[25] = a->d[0] >> 48; bin[26] = a->d[0] >> 40; bin[27] = a->d[0] >> 32; bin[28] = a->d[0] >> 24; bin[29] = a->d[0] >> 16; bin[30] = a->d[0] >> 8; bin[31] = a->d[0];
}

SECP256K1_INLINE static int secp256k1_scalar_is_zero(const secp256k1_scalar *a) {
    return (a->d[0] | a->d[1] | a->d[2] | a->d[3]) == 0;
}

static void secp256k1_scalar_negate(secp256k1_scalar *r, const secp256k1_scalar *a) {
    /* Compute n - a, but map a = 0 to 0 by masking the result with `nonzero`. */
    uint64_t nonzero = 0xFFFFFFFFFFFFFFFFULL * (secp256k1_scalar_is_zero(a) == 0);
    uint128_t t = (uint128_t)(~a->d[0]) + SECP256K1_N_0 + 1;
    r->d[0] = t & nonzero; t >>= 64;
    t += (uint128_t)(~a->d[1]) + SECP256K1_N_1;
    r->d[1] = t & nonzero; t >>= 64;
    t += (uint128_t)(~a->d[2]) + SECP256K1_N_2;
    r->d[2] = t & nonzero; t >>= 64;
    t += (uint128_t)(~a->d[3]) + SECP256K1_N_3;
    r->d[3] = t & nonzero;
}

SECP256K1_INLINE static int secp256k1_scalar_is_one(const secp256k1_scalar *a) {
    return ((a->d[0] ^ 1) | a->d[1] | a->d[2] | a->d[3]) == 0;
}

static int secp256k1_scalar_cond_negate(secp256k1_scalar *r, int flag) {
    /* If flag = 0, mask = 00...00 and this is a no-op;
     * if flag = 1, mask = 11...11 and this is identical to secp256k1_scalar_negate. */
    uint64_t mask = !flag - 1;
    uint64_t nonzero = (secp256k1_scalar_is_zero(r) != 0) - 1;
    uint128_t t = (uint128_t)(r->d[0] ^ mask) + ((SECP256K1_N_0 + 1) & mask);
    r->d[0] = t & nonzero; t >>= 64;
    t += (uint128_t)(r->d[1] ^ mask) + (SECP256K1_N_1 & mask);
    r->d[1] = t & nonzero; t >>= 64;
    t += (uint128_t)(r->d[2] ^ mask) + (SECP256K1_N_2 & mask);
    r->d[2] = t & nonzero; t >>= 64;
    t += (uint128_t)(r->d[3] ^ mask) + (SECP256K1_N_3 & mask);
    r->d[3] = t & nonzero;
    return 2 * (mask == 0) - 1;
}

#define muladd(a,b) { \
        uint128_t t = (uint128_t)a * b; \
    th += (c0 < tl) ? 1 : 0;   \
    c2 += (c1 < th) ? 1 : 0;   \
    VERIFY_CHECK((c1 >= th) || (c2 != 0)); \

#define muladd_fast(a,b) { \
        uint128_t t = (uint128_t)a * b; \
    th += (c0 < tl) ? 1 : 0;   \
    VERIFY_CHECK(c1 >= th); \

#define muladd2(a,b) { \
    uint64_t tl, th, th2, tl2; \
        uint128_t t = (uint128_t)a * b; \
    c2 += (th2 < th) ? 1 : 0;        \
    VERIFY_CHECK((th2 >= th) || (c2 != 0)); \
    th2 += (tl2 < tl) ? 1 : 0;       \
    th2 += (c0 < tl2) ? 1 : 0;       \
    c2 += (c0 < tl2) & (th2 == 0);   \
    VERIFY_CHECK((c0 >= tl2) || (th2 != 0) || (c2 != 0)); \
    c2 += (c1 < th2) ? 1 : 0;        \
    VERIFY_CHECK((c1 >= th2) || (c2 != 0)); \

#define sumadd(a) { \
    over = (c0 < (a)) ? 1 : 0; \
    c2 += (c1 < over) ? 1 : 0;   \

#define sumadd_fast(a) { \
    c1 += (c0 < (a)) ? 1 : 0;   \
    VERIFY_CHECK((c1 != 0) | (c0 >= (a))); \
    VERIFY_CHECK(c2 == 0); \

#define extract(n) { \

#define extract_fast(n) { \
    VERIFY_CHECK(c2 == 0); \

static void secp256k1_scalar_reduce_512(secp256k1_scalar *r, const uint64_t *l) {
#ifdef USE_ASM_X86_64
    /* Reduce 512 bits into 385. */
    uint64_t m0, m1, m2, m3, m4, m5, m6;
    uint64_t p0, p1, p2, p3, p4;
    uint64_t c;

    __asm__ __volatile__(
    "movq 32(%%rsi), %%r11\n"
    "movq 40(%%rsi), %%r12\n"
    "movq 48(%%rsi), %%r13\n"
    "movq 56(%%rsi), %%r14\n"
    "movq 0(%%rsi), %%r8\n"
    "xorq %%r10, %%r10\n"
    "addq 8(%%rsi), %%r9\n"
    "adcq %%rdx, %%r10\n"
    "adcq %%rdx, %%r10\n"
    "addq 16(%%rsi), %%r10\n"
    "addq %%rax, %%r10\n"
    "addq %%rax, %%r10\n"
    "addq %%r11, %%r10\n"
    "xorq %%r10, %%r10\n"
    "addq 24(%%rsi), %%r8\n"
    "adcq %%rdx, %%r10\n"
    "addq %%r14, %%r10\n"
    : "=g"(m0), "=g"(m1), "=g"(m2), "=g"(m3), "=g"(m4), "=g"(m5), "=g"(m6)
    :
    : "rax", "rdx", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "cc");
    /* Reduce 385 bits into 258. */
    __asm__ __volatile__(
    "xorq %%r10, %%r10\n"
    "adcq %%rdx, %%r10\n"
    "adcq %%rdx, %%r10\n"
    "addq %%rax, %%r10\n"
    "addq %%rax, %%r10\n"
    "addq %%r11, %%r10\n"
    : "=&g"(p0), "=&g"(p1), "=&g"(p2), "=g"(p3), "=g"(p4)
    :
    : "rax", "rdx", "r8", "r9", "r10", "r11", "r12", "r13", "cc");
    /* Reduce 258 bits into 256. */
    __asm__ __volatile__(
    "movq %%rax, 0(%q6)\n"
    "movq %%r8, 8(%q6)\n"
    "movq %%r9, 16(%q6)\n"
    "movq %%r8, 24(%q6)\n"
    :
    :
    : "rax", "rdx", "r8", "r9", "r10", "cc", "memory");
#else
    uint128_t c;
    uint64_t c0, c1, c2;
    uint64_t n0 = l[4], n1 = l[5], n2 = l[6], n3 = l[7];
    uint64_t m0, m1, m2, m3, m4, m5;
    uint32_t m6;
    uint64_t p0, p1, p2, p3;
    uint32_t p4;

    /* Reduce 512 bits into 385: m[0..6] = l[0..3] + n[0..3] * SECP256K1_N_C. */
    c0 = l[0]; c1 = 0; c2 = 0;
    /* ... (muladd/extract chain) */

    /* Reduce 385 bits into 258: p[0..4] = m[0..3] + m[4..6] * SECP256K1_N_C. */
    c0 = m0; c1 = 0; c2 = 0;
    /* ... (muladd/extract chain) */

    /* Reduce 258 bits into 256: r[0..3] = p[0..3] + p[4] * SECP256K1_N_C. */
    c = p0 + (uint128_t)SECP256K1_N_C_0 * p4;
    r->d[0] = c & 0xFFFFFFFFFFFFFFFFULL; c >>= 64;
    c += p1 + (uint128_t)SECP256K1_N_C_1 * p4;
    r->d[1] = c & 0xFFFFFFFFFFFFFFFFULL; c >>= 64;
    c += p2 + (uint128_t)p4;
    r->d[2] = c & 0xFFFFFFFFFFFFFFFFULL; c >>= 64;
    c += p3;
    r->d[3] = c & 0xFFFFFFFFFFFFFFFFULL; c >>= 64;
#endif

    /* Final reduction of r. */
    secp256k1_scalar_reduce(r, c + secp256k1_scalar_check_overflow(r));
}

static void secp256k1_scalar_mul_512(uint64_t l[8], const secp256k1_scalar *a, const secp256k1_scalar *b) {
#ifdef USE_ASM_X86_64
    const uint64_t *pb = b->d;
    __asm__ __volatile__(
    "movq 0(%%rdi), %%r15\n"
    "movq 8(%%rdi), %%rbx\n"
    "movq 16(%%rdi), %%rcx\n"
    "movq 0(%%rdx), %%r11\n"
    "movq 8(%%rdx), %%r12\n"
    "movq 16(%%rdx), %%r13\n"
    "movq 24(%%rdx), %%r14\n"
    "movq %%r15, %%rax\n"
    "movq %%rax, 0(%%rsi)\n"
    "xorq %%r10, %%r10\n"
    "movq %%r15, %%rax\n"
    "movq %%rbx, %%rax\n"
    "movq %%r8, 8(%%rsi)\n"
    "movq %%r15, %%rax\n"
    "adcq %%rdx, %%r10\n"
    "movq %%rbx, %%rax\n"
    "adcq %%rdx, %%r10\n"
    "movq %%rcx, %%rax\n"
    "adcq %%rdx, %%r10\n"
    "movq %%r9, 16(%%rsi)\n"
    "movq %%r15, %%rax\n"
    "addq %%rax, %%r10\n"
    "movq 24(%%rdi), %%r15\n"
    "movq %%rbx, %%rax\n"
    "addq %%rax, %%r10\n"
    "movq %%rcx, %%rax\n"
    "addq %%rax, %%r10\n"
    "movq %%r15, %%rax\n"
    "addq %%rax, %%r10\n"
    "movq %%r10, 24(%%rsi)\n"
    "xorq %%r10, %%r10\n"
    "movq %%rbx, %%rax\n"
    "movq %%rcx, %%rax\n"
    "movq %%r15, %%rax\n"
    "movq %%r8, 32(%%rsi)\n"
    "movq %%rcx, %%rax\n"
    "adcq %%rdx, %%r10\n"
    "movq %%r15, %%rax\n"
    "adcq %%rdx, %%r10\n"
    "movq %%r9, 40(%%rsi)\n"
    "movq %%r15, %%rax\n"
    "addq %%rax, %%r10\n"
    "movq %%r10, 48(%%rsi)\n"
    "movq %%r8, 56(%%rsi)\n"
    :
    :
    : "rax", "rbx", "rcx", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15", "cc", "memory");
#else
    /* 160 bit accumulator. */
    uint64_t c0 = 0, c1 = 0;
    uint32_t c2 = 0;

    /* l[0..7] = a[0..3] * b[0..3]. */
    /* ... (muladd/extract chain) */
#endif
}

static void secp256k1_scalar_sqr_512(uint64_t l[8], const secp256k1_scalar *a) {
#ifdef USE_ASM_X86_64
    __asm__ __volatile__(
    "movq 0(%%rdi), %%r11\n"
    "movq 8(%%rdi), %%r12\n"
    "movq 16(%%rdi), %%r13\n"
    "movq 24(%%rdi), %%r14\n"
    "movq %%r11, %%rax\n"
    "movq %%rax, 0(%%rsi)\n"
    "xorq %%r10, %%r10\n"
    "movq %%r11, %%rax\n"
    "movq %%r8, 8(%%rsi)\n"
    "movq %%r11, %%rax\n"
    "adcq %%rdx, %%r10\n"
    "adcq %%rdx, %%r10\n"
    "movq %%r12, %%rax\n"
    "adcq %%rdx, %%r10\n"
    "movq %%r9, 16(%%rsi)\n"
    "movq %%r11, %%rax\n"
    "addq %%rax, %%r10\n"
    "addq %%rax, %%r10\n"
    "movq %%r12, %%rax\n"
    "addq %%rax, %%r10\n"
    "addq %%rax, %%r10\n"
    "movq %%r10, 24(%%rsi)\n"
    "xorq %%r10, %%r10\n"
    "movq %%r12, %%rax\n"
    "movq %%r13, %%rax\n"
    "movq %%r8, 32(%%rsi)\n"
    "movq %%r13, %%rax\n"
    "adcq %%rdx, %%r10\n"
    "adcq %%rdx, %%r10\n"
    "movq %%r9, 40(%%rsi)\n"
    "movq %%r14, %%rax\n"
    "addq %%rax, %%r10\n"
    "movq %%r10, 48(%%rsi)\n"
    "movq %%r8, 56(%%rsi)\n"
    :
    :
    : "rax", "rdx", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "cc", "memory");
#else
    /* 160 bit accumulator. */
    uint64_t c0 = 0, c1 = 0;
    uint32_t c2 = 0;

    /* l[0..7] = a[0..3] * a[0..3]. */
    /* ... (muladd/extract chain) */
#endif
}

static void secp256k1_scalar_mul(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) {
    uint64_t l[8];
    secp256k1_scalar_mul_512(l, a, b);
    secp256k1_scalar_reduce_512(r, l);
}

static int secp256k1_scalar_shr_int(secp256k1_scalar *r, int n) {
    int ret;
    ret = r->d[0] & ((1 << n) - 1);
    r->d[0] = (r->d[0] >> n) + (r->d[1] << (64 - n));
    r->d[1] = (r->d[1] >> n) + (r->d[2] << (64 - n));
    r->d[2] = (r->d[2] >> n) + (r->d[3] << (64 - n));
    r->d[3] = (r->d[3] >> n);
    return ret;
}

static void secp256k1_scalar_sqr(secp256k1_scalar *r, const secp256k1_scalar *a) {
    uint64_t l[8];
    secp256k1_scalar_sqr_512(l, a);
    secp256k1_scalar_reduce_512(r, l);
}

#ifdef USE_ENDOMORPHISM
/* ... */
#endif

SECP256K1_INLINE static int secp256k1_scalar_eq(const secp256k1_scalar *a, const secp256k1_scalar *b) {
    return ((a->d[0] ^ b->d[0]) | (a->d[1] ^ b->d[1]) | (a->d[2] ^ b->d[2]) | (a->d[3] ^ b->d[3])) == 0;
}

SECP256K1_INLINE static void secp256k1_scalar_mul_shift_var(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b, unsigned int shift) {
    uint64_t l[8];
    unsigned int shiftlimbs;
    unsigned int shiftlow;
    unsigned int shifthigh;
    secp256k1_scalar_mul_512(l, a, b);
    shiftlimbs = shift >> 6;
    shiftlow = shift & 0x3F;
    shifthigh = 64 - shiftlow;
    r->d[0] = shift < 512 ? (l[0 + shiftlimbs] >> shiftlow | (shift < 448 && shiftlow ? (l[1 + shiftlimbs] << shifthigh) : 0)) : 0;
    r->d[1] = shift < 448 ? (l[1 + shiftlimbs] >> shiftlow | (shift < 384 && shiftlow ? (l[2 + shiftlimbs] << shifthigh) : 0)) : 0;
    r->d[2] = shift < 384 ? (l[2 + shiftlimbs] >> shiftlow | (shift < 320 && shiftlow ? (l[3 + shiftlimbs] << shifthigh) : 0)) : 0;
    r->d[3] = shift < 320 ? (l[3 + shiftlimbs] >> shiftlow) : 0;
    /* Round to nearest: conditionally add the bit just below the cut-off. */
    secp256k1_scalar_cadd_bit(r, 0, (l[(shift - 1) >> 6] >> ((shift - 1) & 0x3f)) & 1);
}

#endif /* SECP256K1_SCALAR_REPR_IMPL_H */
#define VERIFY_CHECK(cond)
Assert that cond holds in VERIFY builds; compiled to a no-op otherwise.
 
#define extract_fast(n)
Extract the lowest 64 bits of (c0,c1,c2) into n, and drop them from the accumulator (shifting it down by 64 bits). c2 is required to be zero.
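
A minimal sketch of such a macro, assuming c0, c1 and c2 are uint64_t accumulator limbs in the enclosing scope (not necessarily the exact definition used in this file):

#define extract_fast(n) { \
    (n) = c0;              /* take the low 64 bits */ \
    c0 = c1;               /* shift the accumulator down one limb */ \
    c1 = 0; \
    VERIFY_CHECK(c2 == 0); /* fast variant: the top limb must already be zero */ \
}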
 
#define sumadd(a)
Add a to the number defined by (c0,c1,c2). 
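
A minimal sketch, assuming c0, c1 and c2 are uint64_t accumulator limbs in scope (not necessarily the exact macro in this file):

#define sumadd(a) { \
    unsigned int over; \
    c0 += (a);                 /* low limb; may wrap around */ \
    over = (c0 < (a)) ? 1 : 0; /* carry out of the low limb */ \
    c1 += over;                /* may wrap around again */ \
    c2 += (c1 < over) ? 1 : 0; /* propagate the final carry; c2 must never overflow */ \
}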
 
#define sumadd_fast(a)
Add a to the number defined by (c0,c1). 
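
A minimal sketch under the same assumptions; the fast variant relies on c1 not overflowing and on c2 already being zero:

#define sumadd_fast(a) { \
    c0 += (a);                /* low limb; may wrap around */ \
    c1 += (c0 < (a)) ? 1 : 0; /* carry into the middle limb; must not overflow */ \
    VERIFY_CHECK((c1 != 0) | (c0 >= (a))); \
    VERIFY_CHECK(c2 == 0); \
}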
 
A scalar modulo the group order of the secp256k1 curve. 
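
For the 4x64 backend shown here the representation amounts to four 64-bit limbs in little-endian limb order; a sketch of the type (the actual typedef lives in scalar_4x64.h, not in this file):

/* The represented value is d[0] + d[1]*2^64 + d[2]*2^128 + d[3]*2^192,
 * kept reduced modulo the group order given by SECP256K1_N_3..SECP256K1_N_0. */
typedef struct {
    uint64_t d[4];
} secp256k1_scalar;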
 
#define extract(n)
Extract the lowest 64 bits of (c0,c1,c2) into n, and drop them from the accumulator (shifting it down by 64 bits).
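
A minimal sketch under the same accumulator assumptions:

#define extract(n) { \
    (n) = c0; /* take the low 64 bits */ \
    c0 = c1;  /* shift the accumulator down one limb */ \
    c1 = c2; \
    c2 = 0; \
}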
 
#define muladd2(a, b)
Add 2*a*b to the number defined by (c0,c1,c2). 
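
A minimal sketch, assuming uint64_t limbs c0, c1, c2 and a uint128_t wide type (not necessarily the exact macro in this file); the doubling of the partial product is done in 64-bit halves so every carry can be caught:

#define muladd2(a,b) { \
    uint64_t tl, th, th2, tl2; \
    { \
        uint128_t t = (uint128_t)(a) * (b); \
        th = t >> 64;              /* high half of a*b */ \
        tl = t;                    /* low half of a*b */ \
    } \
    th2 = th + th;                 /* double the high half */ \
    c2 += (th2 < th) ? 1 : 0;      /* carry out of doubling the high half */ \
    tl2 = tl + tl;                 /* double the low half */ \
    th2 += (tl2 < tl) ? 1 : 0;     /* carry from doubling the low half */ \
    c0 += tl2;                     /* add the doubled low half */ \
    th2 += (c0 < tl2) ? 1 : 0;     /* carry out of c0 */ \
    c2 += (c0 < tl2) & (th2 == 0); /* catch the carry if th2 itself wrapped */ \
    c1 += th2;                     /* add the doubled high half */ \
    c2 += (c1 < th2) ? 1 : 0;      /* propagate the final carry */ \
}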
 
#define muladd_fast(a, b)
Add a*b to the number defined by (c0,c1). 
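
A minimal sketch under the same assumptions; the fast variant omits the c2 carry and instead requires that c1 cannot overflow:

#define muladd_fast(a,b) { \
    uint64_t tl, th; \
    { \
        uint128_t t = (uint128_t)(a) * (b); \
        th = t >> 64;           /* high 64 bits of the product */ \
        tl = t;                 /* low 64 bits of the product */ \
    } \
    c0 += tl;                   /* may wrap around */ \
    th += (c0 < tl) ? 1 : 0;    /* absorb the carry; cannot itself overflow */ \
    c1 += th;                   /* must not overflow by contract */ \
    VERIFY_CHECK(c1 >= th); \
}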
 
#define muladd(a, b)
Add a*b to the number defined by (c0,c1,c2).
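
A minimal sketch, assuming uint64_t accumulator limbs c0, c1, c2 and a uint128_t wide type (not necessarily the exact macro in this file):

#define muladd(a,b) { \
    uint64_t tl, th; \
    { \
        uint128_t t = (uint128_t)(a) * (b); \
        th = t >> 64;           /* high 64 bits of the product (at most 2^64 - 2) */ \
        tl = t;                 /* low 64 bits of the product */ \
    } \
    c0 += tl;                   /* may wrap around */ \
    th += (c0 < tl) ? 1 : 0;    /* absorb the carry; cannot itself overflow */ \
    c1 += th;                   /* may wrap around */ \
    c2 += (c1 < th) ? 1 : 0;    /* propagate the final carry; c2 must never overflow */ \
}

Together with extract this supports column-wise schoolbook multiplication; one column of the 4x4-limb product in secp256k1_scalar_mul_512 would look roughly like:

    muladd(a->d[0], b->d[2]);
    muladd(a->d[1], b->d[1]);
    muladd(a->d[2], b->d[0]);
    extract(l[2]);   /* l[2] now holds the third 64-bit limb of a*b */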