diff --git a/ecc/bls12-377/fp/element.go b/ecc/bls12-377/fp/element.go index 3e5ae1887c..512029291b 100644 --- a/ecc/bls12-377/fp/element.go +++ b/ecc/bls12-377/fp/element.go @@ -79,9 +79,6 @@ var qElement = Element{ qElementWord5, } -// Used for Montgomery reduction. (qInvNeg) q + r'.r = 1, i.e., qInvNeg = - q⁻¹ mod r -const qInvNegLsw uint64 = 9586122913090633727 - // rSquare var rSquare = Element{ 13224372171368877346, @@ -99,7 +96,8 @@ var bigIntPool = sync.Pool{ } func init() { - _modulus.SetString("258664426012969094010652733694893533536393512754914660539884262666720468348340822774968888139573360124440321458177", 10) + // base10: 258664426012969094010652733694893533536393512754914660539884262666720468348340822774968888139573360124440321458177 + _modulus.SetString("1ae3a4617c510eac63b05c06ca1493b1a22d9f300f5138f1ef3622fba094800170b5d44300000008508c00000000001", 16) } // NewElement returns a new Element from a uint64 value @@ -353,7 +351,7 @@ func (z *Element) SetRandom() (*Element, error) { z[5] = binary.BigEndian.Uint64(bytes[40:48]) z[5] %= 121098312706494698 - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[5] < 121098312706494698 || (z[5] == 121098312706494698 && (z[4] < 14284016967150029115 || (z[4] == 14284016967150029115 && (z[3] < 1883307231910630287 || (z[3] == 1883307231910630287 && (z[2] < 2230234197602682880 || (z[2] == 2230234197602682880 && (z[1] < 1660523435060625408 || (z[1] == 1660523435060625408 && (z[0] < 9586122913090633729))))))))))) { var b uint64 @@ -377,10 +375,10 @@ func One() Element { // Halve sets z to z / 2 (mod p) func (z *Element) Halve() { - if z[0]&1 == 1 { + var carry uint64 + if z[0]&1 == 1 { // z = z + q - var carry uint64 z[0], carry = bits.Add64(z[0], 9586122913090633729, 0) z[1], carry = bits.Add64(z[1], 1660523435060625408, carry) z[2], carry = bits.Add64(z[2], 2230234197602682880, carry) @@ -389,9 +387,7 @@ func (z *Element) Halve() { z[5], _ = bits.Add64(z[5], 121098312706494698, 
carry) } - // z = z >> 1 - z[0] = z[0]>>1 | z[1]<<63 z[1] = z[1]>>1 | z[2]<<63 z[2] = z[2]>>1 | z[3]<<63 @@ -401,8 +397,6 @@ func (z *Element) Halve() { } -// API with assembly impl - // Mul z = x * y mod q // see https://hackmd.io/@gnark/modular_multiplication func (z *Element) Mul(x, y *Element) *Element { @@ -570,7 +564,7 @@ func _mulGeneric(z, x, y *Element) { z[5], z[4] = madd3(m, 121098312706494698, c[0], c[2], c[1]) } - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[5] < 121098312706494698 || (z[5] == 121098312706494698 && (z[4] < 14284016967150029115 || (z[4] == 14284016967150029115 && (z[3] < 1883307231910630287 || (z[3] == 1883307231910630287 && (z[2] < 2230234197602682880 || (z[2] == 2230234197602682880 && (z[1] < 1660523435060625408 || (z[1] == 1660523435060625408 && (z[0] < 9586122913090633729))))))))))) { var b uint64 @@ -581,89 +575,7 @@ func _mulGeneric(z, x, y *Element) { z[4], b = bits.Sub64(z[4], 14284016967150029115, b) z[5], _ = bits.Sub64(z[5], 121098312706494698, b) } -} - -func _mulWGeneric(z, x *Element, y uint64) { - - var t [6]uint64 - { - // round 0 - c1, c0 := bits.Mul64(y, x[0]) - m := c0 * 9586122913090633727 - c2 := madd0(m, 9586122913090633729, c0) - c1, c0 = madd1(y, x[1], c1) - c2, t[0] = madd2(m, 1660523435060625408, c2, c0) - c1, c0 = madd1(y, x[2], c1) - c2, t[1] = madd2(m, 2230234197602682880, c2, c0) - c1, c0 = madd1(y, x[3], c1) - c2, t[2] = madd2(m, 1883307231910630287, c2, c0) - c1, c0 = madd1(y, x[4], c1) - c2, t[3] = madd2(m, 14284016967150029115, c2, c0) - c1, c0 = madd1(y, x[5], c1) - t[5], t[4] = madd3(m, 121098312706494698, c0, c2, c1) - } - { - // round 1 - m := t[0] * 9586122913090633727 - c2 := madd0(m, 9586122913090633729, t[0]) - c2, t[0] = madd2(m, 1660523435060625408, c2, t[1]) - c2, t[1] = madd2(m, 2230234197602682880, c2, t[2]) - c2, t[2] = madd2(m, 1883307231910630287, c2, t[3]) - c2, t[3] = madd2(m, 14284016967150029115, c2, t[4]) - t[5], t[4] = madd2(m, 
121098312706494698, t[5], c2) - } - { - // round 2 - m := t[0] * 9586122913090633727 - c2 := madd0(m, 9586122913090633729, t[0]) - c2, t[0] = madd2(m, 1660523435060625408, c2, t[1]) - c2, t[1] = madd2(m, 2230234197602682880, c2, t[2]) - c2, t[2] = madd2(m, 1883307231910630287, c2, t[3]) - c2, t[3] = madd2(m, 14284016967150029115, c2, t[4]) - t[5], t[4] = madd2(m, 121098312706494698, t[5], c2) - } - { - // round 3 - m := t[0] * 9586122913090633727 - c2 := madd0(m, 9586122913090633729, t[0]) - c2, t[0] = madd2(m, 1660523435060625408, c2, t[1]) - c2, t[1] = madd2(m, 2230234197602682880, c2, t[2]) - c2, t[2] = madd2(m, 1883307231910630287, c2, t[3]) - c2, t[3] = madd2(m, 14284016967150029115, c2, t[4]) - t[5], t[4] = madd2(m, 121098312706494698, t[5], c2) - } - { - // round 4 - m := t[0] * 9586122913090633727 - c2 := madd0(m, 9586122913090633729, t[0]) - c2, t[0] = madd2(m, 1660523435060625408, c2, t[1]) - c2, t[1] = madd2(m, 2230234197602682880, c2, t[2]) - c2, t[2] = madd2(m, 1883307231910630287, c2, t[3]) - c2, t[3] = madd2(m, 14284016967150029115, c2, t[4]) - t[5], t[4] = madd2(m, 121098312706494698, t[5], c2) - } - { - // round 5 - m := t[0] * 9586122913090633727 - c2 := madd0(m, 9586122913090633729, t[0]) - c2, z[0] = madd2(m, 1660523435060625408, c2, t[1]) - c2, z[1] = madd2(m, 2230234197602682880, c2, t[2]) - c2, z[2] = madd2(m, 1883307231910630287, c2, t[3]) - c2, z[3] = madd2(m, 14284016967150029115, c2, t[4]) - z[5], z[4] = madd2(m, 121098312706494698, t[5], c2) - } - // if z > q → z -= q - // note: this is NOT constant time - if !(z[5] < 121098312706494698 || (z[5] == 121098312706494698 && (z[4] < 14284016967150029115 || (z[4] == 14284016967150029115 && (z[3] < 1883307231910630287 || (z[3] == 1883307231910630287 && (z[2] < 2230234197602682880 || (z[2] == 2230234197602682880 && (z[1] < 1660523435060625408 || (z[1] == 1660523435060625408 && (z[0] < 9586122913090633729))))))))))) { - var b uint64 - z[0], b = bits.Sub64(z[0], 9586122913090633729, 0) - z[1], b = 
bits.Sub64(z[1], 1660523435060625408, b) - z[2], b = bits.Sub64(z[2], 2230234197602682880, b) - z[3], b = bits.Sub64(z[3], 1883307231910630287, b) - z[4], b = bits.Sub64(z[4], 14284016967150029115, b) - z[5], _ = bits.Sub64(z[5], 121098312706494698, b) - } } func _fromMontGeneric(z *Element) { @@ -736,7 +648,7 @@ func _fromMontGeneric(z *Element) { z[5] = C } - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[5] < 121098312706494698 || (z[5] == 121098312706494698 && (z[4] < 14284016967150029115 || (z[4] == 14284016967150029115 && (z[3] < 1883307231910630287 || (z[3] == 1883307231910630287 && (z[2] < 2230234197602682880 || (z[2] == 2230234197602682880 && (z[1] < 1660523435060625408 || (z[1] == 1660523435060625408 && (z[0] < 9586122913090633729))))))))))) { var b uint64 @@ -759,7 +671,7 @@ func _addGeneric(z, x, y *Element) { z[4], carry = bits.Add64(x[4], y[4], carry) z[5], _ = bits.Add64(x[5], y[5], carry) - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[5] < 121098312706494698 || (z[5] == 121098312706494698 && (z[4] < 14284016967150029115 || (z[4] == 14284016967150029115 && (z[3] < 1883307231910630287 || (z[3] == 1883307231910630287 && (z[2] < 2230234197602682880 || (z[2] == 2230234197602682880 && (z[1] < 1660523435060625408 || (z[1] == 1660523435060625408 && (z[0] < 9586122913090633729))))))))))) { var b uint64 @@ -782,7 +694,7 @@ func _doubleGeneric(z, x *Element) { z[4], carry = bits.Add64(x[4], x[4], carry) z[5], _ = bits.Add64(x[5], x[5], carry) - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[5] < 121098312706494698 || (z[5] == 121098312706494698 && (z[4] < 14284016967150029115 || (z[4] == 14284016967150029115 && (z[3] < 1883307231910630287 || (z[3] == 1883307231910630287 && (z[2] < 2230234197602682880 || (z[2] == 2230234197602682880 && (z[1] < 1660523435060625408 || (z[1] == 1660523435060625408 && (z[0] < 9586122913090633729))))))))))) { var 
b uint64 @@ -830,7 +742,7 @@ func _negGeneric(z, x *Element) { func _reduceGeneric(z *Element) { - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[5] < 121098312706494698 || (z[5] == 121098312706494698 && (z[4] < 14284016967150029115 || (z[4] == 14284016967150029115 && (z[3] < 1883307231910630287 || (z[3] == 1883307231910630287 && (z[2] < 2230234197602682880 || (z[2] == 2230234197602682880 && (z[1] < 1660523435060625408 || (z[1] == 1660523435060625408 && (z[0] < 9586122913090633729))))))))))) { var b uint64 @@ -929,18 +841,30 @@ func (z *Element) BitLen() int { return bits.Len64(z[0]) } -// Exp z = x^exponent mod q -func (z *Element) Exp(x Element, exponent *big.Int) *Element { - var bZero big.Int - if exponent.Cmp(&bZero) == 0 { +// Exp z = xᵏ mod q +func (z *Element) Exp(x Element, k *big.Int) *Element { + if k.IsUint64() && k.Uint64() == 0 { return z.SetOne() } + e := k + if k.Sign() == -1 { + // negative k, we invert + // if k < 0: xᵏ mod q == (x⁻¹)ᵏ mod q + x.Inverse(&x) + + // we negate k in a temp big.Int since + // Int.Bit(_) of k and -k is different + e = bigIntPool.Get().(*big.Int) + defer bigIntPool.Put(e) + e.Neg(k) + } + z.Set(&x) - for i := exponent.BitLen() - 2; i >= 0; i-- { + for i := e.BitLen() - 2; i >= 0; i-- { z.Square(z) - if exponent.Bit(i) == 1 { + if e.Bit(i) == 1 { z.Mul(z, &x) } } @@ -1470,14 +1394,10 @@ func (z *Element) Inverse(x *Element) *Element { return z } -var qMinusTwo *big.Int //test routines can set this to an incorrect value to fail whenever inverseExp was triggered - -// inverseExp is a fallback in case the inversion algorithm failed +// inverseExp computes z = x⁻¹ mod q = x**(q-2) mod q func (z *Element) inverseExp(x *Element) *Element { - if qMinusTwo == nil { - qMinusTwo = Modulus() - qMinusTwo.Sub(qMinusTwo, big.NewInt(2)) - } + qMinusTwo := Modulus() + qMinusTwo.Sub(qMinusTwo, big.NewInt(2)) return z.Exp(*x, qMinusTwo) } @@ -1519,6 +1439,8 @@ func (z *Element) linearComb(x *Element, 
xC int64, y *Element, yC int64) { // montReduceSigned z = (xHi * r + x) * r⁻¹ using the SOS algorithm // Requires |xHi| < 2⁶³. Most significant bit of xHi is the sign bit. func (z *Element) montReduceSigned(x *Element, xHi uint64) { + // Used for Montgomery reduction. (qInvNeg) q + r'.r = 1, i.e., qInvNeg = - q⁻¹ mod r + const qInvNegLsw uint64 = 9586122913090633727 const signBitRemover = ^signBitSelector neg := xHi&signBitSelector != 0 @@ -1612,7 +1534,7 @@ func (z *Element) montReduceSigned(x *Element, xHi uint64) { z[5], z[4] = madd2(m, qElementWord5, t[i+5], C) } - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[5] < 121098312706494698 || (z[5] == 121098312706494698 && (z[4] < 14284016967150029115 || (z[4] == 14284016967150029115 && (z[3] < 1883307231910630287 || (z[3] == 1883307231910630287 && (z[2] < 2230234197602682880 || (z[2] == 2230234197602682880 && (z[1] < 1660523435060625408 || (z[1] == 1660523435060625408 && (z[0] < 9586122913090633729))))))))))) { var b uint64 @@ -1670,7 +1592,7 @@ func (z *Element) montReduceSignedSimpleButSlow(x *Element, xHi uint64) { z[4], c = bits.Add64(z[4], 0, c) z[5], _ = bits.Add64(z[5], 0, c) - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[5] < 121098312706494698 || (z[5] == 121098312706494698 && (z[4] < 14284016967150029115 || (z[4] == 14284016967150029115 && (z[3] < 1883307231910630287 || (z[3] == 1883307231910630287 && (z[2] < 2230234197602682880 || (z[2] == 2230234197602682880 && (z[1] < 1660523435060625408 || (z[1] == 1660523435060625408 && (z[0] < 9586122913090633729))))))))))) { var b uint64 @@ -1721,6 +1643,89 @@ func (z *Element) mulWSigned(x *Element, y int64) { } } +func _mulWGeneric(z, x *Element, y uint64) { + + var t [6]uint64 + { + // round 0 + c1, c0 := bits.Mul64(y, x[0]) + m := c0 * 9586122913090633727 + c2 := madd0(m, 9586122913090633729, c0) + c1, c0 = madd1(y, x[1], c1) + c2, t[0] = madd2(m, 1660523435060625408, c2, c0) + 
c1, c0 = madd1(y, x[2], c1) + c2, t[1] = madd2(m, 2230234197602682880, c2, c0) + c1, c0 = madd1(y, x[3], c1) + c2, t[2] = madd2(m, 1883307231910630287, c2, c0) + c1, c0 = madd1(y, x[4], c1) + c2, t[3] = madd2(m, 14284016967150029115, c2, c0) + c1, c0 = madd1(y, x[5], c1) + t[5], t[4] = madd3(m, 121098312706494698, c0, c2, c1) + } + { + // round 1 + m := t[0] * 9586122913090633727 + c2 := madd0(m, 9586122913090633729, t[0]) + c2, t[0] = madd2(m, 1660523435060625408, c2, t[1]) + c2, t[1] = madd2(m, 2230234197602682880, c2, t[2]) + c2, t[2] = madd2(m, 1883307231910630287, c2, t[3]) + c2, t[3] = madd2(m, 14284016967150029115, c2, t[4]) + t[5], t[4] = madd2(m, 121098312706494698, t[5], c2) + } + { + // round 2 + m := t[0] * 9586122913090633727 + c2 := madd0(m, 9586122913090633729, t[0]) + c2, t[0] = madd2(m, 1660523435060625408, c2, t[1]) + c2, t[1] = madd2(m, 2230234197602682880, c2, t[2]) + c2, t[2] = madd2(m, 1883307231910630287, c2, t[3]) + c2, t[3] = madd2(m, 14284016967150029115, c2, t[4]) + t[5], t[4] = madd2(m, 121098312706494698, t[5], c2) + } + { + // round 3 + m := t[0] * 9586122913090633727 + c2 := madd0(m, 9586122913090633729, t[0]) + c2, t[0] = madd2(m, 1660523435060625408, c2, t[1]) + c2, t[1] = madd2(m, 2230234197602682880, c2, t[2]) + c2, t[2] = madd2(m, 1883307231910630287, c2, t[3]) + c2, t[3] = madd2(m, 14284016967150029115, c2, t[4]) + t[5], t[4] = madd2(m, 121098312706494698, t[5], c2) + } + { + // round 4 + m := t[0] * 9586122913090633727 + c2 := madd0(m, 9586122913090633729, t[0]) + c2, t[0] = madd2(m, 1660523435060625408, c2, t[1]) + c2, t[1] = madd2(m, 2230234197602682880, c2, t[2]) + c2, t[2] = madd2(m, 1883307231910630287, c2, t[3]) + c2, t[3] = madd2(m, 14284016967150029115, c2, t[4]) + t[5], t[4] = madd2(m, 121098312706494698, t[5], c2) + } + { + // round 5 + m := t[0] * 9586122913090633727 + c2 := madd0(m, 9586122913090633729, t[0]) + c2, z[0] = madd2(m, 1660523435060625408, c2, t[1]) + c2, z[1] = madd2(m, 2230234197602682880, c2, t[2]) + 
c2, z[2] = madd2(m, 1883307231910630287, c2, t[3]) + c2, z[3] = madd2(m, 14284016967150029115, c2, t[4]) + z[5], z[4] = madd2(m, 121098312706494698, t[5], c2) + } + + // if z >= q → z -= q + // note: this is NOT constant time + if !(z[5] < 121098312706494698 || (z[5] == 121098312706494698 && (z[4] < 14284016967150029115 || (z[4] == 14284016967150029115 && (z[3] < 1883307231910630287 || (z[3] == 1883307231910630287 && (z[2] < 2230234197602682880 || (z[2] == 2230234197602682880 && (z[1] < 1660523435060625408 || (z[1] == 1660523435060625408 && (z[0] < 9586122913090633729))))))))))) { + var b uint64 + z[0], b = bits.Sub64(z[0], 9586122913090633729, 0) + z[1], b = bits.Sub64(z[1], 1660523435060625408, b) + z[2], b = bits.Sub64(z[2], 2230234197602682880, b) + z[3], b = bits.Sub64(z[3], 1883307231910630287, b) + z[4], b = bits.Sub64(z[4], 14284016967150029115, b) + z[5], _ = bits.Sub64(z[5], 121098312706494698, b) + } +} + func (z *Element) neg(x *Element, xHi uint64) uint64 { var b uint64 diff --git a/ecc/bls12-377/fp/element_ops_noasm.go b/ecc/bls12-377/fp/element_ops_noasm.go index fec6289183..7d02396a3f 100644 --- a/ecc/bls12-377/fp/element_ops_noasm.go +++ b/ecc/bls12-377/fp/element_ops_noasm.go @@ -46,7 +46,6 @@ func MulBy13(x *Element) { func Butterfly(a, b *Element) { _butterflyGeneric(a, b) } - func mul(z, x, y *Element) { _mulGeneric(z, x, y) } diff --git a/ecc/bls12-377/fp/element_test.go b/ecc/bls12-377/fp/element_test.go index 1ce4114510..5a86d471eb 100644 --- a/ecc/bls12-377/fp/element_test.go +++ b/ecc/bls12-377/fp/element_test.go @@ -22,10 +22,12 @@ import ( "fmt" "math/big" "math/bits" + + "github.com/consensys/gnark-crypto/field" mrand "math/rand" + "testing" - "github.com/consensys/gnark-crypto/field" "github.com/leanovate/gopter" ggen "github.com/leanovate/gopter/gen" "github.com/leanovate/gopter/prop" @@ -324,7 +326,6 @@ func init() { a[5]-- staticTestValues = append(staticTestValues, a) } - { a := qElement a[5]-- @@ -338,6 +339,12 @@ func init() { 
staticTestValues = append(staticTestValues, a) } + { + a := qElement + a[5] = 0 + staticTestValues = append(staticTestValues, a) + } + } func TestElementReduce(t *testing.T) { @@ -475,7 +482,6 @@ func TestElementInverseExp(t *testing.T) { properties = gopter.NewProperties(parameters) properties.Property("inv(0) == 0", prop.ForAll(invMatchExp, ggen.OneConstOf(testPairElement{}))) properties.TestingRun(t, gopter.ConsoleReporter(false)) - // if we have ADX instruction enabled, test both path in assembly if supportAdx { t.Log("disabling ADX") @@ -1422,8 +1428,8 @@ func TestElementSquare(t *testing.T) { specialValueTest() // if we have ADX instruction enabled, test both path in assembly if supportAdx { - supportAdx = false t.Log("disabling ADX") + supportAdx = false properties.TestingRun(t, gopter.ConsoleReporter(false)) specialValueTest() supportAdx = true @@ -1501,8 +1507,8 @@ func TestElementInverse(t *testing.T) { specialValueTest() // if we have ADX instruction enabled, test both path in assembly if supportAdx { - supportAdx = false t.Log("disabling ADX") + supportAdx = false properties.TestingRun(t, gopter.ConsoleReporter(false)) specialValueTest() supportAdx = true @@ -1580,8 +1586,8 @@ func TestElementSqrt(t *testing.T) { specialValueTest() // if we have ADX instruction enabled, test both path in assembly if supportAdx { - supportAdx = false t.Log("disabling ADX") + supportAdx = false properties.TestingRun(t, gopter.ConsoleReporter(false)) specialValueTest() supportAdx = true @@ -1676,8 +1682,8 @@ func TestElementDouble(t *testing.T) { specialValueTest() // if we have ADX instruction enabled, test both path in assembly if supportAdx { - supportAdx = false t.Log("disabling ADX") + supportAdx = false properties.TestingRun(t, gopter.ConsoleReporter(false)) specialValueTest() supportAdx = true @@ -1772,8 +1778,8 @@ func TestElementNeg(t *testing.T) { specialValueTest() // if we have ADX instruction enabled, test both path in assembly if supportAdx { - supportAdx = 
false t.Log("disabling ADX") + supportAdx = false properties.TestingRun(t, gopter.ConsoleReporter(false)) specialValueTest() supportAdx = true @@ -2108,6 +2114,134 @@ func TestElementSetInterface(t *testing.T) { properties.TestingRun(t, gopter.ConsoleReporter(false)) } +func TestElementNegativeExp(t *testing.T) { + t.Parallel() + + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := gen() + + properties.Property("x⁻ᵏ == 1/xᵏ", prop.ForAll( + func(a, b testPairElement) bool { + + var nb, d, e big.Int + nb.Neg(&b.bigint) + + var c Element + c.Exp(a.element, &nb) + + d.Exp(&a.bigint, &nb, Modulus()) + + return c.FromMont().ToBigInt(&e).Cmp(&d) == 0 + }, + genA, genA, + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) +} + +func TestElementBatchInvert(t *testing.T) { + assert := require.New(t) + + t.Parallel() + + // ensure batchInvert([x]) == invert(x) + for i := int64(-1); i <= 2; i++ { + var e, eInv Element + e.SetInt64(i) + eInv.Inverse(&e) + + a := []Element{e} + aInv := BatchInvert(a) + + assert.True(aInv[0].Equal(&eInv), "batchInvert != invert") + + } + + // test x * x⁻¹ == 1 + tData := [][]int64{ + {-1, 1, 2, 3}, + {0, -1, 1, 2, 3, 0}, + {0, -1, 1, 0, 2, 3, 0}, + {-1, 1, 0, 2, 3}, + {0, 0, 1}, + {1, 0, 0}, + {0, 0, 0}, + } + + for _, t := range tData { + a := make([]Element, len(t)) + for i := 0; i < len(a); i++ { + a[i].SetInt64(t[i]) + } + + aInv := BatchInvert(a) + + assert.True(len(aInv) == len(a)) + + for i := 0; i < len(a); i++ { + if a[i].IsZero() { + assert.True(aInv[i].IsZero(), "0⁻¹ != 0") + } else { + assert.True(a[i].Mul(&a[i], &aInv[i]).IsOne(), "x * x⁻¹ != 1") + } + } + } + + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + 
properties := gopter.NewProperties(parameters) + + genA := gen() + + properties.Property("batchInvert --> x * x⁻¹ == 1", prop.ForAll( + func(tp testPairElement, r uint8) bool { + + a := make([]Element, r) + if r != 0 { + a[0] = tp.element + + } + one := One() + for i := 1; i < len(a); i++ { + a[i].Add(&a[i-1], &one) + } + + aInv := BatchInvert(a) + + assert.True(len(aInv) == len(a)) + + for i := 0; i < len(a); i++ { + if a[i].IsZero() { + if !aInv[i].IsZero() { + return false + } + } else { + if !a[i].Mul(&a[i], &aInv[i]).IsOne() { + return false + } + } + } + return true + }, + genA, ggen.UInt8(), + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) +} + func TestElementFromMont(t *testing.T) { t.Parallel() @@ -2314,8 +2448,6 @@ func genFull() gopter.Gen { } } -// Some utils - func (z *Element) matchVeryBigInt(aHi uint64, aInt *big.Int) error { var modulus big.Int var aIntMod big.Int diff --git a/ecc/bls12-377/fr/element.go b/ecc/bls12-377/fr/element.go index 8ac0b0de97..e78bf1c114 100644 --- a/ecc/bls12-377/fr/element.go +++ b/ecc/bls12-377/fr/element.go @@ -75,9 +75,6 @@ var qElement = Element{ qElementWord3, } -// Used for Montgomery reduction. 
(qInvNeg) q + r'.r = 1, i.e., qInvNeg = - q⁻¹ mod r -const qInvNegLsw uint64 = 725501752471715839 - // rSquare var rSquare = Element{ 2726216793283724667, @@ -93,7 +90,8 @@ var bigIntPool = sync.Pool{ } func init() { - _modulus.SetString("8444461749428370424248824938781546531375899335154063827935233455917409239041", 10) + // base10: 8444461749428370424248824938781546531375899335154063827935233455917409239041 + _modulus.SetString("12ab655e9a2ca55660b44d1e5c37b00159aa76fed00000010a11800000000001", 16) } // NewElement returns a new Element from a uint64 value @@ -327,7 +325,7 @@ func (z *Element) SetRandom() (*Element, error) { z[3] = binary.BigEndian.Uint64(bytes[24:32]) z[3] %= 1345280370688173398 - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[3] < 1345280370688173398 || (z[3] == 1345280370688173398 && (z[2] < 6968279316240510977 || (z[2] == 6968279316240510977 && (z[1] < 6461107452199829505 || (z[1] == 6461107452199829505 && (z[0] < 725501752471715841))))))) { var b uint64 @@ -349,19 +347,17 @@ func One() Element { // Halve sets z to z / 2 (mod p) func (z *Element) Halve() { - if z[0]&1 == 1 { + var carry uint64 + if z[0]&1 == 1 { // z = z + q - var carry uint64 z[0], carry = bits.Add64(z[0], 725501752471715841, 0) z[1], carry = bits.Add64(z[1], 6461107452199829505, carry) z[2], carry = bits.Add64(z[2], 6968279316240510977, carry) z[3], _ = bits.Add64(z[3], 1345280370688173398, carry) } - // z = z >> 1 - z[0] = z[0]>>1 | z[1]<<63 z[1] = z[1]>>1 | z[2]<<63 z[2] = z[2]>>1 | z[3]<<63 @@ -369,8 +365,6 @@ func (z *Element) Halve() { } -// API with assembly impl - // Mul z = x * y mod q // see https://hackmd.io/@gnark/modular_multiplication func (z *Element) Mul(x, y *Element) *Element { @@ -486,7 +480,7 @@ func _mulGeneric(z, x, y *Element) { z[3], z[2] = madd3(m, 1345280370688173398, c[0], c[2], c[1]) } - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[3] < 1345280370688173398 || (z[3] == 
1345280370688173398 && (z[2] < 6968279316240510977 || (z[2] == 6968279316240510977 && (z[1] < 6461107452199829505 || (z[1] == 6461107452199829505 && (z[0] < 725501752471715841))))))) { var b uint64 @@ -495,57 +489,7 @@ func _mulGeneric(z, x, y *Element) { z[2], b = bits.Sub64(z[2], 6968279316240510977, b) z[3], _ = bits.Sub64(z[3], 1345280370688173398, b) } -} - -func _mulWGeneric(z, x *Element, y uint64) { - - var t [4]uint64 - { - // round 0 - c1, c0 := bits.Mul64(y, x[0]) - m := c0 * 725501752471715839 - c2 := madd0(m, 725501752471715841, c0) - c1, c0 = madd1(y, x[1], c1) - c2, t[0] = madd2(m, 6461107452199829505, c2, c0) - c1, c0 = madd1(y, x[2], c1) - c2, t[1] = madd2(m, 6968279316240510977, c2, c0) - c1, c0 = madd1(y, x[3], c1) - t[3], t[2] = madd3(m, 1345280370688173398, c0, c2, c1) - } - { - // round 1 - m := t[0] * 725501752471715839 - c2 := madd0(m, 725501752471715841, t[0]) - c2, t[0] = madd2(m, 6461107452199829505, c2, t[1]) - c2, t[1] = madd2(m, 6968279316240510977, c2, t[2]) - t[3], t[2] = madd2(m, 1345280370688173398, t[3], c2) - } - { - // round 2 - m := t[0] * 725501752471715839 - c2 := madd0(m, 725501752471715841, t[0]) - c2, t[0] = madd2(m, 6461107452199829505, c2, t[1]) - c2, t[1] = madd2(m, 6968279316240510977, c2, t[2]) - t[3], t[2] = madd2(m, 1345280370688173398, t[3], c2) - } - { - // round 3 - m := t[0] * 725501752471715839 - c2 := madd0(m, 725501752471715841, t[0]) - c2, z[0] = madd2(m, 6461107452199829505, c2, t[1]) - c2, z[1] = madd2(m, 6968279316240510977, c2, t[2]) - z[3], z[2] = madd2(m, 1345280370688173398, t[3], c2) - } - // if z > q → z -= q - // note: this is NOT constant time - if !(z[3] < 1345280370688173398 || (z[3] == 1345280370688173398 && (z[2] < 6968279316240510977 || (z[2] == 6968279316240510977 && (z[1] < 6461107452199829505 || (z[1] == 6461107452199829505 && (z[0] < 725501752471715841))))))) { - var b uint64 - z[0], b = bits.Sub64(z[0], 725501752471715841, 0) - z[1], b = bits.Sub64(z[1], 6461107452199829505, b) - z[2], b 
= bits.Sub64(z[2], 6968279316240510977, b) - z[3], _ = bits.Sub64(z[3], 1345280370688173398, b) - } } func _fromMontGeneric(z *Element) { @@ -588,7 +532,7 @@ func _fromMontGeneric(z *Element) { z[3] = C } - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[3] < 1345280370688173398 || (z[3] == 1345280370688173398 && (z[2] < 6968279316240510977 || (z[2] == 6968279316240510977 && (z[1] < 6461107452199829505 || (z[1] == 6461107452199829505 && (z[0] < 725501752471715841))))))) { var b uint64 @@ -607,7 +551,7 @@ func _addGeneric(z, x, y *Element) { z[2], carry = bits.Add64(x[2], y[2], carry) z[3], _ = bits.Add64(x[3], y[3], carry) - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[3] < 1345280370688173398 || (z[3] == 1345280370688173398 && (z[2] < 6968279316240510977 || (z[2] == 6968279316240510977 && (z[1] < 6461107452199829505 || (z[1] == 6461107452199829505 && (z[0] < 725501752471715841))))))) { var b uint64 @@ -626,7 +570,7 @@ func _doubleGeneric(z, x *Element) { z[2], carry = bits.Add64(x[2], x[2], carry) z[3], _ = bits.Add64(x[3], x[3], carry) - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[3] < 1345280370688173398 || (z[3] == 1345280370688173398 && (z[2] < 6968279316240510977 || (z[2] == 6968279316240510977 && (z[1] < 6461107452199829505 || (z[1] == 6461107452199829505 && (z[0] < 725501752471715841))))))) { var b uint64 @@ -666,7 +610,7 @@ func _negGeneric(z, x *Element) { func _reduceGeneric(z *Element) { - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[3] < 1345280370688173398 || (z[3] == 1345280370688173398 && (z[2] < 6968279316240510977 || (z[2] == 6968279316240510977 && (z[1] < 6461107452199829505 || (z[1] == 6461107452199829505 && (z[0] < 725501752471715841))))))) { var b uint64 @@ -757,18 +701,30 @@ func (z *Element) BitLen() int { return bits.Len64(z[0]) } -// Exp z = x^exponent mod q -func (z *Element) 
Exp(x Element, exponent *big.Int) *Element { - var bZero big.Int - if exponent.Cmp(&bZero) == 0 { +// Exp z = xᵏ mod q +func (z *Element) Exp(x Element, k *big.Int) *Element { + if k.IsUint64() && k.Uint64() == 0 { return z.SetOne() } + e := k + if k.Sign() == -1 { + // negative k, we invert + // if k < 0: xᵏ mod q == (x⁻¹)ᵏ mod q + x.Inverse(&x) + + // we negate k in a temp big.Int since + // Int.Bit(_) of k and -k is different + e = bigIntPool.Get().(*big.Int) + defer bigIntPool.Put(e) + e.Neg(k) + } + z.Set(&x) - for i := exponent.BitLen() - 2; i >= 0; i-- { + for i := e.BitLen() - 2; i >= 0; i-- { z.Square(z) - if exponent.Bit(i) == 1 { + if e.Bit(i) == 1 { z.Mul(z, &x) } } @@ -1282,14 +1238,10 @@ func (z *Element) Inverse(x *Element) *Element { return z } -var qMinusTwo *big.Int //test routines can set this to an incorrect value to fail whenever inverseExp was triggered - -// inverseExp is a fallback in case the inversion algorithm failed +// inverseExp computes z = x⁻¹ mod q = x**(q-2) mod q func (z *Element) inverseExp(x *Element) *Element { - if qMinusTwo == nil { - qMinusTwo = Modulus() - qMinusTwo.Sub(qMinusTwo, big.NewInt(2)) - } + qMinusTwo := Modulus() + qMinusTwo.Sub(qMinusTwo, big.NewInt(2)) return z.Exp(*x, qMinusTwo) } @@ -1331,6 +1283,8 @@ func (z *Element) linearComb(x *Element, xC int64, y *Element, yC int64) { // montReduceSigned z = (xHi * r + x) * r⁻¹ using the SOS algorithm // Requires |xHi| < 2⁶³. Most significant bit of xHi is the sign bit. func (z *Element) montReduceSigned(x *Element, xHi uint64) { + // Used for Montgomery reduction. 
(qInvNeg) q + r'.r = 1, i.e., qInvNeg = - q⁻¹ mod r + const qInvNegLsw uint64 = 725501752471715839 const signBitRemover = ^signBitSelector neg := xHi&signBitSelector != 0 @@ -1390,7 +1344,7 @@ func (z *Element) montReduceSigned(x *Element, xHi uint64) { z[3], z[2] = madd2(m, qElementWord3, t[i+3], C) } - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[3] < 1345280370688173398 || (z[3] == 1345280370688173398 && (z[2] < 6968279316240510977 || (z[2] == 6968279316240510977 && (z[1] < 6461107452199829505 || (z[1] == 6461107452199829505 && (z[0] < 725501752471715841))))))) { var b uint64 @@ -1440,7 +1394,7 @@ func (z *Element) montReduceSignedSimpleButSlow(x *Element, xHi uint64) { z[2], c = bits.Add64(z[2], 0, c) z[3], _ = bits.Add64(z[3], 0, c) - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[3] < 1345280370688173398 || (z[3] == 1345280370688173398 && (z[2] < 6968279316240510977 || (z[2] == 6968279316240510977 && (z[1] < 6461107452199829505 || (z[1] == 6461107452199829505 && (z[0] < 725501752471715841))))))) { var b uint64 @@ -1485,6 +1439,57 @@ func (z *Element) mulWSigned(x *Element, y int64) { } } +func _mulWGeneric(z, x *Element, y uint64) { + + var t [4]uint64 + { + // round 0 + c1, c0 := bits.Mul64(y, x[0]) + m := c0 * 725501752471715839 + c2 := madd0(m, 725501752471715841, c0) + c1, c0 = madd1(y, x[1], c1) + c2, t[0] = madd2(m, 6461107452199829505, c2, c0) + c1, c0 = madd1(y, x[2], c1) + c2, t[1] = madd2(m, 6968279316240510977, c2, c0) + c1, c0 = madd1(y, x[3], c1) + t[3], t[2] = madd3(m, 1345280370688173398, c0, c2, c1) + } + { + // round 1 + m := t[0] * 725501752471715839 + c2 := madd0(m, 725501752471715841, t[0]) + c2, t[0] = madd2(m, 6461107452199829505, c2, t[1]) + c2, t[1] = madd2(m, 6968279316240510977, c2, t[2]) + t[3], t[2] = madd2(m, 1345280370688173398, t[3], c2) + } + { + // round 2 + m := t[0] * 725501752471715839 + c2 := madd0(m, 725501752471715841, t[0]) + c2, t[0] = 
madd2(m, 6461107452199829505, c2, t[1]) + c2, t[1] = madd2(m, 6968279316240510977, c2, t[2]) + t[3], t[2] = madd2(m, 1345280370688173398, t[3], c2) + } + { + // round 3 + m := t[0] * 725501752471715839 + c2 := madd0(m, 725501752471715841, t[0]) + c2, z[0] = madd2(m, 6461107452199829505, c2, t[1]) + c2, z[1] = madd2(m, 6968279316240510977, c2, t[2]) + z[3], z[2] = madd2(m, 1345280370688173398, t[3], c2) + } + + // if z >= q → z -= q + // note: this is NOT constant time + if !(z[3] < 1345280370688173398 || (z[3] == 1345280370688173398 && (z[2] < 6968279316240510977 || (z[2] == 6968279316240510977 && (z[1] < 6461107452199829505 || (z[1] == 6461107452199829505 && (z[0] < 725501752471715841))))))) { + var b uint64 + z[0], b = bits.Sub64(z[0], 725501752471715841, 0) + z[1], b = bits.Sub64(z[1], 6461107452199829505, b) + z[2], b = bits.Sub64(z[2], 6968279316240510977, b) + z[3], _ = bits.Sub64(z[3], 1345280370688173398, b) + } +} + func (z *Element) neg(x *Element, xHi uint64) uint64 { var b uint64 diff --git a/ecc/bls12-377/fr/element_ops_noasm.go b/ecc/bls12-377/fr/element_ops_noasm.go index ec1fac18d6..7891ac9cd3 100644 --- a/ecc/bls12-377/fr/element_ops_noasm.go +++ b/ecc/bls12-377/fr/element_ops_noasm.go @@ -46,7 +46,6 @@ func MulBy13(x *Element) { func Butterfly(a, b *Element) { _butterflyGeneric(a, b) } - func mul(z, x, y *Element) { _mulGeneric(z, x, y) } diff --git a/ecc/bls12-377/fr/element_test.go b/ecc/bls12-377/fr/element_test.go index eb8cb933c1..bee69cc461 100644 --- a/ecc/bls12-377/fr/element_test.go +++ b/ecc/bls12-377/fr/element_test.go @@ -22,10 +22,12 @@ import ( "fmt" "math/big" "math/bits" + + "github.com/consensys/gnark-crypto/field" mrand "math/rand" + "testing" - "github.com/consensys/gnark-crypto/field" "github.com/leanovate/gopter" ggen "github.com/leanovate/gopter/gen" "github.com/leanovate/gopter/prop" @@ -320,7 +322,6 @@ func init() { a[3]-- staticTestValues = append(staticTestValues, a) } - { a := qElement a[3]-- @@ -334,6 +335,12 @@ func 
init() { staticTestValues = append(staticTestValues, a) } + { + a := qElement + a[3] = 0 + staticTestValues = append(staticTestValues, a) + } + } func TestElementReduce(t *testing.T) { @@ -471,7 +478,6 @@ func TestElementInverseExp(t *testing.T) { properties = gopter.NewProperties(parameters) properties.Property("inv(0) == 0", prop.ForAll(invMatchExp, ggen.OneConstOf(testPairElement{}))) properties.TestingRun(t, gopter.ConsoleReporter(false)) - // if we have ADX instruction enabled, test both path in assembly if supportAdx { t.Log("disabling ADX") @@ -1418,8 +1424,8 @@ func TestElementSquare(t *testing.T) { specialValueTest() // if we have ADX instruction enabled, test both path in assembly if supportAdx { - supportAdx = false t.Log("disabling ADX") + supportAdx = false properties.TestingRun(t, gopter.ConsoleReporter(false)) specialValueTest() supportAdx = true @@ -1497,8 +1503,8 @@ func TestElementInverse(t *testing.T) { specialValueTest() // if we have ADX instruction enabled, test both path in assembly if supportAdx { - supportAdx = false t.Log("disabling ADX") + supportAdx = false properties.TestingRun(t, gopter.ConsoleReporter(false)) specialValueTest() supportAdx = true @@ -1576,8 +1582,8 @@ func TestElementSqrt(t *testing.T) { specialValueTest() // if we have ADX instruction enabled, test both path in assembly if supportAdx { - supportAdx = false t.Log("disabling ADX") + supportAdx = false properties.TestingRun(t, gopter.ConsoleReporter(false)) specialValueTest() supportAdx = true @@ -1672,8 +1678,8 @@ func TestElementDouble(t *testing.T) { specialValueTest() // if we have ADX instruction enabled, test both path in assembly if supportAdx { - supportAdx = false t.Log("disabling ADX") + supportAdx = false properties.TestingRun(t, gopter.ConsoleReporter(false)) specialValueTest() supportAdx = true @@ -1768,8 +1774,8 @@ func TestElementNeg(t *testing.T) { specialValueTest() // if we have ADX instruction enabled, test both path in assembly if supportAdx { - 
supportAdx = false t.Log("disabling ADX") + supportAdx = false properties.TestingRun(t, gopter.ConsoleReporter(false)) specialValueTest() supportAdx = true @@ -2104,6 +2110,134 @@ func TestElementSetInterface(t *testing.T) { properties.TestingRun(t, gopter.ConsoleReporter(false)) } +func TestElementNegativeExp(t *testing.T) { + t.Parallel() + + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := gen() + + properties.Property("x⁻ᵏ == 1/xᵏ", prop.ForAll( + func(a, b testPairElement) bool { + + var nb, d, e big.Int + nb.Neg(&b.bigint) + + var c Element + c.Exp(a.element, &nb) + + d.Exp(&a.bigint, &nb, Modulus()) + + return c.FromMont().ToBigInt(&e).Cmp(&d) == 0 + }, + genA, genA, + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) +} + +func TestElementBatchInvert(t *testing.T) { + assert := require.New(t) + + t.Parallel() + + // ensure batchInvert([x]) == invert(x) + for i := int64(-1); i <= 2; i++ { + var e, eInv Element + e.SetInt64(i) + eInv.Inverse(&e) + + a := []Element{e} + aInv := BatchInvert(a) + + assert.True(aInv[0].Equal(&eInv), "batchInvert != invert") + + } + + // test x * x⁻¹ == 1 + tData := [][]int64{ + {-1, 1, 2, 3}, + {0, -1, 1, 2, 3, 0}, + {0, -1, 1, 0, 2, 3, 0}, + {-1, 1, 0, 2, 3}, + {0, 0, 1}, + {1, 0, 0}, + {0, 0, 0}, + } + + for _, t := range tData { + a := make([]Element, len(t)) + for i := 0; i < len(a); i++ { + a[i].SetInt64(t[i]) + } + + aInv := BatchInvert(a) + + assert.True(len(aInv) == len(a)) + + for i := 0; i < len(a); i++ { + if a[i].IsZero() { + assert.True(aInv[i].IsZero(), "0⁻¹ != 0") + } else { + assert.True(a[i].Mul(&a[i], &aInv[i]).IsOne(), "x * x⁻¹ != 1") + } + } + } + + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } 
+ + properties := gopter.NewProperties(parameters) + + genA := gen() + + properties.Property("batchInvert --> x * x⁻¹ == 1", prop.ForAll( + func(tp testPairElement, r uint8) bool { + + a := make([]Element, r) + if r != 0 { + a[0] = tp.element + + } + one := One() + for i := 1; i < len(a); i++ { + a[i].Add(&a[i-1], &one) + } + + aInv := BatchInvert(a) + + assert.True(len(aInv) == len(a)) + + for i := 0; i < len(a); i++ { + if a[i].IsZero() { + if !aInv[i].IsZero() { + return false + } + } else { + if !a[i].Mul(&a[i], &aInv[i]).IsOne() { + return false + } + } + } + return true + }, + genA, ggen.UInt8(), + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) +} + func TestElementFromMont(t *testing.T) { t.Parallel() @@ -2286,8 +2420,6 @@ func genFull() gopter.Gen { } } -// Some utils - func (z *Element) matchVeryBigInt(aHi uint64, aInt *big.Int) error { var modulus big.Int var aIntMod big.Int diff --git a/ecc/bls12-377/fr/mimc/mimc.go b/ecc/bls12-377/fr/mimc/mimc.go index 2fd03ea0c1..17ebc3012b 100644 --- a/ecc/bls12-377/fr/mimc/mimc.go +++ b/ecc/bls12-377/fr/mimc/mimc.go @@ -64,7 +64,7 @@ func NewMiMC() hash.Hash { // Reset resets the Hash to its initial state. func (d *digest) Reset() { d.data = nil - d.h = fr.Element{0, 0, 0, 0} + d.h.SetZero() } // Sum appends the current hash to b and returns the resulting slice. diff --git a/ecc/bls12-378/fp/element.go b/ecc/bls12-378/fp/element.go index 606c11e56c..9c1511654b 100644 --- a/ecc/bls12-378/fp/element.go +++ b/ecc/bls12-378/fp/element.go @@ -79,9 +79,6 @@ var qElement = Element{ qElementWord5, } -// Used for Montgomery reduction. 
(qInvNeg) q + r'.r = 1, i.e., qInvNeg = - q⁻¹ mod r -const qInvNegLsw uint64 = 11045256207009841151 - // rSquare var rSquare = Element{ 13541478318970833666, @@ -99,7 +96,8 @@ var bigIntPool = sync.Pool{ } func init() { - _modulus.SetString("605248206075306171733248481581800960739847691770924913753520744034740935903401304776283802348837311170974282940417", 10) + // base10: 605248206075306171733248481581800960739847691770924913753520744034740935903401304776283802348837311170974282940417 + _modulus.SetString("3eeb0416684d19053cb5d240ed107a284059eb647102326980dc360d0a49d7fce97f76a822c00009948a20000000001", 16) } // NewElement returns a new Element from a uint64 value @@ -353,7 +351,7 @@ func (z *Element) SetRandom() (*Element, error) { z[5] = binary.BigEndian.Uint64(bytes[40:48]) z[5] %= 283357621510263184 - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[5] < 283357621510263184 || (z[5] == 283357621510263184 && (z[4] < 6038022134869067682 || (z[4] == 6038022134869067682 && (z[3] < 9513184293603517222 || (z[3] == 9513184293603517222 && (z[2] < 10956628289047010687 || (z[2] == 10956628289047010687 && (z[1] < 14886639130118979584 || (z[1] == 14886639130118979584 && (z[0] < 11045256207009841153))))))))))) { var b uint64 @@ -377,10 +375,10 @@ func One() Element { // Halve sets z to z / 2 (mod p) func (z *Element) Halve() { - if z[0]&1 == 1 { + var carry uint64 + if z[0]&1 == 1 { // z = z + q - var carry uint64 z[0], carry = bits.Add64(z[0], 11045256207009841153, 0) z[1], carry = bits.Add64(z[1], 14886639130118979584, carry) z[2], carry = bits.Add64(z[2], 10956628289047010687, carry) @@ -389,9 +387,7 @@ func (z *Element) Halve() { z[5], _ = bits.Add64(z[5], 283357621510263184, carry) } - // z = z >> 1 - z[0] = z[0]>>1 | z[1]<<63 z[1] = z[1]>>1 | z[2]<<63 z[2] = z[2]>>1 | z[3]<<63 @@ -401,8 +397,6 @@ func (z *Element) Halve() { } -// API with assembly impl - // Mul z = x * y mod q // see https://hackmd.io/@gnark/modular_multiplication 
func (z *Element) Mul(x, y *Element) *Element { @@ -570,7 +564,7 @@ func _mulGeneric(z, x, y *Element) { z[5], z[4] = madd3(m, 283357621510263184, c[0], c[2], c[1]) } - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[5] < 283357621510263184 || (z[5] == 283357621510263184 && (z[4] < 6038022134869067682 || (z[4] == 6038022134869067682 && (z[3] < 9513184293603517222 || (z[3] == 9513184293603517222 && (z[2] < 10956628289047010687 || (z[2] == 10956628289047010687 && (z[1] < 14886639130118979584 || (z[1] == 14886639130118979584 && (z[0] < 11045256207009841153))))))))))) { var b uint64 @@ -581,89 +575,7 @@ func _mulGeneric(z, x, y *Element) { z[4], b = bits.Sub64(z[4], 6038022134869067682, b) z[5], _ = bits.Sub64(z[5], 283357621510263184, b) } -} - -func _mulWGeneric(z, x *Element, y uint64) { - - var t [6]uint64 - { - // round 0 - c1, c0 := bits.Mul64(y, x[0]) - m := c0 * 11045256207009841151 - c2 := madd0(m, 11045256207009841153, c0) - c1, c0 = madd1(y, x[1], c1) - c2, t[0] = madd2(m, 14886639130118979584, c2, c0) - c1, c0 = madd1(y, x[2], c1) - c2, t[1] = madd2(m, 10956628289047010687, c2, c0) - c1, c0 = madd1(y, x[3], c1) - c2, t[2] = madd2(m, 9513184293603517222, c2, c0) - c1, c0 = madd1(y, x[4], c1) - c2, t[3] = madd2(m, 6038022134869067682, c2, c0) - c1, c0 = madd1(y, x[5], c1) - t[5], t[4] = madd3(m, 283357621510263184, c0, c2, c1) - } - { - // round 1 - m := t[0] * 11045256207009841151 - c2 := madd0(m, 11045256207009841153, t[0]) - c2, t[0] = madd2(m, 14886639130118979584, c2, t[1]) - c2, t[1] = madd2(m, 10956628289047010687, c2, t[2]) - c2, t[2] = madd2(m, 9513184293603517222, c2, t[3]) - c2, t[3] = madd2(m, 6038022134869067682, c2, t[4]) - t[5], t[4] = madd2(m, 283357621510263184, t[5], c2) - } - { - // round 2 - m := t[0] * 11045256207009841151 - c2 := madd0(m, 11045256207009841153, t[0]) - c2, t[0] = madd2(m, 14886639130118979584, c2, t[1]) - c2, t[1] = madd2(m, 10956628289047010687, c2, t[2]) - c2, t[2] = madd2(m, 
9513184293603517222, c2, t[3]) - c2, t[3] = madd2(m, 6038022134869067682, c2, t[4]) - t[5], t[4] = madd2(m, 283357621510263184, t[5], c2) - } - { - // round 3 - m := t[0] * 11045256207009841151 - c2 := madd0(m, 11045256207009841153, t[0]) - c2, t[0] = madd2(m, 14886639130118979584, c2, t[1]) - c2, t[1] = madd2(m, 10956628289047010687, c2, t[2]) - c2, t[2] = madd2(m, 9513184293603517222, c2, t[3]) - c2, t[3] = madd2(m, 6038022134869067682, c2, t[4]) - t[5], t[4] = madd2(m, 283357621510263184, t[5], c2) - } - { - // round 4 - m := t[0] * 11045256207009841151 - c2 := madd0(m, 11045256207009841153, t[0]) - c2, t[0] = madd2(m, 14886639130118979584, c2, t[1]) - c2, t[1] = madd2(m, 10956628289047010687, c2, t[2]) - c2, t[2] = madd2(m, 9513184293603517222, c2, t[3]) - c2, t[3] = madd2(m, 6038022134869067682, c2, t[4]) - t[5], t[4] = madd2(m, 283357621510263184, t[5], c2) - } - { - // round 5 - m := t[0] * 11045256207009841151 - c2 := madd0(m, 11045256207009841153, t[0]) - c2, z[0] = madd2(m, 14886639130118979584, c2, t[1]) - c2, z[1] = madd2(m, 10956628289047010687, c2, t[2]) - c2, z[2] = madd2(m, 9513184293603517222, c2, t[3]) - c2, z[3] = madd2(m, 6038022134869067682, c2, t[4]) - z[5], z[4] = madd2(m, 283357621510263184, t[5], c2) - } - // if z > q → z -= q - // note: this is NOT constant time - if !(z[5] < 283357621510263184 || (z[5] == 283357621510263184 && (z[4] < 6038022134869067682 || (z[4] == 6038022134869067682 && (z[3] < 9513184293603517222 || (z[3] == 9513184293603517222 && (z[2] < 10956628289047010687 || (z[2] == 10956628289047010687 && (z[1] < 14886639130118979584 || (z[1] == 14886639130118979584 && (z[0] < 11045256207009841153))))))))))) { - var b uint64 - z[0], b = bits.Sub64(z[0], 11045256207009841153, 0) - z[1], b = bits.Sub64(z[1], 14886639130118979584, b) - z[2], b = bits.Sub64(z[2], 10956628289047010687, b) - z[3], b = bits.Sub64(z[3], 9513184293603517222, b) - z[4], b = bits.Sub64(z[4], 6038022134869067682, b) - z[5], _ = bits.Sub64(z[5], 
283357621510263184, b) - } } func _fromMontGeneric(z *Element) { @@ -736,7 +648,7 @@ func _fromMontGeneric(z *Element) { z[5] = C } - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[5] < 283357621510263184 || (z[5] == 283357621510263184 && (z[4] < 6038022134869067682 || (z[4] == 6038022134869067682 && (z[3] < 9513184293603517222 || (z[3] == 9513184293603517222 && (z[2] < 10956628289047010687 || (z[2] == 10956628289047010687 && (z[1] < 14886639130118979584 || (z[1] == 14886639130118979584 && (z[0] < 11045256207009841153))))))))))) { var b uint64 @@ -759,7 +671,7 @@ func _addGeneric(z, x, y *Element) { z[4], carry = bits.Add64(x[4], y[4], carry) z[5], _ = bits.Add64(x[5], y[5], carry) - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[5] < 283357621510263184 || (z[5] == 283357621510263184 && (z[4] < 6038022134869067682 || (z[4] == 6038022134869067682 && (z[3] < 9513184293603517222 || (z[3] == 9513184293603517222 && (z[2] < 10956628289047010687 || (z[2] == 10956628289047010687 && (z[1] < 14886639130118979584 || (z[1] == 14886639130118979584 && (z[0] < 11045256207009841153))))))))))) { var b uint64 @@ -782,7 +694,7 @@ func _doubleGeneric(z, x *Element) { z[4], carry = bits.Add64(x[4], x[4], carry) z[5], _ = bits.Add64(x[5], x[5], carry) - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[5] < 283357621510263184 || (z[5] == 283357621510263184 && (z[4] < 6038022134869067682 || (z[4] == 6038022134869067682 && (z[3] < 9513184293603517222 || (z[3] == 9513184293603517222 && (z[2] < 10956628289047010687 || (z[2] == 10956628289047010687 && (z[1] < 14886639130118979584 || (z[1] == 14886639130118979584 && (z[0] < 11045256207009841153))))))))))) { var b uint64 @@ -830,7 +742,7 @@ func _negGeneric(z, x *Element) { func _reduceGeneric(z *Element) { - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[5] < 283357621510263184 || (z[5] 
== 283357621510263184 && (z[4] < 6038022134869067682 || (z[4] == 6038022134869067682 && (z[3] < 9513184293603517222 || (z[3] == 9513184293603517222 && (z[2] < 10956628289047010687 || (z[2] == 10956628289047010687 && (z[1] < 14886639130118979584 || (z[1] == 14886639130118979584 && (z[0] < 11045256207009841153))))))))))) { var b uint64 @@ -929,18 +841,30 @@ func (z *Element) BitLen() int { return bits.Len64(z[0]) } -// Exp z = x^exponent mod q -func (z *Element) Exp(x Element, exponent *big.Int) *Element { - var bZero big.Int - if exponent.Cmp(&bZero) == 0 { +// Exp z = xᵏ mod q +func (z *Element) Exp(x Element, k *big.Int) *Element { + if k.IsUint64() && k.Uint64() == 0 { return z.SetOne() } + e := k + if k.Sign() == -1 { + // negative k, we invert + // if k < 0: xᵏ mod q == (x⁻¹)ᵏ mod q + x.Inverse(&x) + + // we negate k in a temp big.Int since + // Int.Bit(_) of k and -k is different + e = bigIntPool.Get().(*big.Int) + defer bigIntPool.Put(e) + e.Neg(k) + } + z.Set(&x) - for i := exponent.BitLen() - 2; i >= 0; i-- { + for i := e.BitLen() - 2; i >= 0; i-- { z.Square(z) - if exponent.Bit(i) == 1 { + if e.Bit(i) == 1 { z.Mul(z, &x) } } @@ -1470,14 +1394,10 @@ func (z *Element) Inverse(x *Element) *Element { return z } -var qMinusTwo *big.Int //test routines can set this to an incorrect value to fail whenever inverseExp was triggered - -// inverseExp is a fallback in case the inversion algorithm failed +// inverseExp computes z = x⁻¹ mod q = x**(q-2) mod q func (z *Element) inverseExp(x *Element) *Element { - if qMinusTwo == nil { - qMinusTwo = Modulus() - qMinusTwo.Sub(qMinusTwo, big.NewInt(2)) - } + qMinusTwo := Modulus() + qMinusTwo.Sub(qMinusTwo, big.NewInt(2)) return z.Exp(*x, qMinusTwo) } @@ -1519,6 +1439,8 @@ func (z *Element) linearComb(x *Element, xC int64, y *Element, yC int64) { // montReduceSigned z = (xHi * r + x) * r⁻¹ using the SOS algorithm // Requires |xHi| < 2⁶³. Most significant bit of xHi is the sign bit. 
func (z *Element) montReduceSigned(x *Element, xHi uint64) { + // Used for Montgomery reduction. (qInvNeg) q + r'.r = 1, i.e., qInvNeg = - q⁻¹ mod r + const qInvNegLsw uint64 = 11045256207009841151 const signBitRemover = ^signBitSelector neg := xHi&signBitSelector != 0 @@ -1612,7 +1534,7 @@ func (z *Element) montReduceSigned(x *Element, xHi uint64) { z[5], z[4] = madd2(m, qElementWord5, t[i+5], C) } - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[5] < 283357621510263184 || (z[5] == 283357621510263184 && (z[4] < 6038022134869067682 || (z[4] == 6038022134869067682 && (z[3] < 9513184293603517222 || (z[3] == 9513184293603517222 && (z[2] < 10956628289047010687 || (z[2] == 10956628289047010687 && (z[1] < 14886639130118979584 || (z[1] == 14886639130118979584 && (z[0] < 11045256207009841153))))))))))) { var b uint64 @@ -1670,7 +1592,7 @@ func (z *Element) montReduceSignedSimpleButSlow(x *Element, xHi uint64) { z[4], c = bits.Add64(z[4], 0, c) z[5], _ = bits.Add64(z[5], 0, c) - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[5] < 283357621510263184 || (z[5] == 283357621510263184 && (z[4] < 6038022134869067682 || (z[4] == 6038022134869067682 && (z[3] < 9513184293603517222 || (z[3] == 9513184293603517222 && (z[2] < 10956628289047010687 || (z[2] == 10956628289047010687 && (z[1] < 14886639130118979584 || (z[1] == 14886639130118979584 && (z[0] < 11045256207009841153))))))))))) { var b uint64 @@ -1721,6 +1643,89 @@ func (z *Element) mulWSigned(x *Element, y int64) { } } +func _mulWGeneric(z, x *Element, y uint64) { + + var t [6]uint64 + { + // round 0 + c1, c0 := bits.Mul64(y, x[0]) + m := c0 * 11045256207009841151 + c2 := madd0(m, 11045256207009841153, c0) + c1, c0 = madd1(y, x[1], c1) + c2, t[0] = madd2(m, 14886639130118979584, c2, c0) + c1, c0 = madd1(y, x[2], c1) + c2, t[1] = madd2(m, 10956628289047010687, c2, c0) + c1, c0 = madd1(y, x[3], c1) + c2, t[2] = madd2(m, 9513184293603517222, c2, c0) + 
c1, c0 = madd1(y, x[4], c1) + c2, t[3] = madd2(m, 6038022134869067682, c2, c0) + c1, c0 = madd1(y, x[5], c1) + t[5], t[4] = madd3(m, 283357621510263184, c0, c2, c1) + } + { + // round 1 + m := t[0] * 11045256207009841151 + c2 := madd0(m, 11045256207009841153, t[0]) + c2, t[0] = madd2(m, 14886639130118979584, c2, t[1]) + c2, t[1] = madd2(m, 10956628289047010687, c2, t[2]) + c2, t[2] = madd2(m, 9513184293603517222, c2, t[3]) + c2, t[3] = madd2(m, 6038022134869067682, c2, t[4]) + t[5], t[4] = madd2(m, 283357621510263184, t[5], c2) + } + { + // round 2 + m := t[0] * 11045256207009841151 + c2 := madd0(m, 11045256207009841153, t[0]) + c2, t[0] = madd2(m, 14886639130118979584, c2, t[1]) + c2, t[1] = madd2(m, 10956628289047010687, c2, t[2]) + c2, t[2] = madd2(m, 9513184293603517222, c2, t[3]) + c2, t[3] = madd2(m, 6038022134869067682, c2, t[4]) + t[5], t[4] = madd2(m, 283357621510263184, t[5], c2) + } + { + // round 3 + m := t[0] * 11045256207009841151 + c2 := madd0(m, 11045256207009841153, t[0]) + c2, t[0] = madd2(m, 14886639130118979584, c2, t[1]) + c2, t[1] = madd2(m, 10956628289047010687, c2, t[2]) + c2, t[2] = madd2(m, 9513184293603517222, c2, t[3]) + c2, t[3] = madd2(m, 6038022134869067682, c2, t[4]) + t[5], t[4] = madd2(m, 283357621510263184, t[5], c2) + } + { + // round 4 + m := t[0] * 11045256207009841151 + c2 := madd0(m, 11045256207009841153, t[0]) + c2, t[0] = madd2(m, 14886639130118979584, c2, t[1]) + c2, t[1] = madd2(m, 10956628289047010687, c2, t[2]) + c2, t[2] = madd2(m, 9513184293603517222, c2, t[3]) + c2, t[3] = madd2(m, 6038022134869067682, c2, t[4]) + t[5], t[4] = madd2(m, 283357621510263184, t[5], c2) + } + { + // round 5 + m := t[0] * 11045256207009841151 + c2 := madd0(m, 11045256207009841153, t[0]) + c2, z[0] = madd2(m, 14886639130118979584, c2, t[1]) + c2, z[1] = madd2(m, 10956628289047010687, c2, t[2]) + c2, z[2] = madd2(m, 9513184293603517222, c2, t[3]) + c2, z[3] = madd2(m, 6038022134869067682, c2, t[4]) + z[5], z[4] = madd2(m, 283357621510263184, 
t[5], c2) + } + + // if z >= q → z -= q + // note: this is NOT constant time + if !(z[5] < 283357621510263184 || (z[5] == 283357621510263184 && (z[4] < 6038022134869067682 || (z[4] == 6038022134869067682 && (z[3] < 9513184293603517222 || (z[3] == 9513184293603517222 && (z[2] < 10956628289047010687 || (z[2] == 10956628289047010687 && (z[1] < 14886639130118979584 || (z[1] == 14886639130118979584 && (z[0] < 11045256207009841153))))))))))) { + var b uint64 + z[0], b = bits.Sub64(z[0], 11045256207009841153, 0) + z[1], b = bits.Sub64(z[1], 14886639130118979584, b) + z[2], b = bits.Sub64(z[2], 10956628289047010687, b) + z[3], b = bits.Sub64(z[3], 9513184293603517222, b) + z[4], b = bits.Sub64(z[4], 6038022134869067682, b) + z[5], _ = bits.Sub64(z[5], 283357621510263184, b) + } +} + func (z *Element) neg(x *Element, xHi uint64) uint64 { var b uint64 diff --git a/ecc/bls12-378/fp/element_ops_noasm.go b/ecc/bls12-378/fp/element_ops_noasm.go index fec6289183..7d02396a3f 100644 --- a/ecc/bls12-378/fp/element_ops_noasm.go +++ b/ecc/bls12-378/fp/element_ops_noasm.go @@ -46,7 +46,6 @@ func MulBy13(x *Element) { func Butterfly(a, b *Element) { _butterflyGeneric(a, b) } - func mul(z, x, y *Element) { _mulGeneric(z, x, y) } diff --git a/ecc/bls12-378/fp/element_test.go b/ecc/bls12-378/fp/element_test.go index b8c990baee..23c644212a 100644 --- a/ecc/bls12-378/fp/element_test.go +++ b/ecc/bls12-378/fp/element_test.go @@ -22,10 +22,12 @@ import ( "fmt" "math/big" "math/bits" + + "github.com/consensys/gnark-crypto/field" mrand "math/rand" + "testing" - "github.com/consensys/gnark-crypto/field" "github.com/leanovate/gopter" ggen "github.com/leanovate/gopter/gen" "github.com/leanovate/gopter/prop" @@ -324,7 +326,6 @@ func init() { a[5]-- staticTestValues = append(staticTestValues, a) } - { a := qElement a[5]-- @@ -338,6 +339,12 @@ func init() { staticTestValues = append(staticTestValues, a) } + { + a := qElement + a[5] = 0 + staticTestValues = append(staticTestValues, a) + } + } func 
TestElementReduce(t *testing.T) { @@ -475,7 +482,6 @@ func TestElementInverseExp(t *testing.T) { properties = gopter.NewProperties(parameters) properties.Property("inv(0) == 0", prop.ForAll(invMatchExp, ggen.OneConstOf(testPairElement{}))) properties.TestingRun(t, gopter.ConsoleReporter(false)) - // if we have ADX instruction enabled, test both path in assembly if supportAdx { t.Log("disabling ADX") @@ -1422,8 +1428,8 @@ func TestElementSquare(t *testing.T) { specialValueTest() // if we have ADX instruction enabled, test both path in assembly if supportAdx { - supportAdx = false t.Log("disabling ADX") + supportAdx = false properties.TestingRun(t, gopter.ConsoleReporter(false)) specialValueTest() supportAdx = true @@ -1501,8 +1507,8 @@ func TestElementInverse(t *testing.T) { specialValueTest() // if we have ADX instruction enabled, test both path in assembly if supportAdx { - supportAdx = false t.Log("disabling ADX") + supportAdx = false properties.TestingRun(t, gopter.ConsoleReporter(false)) specialValueTest() supportAdx = true @@ -1580,8 +1586,8 @@ func TestElementSqrt(t *testing.T) { specialValueTest() // if we have ADX instruction enabled, test both path in assembly if supportAdx { - supportAdx = false t.Log("disabling ADX") + supportAdx = false properties.TestingRun(t, gopter.ConsoleReporter(false)) specialValueTest() supportAdx = true @@ -1676,8 +1682,8 @@ func TestElementDouble(t *testing.T) { specialValueTest() // if we have ADX instruction enabled, test both path in assembly if supportAdx { - supportAdx = false t.Log("disabling ADX") + supportAdx = false properties.TestingRun(t, gopter.ConsoleReporter(false)) specialValueTest() supportAdx = true @@ -1772,8 +1778,8 @@ func TestElementNeg(t *testing.T) { specialValueTest() // if we have ADX instruction enabled, test both path in assembly if supportAdx { - supportAdx = false t.Log("disabling ADX") + supportAdx = false properties.TestingRun(t, gopter.ConsoleReporter(false)) specialValueTest() supportAdx = true 
@@ -2108,6 +2114,134 @@ func TestElementSetInterface(t *testing.T) { properties.TestingRun(t, gopter.ConsoleReporter(false)) } +func TestElementNegativeExp(t *testing.T) { + t.Parallel() + + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := gen() + + properties.Property("x⁻ᵏ == 1/xᵏ", prop.ForAll( + func(a, b testPairElement) bool { + + var nb, d, e big.Int + nb.Neg(&b.bigint) + + var c Element + c.Exp(a.element, &nb) + + d.Exp(&a.bigint, &nb, Modulus()) + + return c.FromMont().ToBigInt(&e).Cmp(&d) == 0 + }, + genA, genA, + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) +} + +func TestElementBatchInvert(t *testing.T) { + assert := require.New(t) + + t.Parallel() + + // ensure batchInvert([x]) == invert(x) + for i := int64(-1); i <= 2; i++ { + var e, eInv Element + e.SetInt64(i) + eInv.Inverse(&e) + + a := []Element{e} + aInv := BatchInvert(a) + + assert.True(aInv[0].Equal(&eInv), "batchInvert != invert") + + } + + // test x * x⁻¹ == 1 + tData := [][]int64{ + {-1, 1, 2, 3}, + {0, -1, 1, 2, 3, 0}, + {0, -1, 1, 0, 2, 3, 0}, + {-1, 1, 0, 2, 3}, + {0, 0, 1}, + {1, 0, 0}, + {0, 0, 0}, + } + + for _, t := range tData { + a := make([]Element, len(t)) + for i := 0; i < len(a); i++ { + a[i].SetInt64(t[i]) + } + + aInv := BatchInvert(a) + + assert.True(len(aInv) == len(a)) + + for i := 0; i < len(a); i++ { + if a[i].IsZero() { + assert.True(aInv[i].IsZero(), "0⁻¹ != 0") + } else { + assert.True(a[i].Mul(&a[i], &aInv[i]).IsOne(), "x * x⁻¹ != 1") + } + } + } + + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := gen() + + properties.Property("batchInvert --> x * x⁻¹ == 1", prop.ForAll( + func(tp 
testPairElement, r uint8) bool { + + a := make([]Element, r) + if r != 0 { + a[0] = tp.element + + } + one := One() + for i := 1; i < len(a); i++ { + a[i].Add(&a[i-1], &one) + } + + aInv := BatchInvert(a) + + assert.True(len(aInv) == len(a)) + + for i := 0; i < len(a); i++ { + if a[i].IsZero() { + if !aInv[i].IsZero() { + return false + } + } else { + if !a[i].Mul(&a[i], &aInv[i]).IsOne() { + return false + } + } + } + return true + }, + genA, ggen.UInt8(), + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) +} + func TestElementFromMont(t *testing.T) { t.Parallel() @@ -2314,8 +2448,6 @@ func genFull() gopter.Gen { } } -// Some utils - func (z *Element) matchVeryBigInt(aHi uint64, aInt *big.Int) error { var modulus big.Int var aIntMod big.Int diff --git a/ecc/bls12-378/fr/element.go b/ecc/bls12-378/fr/element.go index f81a0240fe..8740f0369e 100644 --- a/ecc/bls12-378/fr/element.go +++ b/ecc/bls12-378/fr/element.go @@ -75,9 +75,6 @@ var qElement = Element{ qElementWord3, } -// Used for Montgomery reduction. 
(qInvNeg) q + r'.r = 1, i.e., qInvNeg = - q⁻¹ mod r -const qInvNegLsw uint64 = 3643768340310130687 - // rSquare var rSquare = Element{ 1260465344847950704, @@ -93,7 +90,8 @@ var bigIntPool = sync.Pool{ } func init() { - _modulus.SetString("14883435066912132899950318861128167269793560281114003360875131245101026639873", 10) + // base10: 14883435066912132899950318861128167269793560281114003360875131245101026639873 + _modulus.SetString("20e7b9c8ef7b2eb187787fb4e3dbb0ffeae77f3da09400013291440000000001", 16) } // NewElement returns a new Element from a uint64 value @@ -327,7 +325,7 @@ func (z *Element) SetRandom() (*Element, error) { z[3] = binary.BigEndian.Uint64(bytes[24:32]) z[3] %= 2371068001496280753 - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[3] < 2371068001496280753 || (z[3] == 2371068001496280753 && (z[2] < 9761692607219216639 || (z[2] == 9761692607219216639 && (z[1] < 16926637627159085057 || (z[1] == 16926637627159085057 && (z[0] < 3643768340310130689))))))) { var b uint64 @@ -349,19 +347,17 @@ func One() Element { // Halve sets z to z / 2 (mod p) func (z *Element) Halve() { - if z[0]&1 == 1 { + var carry uint64 + if z[0]&1 == 1 { // z = z + q - var carry uint64 z[0], carry = bits.Add64(z[0], 3643768340310130689, 0) z[1], carry = bits.Add64(z[1], 16926637627159085057, carry) z[2], carry = bits.Add64(z[2], 9761692607219216639, carry) z[3], _ = bits.Add64(z[3], 2371068001496280753, carry) } - // z = z >> 1 - z[0] = z[0]>>1 | z[1]<<63 z[1] = z[1]>>1 | z[2]<<63 z[2] = z[2]>>1 | z[3]<<63 @@ -369,8 +365,6 @@ func (z *Element) Halve() { } -// API with assembly impl - // Mul z = x * y mod q // see https://hackmd.io/@gnark/modular_multiplication func (z *Element) Mul(x, y *Element) *Element { @@ -486,7 +480,7 @@ func _mulGeneric(z, x, y *Element) { z[3], z[2] = madd3(m, 2371068001496280753, c[0], c[2], c[1]) } - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[3] < 2371068001496280753 || 
(z[3] == 2371068001496280753 && (z[2] < 9761692607219216639 || (z[2] == 9761692607219216639 && (z[1] < 16926637627159085057 || (z[1] == 16926637627159085057 && (z[0] < 3643768340310130689))))))) { var b uint64 @@ -495,57 +489,7 @@ func _mulGeneric(z, x, y *Element) { z[2], b = bits.Sub64(z[2], 9761692607219216639, b) z[3], _ = bits.Sub64(z[3], 2371068001496280753, b) } -} - -func _mulWGeneric(z, x *Element, y uint64) { - - var t [4]uint64 - { - // round 0 - c1, c0 := bits.Mul64(y, x[0]) - m := c0 * 3643768340310130687 - c2 := madd0(m, 3643768340310130689, c0) - c1, c0 = madd1(y, x[1], c1) - c2, t[0] = madd2(m, 16926637627159085057, c2, c0) - c1, c0 = madd1(y, x[2], c1) - c2, t[1] = madd2(m, 9761692607219216639, c2, c0) - c1, c0 = madd1(y, x[3], c1) - t[3], t[2] = madd3(m, 2371068001496280753, c0, c2, c1) - } - { - // round 1 - m := t[0] * 3643768340310130687 - c2 := madd0(m, 3643768340310130689, t[0]) - c2, t[0] = madd2(m, 16926637627159085057, c2, t[1]) - c2, t[1] = madd2(m, 9761692607219216639, c2, t[2]) - t[3], t[2] = madd2(m, 2371068001496280753, t[3], c2) - } - { - // round 2 - m := t[0] * 3643768340310130687 - c2 := madd0(m, 3643768340310130689, t[0]) - c2, t[0] = madd2(m, 16926637627159085057, c2, t[1]) - c2, t[1] = madd2(m, 9761692607219216639, c2, t[2]) - t[3], t[2] = madd2(m, 2371068001496280753, t[3], c2) - } - { - // round 3 - m := t[0] * 3643768340310130687 - c2 := madd0(m, 3643768340310130689, t[0]) - c2, z[0] = madd2(m, 16926637627159085057, c2, t[1]) - c2, z[1] = madd2(m, 9761692607219216639, c2, t[2]) - z[3], z[2] = madd2(m, 2371068001496280753, t[3], c2) - } - // if z > q → z -= q - // note: this is NOT constant time - if !(z[3] < 2371068001496280753 || (z[3] == 2371068001496280753 && (z[2] < 9761692607219216639 || (z[2] == 9761692607219216639 && (z[1] < 16926637627159085057 || (z[1] == 16926637627159085057 && (z[0] < 3643768340310130689))))))) { - var b uint64 - z[0], b = bits.Sub64(z[0], 3643768340310130689, 0) - z[1], b = bits.Sub64(z[1], 
16926637627159085057, b) - z[2], b = bits.Sub64(z[2], 9761692607219216639, b) - z[3], _ = bits.Sub64(z[3], 2371068001496280753, b) - } } func _fromMontGeneric(z *Element) { @@ -588,7 +532,7 @@ func _fromMontGeneric(z *Element) { z[3] = C } - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[3] < 2371068001496280753 || (z[3] == 2371068001496280753 && (z[2] < 9761692607219216639 || (z[2] == 9761692607219216639 && (z[1] < 16926637627159085057 || (z[1] == 16926637627159085057 && (z[0] < 3643768340310130689))))))) { var b uint64 @@ -607,7 +551,7 @@ func _addGeneric(z, x, y *Element) { z[2], carry = bits.Add64(x[2], y[2], carry) z[3], _ = bits.Add64(x[3], y[3], carry) - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[3] < 2371068001496280753 || (z[3] == 2371068001496280753 && (z[2] < 9761692607219216639 || (z[2] == 9761692607219216639 && (z[1] < 16926637627159085057 || (z[1] == 16926637627159085057 && (z[0] < 3643768340310130689))))))) { var b uint64 @@ -626,7 +570,7 @@ func _doubleGeneric(z, x *Element) { z[2], carry = bits.Add64(x[2], x[2], carry) z[3], _ = bits.Add64(x[3], x[3], carry) - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[3] < 2371068001496280753 || (z[3] == 2371068001496280753 && (z[2] < 9761692607219216639 || (z[2] == 9761692607219216639 && (z[1] < 16926637627159085057 || (z[1] == 16926637627159085057 && (z[0] < 3643768340310130689))))))) { var b uint64 @@ -666,7 +610,7 @@ func _negGeneric(z, x *Element) { func _reduceGeneric(z *Element) { - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[3] < 2371068001496280753 || (z[3] == 2371068001496280753 && (z[2] < 9761692607219216639 || (z[2] == 9761692607219216639 && (z[1] < 16926637627159085057 || (z[1] == 16926637627159085057 && (z[0] < 3643768340310130689))))))) { var b uint64 @@ -757,18 +701,30 @@ func (z *Element) BitLen() int { return bits.Len64(z[0]) } 
-// Exp z = x^exponent mod q -func (z *Element) Exp(x Element, exponent *big.Int) *Element { - var bZero big.Int - if exponent.Cmp(&bZero) == 0 { +// Exp z = xᵏ mod q +func (z *Element) Exp(x Element, k *big.Int) *Element { + if k.IsUint64() && k.Uint64() == 0 { return z.SetOne() } + e := k + if k.Sign() == -1 { + // negative k, we invert + // if k < 0: xᵏ mod q == (x⁻¹)ᵏ mod q + x.Inverse(&x) + + // we negate k in a temp big.Int since + // Int.Bit(_) of k and -k is different + e = bigIntPool.Get().(*big.Int) + defer bigIntPool.Put(e) + e.Neg(k) + } + z.Set(&x) - for i := exponent.BitLen() - 2; i >= 0; i-- { + for i := e.BitLen() - 2; i >= 0; i-- { z.Square(z) - if exponent.Bit(i) == 1 { + if e.Bit(i) == 1 { z.Mul(z, &x) } } @@ -1282,14 +1238,10 @@ func (z *Element) Inverse(x *Element) *Element { return z } -var qMinusTwo *big.Int //test routines can set this to an incorrect value to fail whenever inverseExp was triggered - -// inverseExp is a fallback in case the inversion algorithm failed +// inverseExp computes z = x⁻¹ mod q = x**(q-2) mod q func (z *Element) inverseExp(x *Element) *Element { - if qMinusTwo == nil { - qMinusTwo = Modulus() - qMinusTwo.Sub(qMinusTwo, big.NewInt(2)) - } + qMinusTwo := Modulus() + qMinusTwo.Sub(qMinusTwo, big.NewInt(2)) return z.Exp(*x, qMinusTwo) } @@ -1331,6 +1283,8 @@ func (z *Element) linearComb(x *Element, xC int64, y *Element, yC int64) { // montReduceSigned z = (xHi * r + x) * r⁻¹ using the SOS algorithm // Requires |xHi| < 2⁶³. Most significant bit of xHi is the sign bit. func (z *Element) montReduceSigned(x *Element, xHi uint64) { + // Used for Montgomery reduction. 
(qInvNeg) q + r'.r = 1, i.e., qInvNeg = - q⁻¹ mod r + const qInvNegLsw uint64 = 3643768340310130687 const signBitRemover = ^signBitSelector neg := xHi&signBitSelector != 0 @@ -1390,7 +1344,7 @@ func (z *Element) montReduceSigned(x *Element, xHi uint64) { z[3], z[2] = madd2(m, qElementWord3, t[i+3], C) } - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[3] < 2371068001496280753 || (z[3] == 2371068001496280753 && (z[2] < 9761692607219216639 || (z[2] == 9761692607219216639 && (z[1] < 16926637627159085057 || (z[1] == 16926637627159085057 && (z[0] < 3643768340310130689))))))) { var b uint64 @@ -1440,7 +1394,7 @@ func (z *Element) montReduceSignedSimpleButSlow(x *Element, xHi uint64) { z[2], c = bits.Add64(z[2], 0, c) z[3], _ = bits.Add64(z[3], 0, c) - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[3] < 2371068001496280753 || (z[3] == 2371068001496280753 && (z[2] < 9761692607219216639 || (z[2] == 9761692607219216639 && (z[1] < 16926637627159085057 || (z[1] == 16926637627159085057 && (z[0] < 3643768340310130689))))))) { var b uint64 @@ -1485,6 +1439,57 @@ func (z *Element) mulWSigned(x *Element, y int64) { } } +func _mulWGeneric(z, x *Element, y uint64) { + + var t [4]uint64 + { + // round 0 + c1, c0 := bits.Mul64(y, x[0]) + m := c0 * 3643768340310130687 + c2 := madd0(m, 3643768340310130689, c0) + c1, c0 = madd1(y, x[1], c1) + c2, t[0] = madd2(m, 16926637627159085057, c2, c0) + c1, c0 = madd1(y, x[2], c1) + c2, t[1] = madd2(m, 9761692607219216639, c2, c0) + c1, c0 = madd1(y, x[3], c1) + t[3], t[2] = madd3(m, 2371068001496280753, c0, c2, c1) + } + { + // round 1 + m := t[0] * 3643768340310130687 + c2 := madd0(m, 3643768340310130689, t[0]) + c2, t[0] = madd2(m, 16926637627159085057, c2, t[1]) + c2, t[1] = madd2(m, 9761692607219216639, c2, t[2]) + t[3], t[2] = madd2(m, 2371068001496280753, t[3], c2) + } + { + // round 2 + m := t[0] * 3643768340310130687 + c2 := madd0(m, 3643768340310130689, t[0]) 
+ c2, t[0] = madd2(m, 16926637627159085057, c2, t[1]) + c2, t[1] = madd2(m, 9761692607219216639, c2, t[2]) + t[3], t[2] = madd2(m, 2371068001496280753, t[3], c2) + } + { + // round 3 + m := t[0] * 3643768340310130687 + c2 := madd0(m, 3643768340310130689, t[0]) + c2, z[0] = madd2(m, 16926637627159085057, c2, t[1]) + c2, z[1] = madd2(m, 9761692607219216639, c2, t[2]) + z[3], z[2] = madd2(m, 2371068001496280753, t[3], c2) + } + + // if z >= q → z -= q + // note: this is NOT constant time + if !(z[3] < 2371068001496280753 || (z[3] == 2371068001496280753 && (z[2] < 9761692607219216639 || (z[2] == 9761692607219216639 && (z[1] < 16926637627159085057 || (z[1] == 16926637627159085057 && (z[0] < 3643768340310130689))))))) { + var b uint64 + z[0], b = bits.Sub64(z[0], 3643768340310130689, 0) + z[1], b = bits.Sub64(z[1], 16926637627159085057, b) + z[2], b = bits.Sub64(z[2], 9761692607219216639, b) + z[3], _ = bits.Sub64(z[3], 2371068001496280753, b) + } +} + func (z *Element) neg(x *Element, xHi uint64) uint64 { var b uint64 diff --git a/ecc/bls12-378/fr/element_ops_noasm.go b/ecc/bls12-378/fr/element_ops_noasm.go index ec1fac18d6..7891ac9cd3 100644 --- a/ecc/bls12-378/fr/element_ops_noasm.go +++ b/ecc/bls12-378/fr/element_ops_noasm.go @@ -46,7 +46,6 @@ func MulBy13(x *Element) { func Butterfly(a, b *Element) { _butterflyGeneric(a, b) } - func mul(z, x, y *Element) { _mulGeneric(z, x, y) } diff --git a/ecc/bls12-378/fr/element_test.go b/ecc/bls12-378/fr/element_test.go index e31d934264..f9116a5e67 100644 --- a/ecc/bls12-378/fr/element_test.go +++ b/ecc/bls12-378/fr/element_test.go @@ -22,10 +22,12 @@ import ( "fmt" "math/big" "math/bits" + + "github.com/consensys/gnark-crypto/field" mrand "math/rand" + "testing" - "github.com/consensys/gnark-crypto/field" "github.com/leanovate/gopter" ggen "github.com/leanovate/gopter/gen" "github.com/leanovate/gopter/prop" @@ -320,7 +322,6 @@ func init() { a[3]-- staticTestValues = append(staticTestValues, a) } - { a := qElement a[3]-- @@ 
-334,6 +335,12 @@ func init() { staticTestValues = append(staticTestValues, a) } + { + a := qElement + a[3] = 0 + staticTestValues = append(staticTestValues, a) + } + } func TestElementReduce(t *testing.T) { @@ -471,7 +478,6 @@ func TestElementInverseExp(t *testing.T) { properties = gopter.NewProperties(parameters) properties.Property("inv(0) == 0", prop.ForAll(invMatchExp, ggen.OneConstOf(testPairElement{}))) properties.TestingRun(t, gopter.ConsoleReporter(false)) - // if we have ADX instruction enabled, test both path in assembly if supportAdx { t.Log("disabling ADX") @@ -1418,8 +1424,8 @@ func TestElementSquare(t *testing.T) { specialValueTest() // if we have ADX instruction enabled, test both path in assembly if supportAdx { - supportAdx = false t.Log("disabling ADX") + supportAdx = false properties.TestingRun(t, gopter.ConsoleReporter(false)) specialValueTest() supportAdx = true @@ -1497,8 +1503,8 @@ func TestElementInverse(t *testing.T) { specialValueTest() // if we have ADX instruction enabled, test both path in assembly if supportAdx { - supportAdx = false t.Log("disabling ADX") + supportAdx = false properties.TestingRun(t, gopter.ConsoleReporter(false)) specialValueTest() supportAdx = true @@ -1576,8 +1582,8 @@ func TestElementSqrt(t *testing.T) { specialValueTest() // if we have ADX instruction enabled, test both path in assembly if supportAdx { - supportAdx = false t.Log("disabling ADX") + supportAdx = false properties.TestingRun(t, gopter.ConsoleReporter(false)) specialValueTest() supportAdx = true @@ -1672,8 +1678,8 @@ func TestElementDouble(t *testing.T) { specialValueTest() // if we have ADX instruction enabled, test both path in assembly if supportAdx { - supportAdx = false t.Log("disabling ADX") + supportAdx = false properties.TestingRun(t, gopter.ConsoleReporter(false)) specialValueTest() supportAdx = true @@ -1768,8 +1774,8 @@ func TestElementNeg(t *testing.T) { specialValueTest() // if we have ADX instruction enabled, test both path in assembly 
if supportAdx { - supportAdx = false t.Log("disabling ADX") + supportAdx = false properties.TestingRun(t, gopter.ConsoleReporter(false)) specialValueTest() supportAdx = true @@ -2104,6 +2110,134 @@ func TestElementSetInterface(t *testing.T) { properties.TestingRun(t, gopter.ConsoleReporter(false)) } +func TestElementNegativeExp(t *testing.T) { + t.Parallel() + + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := gen() + + properties.Property("x⁻ᵏ == 1/xᵏ", prop.ForAll( + func(a, b testPairElement) bool { + + var nb, d, e big.Int + nb.Neg(&b.bigint) + + var c Element + c.Exp(a.element, &nb) + + d.Exp(&a.bigint, &nb, Modulus()) + + return c.FromMont().ToBigInt(&e).Cmp(&d) == 0 + }, + genA, genA, + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) +} + +func TestElementBatchInvert(t *testing.T) { + assert := require.New(t) + + t.Parallel() + + // ensure batchInvert([x]) == invert(x) + for i := int64(-1); i <= 2; i++ { + var e, eInv Element + e.SetInt64(i) + eInv.Inverse(&e) + + a := []Element{e} + aInv := BatchInvert(a) + + assert.True(aInv[0].Equal(&eInv), "batchInvert != invert") + + } + + // test x * x⁻¹ == 1 + tData := [][]int64{ + {-1, 1, 2, 3}, + {0, -1, 1, 2, 3, 0}, + {0, -1, 1, 0, 2, 3, 0}, + {-1, 1, 0, 2, 3}, + {0, 0, 1}, + {1, 0, 0}, + {0, 0, 0}, + } + + for _, t := range tData { + a := make([]Element, len(t)) + for i := 0; i < len(a); i++ { + a[i].SetInt64(t[i]) + } + + aInv := BatchInvert(a) + + assert.True(len(aInv) == len(a)) + + for i := 0; i < len(a); i++ { + if a[i].IsZero() { + assert.True(aInv[i].IsZero(), "0⁻¹ != 0") + } else { + assert.True(a[i].Mul(&a[i], &aInv[i]).IsOne(), "x * x⁻¹ != 1") + } + } + } + + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + 
parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := gen() + + properties.Property("batchInvert --> x * x⁻¹ == 1", prop.ForAll( + func(tp testPairElement, r uint8) bool { + + a := make([]Element, r) + if r != 0 { + a[0] = tp.element + + } + one := One() + for i := 1; i < len(a); i++ { + a[i].Add(&a[i-1], &one) + } + + aInv := BatchInvert(a) + + assert.True(len(aInv) == len(a)) + + for i := 0; i < len(a); i++ { + if a[i].IsZero() { + if !aInv[i].IsZero() { + return false + } + } else { + if !a[i].Mul(&a[i], &aInv[i]).IsOne() { + return false + } + } + } + return true + }, + genA, ggen.UInt8(), + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) +} + func TestElementFromMont(t *testing.T) { t.Parallel() @@ -2286,8 +2420,6 @@ func genFull() gopter.Gen { } } -// Some utils - func (z *Element) matchVeryBigInt(aHi uint64, aInt *big.Int) error { var modulus big.Int var aIntMod big.Int diff --git a/ecc/bls12-378/fr/mimc/mimc.go b/ecc/bls12-378/fr/mimc/mimc.go index 7d788e8f42..c16c943a55 100644 --- a/ecc/bls12-378/fr/mimc/mimc.go +++ b/ecc/bls12-378/fr/mimc/mimc.go @@ -64,7 +64,7 @@ func NewMiMC() hash.Hash { // Reset resets the Hash to its initial state. func (d *digest) Reset() { d.data = nil - d.h = fr.Element{0, 0, 0, 0} + d.h.SetZero() } // Sum appends the current hash to b and returns the resulting slice. diff --git a/ecc/bls12-381/fp/element.go b/ecc/bls12-381/fp/element.go index 0749b8f3cf..39c2035121 100644 --- a/ecc/bls12-381/fp/element.go +++ b/ecc/bls12-381/fp/element.go @@ -79,9 +79,6 @@ var qElement = Element{ qElementWord5, } -// Used for Montgomery reduction. 
(qInvNeg) q + r'.r = 1, i.e., qInvNeg = - q⁻¹ mod r -const qInvNegLsw uint64 = 9940570264628428797 - // rSquare var rSquare = Element{ 17644856173732828998, @@ -99,7 +96,8 @@ var bigIntPool = sync.Pool{ } func init() { - _modulus.SetString("4002409555221667393417789825735904156556882819939007885332058136124031650490837864442687629129015664037894272559787", 10) + // base10: 4002409555221667393417789825735904156556882819939007885332058136124031650490837864442687629129015664037894272559787 + _modulus.SetString("1a0111ea397fe69a4b1ba7b6434bacd764774b84f38512bf6730d2a0f6b0f6241eabfffeb153ffffb9feffffffffaaab", 16) } // NewElement returns a new Element from a uint64 value @@ -353,7 +351,7 @@ func (z *Element) SetRandom() (*Element, error) { z[5] = binary.BigEndian.Uint64(bytes[40:48]) z[5] %= 1873798617647539866 - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[5] < 1873798617647539866 || (z[5] == 1873798617647539866 && (z[4] < 5412103778470702295 || (z[4] == 5412103778470702295 && (z[3] < 7239337960414712511 || (z[3] == 7239337960414712511 && (z[2] < 7435674573564081700 || (z[2] == 7435674573564081700 && (z[1] < 2210141511517208575 || (z[1] == 2210141511517208575 && (z[0] < 13402431016077863595))))))))))) { var b uint64 @@ -377,10 +375,10 @@ func One() Element { // Halve sets z to z / 2 (mod p) func (z *Element) Halve() { - if z[0]&1 == 1 { + var carry uint64 + if z[0]&1 == 1 { // z = z + q - var carry uint64 z[0], carry = bits.Add64(z[0], 13402431016077863595, 0) z[1], carry = bits.Add64(z[1], 2210141511517208575, carry) z[2], carry = bits.Add64(z[2], 7435674573564081700, carry) @@ -389,9 +387,7 @@ func (z *Element) Halve() { z[5], _ = bits.Add64(z[5], 1873798617647539866, carry) } - // z = z >> 1 - z[0] = z[0]>>1 | z[1]<<63 z[1] = z[1]>>1 | z[2]<<63 z[2] = z[2]>>1 | z[3]<<63 @@ -401,8 +397,6 @@ func (z *Element) Halve() { } -// API with assembly impl - // Mul z = x * y mod q // see https://hackmd.io/@gnark/modular_multiplication 
func (z *Element) Mul(x, y *Element) *Element { @@ -570,7 +564,7 @@ func _mulGeneric(z, x, y *Element) { z[5], z[4] = madd3(m, 1873798617647539866, c[0], c[2], c[1]) } - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[5] < 1873798617647539866 || (z[5] == 1873798617647539866 && (z[4] < 5412103778470702295 || (z[4] == 5412103778470702295 && (z[3] < 7239337960414712511 || (z[3] == 7239337960414712511 && (z[2] < 7435674573564081700 || (z[2] == 7435674573564081700 && (z[1] < 2210141511517208575 || (z[1] == 2210141511517208575 && (z[0] < 13402431016077863595))))))))))) { var b uint64 @@ -581,89 +575,7 @@ func _mulGeneric(z, x, y *Element) { z[4], b = bits.Sub64(z[4], 5412103778470702295, b) z[5], _ = bits.Sub64(z[5], 1873798617647539866, b) } -} - -func _mulWGeneric(z, x *Element, y uint64) { - - var t [6]uint64 - { - // round 0 - c1, c0 := bits.Mul64(y, x[0]) - m := c0 * 9940570264628428797 - c2 := madd0(m, 13402431016077863595, c0) - c1, c0 = madd1(y, x[1], c1) - c2, t[0] = madd2(m, 2210141511517208575, c2, c0) - c1, c0 = madd1(y, x[2], c1) - c2, t[1] = madd2(m, 7435674573564081700, c2, c0) - c1, c0 = madd1(y, x[3], c1) - c2, t[2] = madd2(m, 7239337960414712511, c2, c0) - c1, c0 = madd1(y, x[4], c1) - c2, t[3] = madd2(m, 5412103778470702295, c2, c0) - c1, c0 = madd1(y, x[5], c1) - t[5], t[4] = madd3(m, 1873798617647539866, c0, c2, c1) - } - { - // round 1 - m := t[0] * 9940570264628428797 - c2 := madd0(m, 13402431016077863595, t[0]) - c2, t[0] = madd2(m, 2210141511517208575, c2, t[1]) - c2, t[1] = madd2(m, 7435674573564081700, c2, t[2]) - c2, t[2] = madd2(m, 7239337960414712511, c2, t[3]) - c2, t[3] = madd2(m, 5412103778470702295, c2, t[4]) - t[5], t[4] = madd2(m, 1873798617647539866, t[5], c2) - } - { - // round 2 - m := t[0] * 9940570264628428797 - c2 := madd0(m, 13402431016077863595, t[0]) - c2, t[0] = madd2(m, 2210141511517208575, c2, t[1]) - c2, t[1] = madd2(m, 7435674573564081700, c2, t[2]) - c2, t[2] = madd2(m, 
7239337960414712511, c2, t[3]) - c2, t[3] = madd2(m, 5412103778470702295, c2, t[4]) - t[5], t[4] = madd2(m, 1873798617647539866, t[5], c2) - } - { - // round 3 - m := t[0] * 9940570264628428797 - c2 := madd0(m, 13402431016077863595, t[0]) - c2, t[0] = madd2(m, 2210141511517208575, c2, t[1]) - c2, t[1] = madd2(m, 7435674573564081700, c2, t[2]) - c2, t[2] = madd2(m, 7239337960414712511, c2, t[3]) - c2, t[3] = madd2(m, 5412103778470702295, c2, t[4]) - t[5], t[4] = madd2(m, 1873798617647539866, t[5], c2) - } - { - // round 4 - m := t[0] * 9940570264628428797 - c2 := madd0(m, 13402431016077863595, t[0]) - c2, t[0] = madd2(m, 2210141511517208575, c2, t[1]) - c2, t[1] = madd2(m, 7435674573564081700, c2, t[2]) - c2, t[2] = madd2(m, 7239337960414712511, c2, t[3]) - c2, t[3] = madd2(m, 5412103778470702295, c2, t[4]) - t[5], t[4] = madd2(m, 1873798617647539866, t[5], c2) - } - { - // round 5 - m := t[0] * 9940570264628428797 - c2 := madd0(m, 13402431016077863595, t[0]) - c2, z[0] = madd2(m, 2210141511517208575, c2, t[1]) - c2, z[1] = madd2(m, 7435674573564081700, c2, t[2]) - c2, z[2] = madd2(m, 7239337960414712511, c2, t[3]) - c2, z[3] = madd2(m, 5412103778470702295, c2, t[4]) - z[5], z[4] = madd2(m, 1873798617647539866, t[5], c2) - } - // if z > q → z -= q - // note: this is NOT constant time - if !(z[5] < 1873798617647539866 || (z[5] == 1873798617647539866 && (z[4] < 5412103778470702295 || (z[4] == 5412103778470702295 && (z[3] < 7239337960414712511 || (z[3] == 7239337960414712511 && (z[2] < 7435674573564081700 || (z[2] == 7435674573564081700 && (z[1] < 2210141511517208575 || (z[1] == 2210141511517208575 && (z[0] < 13402431016077863595))))))))))) { - var b uint64 - z[0], b = bits.Sub64(z[0], 13402431016077863595, 0) - z[1], b = bits.Sub64(z[1], 2210141511517208575, b) - z[2], b = bits.Sub64(z[2], 7435674573564081700, b) - z[3], b = bits.Sub64(z[3], 7239337960414712511, b) - z[4], b = bits.Sub64(z[4], 5412103778470702295, b) - z[5], _ = bits.Sub64(z[5], 1873798617647539866, 
b) - } } func _fromMontGeneric(z *Element) { @@ -736,7 +648,7 @@ func _fromMontGeneric(z *Element) { z[5] = C } - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[5] < 1873798617647539866 || (z[5] == 1873798617647539866 && (z[4] < 5412103778470702295 || (z[4] == 5412103778470702295 && (z[3] < 7239337960414712511 || (z[3] == 7239337960414712511 && (z[2] < 7435674573564081700 || (z[2] == 7435674573564081700 && (z[1] < 2210141511517208575 || (z[1] == 2210141511517208575 && (z[0] < 13402431016077863595))))))))))) { var b uint64 @@ -759,7 +671,7 @@ func _addGeneric(z, x, y *Element) { z[4], carry = bits.Add64(x[4], y[4], carry) z[5], _ = bits.Add64(x[5], y[5], carry) - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[5] < 1873798617647539866 || (z[5] == 1873798617647539866 && (z[4] < 5412103778470702295 || (z[4] == 5412103778470702295 && (z[3] < 7239337960414712511 || (z[3] == 7239337960414712511 && (z[2] < 7435674573564081700 || (z[2] == 7435674573564081700 && (z[1] < 2210141511517208575 || (z[1] == 2210141511517208575 && (z[0] < 13402431016077863595))))))))))) { var b uint64 @@ -782,7 +694,7 @@ func _doubleGeneric(z, x *Element) { z[4], carry = bits.Add64(x[4], x[4], carry) z[5], _ = bits.Add64(x[5], x[5], carry) - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[5] < 1873798617647539866 || (z[5] == 1873798617647539866 && (z[4] < 5412103778470702295 || (z[4] == 5412103778470702295 && (z[3] < 7239337960414712511 || (z[3] == 7239337960414712511 && (z[2] < 7435674573564081700 || (z[2] == 7435674573564081700 && (z[1] < 2210141511517208575 || (z[1] == 2210141511517208575 && (z[0] < 13402431016077863595))))))))))) { var b uint64 @@ -830,7 +742,7 @@ func _negGeneric(z, x *Element) { func _reduceGeneric(z *Element) { - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[5] < 1873798617647539866 || (z[5] == 1873798617647539866 && 
(z[4] < 5412103778470702295 || (z[4] == 5412103778470702295 && (z[3] < 7239337960414712511 || (z[3] == 7239337960414712511 && (z[2] < 7435674573564081700 || (z[2] == 7435674573564081700 && (z[1] < 2210141511517208575 || (z[1] == 2210141511517208575 && (z[0] < 13402431016077863595))))))))))) { var b uint64 @@ -929,18 +841,30 @@ func (z *Element) BitLen() int { return bits.Len64(z[0]) } -// Exp z = x^exponent mod q -func (z *Element) Exp(x Element, exponent *big.Int) *Element { - var bZero big.Int - if exponent.Cmp(&bZero) == 0 { +// Exp z = xᵏ mod q +func (z *Element) Exp(x Element, k *big.Int) *Element { + if k.IsUint64() && k.Uint64() == 0 { return z.SetOne() } + e := k + if k.Sign() == -1 { + // negative k, we invert + // if k < 0: xᵏ mod q == (x⁻¹)ᵏ mod q + x.Inverse(&x) + + // we negate k in a temp big.Int since + // Int.Bit(_) of k and -k is different + e = bigIntPool.Get().(*big.Int) + defer bigIntPool.Put(e) + e.Neg(k) + } + z.Set(&x) - for i := exponent.BitLen() - 2; i >= 0; i-- { + for i := e.BitLen() - 2; i >= 0; i-- { z.Square(z) - if exponent.Bit(i) == 1 { + if e.Bit(i) == 1 { z.Mul(z, &x) } } @@ -1416,14 +1340,10 @@ func (z *Element) Inverse(x *Element) *Element { return z } -var qMinusTwo *big.Int //test routines can set this to an incorrect value to fail whenever inverseExp was triggered - -// inverseExp is a fallback in case the inversion algorithm failed +// inverseExp computes z = x⁻¹ mod q = x**(q-2) mod q func (z *Element) inverseExp(x *Element) *Element { - if qMinusTwo == nil { - qMinusTwo = Modulus() - qMinusTwo.Sub(qMinusTwo, big.NewInt(2)) - } + qMinusTwo := Modulus() + qMinusTwo.Sub(qMinusTwo, big.NewInt(2)) return z.Exp(*x, qMinusTwo) } @@ -1465,6 +1385,8 @@ func (z *Element) linearComb(x *Element, xC int64, y *Element, yC int64) { // montReduceSigned z = (xHi * r + x) * r⁻¹ using the SOS algorithm // Requires |xHi| < 2⁶³. Most significant bit of xHi is the sign bit. 
func (z *Element) montReduceSigned(x *Element, xHi uint64) { + // Used for Montgomery reduction. (qInvNeg) q + r'.r = 1, i.e., qInvNeg = - q⁻¹ mod r + const qInvNegLsw uint64 = 9940570264628428797 const signBitRemover = ^signBitSelector neg := xHi&signBitSelector != 0 @@ -1558,7 +1480,7 @@ func (z *Element) montReduceSigned(x *Element, xHi uint64) { z[5], z[4] = madd2(m, qElementWord5, t[i+5], C) } - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[5] < 1873798617647539866 || (z[5] == 1873798617647539866 && (z[4] < 5412103778470702295 || (z[4] == 5412103778470702295 && (z[3] < 7239337960414712511 || (z[3] == 7239337960414712511 && (z[2] < 7435674573564081700 || (z[2] == 7435674573564081700 && (z[1] < 2210141511517208575 || (z[1] == 2210141511517208575 && (z[0] < 13402431016077863595))))))))))) { var b uint64 @@ -1616,7 +1538,7 @@ func (z *Element) montReduceSignedSimpleButSlow(x *Element, xHi uint64) { z[4], c = bits.Add64(z[4], 0, c) z[5], _ = bits.Add64(z[5], 0, c) - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[5] < 1873798617647539866 || (z[5] == 1873798617647539866 && (z[4] < 5412103778470702295 || (z[4] == 5412103778470702295 && (z[3] < 7239337960414712511 || (z[3] == 7239337960414712511 && (z[2] < 7435674573564081700 || (z[2] == 7435674573564081700 && (z[1] < 2210141511517208575 || (z[1] == 2210141511517208575 && (z[0] < 13402431016077863595))))))))))) { var b uint64 @@ -1667,6 +1589,89 @@ func (z *Element) mulWSigned(x *Element, y int64) { } } +func _mulWGeneric(z, x *Element, y uint64) { + + var t [6]uint64 + { + // round 0 + c1, c0 := bits.Mul64(y, x[0]) + m := c0 * 9940570264628428797 + c2 := madd0(m, 13402431016077863595, c0) + c1, c0 = madd1(y, x[1], c1) + c2, t[0] = madd2(m, 2210141511517208575, c2, c0) + c1, c0 = madd1(y, x[2], c1) + c2, t[1] = madd2(m, 7435674573564081700, c2, c0) + c1, c0 = madd1(y, x[3], c1) + c2, t[2] = madd2(m, 7239337960414712511, c2, c0) + c1, c0 = 
madd1(y, x[4], c1) + c2, t[3] = madd2(m, 5412103778470702295, c2, c0) + c1, c0 = madd1(y, x[5], c1) + t[5], t[4] = madd3(m, 1873798617647539866, c0, c2, c1) + } + { + // round 1 + m := t[0] * 9940570264628428797 + c2 := madd0(m, 13402431016077863595, t[0]) + c2, t[0] = madd2(m, 2210141511517208575, c2, t[1]) + c2, t[1] = madd2(m, 7435674573564081700, c2, t[2]) + c2, t[2] = madd2(m, 7239337960414712511, c2, t[3]) + c2, t[3] = madd2(m, 5412103778470702295, c2, t[4]) + t[5], t[4] = madd2(m, 1873798617647539866, t[5], c2) + } + { + // round 2 + m := t[0] * 9940570264628428797 + c2 := madd0(m, 13402431016077863595, t[0]) + c2, t[0] = madd2(m, 2210141511517208575, c2, t[1]) + c2, t[1] = madd2(m, 7435674573564081700, c2, t[2]) + c2, t[2] = madd2(m, 7239337960414712511, c2, t[3]) + c2, t[3] = madd2(m, 5412103778470702295, c2, t[4]) + t[5], t[4] = madd2(m, 1873798617647539866, t[5], c2) + } + { + // round 3 + m := t[0] * 9940570264628428797 + c2 := madd0(m, 13402431016077863595, t[0]) + c2, t[0] = madd2(m, 2210141511517208575, c2, t[1]) + c2, t[1] = madd2(m, 7435674573564081700, c2, t[2]) + c2, t[2] = madd2(m, 7239337960414712511, c2, t[3]) + c2, t[3] = madd2(m, 5412103778470702295, c2, t[4]) + t[5], t[4] = madd2(m, 1873798617647539866, t[5], c2) + } + { + // round 4 + m := t[0] * 9940570264628428797 + c2 := madd0(m, 13402431016077863595, t[0]) + c2, t[0] = madd2(m, 2210141511517208575, c2, t[1]) + c2, t[1] = madd2(m, 7435674573564081700, c2, t[2]) + c2, t[2] = madd2(m, 7239337960414712511, c2, t[3]) + c2, t[3] = madd2(m, 5412103778470702295, c2, t[4]) + t[5], t[4] = madd2(m, 1873798617647539866, t[5], c2) + } + { + // round 5 + m := t[0] * 9940570264628428797 + c2 := madd0(m, 13402431016077863595, t[0]) + c2, z[0] = madd2(m, 2210141511517208575, c2, t[1]) + c2, z[1] = madd2(m, 7435674573564081700, c2, t[2]) + c2, z[2] = madd2(m, 7239337960414712511, c2, t[3]) + c2, z[3] = madd2(m, 5412103778470702295, c2, t[4]) + z[5], z[4] = madd2(m, 1873798617647539866, t[5], c2) + } + + 
// if z >= q → z -= q + // note: this is NOT constant time + if !(z[5] < 1873798617647539866 || (z[5] == 1873798617647539866 && (z[4] < 5412103778470702295 || (z[4] == 5412103778470702295 && (z[3] < 7239337960414712511 || (z[3] == 7239337960414712511 && (z[2] < 7435674573564081700 || (z[2] == 7435674573564081700 && (z[1] < 2210141511517208575 || (z[1] == 2210141511517208575 && (z[0] < 13402431016077863595))))))))))) { + var b uint64 + z[0], b = bits.Sub64(z[0], 13402431016077863595, 0) + z[1], b = bits.Sub64(z[1], 2210141511517208575, b) + z[2], b = bits.Sub64(z[2], 7435674573564081700, b) + z[3], b = bits.Sub64(z[3], 7239337960414712511, b) + z[4], b = bits.Sub64(z[4], 5412103778470702295, b) + z[5], _ = bits.Sub64(z[5], 1873798617647539866, b) + } +} + func (z *Element) neg(x *Element, xHi uint64) uint64 { var b uint64 diff --git a/ecc/bls12-381/fp/element_ops_noasm.go b/ecc/bls12-381/fp/element_ops_noasm.go index fec6289183..7d02396a3f 100644 --- a/ecc/bls12-381/fp/element_ops_noasm.go +++ b/ecc/bls12-381/fp/element_ops_noasm.go @@ -46,7 +46,6 @@ func MulBy13(x *Element) { func Butterfly(a, b *Element) { _butterflyGeneric(a, b) } - func mul(z, x, y *Element) { _mulGeneric(z, x, y) } diff --git a/ecc/bls12-381/fp/element_test.go b/ecc/bls12-381/fp/element_test.go index 1b6ba87879..f0e1fd5432 100644 --- a/ecc/bls12-381/fp/element_test.go +++ b/ecc/bls12-381/fp/element_test.go @@ -22,10 +22,12 @@ import ( "fmt" "math/big" "math/bits" + + "github.com/consensys/gnark-crypto/field" mrand "math/rand" + "testing" - "github.com/consensys/gnark-crypto/field" "github.com/leanovate/gopter" ggen "github.com/leanovate/gopter/gen" "github.com/leanovate/gopter/prop" @@ -324,7 +326,6 @@ func init() { a[5]-- staticTestValues = append(staticTestValues, a) } - { a := qElement a[5]-- @@ -338,6 +339,12 @@ func init() { staticTestValues = append(staticTestValues, a) } + { + a := qElement + a[5] = 0 + staticTestValues = append(staticTestValues, a) + } + } func TestElementReduce(t 
*testing.T) { @@ -475,7 +482,6 @@ func TestElementInverseExp(t *testing.T) { properties = gopter.NewProperties(parameters) properties.Property("inv(0) == 0", prop.ForAll(invMatchExp, ggen.OneConstOf(testPairElement{}))) properties.TestingRun(t, gopter.ConsoleReporter(false)) - // if we have ADX instruction enabled, test both path in assembly if supportAdx { t.Log("disabling ADX") @@ -1422,8 +1428,8 @@ func TestElementSquare(t *testing.T) { specialValueTest() // if we have ADX instruction enabled, test both path in assembly if supportAdx { - supportAdx = false t.Log("disabling ADX") + supportAdx = false properties.TestingRun(t, gopter.ConsoleReporter(false)) specialValueTest() supportAdx = true @@ -1501,8 +1507,8 @@ func TestElementInverse(t *testing.T) { specialValueTest() // if we have ADX instruction enabled, test both path in assembly if supportAdx { - supportAdx = false t.Log("disabling ADX") + supportAdx = false properties.TestingRun(t, gopter.ConsoleReporter(false)) specialValueTest() supportAdx = true @@ -1580,8 +1586,8 @@ func TestElementSqrt(t *testing.T) { specialValueTest() // if we have ADX instruction enabled, test both path in assembly if supportAdx { - supportAdx = false t.Log("disabling ADX") + supportAdx = false properties.TestingRun(t, gopter.ConsoleReporter(false)) specialValueTest() supportAdx = true @@ -1676,8 +1682,8 @@ func TestElementDouble(t *testing.T) { specialValueTest() // if we have ADX instruction enabled, test both path in assembly if supportAdx { - supportAdx = false t.Log("disabling ADX") + supportAdx = false properties.TestingRun(t, gopter.ConsoleReporter(false)) specialValueTest() supportAdx = true @@ -1772,8 +1778,8 @@ func TestElementNeg(t *testing.T) { specialValueTest() // if we have ADX instruction enabled, test both path in assembly if supportAdx { - supportAdx = false t.Log("disabling ADX") + supportAdx = false properties.TestingRun(t, gopter.ConsoleReporter(false)) specialValueTest() supportAdx = true @@ -2108,6 +2114,134 
@@ func TestElementSetInterface(t *testing.T) { properties.TestingRun(t, gopter.ConsoleReporter(false)) } +func TestElementNegativeExp(t *testing.T) { + t.Parallel() + + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := gen() + + properties.Property("x⁻ᵏ == 1/xᵏ", prop.ForAll( + func(a, b testPairElement) bool { + + var nb, d, e big.Int + nb.Neg(&b.bigint) + + var c Element + c.Exp(a.element, &nb) + + d.Exp(&a.bigint, &nb, Modulus()) + + return c.FromMont().ToBigInt(&e).Cmp(&d) == 0 + }, + genA, genA, + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) +} + +func TestElementBatchInvert(t *testing.T) { + assert := require.New(t) + + t.Parallel() + + // ensure batchInvert([x]) == invert(x) + for i := int64(-1); i <= 2; i++ { + var e, eInv Element + e.SetInt64(i) + eInv.Inverse(&e) + + a := []Element{e} + aInv := BatchInvert(a) + + assert.True(aInv[0].Equal(&eInv), "batchInvert != invert") + + } + + // test x * x⁻¹ == 1 + tData := [][]int64{ + {-1, 1, 2, 3}, + {0, -1, 1, 2, 3, 0}, + {0, -1, 1, 0, 2, 3, 0}, + {-1, 1, 0, 2, 3}, + {0, 0, 1}, + {1, 0, 0}, + {0, 0, 0}, + } + + for _, t := range tData { + a := make([]Element, len(t)) + for i := 0; i < len(a); i++ { + a[i].SetInt64(t[i]) + } + + aInv := BatchInvert(a) + + assert.True(len(aInv) == len(a)) + + for i := 0; i < len(a); i++ { + if a[i].IsZero() { + assert.True(aInv[i].IsZero(), "0⁻¹ != 0") + } else { + assert.True(a[i].Mul(&a[i], &aInv[i]).IsOne(), "x * x⁻¹ != 1") + } + } + } + + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := gen() + + properties.Property("batchInvert --> x * x⁻¹ == 1", prop.ForAll( + func(tp testPairElement, r uint8) 
bool { + + a := make([]Element, r) + if r != 0 { + a[0] = tp.element + + } + one := One() + for i := 1; i < len(a); i++ { + a[i].Add(&a[i-1], &one) + } + + aInv := BatchInvert(a) + + assert.True(len(aInv) == len(a)) + + for i := 0; i < len(a); i++ { + if a[i].IsZero() { + if !aInv[i].IsZero() { + return false + } + } else { + if !a[i].Mul(&a[i], &aInv[i]).IsOne() { + return false + } + } + } + return true + }, + genA, ggen.UInt8(), + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) +} + func TestElementFromMont(t *testing.T) { t.Parallel() @@ -2314,8 +2448,6 @@ func genFull() gopter.Gen { } } -// Some utils - func (z *Element) matchVeryBigInt(aHi uint64, aInt *big.Int) error { var modulus big.Int var aIntMod big.Int diff --git a/ecc/bls12-381/fr/element.go b/ecc/bls12-381/fr/element.go index 6996497cf6..de598428b8 100644 --- a/ecc/bls12-381/fr/element.go +++ b/ecc/bls12-381/fr/element.go @@ -75,9 +75,6 @@ var qElement = Element{ qElementWord3, } -// Used for Montgomery reduction. 
(qInvNeg) q + r'.r = 1, i.e., qInvNeg = - q⁻¹ mod r -const qInvNegLsw uint64 = 18446744069414584319 - // rSquare var rSquare = Element{ 14526898881837571181, @@ -93,7 +90,8 @@ var bigIntPool = sync.Pool{ } func init() { - _modulus.SetString("52435875175126190479447740508185965837690552500527637822603658699938581184513", 10) + // base10: 52435875175126190479447740508185965837690552500527637822603658699938581184513 + _modulus.SetString("73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000001", 16) } // NewElement returns a new Element from a uint64 value @@ -327,7 +325,7 @@ func (z *Element) SetRandom() (*Element, error) { z[3] = binary.BigEndian.Uint64(bytes[24:32]) z[3] %= 8353516859464449352 - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[3] < 8353516859464449352 || (z[3] == 8353516859464449352 && (z[2] < 3691218898639771653 || (z[2] == 3691218898639771653 && (z[1] < 6034159408538082302 || (z[1] == 6034159408538082302 && (z[0] < 18446744069414584321))))))) { var b uint64 @@ -349,19 +347,17 @@ func One() Element { // Halve sets z to z / 2 (mod p) func (z *Element) Halve() { - if z[0]&1 == 1 { + var carry uint64 + if z[0]&1 == 1 { // z = z + q - var carry uint64 z[0], carry = bits.Add64(z[0], 18446744069414584321, 0) z[1], carry = bits.Add64(z[1], 6034159408538082302, carry) z[2], carry = bits.Add64(z[2], 3691218898639771653, carry) z[3], _ = bits.Add64(z[3], 8353516859464449352, carry) } - // z = z >> 1 - z[0] = z[0]>>1 | z[1]<<63 z[1] = z[1]>>1 | z[2]<<63 z[2] = z[2]>>1 | z[3]<<63 @@ -369,8 +365,6 @@ func (z *Element) Halve() { } -// API with assembly impl - // Mul z = x * y mod q // see https://hackmd.io/@gnark/modular_multiplication func (z *Element) Mul(x, y *Element) *Element { @@ -486,7 +480,7 @@ func _mulGeneric(z, x, y *Element) { z[3], z[2] = madd3(m, 8353516859464449352, c[0], c[2], c[1]) } - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[3] < 8353516859464449352 || 
(z[3] == 8353516859464449352 && (z[2] < 3691218898639771653 || (z[2] == 3691218898639771653 && (z[1] < 6034159408538082302 || (z[1] == 6034159408538082302 && (z[0] < 18446744069414584321))))))) { var b uint64 @@ -495,57 +489,7 @@ func _mulGeneric(z, x, y *Element) { z[2], b = bits.Sub64(z[2], 3691218898639771653, b) z[3], _ = bits.Sub64(z[3], 8353516859464449352, b) } -} - -func _mulWGeneric(z, x *Element, y uint64) { - - var t [4]uint64 - { - // round 0 - c1, c0 := bits.Mul64(y, x[0]) - m := c0 * 18446744069414584319 - c2 := madd0(m, 18446744069414584321, c0) - c1, c0 = madd1(y, x[1], c1) - c2, t[0] = madd2(m, 6034159408538082302, c2, c0) - c1, c0 = madd1(y, x[2], c1) - c2, t[1] = madd2(m, 3691218898639771653, c2, c0) - c1, c0 = madd1(y, x[3], c1) - t[3], t[2] = madd3(m, 8353516859464449352, c0, c2, c1) - } - { - // round 1 - m := t[0] * 18446744069414584319 - c2 := madd0(m, 18446744069414584321, t[0]) - c2, t[0] = madd2(m, 6034159408538082302, c2, t[1]) - c2, t[1] = madd2(m, 3691218898639771653, c2, t[2]) - t[3], t[2] = madd2(m, 8353516859464449352, t[3], c2) - } - { - // round 2 - m := t[0] * 18446744069414584319 - c2 := madd0(m, 18446744069414584321, t[0]) - c2, t[0] = madd2(m, 6034159408538082302, c2, t[1]) - c2, t[1] = madd2(m, 3691218898639771653, c2, t[2]) - t[3], t[2] = madd2(m, 8353516859464449352, t[3], c2) - } - { - // round 3 - m := t[0] * 18446744069414584319 - c2 := madd0(m, 18446744069414584321, t[0]) - c2, z[0] = madd2(m, 6034159408538082302, c2, t[1]) - c2, z[1] = madd2(m, 3691218898639771653, c2, t[2]) - z[3], z[2] = madd2(m, 8353516859464449352, t[3], c2) - } - // if z > q → z -= q - // note: this is NOT constant time - if !(z[3] < 8353516859464449352 || (z[3] == 8353516859464449352 && (z[2] < 3691218898639771653 || (z[2] == 3691218898639771653 && (z[1] < 6034159408538082302 || (z[1] == 6034159408538082302 && (z[0] < 18446744069414584321))))))) { - var b uint64 - z[0], b = bits.Sub64(z[0], 18446744069414584321, 0) - z[1], b = bits.Sub64(z[1], 
6034159408538082302, b) - z[2], b = bits.Sub64(z[2], 3691218898639771653, b) - z[3], _ = bits.Sub64(z[3], 8353516859464449352, b) - } } func _fromMontGeneric(z *Element) { @@ -588,7 +532,7 @@ func _fromMontGeneric(z *Element) { z[3] = C } - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[3] < 8353516859464449352 || (z[3] == 8353516859464449352 && (z[2] < 3691218898639771653 || (z[2] == 3691218898639771653 && (z[1] < 6034159408538082302 || (z[1] == 6034159408538082302 && (z[0] < 18446744069414584321))))))) { var b uint64 @@ -607,7 +551,7 @@ func _addGeneric(z, x, y *Element) { z[2], carry = bits.Add64(x[2], y[2], carry) z[3], _ = bits.Add64(x[3], y[3], carry) - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[3] < 8353516859464449352 || (z[3] == 8353516859464449352 && (z[2] < 3691218898639771653 || (z[2] == 3691218898639771653 && (z[1] < 6034159408538082302 || (z[1] == 6034159408538082302 && (z[0] < 18446744069414584321))))))) { var b uint64 @@ -626,7 +570,7 @@ func _doubleGeneric(z, x *Element) { z[2], carry = bits.Add64(x[2], x[2], carry) z[3], _ = bits.Add64(x[3], x[3], carry) - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[3] < 8353516859464449352 || (z[3] == 8353516859464449352 && (z[2] < 3691218898639771653 || (z[2] == 3691218898639771653 && (z[1] < 6034159408538082302 || (z[1] == 6034159408538082302 && (z[0] < 18446744069414584321))))))) { var b uint64 @@ -666,7 +610,7 @@ func _negGeneric(z, x *Element) { func _reduceGeneric(z *Element) { - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[3] < 8353516859464449352 || (z[3] == 8353516859464449352 && (z[2] < 3691218898639771653 || (z[2] == 3691218898639771653 && (z[1] < 6034159408538082302 || (z[1] == 6034159408538082302 && (z[0] < 18446744069414584321))))))) { var b uint64 @@ -757,18 +701,30 @@ func (z *Element) BitLen() int { return bits.Len64(z[0]) } -// 
Exp z = x^exponent mod q -func (z *Element) Exp(x Element, exponent *big.Int) *Element { - var bZero big.Int - if exponent.Cmp(&bZero) == 0 { +// Exp z = xᵏ mod q +func (z *Element) Exp(x Element, k *big.Int) *Element { + if k.IsUint64() && k.Uint64() == 0 { return z.SetOne() } + e := k + if k.Sign() == -1 { + // negative k, we invert + // if k < 0: xᵏ mod q == (x⁻¹)ᵏ mod q + x.Inverse(&x) + + // we negate k in a temp big.Int since + // Int.Bit(_) of k and -k is different + e = bigIntPool.Get().(*big.Int) + defer bigIntPool.Put(e) + e.Neg(k) + } + z.Set(&x) - for i := exponent.BitLen() - 2; i >= 0; i-- { + for i := e.BitLen() - 2; i >= 0; i-- { z.Square(z) - if exponent.Bit(i) == 1 { + if e.Bit(i) == 1 { z.Mul(z, &x) } } @@ -1282,14 +1238,10 @@ func (z *Element) Inverse(x *Element) *Element { return z } -var qMinusTwo *big.Int //test routines can set this to an incorrect value to fail whenever inverseExp was triggered - -// inverseExp is a fallback in case the inversion algorithm failed +// inverseExp computes z = x⁻¹ mod q = x**(q-2) mod q func (z *Element) inverseExp(x *Element) *Element { - if qMinusTwo == nil { - qMinusTwo = Modulus() - qMinusTwo.Sub(qMinusTwo, big.NewInt(2)) - } + qMinusTwo := Modulus() + qMinusTwo.Sub(qMinusTwo, big.NewInt(2)) return z.Exp(*x, qMinusTwo) } @@ -1331,6 +1283,8 @@ func (z *Element) linearComb(x *Element, xC int64, y *Element, yC int64) { // montReduceSigned z = (xHi * r + x) * r⁻¹ using the SOS algorithm // Requires |xHi| < 2⁶³. Most significant bit of xHi is the sign bit. func (z *Element) montReduceSigned(x *Element, xHi uint64) { + // Used for Montgomery reduction. 
(qInvNeg) q + r'.r = 1, i.e., qInvNeg = - q⁻¹ mod r + const qInvNegLsw uint64 = 18446744069414584319 const signBitRemover = ^signBitSelector neg := xHi&signBitSelector != 0 @@ -1390,7 +1344,7 @@ func (z *Element) montReduceSigned(x *Element, xHi uint64) { z[3], z[2] = madd2(m, qElementWord3, t[i+3], C) } - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[3] < 8353516859464449352 || (z[3] == 8353516859464449352 && (z[2] < 3691218898639771653 || (z[2] == 3691218898639771653 && (z[1] < 6034159408538082302 || (z[1] == 6034159408538082302 && (z[0] < 18446744069414584321))))))) { var b uint64 @@ -1440,7 +1394,7 @@ func (z *Element) montReduceSignedSimpleButSlow(x *Element, xHi uint64) { z[2], c = bits.Add64(z[2], 0, c) z[3], _ = bits.Add64(z[3], 0, c) - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[3] < 8353516859464449352 || (z[3] == 8353516859464449352 && (z[2] < 3691218898639771653 || (z[2] == 3691218898639771653 && (z[1] < 6034159408538082302 || (z[1] == 6034159408538082302 && (z[0] < 18446744069414584321))))))) { var b uint64 @@ -1485,6 +1439,57 @@ func (z *Element) mulWSigned(x *Element, y int64) { } } +func _mulWGeneric(z, x *Element, y uint64) { + + var t [4]uint64 + { + // round 0 + c1, c0 := bits.Mul64(y, x[0]) + m := c0 * 18446744069414584319 + c2 := madd0(m, 18446744069414584321, c0) + c1, c0 = madd1(y, x[1], c1) + c2, t[0] = madd2(m, 6034159408538082302, c2, c0) + c1, c0 = madd1(y, x[2], c1) + c2, t[1] = madd2(m, 3691218898639771653, c2, c0) + c1, c0 = madd1(y, x[3], c1) + t[3], t[2] = madd3(m, 8353516859464449352, c0, c2, c1) + } + { + // round 1 + m := t[0] * 18446744069414584319 + c2 := madd0(m, 18446744069414584321, t[0]) + c2, t[0] = madd2(m, 6034159408538082302, c2, t[1]) + c2, t[1] = madd2(m, 3691218898639771653, c2, t[2]) + t[3], t[2] = madd2(m, 8353516859464449352, t[3], c2) + } + { + // round 2 + m := t[0] * 18446744069414584319 + c2 := madd0(m, 18446744069414584321, 
t[0]) + c2, t[0] = madd2(m, 6034159408538082302, c2, t[1]) + c2, t[1] = madd2(m, 3691218898639771653, c2, t[2]) + t[3], t[2] = madd2(m, 8353516859464449352, t[3], c2) + } + { + // round 3 + m := t[0] * 18446744069414584319 + c2 := madd0(m, 18446744069414584321, t[0]) + c2, z[0] = madd2(m, 6034159408538082302, c2, t[1]) + c2, z[1] = madd2(m, 3691218898639771653, c2, t[2]) + z[3], z[2] = madd2(m, 8353516859464449352, t[3], c2) + } + + // if z >= q → z -= q + // note: this is NOT constant time + if !(z[3] < 8353516859464449352 || (z[3] == 8353516859464449352 && (z[2] < 3691218898639771653 || (z[2] == 3691218898639771653 && (z[1] < 6034159408538082302 || (z[1] == 6034159408538082302 && (z[0] < 18446744069414584321))))))) { + var b uint64 + z[0], b = bits.Sub64(z[0], 18446744069414584321, 0) + z[1], b = bits.Sub64(z[1], 6034159408538082302, b) + z[2], b = bits.Sub64(z[2], 3691218898639771653, b) + z[3], _ = bits.Sub64(z[3], 8353516859464449352, b) + } +} + func (z *Element) neg(x *Element, xHi uint64) uint64 { var b uint64 diff --git a/ecc/bls12-381/fr/element_ops_noasm.go b/ecc/bls12-381/fr/element_ops_noasm.go index ec1fac18d6..7891ac9cd3 100644 --- a/ecc/bls12-381/fr/element_ops_noasm.go +++ b/ecc/bls12-381/fr/element_ops_noasm.go @@ -46,7 +46,6 @@ func MulBy13(x *Element) { func Butterfly(a, b *Element) { _butterflyGeneric(a, b) } - func mul(z, x, y *Element) { _mulGeneric(z, x, y) } diff --git a/ecc/bls12-381/fr/element_test.go b/ecc/bls12-381/fr/element_test.go index ae9de4bc51..255f1364e6 100644 --- a/ecc/bls12-381/fr/element_test.go +++ b/ecc/bls12-381/fr/element_test.go @@ -22,10 +22,12 @@ import ( "fmt" "math/big" "math/bits" + + "github.com/consensys/gnark-crypto/field" mrand "math/rand" + "testing" - "github.com/consensys/gnark-crypto/field" "github.com/leanovate/gopter" ggen "github.com/leanovate/gopter/gen" "github.com/leanovate/gopter/prop" @@ -320,7 +322,6 @@ func init() { a[3]-- staticTestValues = append(staticTestValues, a) } - { a := qElement a[3]-- 
@@ -334,6 +335,12 @@ func init() { staticTestValues = append(staticTestValues, a) } + { + a := qElement + a[3] = 0 + staticTestValues = append(staticTestValues, a) + } + } func TestElementReduce(t *testing.T) { @@ -471,7 +478,6 @@ func TestElementInverseExp(t *testing.T) { properties = gopter.NewProperties(parameters) properties.Property("inv(0) == 0", prop.ForAll(invMatchExp, ggen.OneConstOf(testPairElement{}))) properties.TestingRun(t, gopter.ConsoleReporter(false)) - // if we have ADX instruction enabled, test both path in assembly if supportAdx { t.Log("disabling ADX") @@ -1418,8 +1424,8 @@ func TestElementSquare(t *testing.T) { specialValueTest() // if we have ADX instruction enabled, test both path in assembly if supportAdx { - supportAdx = false t.Log("disabling ADX") + supportAdx = false properties.TestingRun(t, gopter.ConsoleReporter(false)) specialValueTest() supportAdx = true @@ -1497,8 +1503,8 @@ func TestElementInverse(t *testing.T) { specialValueTest() // if we have ADX instruction enabled, test both path in assembly if supportAdx { - supportAdx = false t.Log("disabling ADX") + supportAdx = false properties.TestingRun(t, gopter.ConsoleReporter(false)) specialValueTest() supportAdx = true @@ -1576,8 +1582,8 @@ func TestElementSqrt(t *testing.T) { specialValueTest() // if we have ADX instruction enabled, test both path in assembly if supportAdx { - supportAdx = false t.Log("disabling ADX") + supportAdx = false properties.TestingRun(t, gopter.ConsoleReporter(false)) specialValueTest() supportAdx = true @@ -1672,8 +1678,8 @@ func TestElementDouble(t *testing.T) { specialValueTest() // if we have ADX instruction enabled, test both path in assembly if supportAdx { - supportAdx = false t.Log("disabling ADX") + supportAdx = false properties.TestingRun(t, gopter.ConsoleReporter(false)) specialValueTest() supportAdx = true @@ -1768,8 +1774,8 @@ func TestElementNeg(t *testing.T) { specialValueTest() // if we have ADX instruction enabled, test both path in 
assembly if supportAdx { - supportAdx = false t.Log("disabling ADX") + supportAdx = false properties.TestingRun(t, gopter.ConsoleReporter(false)) specialValueTest() supportAdx = true @@ -2104,6 +2110,134 @@ func TestElementSetInterface(t *testing.T) { properties.TestingRun(t, gopter.ConsoleReporter(false)) } +func TestElementNegativeExp(t *testing.T) { + t.Parallel() + + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := gen() + + properties.Property("x⁻ᵏ == 1/xᵏ", prop.ForAll( + func(a, b testPairElement) bool { + + var nb, d, e big.Int + nb.Neg(&b.bigint) + + var c Element + c.Exp(a.element, &nb) + + d.Exp(&a.bigint, &nb, Modulus()) + + return c.FromMont().ToBigInt(&e).Cmp(&d) == 0 + }, + genA, genA, + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) +} + +func TestElementBatchInvert(t *testing.T) { + assert := require.New(t) + + t.Parallel() + + // ensure batchInvert([x]) == invert(x) + for i := int64(-1); i <= 2; i++ { + var e, eInv Element + e.SetInt64(i) + eInv.Inverse(&e) + + a := []Element{e} + aInv := BatchInvert(a) + + assert.True(aInv[0].Equal(&eInv), "batchInvert != invert") + + } + + // test x * x⁻¹ == 1 + tData := [][]int64{ + {-1, 1, 2, 3}, + {0, -1, 1, 2, 3, 0}, + {0, -1, 1, 0, 2, 3, 0}, + {-1, 1, 0, 2, 3}, + {0, 0, 1}, + {1, 0, 0}, + {0, 0, 0}, + } + + for _, t := range tData { + a := make([]Element, len(t)) + for i := 0; i < len(a); i++ { + a[i].SetInt64(t[i]) + } + + aInv := BatchInvert(a) + + assert.True(len(aInv) == len(a)) + + for i := 0; i < len(a); i++ { + if a[i].IsZero() { + assert.True(aInv[i].IsZero(), "0⁻¹ != 0") + } else { + assert.True(a[i].Mul(&a[i], &aInv[i]).IsOne(), "x * x⁻¹ != 1") + } + } + } + + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + 
parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := gen() + + properties.Property("batchInvert --> x * x⁻¹ == 1", prop.ForAll( + func(tp testPairElement, r uint8) bool { + + a := make([]Element, r) + if r != 0 { + a[0] = tp.element + + } + one := One() + for i := 1; i < len(a); i++ { + a[i].Add(&a[i-1], &one) + } + + aInv := BatchInvert(a) + + assert.True(len(aInv) == len(a)) + + for i := 0; i < len(a); i++ { + if a[i].IsZero() { + if !aInv[i].IsZero() { + return false + } + } else { + if !a[i].Mul(&a[i], &aInv[i]).IsOne() { + return false + } + } + } + return true + }, + genA, ggen.UInt8(), + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) +} + func TestElementFromMont(t *testing.T) { t.Parallel() @@ -2286,8 +2420,6 @@ func genFull() gopter.Gen { } } -// Some utils - func (z *Element) matchVeryBigInt(aHi uint64, aInt *big.Int) error { var modulus big.Int var aIntMod big.Int diff --git a/ecc/bls12-381/fr/mimc/mimc.go b/ecc/bls12-381/fr/mimc/mimc.go index 89287dd062..9104557317 100644 --- a/ecc/bls12-381/fr/mimc/mimc.go +++ b/ecc/bls12-381/fr/mimc/mimc.go @@ -64,7 +64,7 @@ func NewMiMC() hash.Hash { // Reset resets the Hash to its initial state. func (d *digest) Reset() { d.data = nil - d.h = fr.Element{0, 0, 0, 0} + d.h.SetZero() } // Sum appends the current hash to b and returns the resulting slice. 
diff --git a/ecc/bls12-39/bls12-39.go b/ecc/bls12-39/bls12-39.go new file mode 100644 index 0000000000..096478b839 --- /dev/null +++ b/ecc/bls12-39/bls12-39.go @@ -0,0 +1,130 @@ +package bls1239 + +import ( + "math/big" + + "github.com/consensys/gnark-crypto/ecc" + "github.com/consensys/gnark-crypto/ecc/bls12-39/fp" + "github.com/consensys/gnark-crypto/ecc/bls12-39/fr" + "github.com/consensys/gnark-crypto/ecc/bls12-39/internal/fptower" +) + +// E: y**2=x**3+2 +// Etwist: y**2 = x**3+2/(u+1) +// Tower: Fp->Fp2, u**2=3 -> Fp12, v**6=u+1 +// Generator (BLS12 family): x=100 +// optimal Ate loop: trace(frob)-1=x +// trace of pi: x+1 +// Fp: p=326667333367 +// Fr: r=99990001 +// ID bls39 ID +const ID = ecc.BLS12_39 + +// bCurveCoeff b coeff of the curve +var bCurveCoeff fp.Element + +// twist +var twist fptower.E2 + +// bTwistCurveCoeff b coeff of the twist (defined over Fp2) curve +var bTwistCurveCoeff fptower.E2 + +// generators of the r-torsion group, resp. in ker(pi-id), ker(Tr) +var g1Gen G1Jac +var g2Gen G2Jac + +var g1GenAff G1Affine +var g2GenAff G2Affine + +// point at infinity +var g1Infinity G1Jac +var g2Infinity G2Jac + +// optimal Ate loop counter (=trace-1 = x in BLS family) +var loopCounter [7]int8 + +// Parameters useful for the GLV scalar multiplication. The third roots define the +// endomorphisms phi1 and phi2 for and . lambda is such that lies above +// in the ring Z[phi]. 
More concretely it's the associated eigenvalue +// of phi1 (resp phi2) restricted to (resp ) +// cf https://www.cosic.esat.kuleuven.be/nessie/reports/phase2/GLV.pdf +var thirdRootOneG1 fp.Element +var thirdRootOneG2 fp.Element +var lambdaGLV big.Int + +// glvBasis stores R-linearly independant vectors (a,b), (c,d) +// in ker((u,v)->u+vlambda[r]), and their determinant +var glvBasis ecc.Lattice + +// psi o pi o psi**-1, where psi:E->E' is the degree 6 iso defined over Fp12 +var endo struct { + u fptower.E2 + v fptower.E2 +} + +// generator of the curve +var xGen big.Int + +// expose the tower -- github.com/consensys/gnark uses it in a gnark circuit + +// E2 is a degree two finite field extension of fp.Element +type E2 = fptower.E2 + +// E6 is a degree three finite field extension of fp2 +type E6 = fptower.E6 + +// E12 is a degree two finite field extension of fp6 +type E12 = fptower.E12 + +func init() { + + bCurveCoeff.SetUint64(2) + twist.A0.SetUint64(1) + twist.A1.SetUint64(1) + bTwistCurveCoeff.Inverse(&twist).Double(&bTwistCurveCoeff) + + g1Gen.X.SetString("76374581475") + g1Gen.Y.SetString("135768504117") + g1Gen.Z.SetString("1") + + g2Gen.X.SetString("170522782386", + "184493119176") + g2Gen.Y.SetString("113781902987", + "323607052549") + g2Gen.Z.SetString("1", + "0") + + g1GenAff.FromJacobian(&g1Gen) + g2GenAff.FromJacobian(&g2Gen) + + g1Infinity.X.SetOne() + g1Infinity.Y.SetOne() + g2Infinity.X.SetOne() + g2Infinity.Y.SetOne() + + thirdRootOneG1.SetUint64(9702999901) + thirdRootOneG2.Square(&thirdRootOneG1) + lambdaGLV.SetString("9999", 10) + _r := fr.Modulus() + ecc.PrecomputeLattice(_r, &lambdaGLV, &glvBasis) + + endo.u.A0.SetUint64(159768345029) + endo.u.A1.SetUint64(182009477101) + endo.v.A0.SetUint64(293515655025) + endo.v.A1.SetUint64(228828781692) + + // binary decomposition of 100 little endian + loopCounter = [7]int8{0, 0, 1, 0, 0, 1, 1} + + xGen.SetString("100", 10) + +} + +// Generators return the generators of the r-torsion group, resp. 
in ker(pi-id), ker(Tr) +func Generators() (g1Jac G1Jac, g2Jac G2Jac, g1Aff G1Affine, g2Aff G2Affine) { + g1Aff = g1GenAff + g2Aff = g2GenAff + g1Jac = g1Gen + g2Jac = g2Gen + return +} diff --git a/ecc/bls12-39/doc.go b/ecc/bls12-39/doc.go new file mode 100644 index 0000000000..bca395dcfc --- /dev/null +++ b/ecc/bls12-39/doc.go @@ -0,0 +1,18 @@ +// Copyright 2020 ConsenSys Software Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by consensys/gnark-crypto DO NOT EDIT + +// Package bls1239 implements a toy BLS curve for test purposes. Warning: not suitable for cryptographic use. +package bls1239 diff --git a/ecc/bls12-39/fp/arith.go b/ecc/bls12-39/fp/arith.go new file mode 100644 index 0000000000..7e068ce399 --- /dev/null +++ b/ecc/bls12-39/fp/arith.go @@ -0,0 +1,30 @@ +// Copyright 2020 ConsenSys Software Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Code generated by consensys/gnark-crypto DO NOT EDIT + +package fp + +import ( + "math/bits" +) + +// madd0 hi = a*b + c (discards lo bits) +func madd0(a, b, c uint64) (hi uint64) { + var carry, lo uint64 + hi, lo = bits.Mul64(a, b) + _, carry = bits.Add64(lo, c, 0) + hi, _ = bits.Add64(hi, 0, carry) + return +} diff --git a/ecc/bls12-39/fp/doc.go b/ecc/bls12-39/fp/doc.go new file mode 100644 index 0000000000..7bd275abb5 --- /dev/null +++ b/ecc/bls12-39/fp/doc.go @@ -0,0 +1,43 @@ +// Copyright 2020 ConsenSys Software Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by consensys/gnark-crypto DO NOT EDIT + +// Package fp contains field arithmetic operations for modulus = 0x4c0ee3eef7. +// +// The API is similar to math/big (big.Int), but the operations are significantly faster (up to 20x for the modular multiplication on amd64, see also https://hackmd.io/@gnark/modular_multiplication) +// +// The modulus is hardcoded in all the operations. 
+// +// Field elements are represented as an array, and assumed to be in Montgomery form in all methods: +// type Element [1]uint64 +// +// Example API signature +// // Mul z = x * y mod q +// func (z *Element) Mul(x, y *Element) *Element +// +// and can be used like so: +// var a, b Element +// a.SetUint64(2) +// b.SetString("984896738") +// a.Mul(a, b) +// a.Sub(a, a) +// .Add(a, b) +// .Inv(a) +// b.Exp(b, new(big.Int).SetUint64(42)) +// +// Modulus +// 0x4c0ee3eef7 // base 16 +// 326667333367 // base 10 +package fp diff --git a/ecc/bls12-39/fp/element.go b/ecc/bls12-39/fp/element.go new file mode 100644 index 0000000000..e8e1ca7e70 --- /dev/null +++ b/ecc/bls12-39/fp/element.go @@ -0,0 +1,915 @@ +// Copyright 2020 ConsenSys Software Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by consensys/gnark-crypto DO NOT EDIT + +package fp + +// /!\ WARNING /!\ +// this code has not been audited and is provided as-is. 
In particular, +// there is no security guarantees such as constant time implementation +// or side-channel attack resistance +// /!\ WARNING /!\ + +import ( + "crypto/rand" + "encoding/binary" + "errors" + "io" + "math/big" + "math/bits" + "reflect" + "strconv" + "strings" + "sync" +) + +// Element represents a field element stored on 1 words (uint64) +// Element are assumed to be in Montgomery form in all methods +// field modulus q = +// +// 326667333367 +type Element [1]uint64 + +// Limbs number of 64 bits words needed to represent Element +const Limbs = 1 + +// Bits number bits needed to represent Element +const Bits = 39 + +// Bytes number bytes needed to represent Element +const Bytes = Limbs * 8 + +// field modulus stored as big.Int +var _modulus big.Int + +// Modulus returns q as a big.Int +// q = +// +// 326667333367 +func Modulus() *big.Int { + return new(big.Int).Set(&_modulus) +} + +// q (modulus) +const qElementWord0 uint64 = 326667333367 +const q uint64 = qElementWord0 + +var qElement = Element{ + qElementWord0, +} + +// rSquare +var rSquare = Element{ + 242079875569, +} + +var bigIntPool = sync.Pool{ + New: func() interface{} { + return new(big.Int) + }, +} + +func init() { + // base10: 326667333367 + _modulus.SetString("4c0ee3eef7", 16) +} + +// NewElement returns a new Element from a uint64 value +// +// it is equivalent to +// var v NewElement +// v.SetUint64(...) 
+func NewElement(v uint64) Element { + z := Element{v} + z.Mul(&z, &rSquare) + return z +} + +// SetUint64 sets z to v and returns z +func (z *Element) SetUint64(v uint64) *Element { + // sets z LSB to v (non-Montgomery form) and convert z to Montgomery form + *z = Element{v} + return z.Mul(z, &rSquare) // z.ToMont() +} + +// SetInt64 sets z to v and returns z +func (z *Element) SetInt64(v int64) *Element { + + // absolute value of v + m := v >> 63 + z.SetUint64(uint64((v ^ m) - m)) + + if m != 0 { + // v is negative + z.Neg(z) + } + + return z +} + +// Set z = x +func (z *Element) Set(x *Element) *Element { + z[0] = x[0] + return z +} + +// SetInterface converts provided interface into Element +// returns an error if provided type is not supported +// supported types: Element, *Element, uint64, int, string (interpreted as base10 integer), +// *big.Int, big.Int, []byte +func (z *Element) SetInterface(i1 interface{}) (*Element, error) { + switch c1 := i1.(type) { + case Element: + return z.Set(&c1), nil + case *Element: + if c1 == nil { + return nil, errors.New("can't set fp.Element with ") + } + return z.Set(c1), nil + case uint8: + return z.SetUint64(uint64(c1)), nil + case uint16: + return z.SetUint64(uint64(c1)), nil + case uint32: + return z.SetUint64(uint64(c1)), nil + case uint: + return z.SetUint64(uint64(c1)), nil + case uint64: + return z.SetUint64(c1), nil + case int8: + return z.SetInt64(int64(c1)), nil + case int16: + return z.SetInt64(int64(c1)), nil + case int32: + return z.SetInt64(int64(c1)), nil + case int64: + return z.SetInt64(c1), nil + case int: + return z.SetInt64(int64(c1)), nil + case string: + return z.SetString(c1), nil + case *big.Int: + if c1 == nil { + return nil, errors.New("can't set fp.Element with ") + } + return z.SetBigInt(c1), nil + case big.Int: + return z.SetBigInt(&c1), nil + case []byte: + return z.SetBytes(c1), nil + default: + return nil, errors.New("can't set fp.Element from type " + reflect.TypeOf(i1).String()) + } +} + 
+// SetZero z = 0 +func (z *Element) SetZero() *Element { + z[0] = 0 + return z +} + +// SetOne z = 1 (in Montgomery form) +func (z *Element) SetOne() *Element { + z[0] = 152135744813 + return z +} + +// Div z = x*y^-1 mod q +func (z *Element) Div(x, y *Element) *Element { + var yInv Element + yInv.Inverse(y) + z.Mul(x, &yInv) + return z +} + +// Bit returns the i'th bit, with lsb == bit 0. +// It is the responsibility of the caller to convert from Montgomery to Regular form if needed +func (z *Element) Bit(i uint64) uint64 { + j := i / 64 + if j >= 1 { + return 0 + } + return uint64(z[j] >> (i % 64) & 1) +} + +// Equal returns z == x; constant-time +func (z *Element) Equal(x *Element) bool { + return z.NotEqual(x) == 0 +} + +// NotEqual returns 0 if and only if z == x; constant-time +func (z *Element) NotEqual(x *Element) uint64 { + return (z[0] ^ x[0]) +} + +// IsZero returns z == 0 +func (z *Element) IsZero() bool { + return (z[0]) == 0 +} + +// IsOne returns z == 1 +func (z *Element) IsOne() bool { + return z[0] == 152135744813 +} + +// IsUint64 reports whether z can be represented as an uint64. +func (z *Element) IsUint64() bool { + return true +} + +// Uint64 returns the uint64 representation of x. If x cannot be represented in a uint64, the result is undefined. 
// Uint64 converts a copy of z out of Montgomery form and returns its
// (single-limb) regular value. z itself is not mutated.
func (z *Element) Uint64() uint64 {
	zz := *z
	zz.FromMont()
	return zz[0]
}

// FitsOnOneWord reports whether z words (except the least significant word) are 0.
// Always true here: the element is a single 64-bit limb.
func (z *Element) FitsOnOneWord() bool {
	return true
}

// Cmp compares (lexicographic order) z and x and returns:
//
//	-1 if z < x
//	 0 if z == x
//	+1 if z > x
//
// Comparison is done on the regular (non-Montgomery) values; z and x are not mutated.
func (z *Element) Cmp(x *Element) int {
	_z := *z
	_x := *x
	_z.FromMont()
	_x.FromMont()
	if _z[0] > _x[0] {
		return 1
	} else if _z[0] < _x[0] {
		return -1
	}
	return 0
}

// LexicographicallyLargest returns true if this element is strictly lexicographically
// larger than its negation, false otherwise.
func (z *Element) LexicographicallyLargest() bool {
	// adapted from github.com/zkcrypto/bls12_381
	// we check if the element is larger than (q-1) / 2
	// if z - (((q-1) / 2) + 1) has no underflow, then z > (q-1) / 2
	// 163333666684 == (q-1)/2 + 1 for q == 326667333367

	_z := *z
	_z.FromMont()

	var b uint64
	_, b = bits.Sub64(_z[0], 163333666684, 0)

	return b == 0
}

// SetRandom sets z to a random element < q and returns z, or a read error from crypto/rand.
// NOTE(review): reducing a uniform 64-bit value mod q introduces a small modulo bias
// (2⁶⁴ is not a multiple of q); the multi-limb variants share this property.
func (z *Element) SetRandom() (*Element, error) {
	var bytes [8]byte
	if _, err := io.ReadFull(rand.Reader, bytes[:]); err != nil {
		return nil, err
	}
	z[0] = binary.BigEndian.Uint64(bytes[0:8])
	z[0] %= 326667333367 // q

	return z, nil
}

// One returns 1 (in Montgomery form).
func One() Element {
	var one Element
	one.SetOne()
	return one
}

// Halve sets z to z / 2 (mod p).
func (z *Element) Halve() {

	if z[0]&1 == 1 {
		// z is odd: make it even (mod q) before shifting.
		// z = z + q; the carry out of Add64 is always 0 since z < q and q < 2⁶³.
		z[0], _ = bits.Add64(z[0], 326667333367, 0)

	}
	// z = z >> 1
	z[0] >>= 1

}

// Mul sets z = x * y mod q (all operands in Montgomery form).
// see https://hackmd.io/@gnark/modular_multiplication
func (z *Element) Mul(x, y *Element) *Element {

	// CIOS multiplication
	// Used for Montgomery reduction. (qInvNeg) q + r'.r = 1, i.e., qInvNeg = - q⁻¹ mod r
	const qInvNegLsw uint64 = 14763286405836492089

	var r uint64
	hi, lo := bits.Mul64(x[0], y[0])
	// m makes (lo + m*q) divisible by 2⁶⁴, so the low word of the sum below is 0
	m := lo * qInvNegLsw
	hi2, lo2 := bits.Mul64(m, q)
	_, carry := bits.Add64(lo2, lo, 0)
	r, carry = bits.Add64(hi2, hi, carry)

	if carry != 0 || r >= q {
		// we need to reduce (result of CIOS is < 2q; one subtraction suffices)
		r -= q

	}
	z[0] = r

	return z
}

// Square sets z = x * x mod q (same CIOS reduction as Mul).
// see https://hackmd.io/@gnark/modular_multiplication
func (z *Element) Square(x *Element) *Element {

	// CIOS multiplication
	// Used for Montgomery reduction. (qInvNeg) q + r'.r = 1, i.e., qInvNeg = - q⁻¹ mod r
	const qInvNegLsw uint64 = 14763286405836492089

	var r uint64
	hi, lo := bits.Mul64(x[0], x[0])
	m := lo * qInvNegLsw
	hi2, lo2 := bits.Mul64(m, q)
	_, carry := bits.Add64(lo2, lo, 0)
	r, carry = bits.Add64(hi2, hi, carry)

	if carry != 0 || r >= q {
		// we need to reduce
		r -= q

	}
	z[0] = r

	return z
}

// FromMont converts z in place (i.e. mutates) from Montgomery to regular representation
// sets and returns z = z * 1
func (z *Element) FromMont() *Element {
	fromMont(z)
	return z
}

// Add sets z = x + y mod q and returns z.
func (z *Element) Add(x, y *Element) *Element {
	add(z, x, y)
	return z
}

// Double sets z = x + x mod q, aka Lsh 1, and returns z.
func (z *Element) Double(x *Element) *Element {
	double(z, x)
	return z
}

// Sub sets z = x - y mod q and returns z.
func (z *Element) Sub(x, y *Element) *Element {
	sub(z, x, y)
	return z
}

// Neg sets z = q - x (the additive inverse; 0 maps to 0) and returns z.
func (z *Element) Neg(x *Element) *Element {
	neg(z, x)
	return z
}

// Select is a constant-time conditional move.
// If c=0, z = x0.
// Else z = x1.
func (z *Element) Select(c int, x0 *Element, x1 *Element) *Element {
	cC := uint64((int64(c) | -int64(c)) >> 63) // "canonicized" into: 0 if c=0, -1 otherwise
	z[0] = x0[0] ^ cC&(x0[0]^x1[0])
	return z
}

// Generic (no ADX instructions, no AMD64) versions of multiplication and squaring algorithms

// _mulGeneric is the pure-Go fallback for Mul; it must stay bit-identical to the asm path.
func _mulGeneric(z, x, y *Element) {

	// CIOS multiplication
	// Used for Montgomery reduction. (qInvNeg) q + r'.r = 1, i.e., qInvNeg = - q⁻¹ mod r
	const qInvNegLsw uint64 = 14763286405836492089

	var r uint64
	hi, lo := bits.Mul64(x[0], y[0])
	m := lo * qInvNegLsw
	hi2, lo2 := bits.Mul64(m, q)
	_, carry := bits.Add64(lo2, lo, 0)
	r, carry = bits.Add64(hi2, hi, carry)

	if carry != 0 || r >= q {
		// we need to reduce
		r -= q

	}
	z[0] = r

}

// _fromMontGeneric converts z out of Montgomery form in place (z = z * r⁻¹ mod q).
func _fromMontGeneric(z *Element) {
	// the following lines implement z = z * 1
	// with a modified CIOS montgomery multiplication
	{
		// m = z[0]n'[0] mod W
		m := z[0] * 14763286405836492089 // -q⁻¹ mod 2⁶⁴
		// madd0 is a package helper; presumably returns the high word of m*q + z[0] — TODO confirm
		C := madd0(m, 326667333367, z[0])
		z[0] = C
	}

	// if z >= q → z -= q
	// note: this is NOT constant time
	if z[0] >= q {
		z[0] -= q
	}
}

// _addGeneric sets z = x + y mod q.
// The carry out of Add64 is discarded: x, y < q and q < 2⁶³, so x+y < 2⁶⁴.
func _addGeneric(z, x, y *Element) {

	z[0], _ = bits.Add64(x[0], y[0], 0)
	if z[0] >= q {
		z[0] -= q
	}
}

// _doubleGeneric sets z = 2x mod q.
func _doubleGeneric(z, x *Element) {
	if x[0]&(1<<63) == (1 << 63) {
		// if highest bit is set, then we have a carry to x + x, we shift and subtract q
		z[0] = (x[0] << 1) - q
	} else {
		// highest bit is not set, but x + x can still be >= q
		z[0] = (x[0] << 1)
		if z[0] >= q {
			z[0] -= q
		}
	}
}

// _subGeneric sets z = x - y mod q (adds q back on borrow).
func _subGeneric(z, x, y *Element) {
	var b uint64
	z[0], b = bits.Sub64(x[0], y[0], 0)
	if b != 0 {
		z[0] += q
	}
}

// _negGeneric sets z = q - x, with 0 mapped to 0 (not to q).
func _negGeneric(z, x *Element) {
	if x.IsZero() {
		z.SetZero()
		return
	}
	z[0] = q - x[0]
}

// _reduceGeneric conditionally subtracts q once, assuming z < 2q on entry.
func _reduceGeneric(z *Element) {

	// if z >= q → z -= q
	// note: this is NOT constant time
	if z[0] >= q {
		z[0] -= q
	}
}

// mulByConstant sets z = c·z for a small constant c, using addition chains
// for the hard-coded cases and a full Mul otherwise.
func mulByConstant(z *Element, c uint8) {
	switch c {
	case 0:
		z.SetZero()
		return
	case 1:
		return
	case 2:
		z.Double(z)
		return
	case 3:
		// 3z = 2z + z
		_z := *z
		z.Double(z).Add(z, &_z)
	case 5:
		// 5z = 4z + z
		_z := *z
		z.Double(z).Double(z).Add(z, &_z)
	case 11:
		// 11z = 2·(4z + z) + z
		_z := *z
		z.Double(z).Double(z).Add(z, &_z).Double(z).Add(z, &_z)
	default:
		var y Element
		y.SetUint64(uint64(c))
		z.Mul(z, &y)
	}
}

// BatchInvert returns a new slice with every element inverted.
// Uses Montgomery batch inversion trick: one Inverse plus 3(n-1) Muls.
// Zero inputs are left as zero in the result.
func BatchInvert(a []Element) []Element {
	res := make([]Element, len(a))
	if len(a) == 0 {
		return res
	}

	zeroes := make([]bool, len(a))
	accumulator := One()

	// forward pass: res[i] = prod(a[0..i-1]); accumulator = prod(a[0..i])
	for i := 0; i < len(a); i++ {
		if a[i].IsZero() {
			zeroes[i] = true
			continue
		}
		res[i] = accumulator
		accumulator.Mul(&accumulator, &a[i])
	}

	accumulator.Inverse(&accumulator)

	// backward pass: res[i] = prod(a[0..i-1]) · prod(a[0..i])⁻¹ = a[i]⁻¹
	for i := len(a) - 1; i >= 0; i-- {
		if zeroes[i] {
			continue
		}
		res[i].Mul(&res[i], &accumulator)
		accumulator.Mul(&accumulator, &a[i])
	}

	return res
}

// _butterflyGeneric sets a = a + b and b = a_old - b (FFT butterfly).
func _butterflyGeneric(a, b *Element) {
	t := *a
	a.Add(a, b)
	b.Sub(&t, b)
}

// BitLen returns the minimum number of bits needed to represent z;
// returns 0 if z == 0.
func (z *Element) BitLen() int {
	return bits.Len64(z[0])
}

// Exp sets z = xᵏ mod q (square-and-multiply, msb first) and returns z.
// A negative k is handled as (x⁻¹)^|k|.
func (z *Element) Exp(x Element, k *big.Int) *Element {
	if k.IsUint64() && k.Uint64() == 0 {
		return z.SetOne()
	}

	e := k
	if k.Sign() == -1 {
		// negative k, we invert
		// if k < 0: xᵏ mod q == (x⁻¹)ᵏ mod q
		x.Inverse(&x)

		// we negate k in a temp big.Int since
		// Int.Bit(_) of k and -k is different
		e = bigIntPool.Get().(*big.Int)
		defer bigIntPool.Put(e)
		e.Neg(k)
	}

	// start from the most significant set bit (implicitly consumed by this Set)
	z.Set(&x)

	for i := e.BitLen() - 2; i >= 0; i-- {
		z.Square(z)
		if e.Bit(i) == 1 {
			z.Mul(z, &x)
		}
	}

	return z
}

// ToMont converts z to Montgomery form;
// sets and returns z = z * r² (the Montgomery reduction inside Mul divides by r).
func (z *Element) ToMont() *Element {
	return z.Mul(z, &rSquare)
}

// ToRegular returns z in regular form (doesn't mutate z)
func (z Element) ToRegular() Element {
	// z is a value receiver, so FromMont mutates only the copy
	return *z.FromMont()
}

// String returns the decimal representation of z as generated by
// z.Text(10).
func (z *Element) String() string {
	return z.Text(10)
}

// Text returns the string representation of z in the given base.
// Base must be between 2 and 36, inclusive. The result uses the
// lower-case letters 'a' to 'z' for digit values 10 to 35.
// No prefix (such as "0x") is added to the string. If z is a nil
// pointer it returns "".
// If base == 10 and -z fits in a uint16, the prefix "-" is added to the string
// (small negative values print compactly, e.g. "-1" instead of a huge residue).
func (z *Element) Text(base int) string {
	if base < 2 || base > 36 {
		panic("invalid base")
	}
	if z == nil {
		return ""
	}

	const maxUint16 = 65535
	if base == 10 {
		var zzNeg Element
		zzNeg.Neg(z)
		zzNeg.FromMont()
		if zzNeg[0] <= maxUint16 && zzNeg[0] != 0 {
			return "-" + strconv.FormatUint(zzNeg[0], base)
		}
	}
	zz := *z
	zz.FromMont()
	return strconv.FormatUint(zz[0], base)
}

// ToBigInt returns z as a big.Int reading the raw limb, i.e. still in
// Montgomery form; use ToBigIntRegular for the regular value.
func (z *Element) ToBigInt(res *big.Int) *big.Int {
	var b [Limbs * 8]byte
	binary.BigEndian.PutUint64(b[0:8], z[0])

	return res.SetBytes(b[:])
}

// ToBigIntRegular returns z as a big.Int in regular form
// (z is a value receiver, so the caller's element is not mutated).
func (z Element) ToBigIntRegular(res *big.Int) *big.Int {
	z.FromMont()
	return z.ToBigInt(res)
}

// Bytes returns the regular (non montgomery) value
// of z as a big-endian byte array.
func (z *Element) Bytes() (res [Limbs * 8]byte) {
	_z := z.ToRegular()
	binary.BigEndian.PutUint64(res[0:8], _z[0])

	return
}

// Marshal returns the regular (non montgomery) value
// of z as a big-endian byte slice.
func (z *Element) Marshal() []byte {
	b := z.Bytes()
	return b[:]
}

// SetBytes interprets e as the bytes of a big-endian unsigned integer,
// sets z to that value (in Montgomery form), and returns z.
func (z *Element) SetBytes(e []byte) *Element {
	if len(e) == 8 {
		// fast path: exactly one limb; ToMont's final reduction handles values >= q
		z[0] = binary.BigEndian.Uint64(e)
		return z.ToMont()
	}
	// get a big int from our pool
	vv := bigIntPool.Get().(*big.Int)
	vv.SetBytes(e)

	// set big int
	z.SetBigInt(vv)

	// put temporary object back in pool
	bigIntPool.Put(vv)

	return z
}

// SetBigInt sets z to v (regular form) and returns z in Montgomery form.
func (z *Element) SetBigInt(v *big.Int) *Element {
	z.SetZero()

	var zero big.Int

	// fast path
	c := v.Cmp(&_modulus)
	if c == 0 {
		// v == q, and q ≡ 0 mod q: z stays zero
		return z
	} else if c != 1 && v.Cmp(&zero) != -1 {
		// 0 <= v < q: no reduction needed
		return z.setBigInt(v)
	}

	// get temporary big int from the pool
	vv := bigIntPool.Get().(*big.Int)

	// copy input + modular reduction
	vv.Set(v)
	vv.Mod(v, &_modulus)

	// set big int byte value
	z.setBigInt(vv)

	// release object into pool
	bigIntPool.Put(vv)
	return z
}

// setBigInt copies v's machine words into z's limbs and converts to Montgomery form.
// Assumes 0 ⩽ v < q.
func (z *Element) setBigInt(v *big.Int) *Element {
	vBits := v.Bits()

	if bits.UintSize == 64 {
		for i := 0; i < len(vBits); i++ {
			z[i] = uint64(vBits[i])
		}
	} else {
		// 32-bit platform: big.Word is 32 bits; pack two words per limb
		for i := 0; i < len(vBits); i++ {
			if i%2 == 0 {
				z[i/2] = uint64(vBits[i])
			} else {
				z[i/2] |= uint64(vBits[i]) << 32
			}
		}
	}

	return z.ToMont()
}

// SetString creates a big.Int with number and calls SetBigInt on z.
//
// The number prefix determines the actual base: A prefix of
// "0b" or "0B" selects base 2, "0", "0o" or "0O" selects base 8,
// and "0x" or "0X" selects base 16. Otherwise, the selected base is 10
// and no prefix is accepted.
//
// For base 16, lower and upper case letters are considered the same:
// The letters 'a' to 'f' and 'A' to 'F' represent digit values 10 to 15.
//
// An underscore character "_" may appear between a base
// prefix and an adjacent digit, and between successive digits; such
// underscores do not change the value of the number.
// Incorrect placement of underscores is reported as a panic if there
// are no other errors.
func (z *Element) SetString(number string) *Element {
	// get temporary big int from the pool
	vv := bigIntPool.Get().(*big.Int)

	if _, ok := vv.SetString(number, 0); !ok {
		panic("Element.SetString failed -> can't parse number into a big.Int " + number)
	}

	z.SetBigInt(vv)

	// release object into pool
	bigIntPool.Put(vv)

	return z
}

// MarshalJSON returns json encoding of z (z.Text(10)).
// If z == nil, returns null.
func (z *Element) MarshalJSON() ([]byte, error) {
	if z == nil {
		return []byte("null"), nil
	}
	const maxSafeBound = 15 // we encode it as number if it's small
	s := z.Text(10)
	if len(s) <= maxSafeBound {
		return []byte(s), nil
	}
	var sbb strings.Builder
	sbb.WriteByte('"')
	sbb.WriteString(s)
	sbb.WriteByte('"')
	return []byte(sbb.String()), nil
}

// UnmarshalJSON accepts numbers and strings as input.
// See Element.SetString for valid prefixes (0x, 0b, ...).
func (z *Element) UnmarshalJSON(data []byte) error {
	s := string(data)
	if len(s) > Bits*3 {
		return errors.New("value too large (max = Element.Bits * 3)")
	}

	// we accept numbers and strings, remove leading and trailing quotes if any
	if len(s) > 0 && s[0] == '"' {
		s = s[1:]
	}
	if len(s) > 0 && s[len(s)-1] == '"' {
		s = s[:len(s)-1]
	}

	// get temporary big int from the pool
	vv := bigIntPool.Get().(*big.Int)

	if _, ok := vv.SetString(s, 0); !ok {
		return errors.New("can't parse into a big.Int: " + s)
	}

	z.SetBigInt(vv)

	// release object into pool
	bigIntPool.Put(vv)
	return nil
}

// Legendre returns the Legendre symbol of z (either +1, -1, or 0.)
func (z *Element) Legendre() int {
	var l Element
	// z^((q-1)/2)
	l.expByLegendreExp(*z)

	if l.IsZero() {
		return 0
	}

	// if l == 1 (Montgomery form of 1)
	if l[0] == 152135744813 {
		return 1
	}
	return -1
}

// Sqrt sets z = √x mod q and returns z.
// If the square root doesn't exist (x is not a square mod q)
// Sqrt leaves z unchanged and returns nil.
func (z *Element) Sqrt(x *Element) *Element {
	// q ≡ 3 (mod 4)
	// using z ≡ ± x^((p+1)/4) (mod q)
	var y, square Element
	y.expBySqrtExp(*x)
	// as we didn't compute the legendre symbol, ensure we found y such that y * y = x
	square.Square(&y)
	if square.Equal(x) {
		return z.Set(&y)
	}
	return nil
}

// Inverse sets z = x⁻¹ mod q and returns z.
// Algorithm 16 in "Efficient Software-Implementation of Finite Fields with Applications to Cryptography"
// (binary extended GCD operating on Montgomery-form values; s starts at r² so the
// result lands back in Montgomery form).
// If x == 0, sets and returns z = x.
func (z *Element) Inverse(x *Element) *Element {
	const q uint64 = qElementWord0
	if x.IsZero() {
		z.SetZero()
		return z
	}

	var r, s, u, v uint64
	u = q // u = q
	s = 242079875569 // s = r²
	r = 0
	v = x[0]

	var carry, borrow uint64

	for (u != 1) && (v != 1) {
		// halve v while even, keeping s ≡ v·(factor) by halving s mod q
		for v&1 == 0 {
			v >>= 1
			if s&1 == 0 {
				s >>= 1
			} else {
				// s odd: (s+q)/2; the shifted-out carry is restored into bit 63
				s, carry = bits.Add64(s, q, 0)
				s >>= 1
				if carry != 0 {
					s |= (1 << 63)
				}
			}
		}
		// same for u / r
		for u&1 == 0 {
			u >>= 1
			if r&1 == 0 {
				r >>= 1
			} else {
				r, carry = bits.Add64(r, q, 0)
				r >>= 1
				if carry != 0 {
					r |= (1 << 63)
				}
			}
		}
		// subtract the smaller of u, v from the larger, mirroring on r, s mod q
		if v >= u {
			v -= u
			s, borrow = bits.Sub64(s, r, 0)
			if borrow == 1 {
				s += q
			}
		} else {
			u -= v
			r, borrow = bits.Sub64(r, s, 0)
			if borrow == 1 {
				r += q
			}
		}
	}

	if u == 1 {
		z[0] = r
	} else {
		z[0] = s
	}

	return z
}
diff --git a/ecc/bls12-39/fp/element_exp.go b/ecc/bls12-39/fp/element_exp.go
new file mode 100644
index 0000000000..ac637e664e
--- /dev/null
+++ b/ecc/bls12-39/fp/element_exp.go
@@ -0,0 +1,244 @@
// Copyright 2020 ConsenSys Software Inc.
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by consensys/gnark-crypto DO NOT EDIT + +package fp + +// expBySqrtExp is equivalent to z.Exp(x, 1303b8fbbe) +// +// uses github.com/mmcloughlin/addchain v0.4.0 to generate a shorter addition chain +func (z *Element) expBySqrtExp(x Element) *Element { + // addition chain: + // + // _10 = 2*1 + // _11 = 1 + _10 + // _1100 = _11 << 2 + // _1111 = _11 + _1100 + // _111100 = _1111 << 2 + // _1111000 = 2*_111100 + // _10110100 = _111100 + _1111000 + // i11 = 2*_10110100 + _1111000 + // i12 = 2*i11 + // i19 = (i12 << 2 + i12) << 3 + _10110100 + // i20 = i11 + i19 + // i22 = 2*i20 + i19 + // i43 = ((i20 + i22) << 14 + i22 + 1) << 4 + // return 2*(_1111 + i43) + // + // Operations: 33 squares 12 multiplies + + // Allocate Temporaries. 
+ var ( + t0 = new(Element) + t1 = new(Element) + t2 = new(Element) + t3 = new(Element) + ) + + // var t0,t1,t2,t3 Element + // Step 1: z = x^0x2 + z.Square(&x) + + // Step 2: z = x^0x3 + z.Mul(&x, z) + + // Step 4: t0 = x^0xc + t0.Square(z) + for s := 1; s < 2; s++ { + t0.Square(t0) + } + + // Step 5: z = x^0xf + z.Mul(z, t0) + + // Step 7: t0 = x^0x3c + t0.Square(z) + for s := 1; s < 2; s++ { + t0.Square(t0) + } + + // Step 8: t1 = x^0x78 + t1.Square(t0) + + // Step 9: t0 = x^0xb4 + t0.Mul(t0, t1) + + // Step 10: t2 = x^0x168 + t2.Square(t0) + + // Step 11: t1 = x^0x1e0 + t1.Mul(t1, t2) + + // Step 12: t2 = x^0x3c0 + t2.Square(t1) + + // Step 14: t3 = x^0xf00 + t3.Square(t2) + for s := 1; s < 2; s++ { + t3.Square(t3) + } + + // Step 15: t2 = x^0x12c0 + t2.Mul(t2, t3) + + // Step 18: t2 = x^0x9600 + for s := 0; s < 3; s++ { + t2.Square(t2) + } + + // Step 19: t0 = x^0x96b4 + t0.Mul(t0, t2) + + // Step 20: t1 = x^0x9894 + t1.Mul(t1, t0) + + // Step 21: t2 = x^0x13128 + t2.Square(t1) + + // Step 22: t0 = x^0x1c7dc + t0.Mul(t0, t2) + + // Step 23: t1 = x^0x26070 + t1.Mul(t1, t0) + + // Step 37: t1 = x^0x981c0000 + for s := 0; s < 14; s++ { + t1.Square(t1) + } + + // Step 38: t0 = x^0x981dc7dc + t0.Mul(t0, t1) + + // Step 39: t0 = x^0x981dc7dd + t0.Mul(&x, t0) + + // Step 43: t0 = x^0x981dc7dd0 + for s := 0; s < 4; s++ { + t0.Square(t0) + } + + // Step 44: z = x^0x981dc7ddf + z.Mul(z, t0) + + // Step 45: z = x^0x1303b8fbbe + z.Square(z) + + return z +} + +// expByLegendreExp is equivalent to z.Exp(x, 260771f77b) +// +// uses github.com/mmcloughlin/addchain v0.4.0 to generate a shorter addition chain +func (z *Element) expByLegendreExp(x Element) *Element { + // addition chain: + // + // _10 = 2*1 + // _11 = 1 + _10 + // _110 = 2*_11 + // _1001 = _11 + _110 + // _1010 = 1 + _1001 + // _10011 = _1001 + _1010 + // _11101 = _1010 + _10011 + // _100110 = _1001 + _11101 + // i30 = ((_100110 << 10 + _11101) << 2 + _11) << 8 + // i44 = ((_11101 + i30 + _10) << 6 + _11101) << 
5 + // return 2*(_11101 + i44) + 1 + // + // Operations: 34 squares 13 multiplies + + // Allocate Temporaries. + var ( + t0 = new(Element) + t1 = new(Element) + t2 = new(Element) + t3 = new(Element) + ) + + // var t0,t1,t2,t3 Element + // Step 1: t0 = x^0x2 + t0.Square(&x) + + // Step 2: t1 = x^0x3 + t1.Mul(&x, t0) + + // Step 3: z = x^0x6 + z.Square(t1) + + // Step 4: t2 = x^0x9 + t2.Mul(t1, z) + + // Step 5: z = x^0xa + z.Mul(&x, t2) + + // Step 6: t3 = x^0x13 + t3.Mul(t2, z) + + // Step 7: z = x^0x1d + z.Mul(z, t3) + + // Step 8: t2 = x^0x26 + t2.Mul(t2, z) + + // Step 18: t2 = x^0x9800 + for s := 0; s < 10; s++ { + t2.Square(t2) + } + + // Step 19: t2 = x^0x981d + t2.Mul(z, t2) + + // Step 21: t2 = x^0x26074 + for s := 0; s < 2; s++ { + t2.Square(t2) + } + + // Step 22: t1 = x^0x26077 + t1.Mul(t1, t2) + + // Step 30: t1 = x^0x2607700 + for s := 0; s < 8; s++ { + t1.Square(t1) + } + + // Step 31: t1 = x^0x260771d + t1.Mul(z, t1) + + // Step 32: t0 = x^0x260771f + t0.Mul(t0, t1) + + // Step 38: t0 = x^0x981dc7c0 + for s := 0; s < 6; s++ { + t0.Square(t0) + } + + // Step 39: t0 = x^0x981dc7dd + t0.Mul(z, t0) + + // Step 44: t0 = x^0x1303b8fba0 + for s := 0; s < 5; s++ { + t0.Square(t0) + } + + // Step 45: z = x^0x1303b8fbbd + z.Mul(z, t0) + + // Step 46: z = x^0x260771f77a + z.Square(z) + + // Step 47: z = x^0x260771f77b + z.Mul(&x, z) + + return z +} diff --git a/ecc/bls12-39/fp/element_fuzz.go b/ecc/bls12-39/fp/element_fuzz.go new file mode 100644 index 0000000000..7368dc869c --- /dev/null +++ b/ecc/bls12-39/fp/element_fuzz.go @@ -0,0 +1,113 @@ +//go:build gofuzz +// +build gofuzz + +// Copyright 2020 ConsenSys Software Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by consensys/gnark-crypto DO NOT EDIT + +package fp + +import ( + "bytes" + "encoding/binary" + "io" + "math/big" + "math/bits" +) + +const ( + fuzzInteresting = 1 + fuzzNormal = 0 + fuzzDiscard = -1 +) + +// Fuzz arithmetic operations fuzzer +func Fuzz(data []byte) int { + r := bytes.NewReader(data) + + var e1, e2 Element + e1.SetRawBytes(r) + e2.SetRawBytes(r) + + { + // mul assembly + + var c, _c Element + a, _a, b, _b := e1, e1, e2, e2 + c.Mul(&a, &b) + _mulGeneric(&_c, &_a, &_b) + + if !c.Equal(&_c) { + panic("mul asm != mul generic on Element") + } + } + + { + // inverse + inv := e1 + inv.Inverse(&inv) + + var bInv, b1, b2 big.Int + e1.ToBigIntRegular(&b1) + bInv.ModInverse(&b1, Modulus()) + inv.ToBigIntRegular(&b2) + + if b2.Cmp(&bInv) != 0 { + panic("inverse operation doesn't match big int result") + } + } + + { + // a + -a == 0 + a, b := e1, e1 + b.Neg(&b) + a.Add(&a, &b) + if !a.IsZero() { + panic("a + -a != 0") + } + } + + return fuzzNormal + +} + +// SetRawBytes reads up to Bytes (bytes needed to represent Element) from reader +// and interpret it as big endian uint64 +// used for fuzzing purposes only +func (z *Element) SetRawBytes(r io.Reader) { + + buf := make([]byte, 8) + + for i := 0; i < len(z); i++ { + if _, err := io.ReadFull(r, buf); err != nil { + goto eof + } + z[i] = binary.BigEndian.Uint64(buf[:]) + } +eof: + z[0] %= qElement[0] + + if z.BiggerModulus() { + var b uint64 + z[0], b = bits.Sub64(z[0], qElement[0], 0) + } + + return +} + +func (z *Element) BiggerModulus() bool { + + return z[0] >= 
qElement[0] +} diff --git a/ecc/bls12-39/fp/element_ops_noasm.go b/ecc/bls12-39/fp/element_ops_noasm.go new file mode 100644 index 0000000000..87c6f3fe88 --- /dev/null +++ b/ecc/bls12-39/fp/element_ops_noasm.go @@ -0,0 +1,77 @@ +// Copyright 2020 ConsenSys Software Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by consensys/gnark-crypto DO NOT EDIT + +package fp + +// /!\ WARNING /!\ +// this code has not been audited and is provided as-is. In particular, +// there is no security guarantees such as constant time implementation +// or side-channel attack resistance +// /!\ WARNING /!\ + +// MulBy3 x *= 3 +func MulBy3(x *Element) { + var y Element + y.SetUint64(3) + x.Mul(x, &y) +} + +// MulBy5 x *= 5 +func MulBy5(x *Element) { + var y Element + y.SetUint64(5) + x.Mul(x, &y) +} + +// MulBy13 x *= 13 +func MulBy13(x *Element) { + var y Element + y.SetUint64(13) + x.Mul(x, &y) +} + +// Butterfly sets +// a = a + b +// b = a - b +func Butterfly(a, b *Element) { + _butterflyGeneric(a, b) +} + +// FromMont converts z in place (i.e. 
mutates) from Montgomery to regular representation +// sets and returns z = z * 1 +func fromMont(z *Element) { + _fromMontGeneric(z) +} + +func add(z, x, y *Element) { + _addGeneric(z, x, y) +} + +func double(z, x *Element) { + _doubleGeneric(z, x) +} + +func sub(z, x, y *Element) { + _subGeneric(z, x, y) +} + +func neg(z, x *Element) { + _negGeneric(z, x) +} + +func reduce(z *Element) { + _reduceGeneric(z) +} diff --git a/ecc/bls12-39/fp/element_test.go b/ecc/bls12-39/fp/element_test.go new file mode 100644 index 0000000000..9bbba5fc46 --- /dev/null +++ b/ecc/bls12-39/fp/element_test.go @@ -0,0 +1,2249 @@ +// Copyright 2020 ConsenSys Software Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Code generated by consensys/gnark-crypto DO NOT EDIT + +package fp + +import ( + "crypto/rand" + "encoding/json" + "fmt" + "math/big" + "math/bits" + + "testing" + + "github.com/leanovate/gopter" + ggen "github.com/leanovate/gopter/gen" + "github.com/leanovate/gopter/prop" + + "github.com/stretchr/testify/require" +) + +// ------------------------------------------------------------------------------------------------- +// benchmarks +// most benchmarks are rudimentary and should sample a large number of random inputs +// or be run multiple times to ensure it didn't measure the fastest path of the function + +var benchResElement Element + +func BenchmarkElementSelect(b *testing.B) { + var x, y Element + x.SetRandom() + y.SetRandom() + + for i := 0; i < b.N; i++ { + benchResElement.Select(i%3, &x, &y) + } +} + +func BenchmarkElementSetBytes(b *testing.B) { + var x Element + x.SetRandom() + bb := x.Bytes() + b.ResetTimer() + + for i := 0; i < b.N; i++ { + benchResElement.SetBytes(bb[:]) + } + +} + +func BenchmarkElementMulByConstants(b *testing.B) { + b.Run("mulBy3", func(b *testing.B) { + benchResElement.SetRandom() + b.ResetTimer() + for i := 0; i < b.N; i++ { + MulBy3(&benchResElement) + } + }) + b.Run("mulBy5", func(b *testing.B) { + benchResElement.SetRandom() + b.ResetTimer() + for i := 0; i < b.N; i++ { + MulBy5(&benchResElement) + } + }) + b.Run("mulBy13", func(b *testing.B) { + benchResElement.SetRandom() + b.ResetTimer() + for i := 0; i < b.N; i++ { + MulBy13(&benchResElement) + } + }) +} + +func BenchmarkElementInverse(b *testing.B) { + var x Element + x.SetRandom() + benchResElement.SetRandom() + b.ResetTimer() + + for i := 0; i < b.N; i++ { + benchResElement.Inverse(&x) + } + +} + +func BenchmarkElementButterfly(b *testing.B) { + var x Element + x.SetRandom() + benchResElement.SetRandom() + b.ResetTimer() + for i := 0; i < b.N; i++ { + Butterfly(&x, &benchResElement) + } +} + +func BenchmarkElementExp(b *testing.B) { + var x Element + x.SetRandom() 
+ benchResElement.SetRandom() + b1, _ := rand.Int(rand.Reader, Modulus()) + b.ResetTimer() + for i := 0; i < b.N; i++ { + benchResElement.Exp(x, b1) + } +} + +func BenchmarkElementDouble(b *testing.B) { + benchResElement.SetRandom() + b.ResetTimer() + for i := 0; i < b.N; i++ { + benchResElement.Double(&benchResElement) + } +} + +func BenchmarkElementAdd(b *testing.B) { + var x Element + x.SetRandom() + benchResElement.SetRandom() + b.ResetTimer() + for i := 0; i < b.N; i++ { + benchResElement.Add(&x, &benchResElement) + } +} + +func BenchmarkElementSub(b *testing.B) { + var x Element + x.SetRandom() + benchResElement.SetRandom() + b.ResetTimer() + for i := 0; i < b.N; i++ { + benchResElement.Sub(&x, &benchResElement) + } +} + +func BenchmarkElementNeg(b *testing.B) { + benchResElement.SetRandom() + b.ResetTimer() + for i := 0; i < b.N; i++ { + benchResElement.Neg(&benchResElement) + } +} + +func BenchmarkElementDiv(b *testing.B) { + var x Element + x.SetRandom() + benchResElement.SetRandom() + b.ResetTimer() + for i := 0; i < b.N; i++ { + benchResElement.Div(&x, &benchResElement) + } +} + +func BenchmarkElementFromMont(b *testing.B) { + benchResElement.SetRandom() + b.ResetTimer() + for i := 0; i < b.N; i++ { + benchResElement.FromMont() + } +} + +func BenchmarkElementToMont(b *testing.B) { + benchResElement.SetRandom() + b.ResetTimer() + for i := 0; i < b.N; i++ { + benchResElement.ToMont() + } +} +func BenchmarkElementSquare(b *testing.B) { + benchResElement.SetRandom() + b.ResetTimer() + for i := 0; i < b.N; i++ { + benchResElement.Square(&benchResElement) + } +} + +func BenchmarkElementSqrt(b *testing.B) { + var a Element + a.SetUint64(4) + a.Neg(&a) + b.ResetTimer() + for i := 0; i < b.N; i++ { + benchResElement.Sqrt(&a) + } +} + +func BenchmarkElementMul(b *testing.B) { + x := Element{ + 242079875569, + } + benchResElement.SetOne() + b.ResetTimer() + for i := 0; i < b.N; i++ { + benchResElement.Mul(&benchResElement, &x) + } +} + +func BenchmarkElementCmp(b 
*testing.B) { + x := Element{ + 242079875569, + } + benchResElement = x + benchResElement[0] = 0 + b.ResetTimer() + for i := 0; i < b.N; i++ { + benchResElement.Cmp(&x) + } +} + +func TestElementCmp(t *testing.T) { + var x, y Element + + if x.Cmp(&y) != 0 { + t.Fatal("x == y") + } + + one := One() + y.Sub(&y, &one) + + if x.Cmp(&y) != -1 { + t.Fatal("x < y") + } + if y.Cmp(&x) != 1 { + t.Fatal("x < y") + } + + x = y + if x.Cmp(&y) != 0 { + t.Fatal("x == y") + } + + x.Sub(&x, &one) + if x.Cmp(&y) != -1 { + t.Fatal("x < y") + } + if y.Cmp(&x) != 1 { + t.Fatal("x < y") + } +} + +func TestElementNegZero(t *testing.T) { + var a, b Element + b.SetZero() + for a.IsZero() { + a.SetRandom() + } + a.Neg(&b) + if !a.IsZero() { + t.Fatal("neg(0) != 0") + } +} + +// ------------------------------------------------------------------------------------------------- +// Gopter tests +// most of them are generated with a template + +const ( + nbFuzzShort = 200 + nbFuzz = 1000 +) + +// special values to be used in tests +var staticTestValues []Element + +func init() { + staticTestValues = append(staticTestValues, Element{}) // zero + staticTestValues = append(staticTestValues, One()) // one + staticTestValues = append(staticTestValues, rSquare) // r² + var e, one Element + one.SetOne() + e.Sub(&qElement, &one) + staticTestValues = append(staticTestValues, e) // q - 1 + e.Double(&one) + staticTestValues = append(staticTestValues, e) // 2 + + { + a := qElement + a[0]-- + staticTestValues = append(staticTestValues, a) + } + staticTestValues = append(staticTestValues, Element{0}) + staticTestValues = append(staticTestValues, Element{1}) + staticTestValues = append(staticTestValues, Element{2}) + + { + a := qElement + a[0]-- + staticTestValues = append(staticTestValues, a) + } + + { + a := qElement + a[0] = 0 + staticTestValues = append(staticTestValues, a) + } + + { + a := qElement + a[0] = 0 + staticTestValues = append(staticTestValues, a) + } + +} + +func TestElementReduce(t 
*testing.T) { + testValues := make([]Element, len(staticTestValues)) + copy(testValues, staticTestValues) + + for _, s := range testValues { + expected := s + reduce(&s) + _reduceGeneric(&expected) + if !s.Equal(&expected) { + t.Fatal("reduce failed: asm and generic impl don't match") + } + } + + t.Parallel() + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := genFull() + + properties.Property("reduce should output a result smaller than modulus", prop.ForAll( + func(a Element) bool { + b := a + reduce(&a) + _reduceGeneric(&b) + return !a.biggerOrEqualModulus() && a.Equal(&b) + }, + genA, + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) + +} + +func TestElementEqual(t *testing.T) { + t.Parallel() + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := gen() + genB := gen() + + properties.Property("x.Equal(&y) iff x == y; likely false for random pairs", prop.ForAll( + func(a testPairElement, b testPairElement) bool { + return a.element.Equal(&b.element) == (a.element == b.element) + }, + genA, + genB, + )) + + properties.Property("x.Equal(&y) if x == y", prop.ForAll( + func(a testPairElement) bool { + b := a.element + return a.element.Equal(&b) + }, + genA, + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) +} + +func TestElementBytes(t *testing.T) { + t.Parallel() + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := gen() + + properties.Property("SetBytes(Bytes()) should stay constant", prop.ForAll( + 
func(a testPairElement) bool { + var b Element + bytes := a.element.Bytes() + b.SetBytes(bytes[:]) + return a.element.Equal(&b) + }, + genA, + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) +} + +func TestElementInverseExp(t *testing.T) { + // inverse must be equal to exp^-2 + exp := Modulus() + exp.Sub(exp, new(big.Int).SetUint64(2)) + + invMatchExp := func(a testPairElement) bool { + var b Element + b.Set(&a.element) + a.element.Inverse(&a.element) + b.Exp(b, exp) + + return a.element.Equal(&b) + } + + t.Parallel() + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + properties := gopter.NewProperties(parameters) + genA := gen() + properties.Property("inv == exp^-2", prop.ForAll(invMatchExp, genA)) + properties.TestingRun(t, gopter.ConsoleReporter(false)) + + parameters.MinSuccessfulTests = 1 + properties = gopter.NewProperties(parameters) + properties.Property("inv(0) == 0", prop.ForAll(invMatchExp, ggen.OneConstOf(testPairElement{}))) + properties.TestingRun(t, gopter.ConsoleReporter(false)) +} + +func TestElementMulByConstants(t *testing.T) { + + t.Parallel() + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := gen() + + implemented := []uint8{0, 1, 2, 3, 5, 13} + properties.Property("mulByConstant", prop.ForAll( + func(a testPairElement) bool { + for _, c := range implemented { + var constant Element + constant.SetUint64(uint64(c)) + + b := a.element + b.Mul(&b, &constant) + + aa := a.element + mulByConstant(&aa, c) + + if !aa.Equal(&b) { + return false + } + } + + return true + }, + genA, + )) + + properties.Property("MulBy3(x) == Mul(x, 3)", prop.ForAll( + func(a testPairElement) bool { + var constant Element + constant.SetUint64(3) + + b := 
a.element + b.Mul(&b, &constant) + + MulBy3(&a.element) + + return a.element.Equal(&b) + }, + genA, + )) + + properties.Property("MulBy5(x) == Mul(x, 5)", prop.ForAll( + func(a testPairElement) bool { + var constant Element + constant.SetUint64(5) + + b := a.element + b.Mul(&b, &constant) + + MulBy5(&a.element) + + return a.element.Equal(&b) + }, + genA, + )) + + properties.Property("MulBy13(x) == Mul(x, 13)", prop.ForAll( + func(a testPairElement) bool { + var constant Element + constant.SetUint64(13) + + b := a.element + b.Mul(&b, &constant) + + MulBy13(&a.element) + + return a.element.Equal(&b) + }, + genA, + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) + +} + +func TestElementLegendre(t *testing.T) { + t.Parallel() + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := gen() + + properties.Property("legendre should output same result than big.Int.Jacobi", prop.ForAll( + func(a testPairElement) bool { + return a.element.Legendre() == big.Jacobi(&a.bigint, Modulus()) + }, + genA, + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) + +} + +func TestElementButterflies(t *testing.T) { + + t.Parallel() + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := gen() + + properties.Property("butterfly0 == a -b; a +b", prop.ForAll( + func(a, b testPairElement) bool { + a0, b0 := a.element, b.element + + _butterflyGeneric(&a.element, &b.element) + Butterfly(&a0, &b0) + + return a.element.Equal(&a0) && b.element.Equal(&b0) + }, + genA, + genA, + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) + +} + +func TestElementLexicographicallyLargest(t *testing.T) { + t.Parallel() + 
parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := gen() + + properties.Property("element.Cmp should match LexicographicallyLargest output", prop.ForAll( + func(a testPairElement) bool { + var negA Element + negA.Neg(&a.element) + + cmpResult := a.element.Cmp(&negA) + lResult := a.element.LexicographicallyLargest() + + if lResult && cmpResult == 1 { + return true + } + if !lResult && cmpResult != 1 { + return true + } + return false + }, + genA, + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) + +} + +func TestElementAdd(t *testing.T) { + t.Parallel() + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := gen() + genB := gen() + + properties.Property("Add: having the receiver as operand should output the same result", prop.ForAll( + func(a, b testPairElement) bool { + var c, d Element + d.Set(&a.element) + + c.Add(&a.element, &b.element) + a.element.Add(&a.element, &b.element) + b.element.Add(&d, &b.element) + + return a.element.Equal(&b.element) && a.element.Equal(&c) && b.element.Equal(&c) + }, + genA, + genB, + )) + + properties.Property("Add: operation result must match big.Int result", prop.ForAll( + func(a, b testPairElement) bool { + { + var c Element + + c.Add(&a.element, &b.element) + + var d, e big.Int + d.Add(&a.bigint, &b.bigint).Mod(&d, Modulus()) + + if c.FromMont().ToBigInt(&e).Cmp(&d) != 0 { + return false + } + } + + // fixed elements + // a is random + // r takes special values + testValues := make([]Element, len(staticTestValues)) + copy(testValues, staticTestValues) + + for _, r := range testValues { + var d, e, rb big.Int + r.ToBigIntRegular(&rb) + + var c Element + 
c.Add(&a.element, &r) + d.Add(&a.bigint, &rb).Mod(&d, Modulus()) + + // checking generic impl against asm path + var cGeneric Element + _addGeneric(&cGeneric, &a.element, &r) + if !cGeneric.Equal(&c) { + // need to give context to failing error. + return false + } + + if c.FromMont().ToBigInt(&e).Cmp(&d) != 0 { + return false + } + } + return true + }, + genA, + genB, + )) + + properties.Property("Add: operation result must be smaller than modulus", prop.ForAll( + func(a, b testPairElement) bool { + var c Element + + c.Add(&a.element, &b.element) + + return !c.biggerOrEqualModulus() + }, + genA, + genB, + )) + + properties.Property("Add: assembly implementation must be consistent with generic one", prop.ForAll( + func(a, b testPairElement) bool { + var c, d Element + c.Add(&a.element, &b.element) + _addGeneric(&d, &a.element, &b.element) + return c.Equal(&d) + }, + genA, + genB, + )) + + specialValueTest := func() { + // test special values against special values + testValues := make([]Element, len(staticTestValues)) + copy(testValues, staticTestValues) + + for _, a := range testValues { + var aBig big.Int + a.ToBigIntRegular(&aBig) + for _, b := range testValues { + + var bBig, d, e big.Int + b.ToBigIntRegular(&bBig) + + var c Element + c.Add(&a, &b) + d.Add(&aBig, &bBig).Mod(&d, Modulus()) + + // checking asm against generic impl + var cGeneric Element + _addGeneric(&cGeneric, &a, &b) + if !cGeneric.Equal(&c) { + t.Fatal("Add failed special test values: asm and generic impl don't match") + } + + if c.FromMont().ToBigInt(&e).Cmp(&d) != 0 { + t.Fatal("Add failed special test values") + } + } + } + } + + properties.TestingRun(t, gopter.ConsoleReporter(false)) + specialValueTest() +} + +func TestElementSub(t *testing.T) { + t.Parallel() + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := 
gen() + genB := gen() + + properties.Property("Sub: having the receiver as operand should output the same result", prop.ForAll( + func(a, b testPairElement) bool { + var c, d Element + d.Set(&a.element) + + c.Sub(&a.element, &b.element) + a.element.Sub(&a.element, &b.element) + b.element.Sub(&d, &b.element) + + return a.element.Equal(&b.element) && a.element.Equal(&c) && b.element.Equal(&c) + }, + genA, + genB, + )) + + properties.Property("Sub: operation result must match big.Int result", prop.ForAll( + func(a, b testPairElement) bool { + { + var c Element + + c.Sub(&a.element, &b.element) + + var d, e big.Int + d.Sub(&a.bigint, &b.bigint).Mod(&d, Modulus()) + + if c.FromMont().ToBigInt(&e).Cmp(&d) != 0 { + return false + } + } + + // fixed elements + // a is random + // r takes special values + testValues := make([]Element, len(staticTestValues)) + copy(testValues, staticTestValues) + + for _, r := range testValues { + var d, e, rb big.Int + r.ToBigIntRegular(&rb) + + var c Element + c.Sub(&a.element, &r) + d.Sub(&a.bigint, &rb).Mod(&d, Modulus()) + + // checking generic impl against asm path + var cGeneric Element + _subGeneric(&cGeneric, &a.element, &r) + if !cGeneric.Equal(&c) { + // need to give context to failing error. 
+ return false + } + + if c.FromMont().ToBigInt(&e).Cmp(&d) != 0 { + return false + } + } + return true + }, + genA, + genB, + )) + + properties.Property("Sub: operation result must be smaller than modulus", prop.ForAll( + func(a, b testPairElement) bool { + var c Element + + c.Sub(&a.element, &b.element) + + return !c.biggerOrEqualModulus() + }, + genA, + genB, + )) + + properties.Property("Sub: assembly implementation must be consistent with generic one", prop.ForAll( + func(a, b testPairElement) bool { + var c, d Element + c.Sub(&a.element, &b.element) + _subGeneric(&d, &a.element, &b.element) + return c.Equal(&d) + }, + genA, + genB, + )) + + specialValueTest := func() { + // test special values against special values + testValues := make([]Element, len(staticTestValues)) + copy(testValues, staticTestValues) + + for _, a := range testValues { + var aBig big.Int + a.ToBigIntRegular(&aBig) + for _, b := range testValues { + + var bBig, d, e big.Int + b.ToBigIntRegular(&bBig) + + var c Element + c.Sub(&a, &b) + d.Sub(&aBig, &bBig).Mod(&d, Modulus()) + + // checking asm against generic impl + var cGeneric Element + _subGeneric(&cGeneric, &a, &b) + if !cGeneric.Equal(&c) { + t.Fatal("Sub failed special test values: asm and generic impl don't match") + } + + if c.FromMont().ToBigInt(&e).Cmp(&d) != 0 { + t.Fatal("Sub failed special test values") + } + } + } + } + + properties.TestingRun(t, gopter.ConsoleReporter(false)) + specialValueTest() +} + +func TestElementMul(t *testing.T) { + t.Parallel() + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := gen() + genB := gen() + + properties.Property("Mul: having the receiver as operand should output the same result", prop.ForAll( + func(a, b testPairElement) bool { + var c, d Element + d.Set(&a.element) + + c.Mul(&a.element, &b.element) + 
a.element.Mul(&a.element, &b.element) + b.element.Mul(&d, &b.element) + + return a.element.Equal(&b.element) && a.element.Equal(&c) && b.element.Equal(&c) + }, + genA, + genB, + )) + + properties.Property("Mul: operation result must match big.Int result", prop.ForAll( + func(a, b testPairElement) bool { + { + var c Element + + c.Mul(&a.element, &b.element) + + var d, e big.Int + d.Mul(&a.bigint, &b.bigint).Mod(&d, Modulus()) + + if c.FromMont().ToBigInt(&e).Cmp(&d) != 0 { + return false + } + } + + // fixed elements + // a is random + // r takes special values + testValues := make([]Element, len(staticTestValues)) + copy(testValues, staticTestValues) + + for _, r := range testValues { + var d, e, rb big.Int + r.ToBigIntRegular(&rb) + + var c Element + c.Mul(&a.element, &r) + d.Mul(&a.bigint, &rb).Mod(&d, Modulus()) + + // checking generic impl against asm path + var cGeneric Element + _mulGeneric(&cGeneric, &a.element, &r) + if !cGeneric.Equal(&c) { + // need to give context to failing error. 
+ return false + } + + if c.FromMont().ToBigInt(&e).Cmp(&d) != 0 { + return false + } + } + return true + }, + genA, + genB, + )) + + properties.Property("Mul: operation result must be smaller than modulus", prop.ForAll( + func(a, b testPairElement) bool { + var c Element + + c.Mul(&a.element, &b.element) + + return !c.biggerOrEqualModulus() + }, + genA, + genB, + )) + + properties.Property("Mul: assembly implementation must be consistent with generic one", prop.ForAll( + func(a, b testPairElement) bool { + var c, d Element + c.Mul(&a.element, &b.element) + _mulGeneric(&d, &a.element, &b.element) + return c.Equal(&d) + }, + genA, + genB, + )) + + specialValueTest := func() { + // test special values against special values + testValues := make([]Element, len(staticTestValues)) + copy(testValues, staticTestValues) + + for _, a := range testValues { + var aBig big.Int + a.ToBigIntRegular(&aBig) + for _, b := range testValues { + + var bBig, d, e big.Int + b.ToBigIntRegular(&bBig) + + var c Element + c.Mul(&a, &b) + d.Mul(&aBig, &bBig).Mod(&d, Modulus()) + + // checking asm against generic impl + var cGeneric Element + _mulGeneric(&cGeneric, &a, &b) + if !cGeneric.Equal(&c) { + t.Fatal("Mul failed special test values: asm and generic impl don't match") + } + + if c.FromMont().ToBigInt(&e).Cmp(&d) != 0 { + t.Fatal("Mul failed special test values") + } + } + } + } + + properties.TestingRun(t, gopter.ConsoleReporter(false)) + specialValueTest() +} + +func TestElementDiv(t *testing.T) { + t.Parallel() + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := gen() + genB := gen() + + properties.Property("Div: having the receiver as operand should output the same result", prop.ForAll( + func(a, b testPairElement) bool { + var c, d Element + d.Set(&a.element) + + c.Div(&a.element, &b.element) + 
a.element.Div(&a.element, &b.element) + b.element.Div(&d, &b.element) + + return a.element.Equal(&b.element) && a.element.Equal(&c) && b.element.Equal(&c) + }, + genA, + genB, + )) + + properties.Property("Div: operation result must match big.Int result", prop.ForAll( + func(a, b testPairElement) bool { + { + var c Element + + c.Div(&a.element, &b.element) + + var d, e big.Int + d.ModInverse(&b.bigint, Modulus()) + d.Mul(&d, &a.bigint).Mod(&d, Modulus()) + + if c.FromMont().ToBigInt(&e).Cmp(&d) != 0 { + return false + } + } + + // fixed elements + // a is random + // r takes special values + testValues := make([]Element, len(staticTestValues)) + copy(testValues, staticTestValues) + + for _, r := range testValues { + var d, e, rb big.Int + r.ToBigIntRegular(&rb) + + var c Element + c.Div(&a.element, &r) + d.ModInverse(&rb, Modulus()) + d.Mul(&d, &a.bigint).Mod(&d, Modulus()) + + if c.FromMont().ToBigInt(&e).Cmp(&d) != 0 { + return false + } + } + return true + }, + genA, + genB, + )) + + properties.Property("Div: operation result must be smaller than modulus", prop.ForAll( + func(a, b testPairElement) bool { + var c Element + + c.Div(&a.element, &b.element) + + return !c.biggerOrEqualModulus() + }, + genA, + genB, + )) + + specialValueTest := func() { + // test special values against special values + testValues := make([]Element, len(staticTestValues)) + copy(testValues, staticTestValues) + + for _, a := range testValues { + var aBig big.Int + a.ToBigIntRegular(&aBig) + for _, b := range testValues { + + var bBig, d, e big.Int + b.ToBigIntRegular(&bBig) + + var c Element + c.Div(&a, &b) + d.ModInverse(&bBig, Modulus()) + d.Mul(&d, &aBig).Mod(&d, Modulus()) + + if c.FromMont().ToBigInt(&e).Cmp(&d) != 0 { + t.Fatal("Div failed special test values") + } + } + } + } + + properties.TestingRun(t, gopter.ConsoleReporter(false)) + specialValueTest() +} + +func TestElementExp(t *testing.T) { + t.Parallel() + parameters := gopter.DefaultTestParameters() + if testing.Short() { 
+ parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := gen() + genB := gen() + + properties.Property("Exp: having the receiver as operand should output the same result", prop.ForAll( + func(a, b testPairElement) bool { + var c, d Element + d.Set(&a.element) + + c.Exp(a.element, &b.bigint) + a.element.Exp(a.element, &b.bigint) + b.element.Exp(d, &b.bigint) + + return a.element.Equal(&b.element) && a.element.Equal(&c) && b.element.Equal(&c) + }, + genA, + genB, + )) + + properties.Property("Exp: operation result must match big.Int result", prop.ForAll( + func(a, b testPairElement) bool { + { + var c Element + + c.Exp(a.element, &b.bigint) + + var d, e big.Int + d.Exp(&a.bigint, &b.bigint, Modulus()) + + if c.FromMont().ToBigInt(&e).Cmp(&d) != 0 { + return false + } + } + + // fixed elements + // a is random + // r takes special values + testValues := make([]Element, len(staticTestValues)) + copy(testValues, staticTestValues) + + for _, r := range testValues { + var d, e, rb big.Int + r.ToBigIntRegular(&rb) + + var c Element + c.Exp(a.element, &rb) + d.Exp(&a.bigint, &rb, Modulus()) + + if c.FromMont().ToBigInt(&e).Cmp(&d) != 0 { + return false + } + } + return true + }, + genA, + genB, + )) + + properties.Property("Exp: operation result must be smaller than modulus", prop.ForAll( + func(a, b testPairElement) bool { + var c Element + + c.Exp(a.element, &b.bigint) + + return !c.biggerOrEqualModulus() + }, + genA, + genB, + )) + + specialValueTest := func() { + // test special values against special values + testValues := make([]Element, len(staticTestValues)) + copy(testValues, staticTestValues) + + for _, a := range testValues { + var aBig big.Int + a.ToBigIntRegular(&aBig) + for _, b := range testValues { + + var bBig, d, e big.Int + b.ToBigIntRegular(&bBig) + + var c Element + c.Exp(a, &bBig) + d.Exp(&aBig, &bBig, Modulus()) + + if 
c.FromMont().ToBigInt(&e).Cmp(&d) != 0 { + t.Fatal("Exp failed special test values") + } + } + } + } + + properties.TestingRun(t, gopter.ConsoleReporter(false)) + specialValueTest() +} + +func TestElementSquare(t *testing.T) { + t.Parallel() + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := gen() + + properties.Property("Square: having the receiver as operand should output the same result", prop.ForAll( + func(a testPairElement) bool { + + var b Element + + b.Square(&a.element) + a.element.Square(&a.element) + return a.element.Equal(&b) + }, + genA, + )) + + properties.Property("Square: operation result must match big.Int result", prop.ForAll( + func(a testPairElement) bool { + var c Element + c.Square(&a.element) + + var d, e big.Int + d.Mul(&a.bigint, &a.bigint).Mod(&d, Modulus()) + + return c.FromMont().ToBigInt(&e).Cmp(&d) == 0 + }, + genA, + )) + + properties.Property("Square: operation result must be smaller than modulus", prop.ForAll( + func(a testPairElement) bool { + var c Element + c.Square(&a.element) + return !c.biggerOrEqualModulus() + }, + genA, + )) + + specialValueTest := func() { + // test special values + testValues := make([]Element, len(staticTestValues)) + copy(testValues, staticTestValues) + + for _, a := range testValues { + var aBig big.Int + a.ToBigIntRegular(&aBig) + var c Element + c.Square(&a) + + var d, e big.Int + d.Mul(&aBig, &aBig).Mod(&d, Modulus()) + + if c.FromMont().ToBigInt(&e).Cmp(&d) != 0 { + t.Fatal("Square failed special test values") + } + } + } + + properties.TestingRun(t, gopter.ConsoleReporter(false)) + specialValueTest() +} + +func TestElementInverse(t *testing.T) { + t.Parallel() + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = 
nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := gen() + + properties.Property("Inverse: having the receiver as operand should output the same result", prop.ForAll( + func(a testPairElement) bool { + + var b Element + + b.Inverse(&a.element) + a.element.Inverse(&a.element) + return a.element.Equal(&b) + }, + genA, + )) + + properties.Property("Inverse: operation result must match big.Int result", prop.ForAll( + func(a testPairElement) bool { + var c Element + c.Inverse(&a.element) + + var d, e big.Int + d.ModInverse(&a.bigint, Modulus()) + + return c.FromMont().ToBigInt(&e).Cmp(&d) == 0 + }, + genA, + )) + + properties.Property("Inverse: operation result must be smaller than modulus", prop.ForAll( + func(a testPairElement) bool { + var c Element + c.Inverse(&a.element) + return !c.biggerOrEqualModulus() + }, + genA, + )) + + specialValueTest := func() { + // test special values + testValues := make([]Element, len(staticTestValues)) + copy(testValues, staticTestValues) + + for _, a := range testValues { + var aBig big.Int + a.ToBigIntRegular(&aBig) + var c Element + c.Inverse(&a) + + var d, e big.Int + d.ModInverse(&aBig, Modulus()) + + if c.FromMont().ToBigInt(&e).Cmp(&d) != 0 { + t.Fatal("Inverse failed special test values") + } + } + } + + properties.TestingRun(t, gopter.ConsoleReporter(false)) + specialValueTest() +} + +func TestElementSqrt(t *testing.T) { + t.Parallel() + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := gen() + + properties.Property("Sqrt: having the receiver as operand should output the same result", prop.ForAll( + func(a testPairElement) bool { + + b := a.element + + b.Sqrt(&a.element) + a.element.Sqrt(&a.element) + return a.element.Equal(&b) + }, + genA, + )) + + properties.Property("Sqrt: operation result must match big.Int result", 
prop.ForAll( + func(a testPairElement) bool { + var c Element + c.Sqrt(&a.element) + + var d, e big.Int + d.ModSqrt(&a.bigint, Modulus()) + + return c.FromMont().ToBigInt(&e).Cmp(&d) == 0 + }, + genA, + )) + + properties.Property("Sqrt: operation result must be smaller than modulus", prop.ForAll( + func(a testPairElement) bool { + var c Element + c.Sqrt(&a.element) + return !c.biggerOrEqualModulus() + }, + genA, + )) + + specialValueTest := func() { + // test special values + testValues := make([]Element, len(staticTestValues)) + copy(testValues, staticTestValues) + + for _, a := range testValues { + var aBig big.Int + a.ToBigIntRegular(&aBig) + var c Element + c.Sqrt(&a) + + var d, e big.Int + d.ModSqrt(&aBig, Modulus()) + + if c.FromMont().ToBigInt(&e).Cmp(&d) != 0 { + t.Fatal("Sqrt failed special test values") + } + } + } + + properties.TestingRun(t, gopter.ConsoleReporter(false)) + specialValueTest() +} + +func TestElementDouble(t *testing.T) { + t.Parallel() + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := gen() + + properties.Property("Double: having the receiver as operand should output the same result", prop.ForAll( + func(a testPairElement) bool { + + var b Element + + b.Double(&a.element) + a.element.Double(&a.element) + return a.element.Equal(&b) + }, + genA, + )) + + properties.Property("Double: operation result must match big.Int result", prop.ForAll( + func(a testPairElement) bool { + var c Element + c.Double(&a.element) + + var d, e big.Int + d.Lsh(&a.bigint, 1).Mod(&d, Modulus()) + + return c.FromMont().ToBigInt(&e).Cmp(&d) == 0 + }, + genA, + )) + + properties.Property("Double: operation result must be smaller than modulus", prop.ForAll( + func(a testPairElement) bool { + var c Element + c.Double(&a.element) + return !c.biggerOrEqualModulus() + }, + genA, + )) + + 
properties.Property("Double: assembly implementation must be consistent with generic one", prop.ForAll( + func(a testPairElement) bool { + var c, d Element + c.Double(&a.element) + _doubleGeneric(&d, &a.element) + return c.Equal(&d) + }, + genA, + )) + + specialValueTest := func() { + // test special values + testValues := make([]Element, len(staticTestValues)) + copy(testValues, staticTestValues) + + for _, a := range testValues { + var aBig big.Int + a.ToBigIntRegular(&aBig) + var c Element + c.Double(&a) + + var d, e big.Int + d.Lsh(&aBig, 1).Mod(&d, Modulus()) + + // checking asm against generic impl + var cGeneric Element + _doubleGeneric(&cGeneric, &a) + if !cGeneric.Equal(&c) { + t.Fatal("Double failed special test values: asm and generic impl don't match") + } + + if c.FromMont().ToBigInt(&e).Cmp(&d) != 0 { + t.Fatal("Double failed special test values") + } + } + } + + properties.TestingRun(t, gopter.ConsoleReporter(false)) + specialValueTest() +} + +func TestElementNeg(t *testing.T) { + t.Parallel() + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := gen() + + properties.Property("Neg: having the receiver as operand should output the same result", prop.ForAll( + func(a testPairElement) bool { + + var b Element + + b.Neg(&a.element) + a.element.Neg(&a.element) + return a.element.Equal(&b) + }, + genA, + )) + + properties.Property("Neg: operation result must match big.Int result", prop.ForAll( + func(a testPairElement) bool { + var c Element + c.Neg(&a.element) + + var d, e big.Int + d.Neg(&a.bigint).Mod(&d, Modulus()) + + return c.FromMont().ToBigInt(&e).Cmp(&d) == 0 + }, + genA, + )) + + properties.Property("Neg: operation result must be smaller than modulus", prop.ForAll( + func(a testPairElement) bool { + var c Element + c.Neg(&a.element) + return !c.biggerOrEqualModulus() + 
}, + genA, + )) + + properties.Property("Neg: assembly implementation must be consistent with generic one", prop.ForAll( + func(a testPairElement) bool { + var c, d Element + c.Neg(&a.element) + _negGeneric(&d, &a.element) + return c.Equal(&d) + }, + genA, + )) + + specialValueTest := func() { + // test special values + testValues := make([]Element, len(staticTestValues)) + copy(testValues, staticTestValues) + + for _, a := range testValues { + var aBig big.Int + a.ToBigIntRegular(&aBig) + var c Element + c.Neg(&a) + + var d, e big.Int + d.Neg(&aBig).Mod(&d, Modulus()) + + // checking asm against generic impl + var cGeneric Element + _negGeneric(&cGeneric, &a) + if !cGeneric.Equal(&c) { + t.Fatal("Neg failed special test values: asm and generic impl don't match") + } + + if c.FromMont().ToBigInt(&e).Cmp(&d) != 0 { + t.Fatal("Neg failed special test values") + } + } + } + + properties.TestingRun(t, gopter.ConsoleReporter(false)) + specialValueTest() +} + +func TestElementFixedExp(t *testing.T) { + + t.Parallel() + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + var ( + _bLegendreExponentElement *big.Int + _bSqrtExponentElement *big.Int + ) + + _bLegendreExponentElement, _ = new(big.Int).SetString("260771f77b", 16) + const sqrtExponentElement = "1303b8fbbe" + _bSqrtExponentElement, _ = new(big.Int).SetString(sqrtExponentElement, 16) + + genA := gen() + + properties.Property(fmt.Sprintf("expBySqrtExp must match Exp(%s)", sqrtExponentElement), prop.ForAll( + func(a testPairElement) bool { + c := a.element + d := a.element + c.expBySqrtExp(c) + d.Exp(d, _bSqrtExponentElement) + return c.Equal(&d) + }, + genA, + )) + + properties.Property("expByLegendreExp must match Exp(260771f77b)", prop.ForAll( + func(a testPairElement) bool { + c := a.element + d := a.element + c.expByLegendreExp(c) + d.Exp(d, 
_bLegendreExponentElement) + return c.Equal(&d) + }, + genA, + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) +} + +func TestElementHalve(t *testing.T) { + + t.Parallel() + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := gen() + var twoInv Element + twoInv.SetUint64(2) + twoInv.Inverse(&twoInv) + + properties.Property("z.Halve must match z / 2", prop.ForAll( + func(a testPairElement) bool { + c := a.element + d := a.element + c.Halve() + d.Mul(&d, &twoInv) + return c.Equal(&d) + }, + genA, + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) +} + +func combineSelectionArguments(c int64, z int8) int { + if z%3 == 0 { + return 0 + } + return int(c) +} + +func TestElementSelect(t *testing.T) { + t.Parallel() + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := genFull() + genB := genFull() + genC := ggen.Int64() //the condition + genZ := ggen.Int8() //to make zeros artificially more likely + + properties.Property("Select: must select correctly", prop.ForAll( + func(a, b Element, cond int64, z int8) bool { + condC := combineSelectionArguments(cond, z) + + var c Element + c.Select(condC, &a, &b) + + if condC == 0 { + return c.Equal(&a) + } + return c.Equal(&b) + }, + genA, + genB, + genC, + genZ, + )) + + properties.Property("Select: having the receiver as operand should output the same result", prop.ForAll( + func(a, b Element, cond int64, z int8) bool { + condC := combineSelectionArguments(cond, z) + + var c, d Element + d.Set(&a) + c.Select(condC, &a, &b) + a.Select(condC, &a, &b) + b.Select(condC, &d, &b) + return a.Equal(&b) && a.Equal(&c) && b.Equal(&c) + }, + genA, + genB, + 
genC, + genZ, + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) +} + +func TestElementSetInt64(t *testing.T) { + + t.Parallel() + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := gen() + + properties.Property("z.SetInt64 must match z.SetString", prop.ForAll( + func(a testPairElement, v int64) bool { + c := a.element + d := a.element + + c.SetInt64(v) + d.SetString(fmt.Sprintf("%v", v)) + + return c.Equal(&d) + }, + genA, ggen.Int64(), + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) +} + +func TestElementSetInterface(t *testing.T) { + + t.Parallel() + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := gen() + genInt := ggen.Int + genInt8 := ggen.Int8 + genInt16 := ggen.Int16 + genInt32 := ggen.Int32 + genInt64 := ggen.Int64 + + genUint := ggen.UInt + genUint8 := ggen.UInt8 + genUint16 := ggen.UInt16 + genUint32 := ggen.UInt32 + genUint64 := ggen.UInt64 + + properties.Property("z.SetInterface must match z.SetString with int8", prop.ForAll( + func(a testPairElement, v int8) bool { + c := a.element + d := a.element + + c.SetInterface(v) + d.SetString(fmt.Sprintf("%v", v)) + + return c.Equal(&d) + }, + genA, genInt8(), + )) + + properties.Property("z.SetInterface must match z.SetString with int16", prop.ForAll( + func(a testPairElement, v int16) bool { + c := a.element + d := a.element + + c.SetInterface(v) + d.SetString(fmt.Sprintf("%v", v)) + + return c.Equal(&d) + }, + genA, genInt16(), + )) + + properties.Property("z.SetInterface must match z.SetString with int32", prop.ForAll( + func(a testPairElement, v int32) bool { + c := a.element + d := a.element + + c.SetInterface(v) + 
d.SetString(fmt.Sprintf("%v", v)) + + return c.Equal(&d) + }, + genA, genInt32(), + )) + + properties.Property("z.SetInterface must match z.SetString with int64", prop.ForAll( + func(a testPairElement, v int64) bool { + c := a.element + d := a.element + + c.SetInterface(v) + d.SetString(fmt.Sprintf("%v", v)) + + return c.Equal(&d) + }, + genA, genInt64(), + )) + + properties.Property("z.SetInterface must match z.SetString with int", prop.ForAll( + func(a testPairElement, v int) bool { + c := a.element + d := a.element + + c.SetInterface(v) + d.SetString(fmt.Sprintf("%v", v)) + + return c.Equal(&d) + }, + genA, genInt(), + )) + + properties.Property("z.SetInterface must match z.SetString with uint8", prop.ForAll( + func(a testPairElement, v uint8) bool { + c := a.element + d := a.element + + c.SetInterface(v) + d.SetString(fmt.Sprintf("%v", v)) + + return c.Equal(&d) + }, + genA, genUint8(), + )) + + properties.Property("z.SetInterface must match z.SetString with uint16", prop.ForAll( + func(a testPairElement, v uint16) bool { + c := a.element + d := a.element + + c.SetInterface(v) + d.SetString(fmt.Sprintf("%v", v)) + + return c.Equal(&d) + }, + genA, genUint16(), + )) + + properties.Property("z.SetInterface must match z.SetString with uint32", prop.ForAll( + func(a testPairElement, v uint32) bool { + c := a.element + d := a.element + + c.SetInterface(v) + d.SetString(fmt.Sprintf("%v", v)) + + return c.Equal(&d) + }, + genA, genUint32(), + )) + + properties.Property("z.SetInterface must match z.SetString with uint64", prop.ForAll( + func(a testPairElement, v uint64) bool { + c := a.element + d := a.element + + c.SetInterface(v) + d.SetString(fmt.Sprintf("%v", v)) + + return c.Equal(&d) + }, + genA, genUint64(), + )) + + properties.Property("z.SetInterface must match z.SetString with uint", prop.ForAll( + func(a testPairElement, v uint) bool { + c := a.element + d := a.element + + c.SetInterface(v) + d.SetString(fmt.Sprintf("%v", v)) + + return c.Equal(&d) + }, + 
genA, genUint(), + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) +} + +func TestElementNegativeExp(t *testing.T) { + t.Parallel() + + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := gen() + + properties.Property("x⁻ᵏ == 1/xᵏ", prop.ForAll( + func(a, b testPairElement) bool { + + var nb, d, e big.Int + nb.Neg(&b.bigint) + + var c Element + c.Exp(a.element, &nb) + + d.Exp(&a.bigint, &nb, Modulus()) + + return c.FromMont().ToBigInt(&e).Cmp(&d) == 0 + }, + genA, genA, + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) +} + +func TestElementBatchInvert(t *testing.T) { + assert := require.New(t) + + t.Parallel() + + // ensure batchInvert([x]) == invert(x) + for i := int64(-1); i <= 2; i++ { + var e, eInv Element + e.SetInt64(i) + eInv.Inverse(&e) + + a := []Element{e} + aInv := BatchInvert(a) + + assert.True(aInv[0].Equal(&eInv), "batchInvert != invert") + + } + + // test x * x⁻¹ == 1 + tData := [][]int64{ + {-1, 1, 2, 3}, + {0, -1, 1, 2, 3, 0}, + {0, -1, 1, 0, 2, 3, 0}, + {-1, 1, 0, 2, 3}, + {0, 0, 1}, + {1, 0, 0}, + {0, 0, 0}, + } + + for _, t := range tData { + a := make([]Element, len(t)) + for i := 0; i < len(a); i++ { + a[i].SetInt64(t[i]) + } + + aInv := BatchInvert(a) + + assert.True(len(aInv) == len(a)) + + for i := 0; i < len(a); i++ { + if a[i].IsZero() { + assert.True(aInv[i].IsZero(), "0⁻¹ != 0") + } else { + assert.True(a[i].Mul(&a[i], &aInv[i]).IsOne(), "x * x⁻¹ != 1") + } + } + } + + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := gen() + + properties.Property("batchInvert --> x * x⁻¹ == 1", prop.ForAll( + func(tp testPairElement, r uint8) bool { + + a := 
make([]Element, r) + if r != 0 { + a[0] = tp.element + + } + one := One() + for i := 1; i < len(a); i++ { + a[i].Add(&a[i-1], &one) + } + + aInv := BatchInvert(a) + + assert.True(len(aInv) == len(a)) + + for i := 0; i < len(a); i++ { + if a[i].IsZero() { + if !aInv[i].IsZero() { + return false + } + } else { + if !a[i].Mul(&a[i], &aInv[i]).IsOne() { + return false + } + } + } + return true + }, + genA, ggen.UInt8(), + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) +} + +func TestElementFromMont(t *testing.T) { + + t.Parallel() + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := gen() + + properties.Property("Assembly implementation must be consistent with generic one", prop.ForAll( + func(a testPairElement) bool { + c := a.element + d := a.element + c.FromMont() + _fromMontGeneric(&d) + return c.Equal(&d) + }, + genA, + )) + + properties.Property("x.FromMont().ToMont() == x", prop.ForAll( + func(a testPairElement) bool { + c := a.element + c.FromMont().ToMont() + return c.Equal(&a.element) + }, + genA, + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) +} + +func TestElementJSON(t *testing.T) { + assert := require.New(t) + + type S struct { + A Element + B [3]Element + C *Element + D *Element + } + + // encode to JSON + var s S + s.A.SetString("-1") + s.B[2].SetUint64(42) + s.D = new(Element).SetUint64(8000) + + encoded, err := json.Marshal(&s) + assert.NoError(err) + // since our modulus is on 1 word, we may need to adjust "42" and "8000" values; + formatValue := func(v int64) string { + const maxUint16 = 65535 + var a, aNeg big.Int + a.SetInt64(v) + a.Mod(&a, Modulus()) + aNeg.Neg(&a).Mod(&aNeg, Modulus()) + fmt.Println("aNeg", aNeg.Text(10)) + if aNeg.Uint64() != 0 && aNeg.Uint64() <= maxUint16 { + return "-" + aNeg.Text(10) + } + return a.Text(10) + } 
+ expected := fmt.Sprintf("{\"A\":-1,\"B\":[0,0,%s],\"C\":null,\"D\":%s}", formatValue(42), formatValue(8000)) + assert.Equal(expected, string(encoded)) + + // decode valid + var decoded S + err = json.Unmarshal([]byte(expected), &decoded) + assert.NoError(err) + + assert.Equal(s, decoded, "element -> json -> element round trip failed") + + // decode hex and string values + withHexValues := "{\"A\":\"-1\",\"B\":[0,\"0x00000\",\"0x2A\"],\"C\":null,\"D\":\"8000\"}" + + var decodedS S + err = json.Unmarshal([]byte(withHexValues), &decodedS) + assert.NoError(err) + + assert.Equal(s, decodedS, " json with strings -> element failed") + +} + +type testPairElement struct { + element Element + bigint big.Int +} + +func (z *Element) biggerOrEqualModulus() bool { + + return z[0] >= qElement[0] +} + +func gen() gopter.Gen { + return func(genParams *gopter.GenParameters) *gopter.GenResult { + var g testPairElement + + g.element = Element{ + genParams.NextUint64(), + } + if qElement[0] != ^uint64(0) { + g.element[0] %= (qElement[0] + 1) + } + + for g.element.biggerOrEqualModulus() { + g.element = Element{ + genParams.NextUint64(), + } + if qElement[0] != ^uint64(0) { + g.element[0] %= (qElement[0] + 1) + } + } + + g.element.ToBigIntRegular(&g.bigint) + genResult := gopter.NewGenResult(g, gopter.NoShrinker) + return genResult + } +} + +func genFull() gopter.Gen { + return func(genParams *gopter.GenParameters) *gopter.GenResult { + + genRandomFq := func() Element { + var g Element + + g = Element{ + genParams.NextUint64(), + } + + if qElement[0] != ^uint64(0) { + g[0] %= (qElement[0] + 1) + } + + for g.biggerOrEqualModulus() { + g = Element{ + genParams.NextUint64(), + } + if qElement[0] != ^uint64(0) { + g[0] %= (qElement[0] + 1) + } + } + + return g + } + a := genRandomFq() + + var carry uint64 + a[0], _ = bits.Add64(a[0], qElement[0], carry) + + genResult := gopter.NewGenResult(a, gopter.NoShrinker) + return genResult + } +} diff --git a/ecc/bls12-39/fr/arith.go 
// madd0 returns the high 64 bits of a*b + c; the low 64 bits are discarded.
// Used as the reduction step of the single-word Montgomery multiplication.
func madd0(a, b, c uint64) (hi uint64) {
	h, l := bits.Mul64(a, b)
	// fold c into the low word; only the carry out matters
	_, carry := bits.Add64(l, c, 0)
	hi, _ = bits.Add64(h, 0, carry)
	return hi
}
+// +// The API is similar to math/big (big.Int), but the operations are significantly faster (up to 20x for the modular multiplication on amd64, see also https://hackmd.io/@gnark/modular_multiplication) +// +// The modulus is hardcoded in all the operations. +// +// Field elements are represented as an array, and assumed to be in Montgomery form in all methods: +// type Element [1]uint64 +// +// Example API signature +// // Mul z = x * y mod q +// func (z *Element) Mul(x, y *Element) *Element +// +// and can be used like so: +// var a, b Element +// a.SetUint64(2) +// b.SetString("984896738") +// a.Mul(a, b) +// a.Sub(a, a) +// .Add(a, b) +// .Inv(a) +// b.Exp(b, new(big.Int).SetUint64(42)) +// +// Modulus +// 0x5f5b9f1 // base 16 +// 99990001 // base 10 +package fr diff --git a/ecc/bls12-39/fr/element.go b/ecc/bls12-39/fr/element.go new file mode 100644 index 0000000000..1cbce93d50 --- /dev/null +++ b/ecc/bls12-39/fr/element.go @@ -0,0 +1,964 @@ +// Copyright 2020 ConsenSys Software Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by consensys/gnark-crypto DO NOT EDIT + +package fr + +// /!\ WARNING /!\ +// this code has not been audited and is provided as-is. 
In particular, +// there is no security guarantees such as constant time implementation +// or side-channel attack resistance +// /!\ WARNING /!\ + +import ( + "crypto/rand" + "encoding/binary" + "errors" + "io" + "math/big" + "math/bits" + "reflect" + "strconv" + "strings" + "sync" +) + +// Element represents a field element stored on 1 words (uint64) +// Element are assumed to be in Montgomery form in all methods +// field modulus q = +// +// 99990001 +type Element [1]uint64 + +// Limbs number of 64 bits words needed to represent Element +const Limbs = 1 + +// Bits number bits needed to represent Element +const Bits = 27 + +// Bytes number bytes needed to represent Element +const Bytes = Limbs * 8 + +// field modulus stored as big.Int +var _modulus big.Int + +// Modulus returns q as a big.Int +// q = +// +// 99990001 +func Modulus() *big.Int { + return new(big.Int).Set(&_modulus) +} + +// q (modulus) +const qElementWord0 uint64 = 99990001 +const q uint64 = qElementWord0 + +var qElement = Element{ + qElementWord0, +} + +// rSquare +var rSquare = Element{ + 96814941, +} + +var bigIntPool = sync.Pool{ + New: func() interface{} { + return new(big.Int) + }, +} + +func init() { + // base10: 99990001 + _modulus.SetString("5f5b9f1", 16) +} + +// NewElement returns a new Element from a uint64 value +// +// it is equivalent to +// var v NewElement +// v.SetUint64(...) 
+func NewElement(v uint64) Element { + z := Element{v} + z.Mul(&z, &rSquare) + return z +} + +// SetUint64 sets z to v and returns z +func (z *Element) SetUint64(v uint64) *Element { + // sets z LSB to v (non-Montgomery form) and convert z to Montgomery form + *z = Element{v} + return z.Mul(z, &rSquare) // z.ToMont() +} + +// SetInt64 sets z to v and returns z +func (z *Element) SetInt64(v int64) *Element { + + // absolute value of v + m := v >> 63 + z.SetUint64(uint64((v ^ m) - m)) + + if m != 0 { + // v is negative + z.Neg(z) + } + + return z +} + +// Set z = x +func (z *Element) Set(x *Element) *Element { + z[0] = x[0] + return z +} + +// SetInterface converts provided interface into Element +// returns an error if provided type is not supported +// supported types: Element, *Element, uint64, int, string (interpreted as base10 integer), +// *big.Int, big.Int, []byte +func (z *Element) SetInterface(i1 interface{}) (*Element, error) { + switch c1 := i1.(type) { + case Element: + return z.Set(&c1), nil + case *Element: + if c1 == nil { + return nil, errors.New("can't set fr.Element with ") + } + return z.Set(c1), nil + case uint8: + return z.SetUint64(uint64(c1)), nil + case uint16: + return z.SetUint64(uint64(c1)), nil + case uint32: + return z.SetUint64(uint64(c1)), nil + case uint: + return z.SetUint64(uint64(c1)), nil + case uint64: + return z.SetUint64(c1), nil + case int8: + return z.SetInt64(int64(c1)), nil + case int16: + return z.SetInt64(int64(c1)), nil + case int32: + return z.SetInt64(int64(c1)), nil + case int64: + return z.SetInt64(c1), nil + case int: + return z.SetInt64(int64(c1)), nil + case string: + return z.SetString(c1), nil + case *big.Int: + if c1 == nil { + return nil, errors.New("can't set fr.Element with ") + } + return z.SetBigInt(c1), nil + case big.Int: + return z.SetBigInt(&c1), nil + case []byte: + return z.SetBytes(c1), nil + default: + return nil, errors.New("can't set fr.Element from type " + reflect.TypeOf(i1).String()) + } +} + 
+// SetZero z = 0 +func (z *Element) SetZero() *Element { + z[0] = 0 + return z +} + +// SetOne z = 1 (in Montgomery form) +func (z *Element) SetOne() *Element { + z[0] = 98464136 + return z +} + +// Div z = x*y^-1 mod q +func (z *Element) Div(x, y *Element) *Element { + var yInv Element + yInv.Inverse(y) + z.Mul(x, &yInv) + return z +} + +// Bit returns the i'th bit, with lsb == bit 0. +// It is the responsibility of the caller to convert from Montgomery to Regular form if needed +func (z *Element) Bit(i uint64) uint64 { + j := i / 64 + if j >= 1 { + return 0 + } + return uint64(z[j] >> (i % 64) & 1) +} + +// Equal returns z == x; constant-time +func (z *Element) Equal(x *Element) bool { + return z.NotEqual(x) == 0 +} + +// NotEqual returns 0 if and only if z == x; constant-time +func (z *Element) NotEqual(x *Element) uint64 { + return (z[0] ^ x[0]) +} + +// IsZero returns z == 0 +func (z *Element) IsZero() bool { + return (z[0]) == 0 +} + +// IsOne returns z == 1 +func (z *Element) IsOne() bool { + return z[0] == 98464136 +} + +// IsUint64 reports whether z can be represented as an uint64. +func (z *Element) IsUint64() bool { + return true +} + +// Uint64 returns the uint64 representation of x. If x cannot be represented in a uint64, the result is undefined. 
+func (z *Element) Uint64() uint64 { + zz := *z + zz.FromMont() + return zz[0] +} + +// FitsOnOneWord reports whether z words (except the least significant word) are 0 +func (z *Element) FitsOnOneWord() bool { + return true +} + +// Cmp compares (lexicographic order) z and x and returns: +// +// -1 if z < x +// 0 if z == x +// +1 if z > x +// +func (z *Element) Cmp(x *Element) int { + _z := *z + _x := *x + _z.FromMont() + _x.FromMont() + if _z[0] > _x[0] { + return 1 + } else if _z[0] < _x[0] { + return -1 + } + return 0 +} + +// LexicographicallyLargest returns true if this element is strictly lexicographically +// larger than its negation, false otherwise +func (z *Element) LexicographicallyLargest() bool { + // adapted from github.com/zkcrypto/bls12_381 + // we check if the element is larger than (q-1) / 2 + // if z - (((q -1) / 2) + 1) have no underflow, then z > (q-1) / 2 + + _z := *z + _z.FromMont() + + var b uint64 + _, b = bits.Sub64(_z[0], 49995001, 0) + + return b == 0 +} + +// SetRandom sets z to a random element < q +func (z *Element) SetRandom() (*Element, error) { + var bytes [8]byte + if _, err := io.ReadFull(rand.Reader, bytes[:]); err != nil { + return nil, err + } + z[0] = binary.BigEndian.Uint64(bytes[0:8]) + z[0] %= 99990001 + + return z, nil +} + +// One returns 1 (in montgommery form) +func One() Element { + var one Element + one.SetOne() + return one +} + +// Halve sets z to z / 2 (mod p) +func (z *Element) Halve() { + + if z[0]&1 == 1 { + // z = z + q + z[0], _ = bits.Add64(z[0], 99990001, 0) + + } + // z = z >> 1 + z[0] >>= 1 + +} + +// Mul z = x * y mod q +// see https://hackmd.io/@gnark/modular_multiplication +func (z *Element) Mul(x, y *Element) *Element { + + // CIOS multiplication + // Used for Montgomery reduction. 
(qInvNeg) q + r'.r = 1, i.e., qInvNeg = - q⁻¹ mod r + const qInvNegLsw uint64 = 14492505108528883951 + + var r uint64 + hi, lo := bits.Mul64(x[0], y[0]) + m := lo * qInvNegLsw + hi2, lo2 := bits.Mul64(m, q) + _, carry := bits.Add64(lo2, lo, 0) + r, carry = bits.Add64(hi2, hi, carry) + + if carry != 0 || r >= q { + // we need to reduce + r -= q + + } + z[0] = r + + return z +} + +// Square z = x * x mod q +// see https://hackmd.io/@gnark/modular_multiplication +func (z *Element) Square(x *Element) *Element { + + // CIOS multiplication + // Used for Montgomery reduction. (qInvNeg) q + r'.r = 1, i.e., qInvNeg = - q⁻¹ mod r + const qInvNegLsw uint64 = 14492505108528883951 + + var r uint64 + hi, lo := bits.Mul64(x[0], x[0]) + m := lo * qInvNegLsw + hi2, lo2 := bits.Mul64(m, q) + _, carry := bits.Add64(lo2, lo, 0) + r, carry = bits.Add64(hi2, hi, carry) + + if carry != 0 || r >= q { + // we need to reduce + r -= q + + } + z[0] = r + + return z +} + +// FromMont converts z in place (i.e. mutates) from Montgomery to regular representation +// sets and returns z = z * 1 +func (z *Element) FromMont() *Element { + fromMont(z) + return z +} + +// Add z = x + y mod q +func (z *Element) Add(x, y *Element) *Element { + add(z, x, y) + return z +} + +// Double z = x + x mod q, aka Lsh 1 +func (z *Element) Double(x *Element) *Element { + double(z, x) + return z +} + +// Sub z = x - y mod q +func (z *Element) Sub(x, y *Element) *Element { + sub(z, x, y) + return z +} + +// Neg z = q - x +func (z *Element) Neg(x *Element) *Element { + neg(z, x) + return z +} + +// Select is a constant-time conditional move. +// If c=0, z = x0. 
Else z = x1 +func (z *Element) Select(c int, x0 *Element, x1 *Element) *Element { + cC := uint64((int64(c) | -int64(c)) >> 63) // "canonicized" into: 0 if c=0, -1 otherwise + z[0] = x0[0] ^ cC&(x0[0]^x1[0]) + return z +} + +// Generic (no ADX instructions, no AMD64) versions of multiplication and squaring algorithms + +func _mulGeneric(z, x, y *Element) { + + // CIOS multiplication + // Used for Montgomery reduction. (qInvNeg) q + r'.r = 1, i.e., qInvNeg = - q⁻¹ mod r + const qInvNegLsw uint64 = 14492505108528883951 + + var r uint64 + hi, lo := bits.Mul64(x[0], y[0]) + m := lo * qInvNegLsw + hi2, lo2 := bits.Mul64(m, q) + _, carry := bits.Add64(lo2, lo, 0) + r, carry = bits.Add64(hi2, hi, carry) + + if carry != 0 || r >= q { + // we need to reduce + r -= q + + } + z[0] = r + +} + +func _fromMontGeneric(z *Element) { + // the following lines implement z = z * 1 + // with a modified CIOS montgomery multiplication + { + // m = z[0]n'[0] mod W + m := z[0] * 14492505108528883951 + C := madd0(m, 99990001, z[0]) + z[0] = C + } + + // if z >= q → z -= q + // note: this is NOT constant time + if z[0] >= q { + z[0] -= q + } +} + +func _addGeneric(z, x, y *Element) { + + z[0], _ = bits.Add64(x[0], y[0], 0) + if z[0] >= q { + z[0] -= q + } +} + +func _doubleGeneric(z, x *Element) { + if x[0]&(1<<63) == (1 << 63) { + // if highest bit is set, then we have a carry to x + x, we shift and subtract q + z[0] = (x[0] << 1) - q + } else { + // highest bit is not set, but x + x can still be >= q + z[0] = (x[0] << 1) + if z[0] >= q { + z[0] -= q + } + } +} + +func _subGeneric(z, x, y *Element) { + var b uint64 + z[0], b = bits.Sub64(x[0], y[0], 0) + if b != 0 { + z[0] += q + } +} + +func _negGeneric(z, x *Element) { + if x.IsZero() { + z.SetZero() + return + } + z[0] = q - x[0] +} + +func _reduceGeneric(z *Element) { + + // if z >= q → z -= q + // note: this is NOT constant time + if z[0] >= q { + z[0] -= q + } +} + +func mulByConstant(z *Element, c uint8) { + switch c { + case 0: + 
z.SetZero() + return + case 1: + return + case 2: + z.Double(z) + return + case 3: + _z := *z + z.Double(z).Add(z, &_z) + case 5: + _z := *z + z.Double(z).Double(z).Add(z, &_z) + case 11: + _z := *z + z.Double(z).Double(z).Add(z, &_z).Double(z).Add(z, &_z) + default: + var y Element + y.SetUint64(uint64(c)) + z.Mul(z, &y) + } +} + +// BatchInvert returns a new slice with every element inverted. +// Uses Montgomery batch inversion trick +func BatchInvert(a []Element) []Element { + res := make([]Element, len(a)) + if len(a) == 0 { + return res + } + + zeroes := make([]bool, len(a)) + accumulator := One() + + for i := 0; i < len(a); i++ { + if a[i].IsZero() { + zeroes[i] = true + continue + } + res[i] = accumulator + accumulator.Mul(&accumulator, &a[i]) + } + + accumulator.Inverse(&accumulator) + + for i := len(a) - 1; i >= 0; i-- { + if zeroes[i] { + continue + } + res[i].Mul(&res[i], &accumulator) + accumulator.Mul(&accumulator, &a[i]) + } + + return res +} + +func _butterflyGeneric(a, b *Element) { + t := *a + a.Add(a, b) + b.Sub(&t, b) +} + +// BitLen returns the minimum number of bits needed to represent z +// returns 0 if z == 0 +func (z *Element) BitLen() int { + return bits.Len64(z[0]) +} + +// Exp z = xᵏ mod q +func (z *Element) Exp(x Element, k *big.Int) *Element { + if k.IsUint64() && k.Uint64() == 0 { + return z.SetOne() + } + + e := k + if k.Sign() == -1 { + // negative k, we invert + // if k < 0: xᵏ mod q == (x⁻¹)ᵏ mod q + x.Inverse(&x) + + // we negate k in a temp big.Int since + // Int.Bit(_) of k and -k is different + e = bigIntPool.Get().(*big.Int) + defer bigIntPool.Put(e) + e.Neg(k) + } + + z.Set(&x) + + for i := e.BitLen() - 2; i >= 0; i-- { + z.Square(z) + if e.Bit(i) == 1 { + z.Mul(z, &x) + } + } + + return z +} + +// ToMont converts z to Montgomery form +// sets and returns z = z * r² +func (z *Element) ToMont() *Element { + return z.Mul(z, &rSquare) +} + +// ToRegular returns z in regular form (doesn't mutate z) +func (z Element) ToRegular() 
Element { + return *z.FromMont() +} + +// String returns the decimal representation of z as generated by +// z.Text(10). +func (z *Element) String() string { + return z.Text(10) +} + +// Text returns the string representation of z in the given base. +// Base must be between 2 and 36, inclusive. The result uses the +// lower-case letters 'a' to 'z' for digit values 10 to 35. +// No prefix (such as "0x") is added to the string. If z is a nil +// pointer it returns "". +// If base == 10 and -z fits in a uint16 prefix "-" is added to the string. +func (z *Element) Text(base int) string { + if base < 2 || base > 36 { + panic("invalid base") + } + if z == nil { + return "" + } + + const maxUint16 = 65535 + if base == 10 { + var zzNeg Element + zzNeg.Neg(z) + zzNeg.FromMont() + if zzNeg[0] <= maxUint16 && zzNeg[0] != 0 { + return "-" + strconv.FormatUint(zzNeg[0], base) + } + } + zz := *z + zz.FromMont() + return strconv.FormatUint(zz[0], base) +} + +// ToBigInt returns z as a big.Int in Montgomery form +func (z *Element) ToBigInt(res *big.Int) *big.Int { + var b [Limbs * 8]byte + binary.BigEndian.PutUint64(b[0:8], z[0]) + + return res.SetBytes(b[:]) +} + +// ToBigIntRegular returns z as a big.Int in regular form +func (z Element) ToBigIntRegular(res *big.Int) *big.Int { + z.FromMont() + return z.ToBigInt(res) +} + +// Bytes returns the regular (non montgomery) value +// of z as a big-endian byte array. +func (z *Element) Bytes() (res [Limbs * 8]byte) { + _z := z.ToRegular() + binary.BigEndian.PutUint64(res[0:8], _z[0]) + + return +} + +// Marshal returns the regular (non montgomery) value +// of z as a big-endian byte slice. +func (z *Element) Marshal() []byte { + b := z.Bytes() + return b[:] +} + +// SetBytes interprets e as the bytes of a big-endian unsigned integer, +// sets z to that value (in Montgomery form), and returns z. 
+func (z *Element) SetBytes(e []byte) *Element { + if len(e) == 8 { + // fast path + z[0] = binary.BigEndian.Uint64(e) + return z.ToMont() + } + // get a big int from our pool + vv := bigIntPool.Get().(*big.Int) + vv.SetBytes(e) + + // set big int + z.SetBigInt(vv) + + // put temporary object back in pool + bigIntPool.Put(vv) + + return z +} + +// SetBigInt sets z to v (regular form) and returns z in Montgomery form +func (z *Element) SetBigInt(v *big.Int) *Element { + z.SetZero() + + var zero big.Int + + // fast path + c := v.Cmp(&_modulus) + if c == 0 { + // v == 0 + return z + } else if c != 1 && v.Cmp(&zero) != -1 { + // 0 < v < q + return z.setBigInt(v) + } + + // get temporary big int from the pool + vv := bigIntPool.Get().(*big.Int) + + // copy input + modular reduction + vv.Set(v) + vv.Mod(v, &_modulus) + + // set big int byte value + z.setBigInt(vv) + + // release object into pool + bigIntPool.Put(vv) + return z +} + +// setBigInt assumes 0 ⩽ v < q +func (z *Element) setBigInt(v *big.Int) *Element { + vBits := v.Bits() + + if bits.UintSize == 64 { + for i := 0; i < len(vBits); i++ { + z[i] = uint64(vBits[i]) + } + } else { + for i := 0; i < len(vBits); i++ { + if i%2 == 0 { + z[i/2] = uint64(vBits[i]) + } else { + z[i/2] |= uint64(vBits[i]) << 32 + } + } + } + + return z.ToMont() +} + +// SetString creates a big.Int with number and calls SetBigInt on z +// +// The number prefix determines the actual base: A prefix of +// ''0b'' or ''0B'' selects base 2, ''0'', ''0o'' or ''0O'' selects base 8, +// and ''0x'' or ''0X'' selects base 16. Otherwise, the selected base is 10 +// and no prefix is accepted. +// +// For base 16, lower and upper case letters are considered the same: +// The letters 'a' to 'f' and 'A' to 'F' represent digit values 10 to 15. +// +// An underscore character ''_'' may appear between a base +// prefix and an adjacent digit, and between successive digits; such +// underscores do not change the value of the number. 
+// Incorrect placement of underscores is reported as a panic if there +// are no other errors. +// +func (z *Element) SetString(number string) *Element { + // get temporary big int from the pool + vv := bigIntPool.Get().(*big.Int) + + if _, ok := vv.SetString(number, 0); !ok { + panic("Element.SetString failed -> can't parse number into a big.Int " + number) + } + + z.SetBigInt(vv) + + // release object into pool + bigIntPool.Put(vv) + + return z +} + +// MarshalJSON returns json encoding of z (z.Text(10)) +// If z == nil, returns null +func (z *Element) MarshalJSON() ([]byte, error) { + if z == nil { + return []byte("null"), nil + } + const maxSafeBound = 15 // we encode it as number if it's small + s := z.Text(10) + if len(s) <= maxSafeBound { + return []byte(s), nil + } + var sbb strings.Builder + sbb.WriteByte('"') + sbb.WriteString(s) + sbb.WriteByte('"') + return []byte(sbb.String()), nil +} + +// UnmarshalJSON accepts numbers and strings as input +// See Element.SetString for valid prefixes (0x, 0b, ...) +func (z *Element) UnmarshalJSON(data []byte) error { + s := string(data) + if len(s) > Bits*3 { + return errors.New("value too large (max = Element.Bits * 3)") + } + + // we accept numbers and strings, remove leading and trailing quotes if any + if len(s) > 0 && s[0] == '"' { + s = s[1:] + } + if len(s) > 0 && s[len(s)-1] == '"' { + s = s[:len(s)-1] + } + + // get temporary big int from the pool + vv := bigIntPool.Get().(*big.Int) + + if _, ok := vv.SetString(s, 0); !ok { + return errors.New("can't parse into a big.Int: " + s) + } + + z.SetBigInt(vv) + + // release object into pool + bigIntPool.Put(vv) + return nil +} + +// Legendre returns the Legendre symbol of z (either +1, -1, or 0.) 
+func (z *Element) Legendre() int { + var l Element + // z^((q-1)/2) + l.expByLegendreExp(*z) + + if l.IsZero() { + return 0 + } + + // if l == 1 + if l[0] == 98464136 { + return 1 + } + return -1 +} + +// Sqrt z = √x mod q +// if the square root doesn't exist (x is not a square mod q) +// Sqrt leaves z unchanged and returns nil +func (z *Element) Sqrt(x *Element) *Element { + // q ≡ 1 (mod 4) + // see modSqrtTonelliShanks in math/big/int.go + // using https://www.maa.org/sites/default/files/pdf/upload_library/22/Polya/07468342.di020786.02p0470a.pdf + + var y, b, t, w Element + // w = x^((s-1)/2)) + w.expBySqrtExp(*x) + + // y = x^((s+1)/2)) = w * x + y.Mul(x, &w) + + // b = x^s = w * w * x = y * x + b.Mul(&w, &y) + + // g = nonResidue ^ s + var g = Element{ + 30080624, + } + r := uint64(4) + + // compute legendre symbol + // t = x^((q-1)/2) = r-1 squaring of x^s + t = b + for i := uint64(0); i < r-1; i++ { + t.Square(&t) + } + if t.IsZero() { + return z.SetZero() + } + if !(t[0] == 98464136) { + // t != 1, we don't have a square root + return nil + } + for { + var m uint64 + t = b + + // for t != 1 + for !(t[0] == 98464136) { + t.Square(&t) + m++ + } + + if m == 0 { + return z.Set(&y) + } + // t = g^(2^(r-m-1)) mod q + ge := int(r - m - 1) + t = g + for ge > 0 { + t.Square(&t) + ge-- + } + + g.Square(&t) + y.Mul(&y, &t) + b.Mul(&b, &g) + r = m + } +} + +// Inverse z = x⁻¹ mod q +// Algorithm 16 in "Efficient Software-Implementation of Finite Fields with Applications to Cryptography" +// if x == 0, sets and returns z = x +func (z *Element) Inverse(x *Element) *Element { + const q uint64 = qElementWord0 + if x.IsZero() { + z.SetZero() + return z + } + + var r, s, u, v uint64 + u = q // u = q + s = 96814941 // s = r^2 + r = 0 + v = x[0] + + var carry, borrow uint64 + + for (u != 1) && (v != 1) { + for v&1 == 0 { + v >>= 1 + if s&1 == 0 { + s >>= 1 + } else { + s, carry = bits.Add64(s, q, 0) + s >>= 1 + if carry != 0 { + s |= (1 << 63) + } + } + } + for u&1 == 0 { + u 
>>= 1 + if r&1 == 0 { + r >>= 1 + } else { + r, carry = bits.Add64(r, q, 0) + r >>= 1 + if carry != 0 { + r |= (1 << 63) + } + } + } + if v >= u { + v -= u + s, borrow = bits.Sub64(s, r, 0) + if borrow == 1 { + s += q + } + } else { + u -= v + r, borrow = bits.Sub64(r, s, 0) + if borrow == 1 { + r += q + } + } + } + + if u == 1 { + z[0] = r + } else { + z[0] = s + } + + return z +} diff --git a/ecc/bls12-39/fr/element_exp.go b/ecc/bls12-39/fr/element_exp.go new file mode 100644 index 0000000000..a5152e4ec7 --- /dev/null +++ b/ecc/bls12-39/fr/element_exp.go @@ -0,0 +1,188 @@ +// Copyright 2020 ConsenSys Software Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by consensys/gnark-crypto DO NOT EDIT + +package fr + +// expBySqrtExp is equivalent to z.Exp(x, 2fadcf) +// +// uses github.com/mmcloughlin/addchain v0.4.0 to generate a shorter addition chain +func (z *Element) expBySqrtExp(x Element) *Element { + // addition chain: + // + // _10 = 2*1 + // _11 = 1 + _10 + // _100 = 1 + _11 + // _111 = _11 + _100 + // _1011 = _100 + _111 + // _1011000 = _1011 << 3 + // _1011111 = _111 + _1011000 + // i26 = ((_1011111 << 5 + _1011) << 4 + _111) << 6 + // return _1011 + i26 + _100 + // + // Operations: 19 squares 9 multiplies + + // Allocate Temporaries. 
+ var ( + t0 = new(Element) + t1 = new(Element) + t2 = new(Element) + ) + + // var t0,t1,t2 Element + // Step 1: z = x^0x2 + z.Square(&x) + + // Step 2: t0 = x^0x3 + t0.Mul(&x, z) + + // Step 3: z = x^0x4 + z.Mul(&x, t0) + + // Step 4: t1 = x^0x7 + t1.Mul(t0, z) + + // Step 5: t0 = x^0xb + t0.Mul(z, t1) + + // Step 8: t2 = x^0x58 + t2.Square(t0) + for s := 1; s < 3; s++ { + t2.Square(t2) + } + + // Step 9: t2 = x^0x5f + t2.Mul(t1, t2) + + // Step 14: t2 = x^0xbe0 + for s := 0; s < 5; s++ { + t2.Square(t2) + } + + // Step 15: t2 = x^0xbeb + t2.Mul(t0, t2) + + // Step 19: t2 = x^0xbeb0 + for s := 0; s < 4; s++ { + t2.Square(t2) + } + + // Step 20: t1 = x^0xbeb7 + t1.Mul(t1, t2) + + // Step 26: t1 = x^0x2fadc0 + for s := 0; s < 6; s++ { + t1.Square(t1) + } + + // Step 27: t0 = x^0x2fadcb + t0.Mul(t0, t1) + + // Step 28: z = x^0x2fadcf + z.Mul(z, t0) + + return z +} + +// expByLegendreExp is equivalent to z.Exp(x, 2fadcf8) +// +// uses github.com/mmcloughlin/addchain v0.4.0 to generate a shorter addition chain +func (z *Element) expByLegendreExp(x Element) *Element { + // addition chain: + // + // _1000 = 1 << 3 + // _1001 = 1 + _1000 + // _100100 = _1001 << 2 + // _1001000 = 2*_100100 + // _1101100 = _100100 + _1001000 + // _1110101 = _1001 + _1101100 + // _11101010 = 2*_1110101 + // i11 = _100100 + _11101010 + // i14 = 2*i11 + i11 + _1110101 + // i29 = (2*i14 + i14 + i11) << 11 + i14 + // return i29 << 3 + // + // Operations: 23 squares 9 multiplies + + // Allocate Temporaries. 
+ var ( + t0 = new(Element) + t1 = new(Element) + ) + + // var t0,t1 Element + // Step 3: z = x^0x8 + z.Square(&x) + for s := 1; s < 3; s++ { + z.Square(z) + } + + // Step 4: z = x^0x9 + z.Mul(&x, z) + + // Step 6: t0 = x^0x24 + t0.Square(z) + for s := 1; s < 2; s++ { + t0.Square(t0) + } + + // Step 7: t1 = x^0x48 + t1.Square(t0) + + // Step 8: t1 = x^0x6c + t1.Mul(t0, t1) + + // Step 9: z = x^0x75 + z.Mul(z, t1) + + // Step 10: t1 = x^0xea + t1.Square(z) + + // Step 11: t0 = x^0x10e + t0.Mul(t0, t1) + + // Step 12: t1 = x^0x21c + t1.Square(t0) + + // Step 13: t1 = x^0x32a + t1.Mul(t0, t1) + + // Step 14: z = x^0x39f + z.Mul(z, t1) + + // Step 15: t1 = x^0x73e + t1.Square(z) + + // Step 16: t1 = x^0xadd + t1.Mul(z, t1) + + // Step 17: t0 = x^0xbeb + t0.Mul(t0, t1) + + // Step 28: t0 = x^0x5f5800 + for s := 0; s < 11; s++ { + t0.Square(t0) + } + + // Step 29: z = x^0x5f5b9f + z.Mul(z, t0) + + // Step 32: z = x^0x2fadcf8 + for s := 0; s < 3; s++ { + z.Square(z) + } + + return z +} diff --git a/ecc/bls12-39/fr/element_fuzz.go b/ecc/bls12-39/fr/element_fuzz.go new file mode 100644 index 0000000000..987800fb87 --- /dev/null +++ b/ecc/bls12-39/fr/element_fuzz.go @@ -0,0 +1,113 @@ +//go:build gofuzz +// +build gofuzz + +// Copyright 2020 ConsenSys Software Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Code generated by consensys/gnark-crypto DO NOT EDIT + +package fr + +import ( + "bytes" + "encoding/binary" + "io" + "math/big" + "math/bits" +) + +const ( + fuzzInteresting = 1 + fuzzNormal = 0 + fuzzDiscard = -1 +) + +// Fuzz arithmetic operations fuzzer +func Fuzz(data []byte) int { + r := bytes.NewReader(data) + + var e1, e2 Element + e1.SetRawBytes(r) + e2.SetRawBytes(r) + + { + // mul assembly + + var c, _c Element + a, _a, b, _b := e1, e1, e2, e2 + c.Mul(&a, &b) + _mulGeneric(&_c, &_a, &_b) + + if !c.Equal(&_c) { + panic("mul asm != mul generic on Element") + } + } + + { + // inverse + inv := e1 + inv.Inverse(&inv) + + var bInv, b1, b2 big.Int + e1.ToBigIntRegular(&b1) + bInv.ModInverse(&b1, Modulus()) + inv.ToBigIntRegular(&b2) + + if b2.Cmp(&bInv) != 0 { + panic("inverse operation doesn't match big int result") + } + } + + { + // a + -a == 0 + a, b := e1, e1 + b.Neg(&b) + a.Add(&a, &b) + if !a.IsZero() { + panic("a + -a != 0") + } + } + + return fuzzNormal + +} + +// SetRawBytes reads up to Bytes (bytes needed to represent Element) from reader +// and interpret it as big endian uint64 +// used for fuzzing purposes only +func (z *Element) SetRawBytes(r io.Reader) { + + buf := make([]byte, 8) + + for i := 0; i < len(z); i++ { + if _, err := io.ReadFull(r, buf); err != nil { + goto eof + } + z[i] = binary.BigEndian.Uint64(buf[:]) + } +eof: + z[0] %= qElement[0] + + if z.BiggerModulus() { + var b uint64 + z[0], b = bits.Sub64(z[0], qElement[0], 0) + } + + return +} + +func (z *Element) BiggerModulus() bool { + + return z[0] >= qElement[0] +} diff --git a/ecc/bls12-39/fr/element_ops_noasm.go b/ecc/bls12-39/fr/element_ops_noasm.go new file mode 100644 index 0000000000..60383871a7 --- /dev/null +++ b/ecc/bls12-39/fr/element_ops_noasm.go @@ -0,0 +1,77 @@ +// Copyright 2020 ConsenSys Software Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by consensys/gnark-crypto DO NOT EDIT + +package fr + +// /!\ WARNING /!\ +// this code has not been audited and is provided as-is. In particular, +// there is no security guarantees such as constant time implementation +// or side-channel attack resistance +// /!\ WARNING /!\ + +// MulBy3 x *= 3 +func MulBy3(x *Element) { + var y Element + y.SetUint64(3) + x.Mul(x, &y) +} + +// MulBy5 x *= 5 +func MulBy5(x *Element) { + var y Element + y.SetUint64(5) + x.Mul(x, &y) +} + +// MulBy13 x *= 13 +func MulBy13(x *Element) { + var y Element + y.SetUint64(13) + x.Mul(x, &y) +} + +// Butterfly sets +// a = a + b +// b = a - b +func Butterfly(a, b *Element) { + _butterflyGeneric(a, b) +} + +// FromMont converts z in place (i.e. mutates) from Montgomery to regular representation +// sets and returns z = z * 1 +func fromMont(z *Element) { + _fromMontGeneric(z) +} + +func add(z, x, y *Element) { + _addGeneric(z, x, y) +} + +func double(z, x *Element) { + _doubleGeneric(z, x) +} + +func sub(z, x, y *Element) { + _subGeneric(z, x, y) +} + +func neg(z, x *Element) { + _negGeneric(z, x) +} + +func reduce(z *Element) { + _reduceGeneric(z) +} diff --git a/ecc/bls12-39/fr/element_test.go b/ecc/bls12-39/fr/element_test.go new file mode 100644 index 0000000000..3134752b34 --- /dev/null +++ b/ecc/bls12-39/fr/element_test.go @@ -0,0 +1,2249 @@ +// Copyright 2020 ConsenSys Software Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by consensys/gnark-crypto DO NOT EDIT + +package fr + +import ( + "crypto/rand" + "encoding/json" + "fmt" + "math/big" + "math/bits" + + "testing" + + "github.com/leanovate/gopter" + ggen "github.com/leanovate/gopter/gen" + "github.com/leanovate/gopter/prop" + + "github.com/stretchr/testify/require" +) + +// ------------------------------------------------------------------------------------------------- +// benchmarks +// most benchmarks are rudimentary and should sample a large number of random inputs +// or be run multiple times to ensure it didn't measure the fastest path of the function + +var benchResElement Element + +func BenchmarkElementSelect(b *testing.B) { + var x, y Element + x.SetRandom() + y.SetRandom() + + for i := 0; i < b.N; i++ { + benchResElement.Select(i%3, &x, &y) + } +} + +func BenchmarkElementSetBytes(b *testing.B) { + var x Element + x.SetRandom() + bb := x.Bytes() + b.ResetTimer() + + for i := 0; i < b.N; i++ { + benchResElement.SetBytes(bb[:]) + } + +} + +func BenchmarkElementMulByConstants(b *testing.B) { + b.Run("mulBy3", func(b *testing.B) { + benchResElement.SetRandom() + b.ResetTimer() + for i := 0; i < b.N; i++ { + MulBy3(&benchResElement) + } + }) + b.Run("mulBy5", func(b *testing.B) { + benchResElement.SetRandom() + b.ResetTimer() + for i := 0; i < b.N; i++ { + MulBy5(&benchResElement) + } + }) + b.Run("mulBy13", func(b *testing.B) { + benchResElement.SetRandom() + b.ResetTimer() + for i := 0; i < b.N; i++ { + MulBy13(&benchResElement) + } + }) +} + +func BenchmarkElementInverse(b *testing.B) 
{ + var x Element + x.SetRandom() + benchResElement.SetRandom() + b.ResetTimer() + + for i := 0; i < b.N; i++ { + benchResElement.Inverse(&x) + } + +} + +func BenchmarkElementButterfly(b *testing.B) { + var x Element + x.SetRandom() + benchResElement.SetRandom() + b.ResetTimer() + for i := 0; i < b.N; i++ { + Butterfly(&x, &benchResElement) + } +} + +func BenchmarkElementExp(b *testing.B) { + var x Element + x.SetRandom() + benchResElement.SetRandom() + b1, _ := rand.Int(rand.Reader, Modulus()) + b.ResetTimer() + for i := 0; i < b.N; i++ { + benchResElement.Exp(x, b1) + } +} + +func BenchmarkElementDouble(b *testing.B) { + benchResElement.SetRandom() + b.ResetTimer() + for i := 0; i < b.N; i++ { + benchResElement.Double(&benchResElement) + } +} + +func BenchmarkElementAdd(b *testing.B) { + var x Element + x.SetRandom() + benchResElement.SetRandom() + b.ResetTimer() + for i := 0; i < b.N; i++ { + benchResElement.Add(&x, &benchResElement) + } +} + +func BenchmarkElementSub(b *testing.B) { + var x Element + x.SetRandom() + benchResElement.SetRandom() + b.ResetTimer() + for i := 0; i < b.N; i++ { + benchResElement.Sub(&x, &benchResElement) + } +} + +func BenchmarkElementNeg(b *testing.B) { + benchResElement.SetRandom() + b.ResetTimer() + for i := 0; i < b.N; i++ { + benchResElement.Neg(&benchResElement) + } +} + +func BenchmarkElementDiv(b *testing.B) { + var x Element + x.SetRandom() + benchResElement.SetRandom() + b.ResetTimer() + for i := 0; i < b.N; i++ { + benchResElement.Div(&x, &benchResElement) + } +} + +func BenchmarkElementFromMont(b *testing.B) { + benchResElement.SetRandom() + b.ResetTimer() + for i := 0; i < b.N; i++ { + benchResElement.FromMont() + } +} + +func BenchmarkElementToMont(b *testing.B) { + benchResElement.SetRandom() + b.ResetTimer() + for i := 0; i < b.N; i++ { + benchResElement.ToMont() + } +} +func BenchmarkElementSquare(b *testing.B) { + benchResElement.SetRandom() + b.ResetTimer() + for i := 0; i < b.N; i++ { + 
benchResElement.Square(&benchResElement) + } +} + +func BenchmarkElementSqrt(b *testing.B) { + var a Element + a.SetUint64(4) + a.Neg(&a) + b.ResetTimer() + for i := 0; i < b.N; i++ { + benchResElement.Sqrt(&a) + } +} + +func BenchmarkElementMul(b *testing.B) { + x := Element{ + 96814941, + } + benchResElement.SetOne() + b.ResetTimer() + for i := 0; i < b.N; i++ { + benchResElement.Mul(&benchResElement, &x) + } +} + +func BenchmarkElementCmp(b *testing.B) { + x := Element{ + 96814941, + } + benchResElement = x + benchResElement[0] = 0 + b.ResetTimer() + for i := 0; i < b.N; i++ { + benchResElement.Cmp(&x) + } +} + +func TestElementCmp(t *testing.T) { + var x, y Element + + if x.Cmp(&y) != 0 { + t.Fatal("x == y") + } + + one := One() + y.Sub(&y, &one) + + if x.Cmp(&y) != -1 { + t.Fatal("x < y") + } + if y.Cmp(&x) != 1 { + t.Fatal("x < y") + } + + x = y + if x.Cmp(&y) != 0 { + t.Fatal("x == y") + } + + x.Sub(&x, &one) + if x.Cmp(&y) != -1 { + t.Fatal("x < y") + } + if y.Cmp(&x) != 1 { + t.Fatal("x < y") + } +} + +func TestElementNegZero(t *testing.T) { + var a, b Element + b.SetZero() + for a.IsZero() { + a.SetRandom() + } + a.Neg(&b) + if !a.IsZero() { + t.Fatal("neg(0) != 0") + } +} + +// ------------------------------------------------------------------------------------------------- +// Gopter tests +// most of them are generated with a template + +const ( + nbFuzzShort = 200 + nbFuzz = 1000 +) + +// special values to be used in tests +var staticTestValues []Element + +func init() { + staticTestValues = append(staticTestValues, Element{}) // zero + staticTestValues = append(staticTestValues, One()) // one + staticTestValues = append(staticTestValues, rSquare) // r² + var e, one Element + one.SetOne() + e.Sub(&qElement, &one) + staticTestValues = append(staticTestValues, e) // q - 1 + e.Double(&one) + staticTestValues = append(staticTestValues, e) // 2 + + { + a := qElement + a[0]-- + staticTestValues = append(staticTestValues, a) + } + staticTestValues = 
append(staticTestValues, Element{0}) + staticTestValues = append(staticTestValues, Element{1}) + staticTestValues = append(staticTestValues, Element{2}) + + { + a := qElement + a[0]-- + staticTestValues = append(staticTestValues, a) + } + + { + a := qElement + a[0] = 0 + staticTestValues = append(staticTestValues, a) + } + + { + a := qElement + a[0] = 0 + staticTestValues = append(staticTestValues, a) + } + +} + +func TestElementReduce(t *testing.T) { + testValues := make([]Element, len(staticTestValues)) + copy(testValues, staticTestValues) + + for _, s := range testValues { + expected := s + reduce(&s) + _reduceGeneric(&expected) + if !s.Equal(&expected) { + t.Fatal("reduce failed: asm and generic impl don't match") + } + } + + t.Parallel() + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := genFull() + + properties.Property("reduce should output a result smaller than modulus", prop.ForAll( + func(a Element) bool { + b := a + reduce(&a) + _reduceGeneric(&b) + return !a.biggerOrEqualModulus() && a.Equal(&b) + }, + genA, + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) + +} + +func TestElementEqual(t *testing.T) { + t.Parallel() + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := gen() + genB := gen() + + properties.Property("x.Equal(&y) iff x == y; likely false for random pairs", prop.ForAll( + func(a testPairElement, b testPairElement) bool { + return a.element.Equal(&b.element) == (a.element == b.element) + }, + genA, + genB, + )) + + properties.Property("x.Equal(&y) if x == y", prop.ForAll( + func(a testPairElement) bool { + b := a.element + return a.element.Equal(&b) + }, + genA, + )) 
+ + properties.TestingRun(t, gopter.ConsoleReporter(false)) +} + +func TestElementBytes(t *testing.T) { + t.Parallel() + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := gen() + + properties.Property("SetBytes(Bytes()) should stay constant", prop.ForAll( + func(a testPairElement) bool { + var b Element + bytes := a.element.Bytes() + b.SetBytes(bytes[:]) + return a.element.Equal(&b) + }, + genA, + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) +} + +func TestElementInverseExp(t *testing.T) { + // inverse must be equal to exp^-2 + exp := Modulus() + exp.Sub(exp, new(big.Int).SetUint64(2)) + + invMatchExp := func(a testPairElement) bool { + var b Element + b.Set(&a.element) + a.element.Inverse(&a.element) + b.Exp(b, exp) + + return a.element.Equal(&b) + } + + t.Parallel() + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + properties := gopter.NewProperties(parameters) + genA := gen() + properties.Property("inv == exp^-2", prop.ForAll(invMatchExp, genA)) + properties.TestingRun(t, gopter.ConsoleReporter(false)) + + parameters.MinSuccessfulTests = 1 + properties = gopter.NewProperties(parameters) + properties.Property("inv(0) == 0", prop.ForAll(invMatchExp, ggen.OneConstOf(testPairElement{}))) + properties.TestingRun(t, gopter.ConsoleReporter(false)) +} + +func TestElementMulByConstants(t *testing.T) { + + t.Parallel() + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := gen() + + implemented := []uint8{0, 1, 2, 3, 5, 13} + properties.Property("mulByConstant", prop.ForAll( + 
func(a testPairElement) bool { + for _, c := range implemented { + var constant Element + constant.SetUint64(uint64(c)) + + b := a.element + b.Mul(&b, &constant) + + aa := a.element + mulByConstant(&aa, c) + + if !aa.Equal(&b) { + return false + } + } + + return true + }, + genA, + )) + + properties.Property("MulBy3(x) == Mul(x, 3)", prop.ForAll( + func(a testPairElement) bool { + var constant Element + constant.SetUint64(3) + + b := a.element + b.Mul(&b, &constant) + + MulBy3(&a.element) + + return a.element.Equal(&b) + }, + genA, + )) + + properties.Property("MulBy5(x) == Mul(x, 5)", prop.ForAll( + func(a testPairElement) bool { + var constant Element + constant.SetUint64(5) + + b := a.element + b.Mul(&b, &constant) + + MulBy5(&a.element) + + return a.element.Equal(&b) + }, + genA, + )) + + properties.Property("MulBy13(x) == Mul(x, 13)", prop.ForAll( + func(a testPairElement) bool { + var constant Element + constant.SetUint64(13) + + b := a.element + b.Mul(&b, &constant) + + MulBy13(&a.element) + + return a.element.Equal(&b) + }, + genA, + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) + +} + +func TestElementLegendre(t *testing.T) { + t.Parallel() + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := gen() + + properties.Property("legendre should output same result than big.Int.Jacobi", prop.ForAll( + func(a testPairElement) bool { + return a.element.Legendre() == big.Jacobi(&a.bigint, Modulus()) + }, + genA, + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) + +} + +func TestElementButterflies(t *testing.T) { + + t.Parallel() + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA 
:= gen() + + properties.Property("butterfly0 == a -b; a +b", prop.ForAll( + func(a, b testPairElement) bool { + a0, b0 := a.element, b.element + + _butterflyGeneric(&a.element, &b.element) + Butterfly(&a0, &b0) + + return a.element.Equal(&a0) && b.element.Equal(&b0) + }, + genA, + genA, + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) + +} + +func TestElementLexicographicallyLargest(t *testing.T) { + t.Parallel() + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := gen() + + properties.Property("element.Cmp should match LexicographicallyLargest output", prop.ForAll( + func(a testPairElement) bool { + var negA Element + negA.Neg(&a.element) + + cmpResult := a.element.Cmp(&negA) + lResult := a.element.LexicographicallyLargest() + + if lResult && cmpResult == 1 { + return true + } + if !lResult && cmpResult != 1 { + return true + } + return false + }, + genA, + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) + +} + +func TestElementAdd(t *testing.T) { + t.Parallel() + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := gen() + genB := gen() + + properties.Property("Add: having the receiver as operand should output the same result", prop.ForAll( + func(a, b testPairElement) bool { + var c, d Element + d.Set(&a.element) + + c.Add(&a.element, &b.element) + a.element.Add(&a.element, &b.element) + b.element.Add(&d, &b.element) + + return a.element.Equal(&b.element) && a.element.Equal(&c) && b.element.Equal(&c) + }, + genA, + genB, + )) + + properties.Property("Add: operation result must match big.Int result", prop.ForAll( + func(a, b testPairElement) bool { + { + var c Element + + 
c.Add(&a.element, &b.element) + + var d, e big.Int + d.Add(&a.bigint, &b.bigint).Mod(&d, Modulus()) + + if c.FromMont().ToBigInt(&e).Cmp(&d) != 0 { + return false + } + } + + // fixed elements + // a is random + // r takes special values + testValues := make([]Element, len(staticTestValues)) + copy(testValues, staticTestValues) + + for _, r := range testValues { + var d, e, rb big.Int + r.ToBigIntRegular(&rb) + + var c Element + c.Add(&a.element, &r) + d.Add(&a.bigint, &rb).Mod(&d, Modulus()) + + // checking generic impl against asm path + var cGeneric Element + _addGeneric(&cGeneric, &a.element, &r) + if !cGeneric.Equal(&c) { + // need to give context to failing error. + return false + } + + if c.FromMont().ToBigInt(&e).Cmp(&d) != 0 { + return false + } + } + return true + }, + genA, + genB, + )) + + properties.Property("Add: operation result must be smaller than modulus", prop.ForAll( + func(a, b testPairElement) bool { + var c Element + + c.Add(&a.element, &b.element) + + return !c.biggerOrEqualModulus() + }, + genA, + genB, + )) + + properties.Property("Add: assembly implementation must be consistent with generic one", prop.ForAll( + func(a, b testPairElement) bool { + var c, d Element + c.Add(&a.element, &b.element) + _addGeneric(&d, &a.element, &b.element) + return c.Equal(&d) + }, + genA, + genB, + )) + + specialValueTest := func() { + // test special values against special values + testValues := make([]Element, len(staticTestValues)) + copy(testValues, staticTestValues) + + for _, a := range testValues { + var aBig big.Int + a.ToBigIntRegular(&aBig) + for _, b := range testValues { + + var bBig, d, e big.Int + b.ToBigIntRegular(&bBig) + + var c Element + c.Add(&a, &b) + d.Add(&aBig, &bBig).Mod(&d, Modulus()) + + // checking asm against generic impl + var cGeneric Element + _addGeneric(&cGeneric, &a, &b) + if !cGeneric.Equal(&c) { + t.Fatal("Add failed special test values: asm and generic impl don't match") + } + + if c.FromMont().ToBigInt(&e).Cmp(&d) != 0 { 
+ t.Fatal("Add failed special test values") + } + } + } + } + + properties.TestingRun(t, gopter.ConsoleReporter(false)) + specialValueTest() +} + +func TestElementSub(t *testing.T) { + t.Parallel() + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := gen() + genB := gen() + + properties.Property("Sub: having the receiver as operand should output the same result", prop.ForAll( + func(a, b testPairElement) bool { + var c, d Element + d.Set(&a.element) + + c.Sub(&a.element, &b.element) + a.element.Sub(&a.element, &b.element) + b.element.Sub(&d, &b.element) + + return a.element.Equal(&b.element) && a.element.Equal(&c) && b.element.Equal(&c) + }, + genA, + genB, + )) + + properties.Property("Sub: operation result must match big.Int result", prop.ForAll( + func(a, b testPairElement) bool { + { + var c Element + + c.Sub(&a.element, &b.element) + + var d, e big.Int + d.Sub(&a.bigint, &b.bigint).Mod(&d, Modulus()) + + if c.FromMont().ToBigInt(&e).Cmp(&d) != 0 { + return false + } + } + + // fixed elements + // a is random + // r takes special values + testValues := make([]Element, len(staticTestValues)) + copy(testValues, staticTestValues) + + for _, r := range testValues { + var d, e, rb big.Int + r.ToBigIntRegular(&rb) + + var c Element + c.Sub(&a.element, &r) + d.Sub(&a.bigint, &rb).Mod(&d, Modulus()) + + // checking generic impl against asm path + var cGeneric Element + _subGeneric(&cGeneric, &a.element, &r) + if !cGeneric.Equal(&c) { + // need to give context to failing error. 
+ return false + } + + if c.FromMont().ToBigInt(&e).Cmp(&d) != 0 { + return false + } + } + return true + }, + genA, + genB, + )) + + properties.Property("Sub: operation result must be smaller than modulus", prop.ForAll( + func(a, b testPairElement) bool { + var c Element + + c.Sub(&a.element, &b.element) + + return !c.biggerOrEqualModulus() + }, + genA, + genB, + )) + + properties.Property("Sub: assembly implementation must be consistent with generic one", prop.ForAll( + func(a, b testPairElement) bool { + var c, d Element + c.Sub(&a.element, &b.element) + _subGeneric(&d, &a.element, &b.element) + return c.Equal(&d) + }, + genA, + genB, + )) + + specialValueTest := func() { + // test special values against special values + testValues := make([]Element, len(staticTestValues)) + copy(testValues, staticTestValues) + + for _, a := range testValues { + var aBig big.Int + a.ToBigIntRegular(&aBig) + for _, b := range testValues { + + var bBig, d, e big.Int + b.ToBigIntRegular(&bBig) + + var c Element + c.Sub(&a, &b) + d.Sub(&aBig, &bBig).Mod(&d, Modulus()) + + // checking asm against generic impl + var cGeneric Element + _subGeneric(&cGeneric, &a, &b) + if !cGeneric.Equal(&c) { + t.Fatal("Sub failed special test values: asm and generic impl don't match") + } + + if c.FromMont().ToBigInt(&e).Cmp(&d) != 0 { + t.Fatal("Sub failed special test values") + } + } + } + } + + properties.TestingRun(t, gopter.ConsoleReporter(false)) + specialValueTest() +} + +func TestElementMul(t *testing.T) { + t.Parallel() + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := gen() + genB := gen() + + properties.Property("Mul: having the receiver as operand should output the same result", prop.ForAll( + func(a, b testPairElement) bool { + var c, d Element + d.Set(&a.element) + + c.Mul(&a.element, &b.element) + 
a.element.Mul(&a.element, &b.element) + b.element.Mul(&d, &b.element) + + return a.element.Equal(&b.element) && a.element.Equal(&c) && b.element.Equal(&c) + }, + genA, + genB, + )) + + properties.Property("Mul: operation result must match big.Int result", prop.ForAll( + func(a, b testPairElement) bool { + { + var c Element + + c.Mul(&a.element, &b.element) + + var d, e big.Int + d.Mul(&a.bigint, &b.bigint).Mod(&d, Modulus()) + + if c.FromMont().ToBigInt(&e).Cmp(&d) != 0 { + return false + } + } + + // fixed elements + // a is random + // r takes special values + testValues := make([]Element, len(staticTestValues)) + copy(testValues, staticTestValues) + + for _, r := range testValues { + var d, e, rb big.Int + r.ToBigIntRegular(&rb) + + var c Element + c.Mul(&a.element, &r) + d.Mul(&a.bigint, &rb).Mod(&d, Modulus()) + + // checking generic impl against asm path + var cGeneric Element + _mulGeneric(&cGeneric, &a.element, &r) + if !cGeneric.Equal(&c) { + // need to give context to failing error. 
+ return false + } + + if c.FromMont().ToBigInt(&e).Cmp(&d) != 0 { + return false + } + } + return true + }, + genA, + genB, + )) + + properties.Property("Mul: operation result must be smaller than modulus", prop.ForAll( + func(a, b testPairElement) bool { + var c Element + + c.Mul(&a.element, &b.element) + + return !c.biggerOrEqualModulus() + }, + genA, + genB, + )) + + properties.Property("Mul: assembly implementation must be consistent with generic one", prop.ForAll( + func(a, b testPairElement) bool { + var c, d Element + c.Mul(&a.element, &b.element) + _mulGeneric(&d, &a.element, &b.element) + return c.Equal(&d) + }, + genA, + genB, + )) + + specialValueTest := func() { + // test special values against special values + testValues := make([]Element, len(staticTestValues)) + copy(testValues, staticTestValues) + + for _, a := range testValues { + var aBig big.Int + a.ToBigIntRegular(&aBig) + for _, b := range testValues { + + var bBig, d, e big.Int + b.ToBigIntRegular(&bBig) + + var c Element + c.Mul(&a, &b) + d.Mul(&aBig, &bBig).Mod(&d, Modulus()) + + // checking asm against generic impl + var cGeneric Element + _mulGeneric(&cGeneric, &a, &b) + if !cGeneric.Equal(&c) { + t.Fatal("Mul failed special test values: asm and generic impl don't match") + } + + if c.FromMont().ToBigInt(&e).Cmp(&d) != 0 { + t.Fatal("Mul failed special test values") + } + } + } + } + + properties.TestingRun(t, gopter.ConsoleReporter(false)) + specialValueTest() +} + +func TestElementDiv(t *testing.T) { + t.Parallel() + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := gen() + genB := gen() + + properties.Property("Div: having the receiver as operand should output the same result", prop.ForAll( + func(a, b testPairElement) bool { + var c, d Element + d.Set(&a.element) + + c.Div(&a.element, &b.element) + 
a.element.Div(&a.element, &b.element) + b.element.Div(&d, &b.element) + + return a.element.Equal(&b.element) && a.element.Equal(&c) && b.element.Equal(&c) + }, + genA, + genB, + )) + + properties.Property("Div: operation result must match big.Int result", prop.ForAll( + func(a, b testPairElement) bool { + { + var c Element + + c.Div(&a.element, &b.element) + + var d, e big.Int + d.ModInverse(&b.bigint, Modulus()) + d.Mul(&d, &a.bigint).Mod(&d, Modulus()) + + if c.FromMont().ToBigInt(&e).Cmp(&d) != 0 { + return false + } + } + + // fixed elements + // a is random + // r takes special values + testValues := make([]Element, len(staticTestValues)) + copy(testValues, staticTestValues) + + for _, r := range testValues { + var d, e, rb big.Int + r.ToBigIntRegular(&rb) + + var c Element + c.Div(&a.element, &r) + d.ModInverse(&rb, Modulus()) + d.Mul(&d, &a.bigint).Mod(&d, Modulus()) + + if c.FromMont().ToBigInt(&e).Cmp(&d) != 0 { + return false + } + } + return true + }, + genA, + genB, + )) + + properties.Property("Div: operation result must be smaller than modulus", prop.ForAll( + func(a, b testPairElement) bool { + var c Element + + c.Div(&a.element, &b.element) + + return !c.biggerOrEqualModulus() + }, + genA, + genB, + )) + + specialValueTest := func() { + // test special values against special values + testValues := make([]Element, len(staticTestValues)) + copy(testValues, staticTestValues) + + for _, a := range testValues { + var aBig big.Int + a.ToBigIntRegular(&aBig) + for _, b := range testValues { + + var bBig, d, e big.Int + b.ToBigIntRegular(&bBig) + + var c Element + c.Div(&a, &b) + d.ModInverse(&bBig, Modulus()) + d.Mul(&d, &aBig).Mod(&d, Modulus()) + + if c.FromMont().ToBigInt(&e).Cmp(&d) != 0 { + t.Fatal("Div failed special test values") + } + } + } + } + + properties.TestingRun(t, gopter.ConsoleReporter(false)) + specialValueTest() +} + +func TestElementExp(t *testing.T) { + t.Parallel() + parameters := gopter.DefaultTestParameters() + if testing.Short() { 
+ parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := gen() + genB := gen() + + properties.Property("Exp: having the receiver as operand should output the same result", prop.ForAll( + func(a, b testPairElement) bool { + var c, d Element + d.Set(&a.element) + + c.Exp(a.element, &b.bigint) + a.element.Exp(a.element, &b.bigint) + b.element.Exp(d, &b.bigint) + + return a.element.Equal(&b.element) && a.element.Equal(&c) && b.element.Equal(&c) + }, + genA, + genB, + )) + + properties.Property("Exp: operation result must match big.Int result", prop.ForAll( + func(a, b testPairElement) bool { + { + var c Element + + c.Exp(a.element, &b.bigint) + + var d, e big.Int + d.Exp(&a.bigint, &b.bigint, Modulus()) + + if c.FromMont().ToBigInt(&e).Cmp(&d) != 0 { + return false + } + } + + // fixed elements + // a is random + // r takes special values + testValues := make([]Element, len(staticTestValues)) + copy(testValues, staticTestValues) + + for _, r := range testValues { + var d, e, rb big.Int + r.ToBigIntRegular(&rb) + + var c Element + c.Exp(a.element, &rb) + d.Exp(&a.bigint, &rb, Modulus()) + + if c.FromMont().ToBigInt(&e).Cmp(&d) != 0 { + return false + } + } + return true + }, + genA, + genB, + )) + + properties.Property("Exp: operation result must be smaller than modulus", prop.ForAll( + func(a, b testPairElement) bool { + var c Element + + c.Exp(a.element, &b.bigint) + + return !c.biggerOrEqualModulus() + }, + genA, + genB, + )) + + specialValueTest := func() { + // test special values against special values + testValues := make([]Element, len(staticTestValues)) + copy(testValues, staticTestValues) + + for _, a := range testValues { + var aBig big.Int + a.ToBigIntRegular(&aBig) + for _, b := range testValues { + + var bBig, d, e big.Int + b.ToBigIntRegular(&bBig) + + var c Element + c.Exp(a, &bBig) + d.Exp(&aBig, &bBig, Modulus()) + + if 
c.FromMont().ToBigInt(&e).Cmp(&d) != 0 { + t.Fatal("Exp failed special test values") + } + } + } + } + + properties.TestingRun(t, gopter.ConsoleReporter(false)) + specialValueTest() +} + +func TestElementSquare(t *testing.T) { + t.Parallel() + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := gen() + + properties.Property("Square: having the receiver as operand should output the same result", prop.ForAll( + func(a testPairElement) bool { + + var b Element + + b.Square(&a.element) + a.element.Square(&a.element) + return a.element.Equal(&b) + }, + genA, + )) + + properties.Property("Square: operation result must match big.Int result", prop.ForAll( + func(a testPairElement) bool { + var c Element + c.Square(&a.element) + + var d, e big.Int + d.Mul(&a.bigint, &a.bigint).Mod(&d, Modulus()) + + return c.FromMont().ToBigInt(&e).Cmp(&d) == 0 + }, + genA, + )) + + properties.Property("Square: operation result must be smaller than modulus", prop.ForAll( + func(a testPairElement) bool { + var c Element + c.Square(&a.element) + return !c.biggerOrEqualModulus() + }, + genA, + )) + + specialValueTest := func() { + // test special values + testValues := make([]Element, len(staticTestValues)) + copy(testValues, staticTestValues) + + for _, a := range testValues { + var aBig big.Int + a.ToBigIntRegular(&aBig) + var c Element + c.Square(&a) + + var d, e big.Int + d.Mul(&aBig, &aBig).Mod(&d, Modulus()) + + if c.FromMont().ToBigInt(&e).Cmp(&d) != 0 { + t.Fatal("Square failed special test values") + } + } + } + + properties.TestingRun(t, gopter.ConsoleReporter(false)) + specialValueTest() +} + +func TestElementInverse(t *testing.T) { + t.Parallel() + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = 
nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := gen() + + properties.Property("Inverse: having the receiver as operand should output the same result", prop.ForAll( + func(a testPairElement) bool { + + var b Element + + b.Inverse(&a.element) + a.element.Inverse(&a.element) + return a.element.Equal(&b) + }, + genA, + )) + + properties.Property("Inverse: operation result must match big.Int result", prop.ForAll( + func(a testPairElement) bool { + var c Element + c.Inverse(&a.element) + + var d, e big.Int + d.ModInverse(&a.bigint, Modulus()) + + return c.FromMont().ToBigInt(&e).Cmp(&d) == 0 + }, + genA, + )) + + properties.Property("Inverse: operation result must be smaller than modulus", prop.ForAll( + func(a testPairElement) bool { + var c Element + c.Inverse(&a.element) + return !c.biggerOrEqualModulus() + }, + genA, + )) + + specialValueTest := func() { + // test special values + testValues := make([]Element, len(staticTestValues)) + copy(testValues, staticTestValues) + + for _, a := range testValues { + var aBig big.Int + a.ToBigIntRegular(&aBig) + var c Element + c.Inverse(&a) + + var d, e big.Int + d.ModInverse(&aBig, Modulus()) + + if c.FromMont().ToBigInt(&e).Cmp(&d) != 0 { + t.Fatal("Inverse failed special test values") + } + } + } + + properties.TestingRun(t, gopter.ConsoleReporter(false)) + specialValueTest() +} + +func TestElementSqrt(t *testing.T) { + t.Parallel() + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := gen() + + properties.Property("Sqrt: having the receiver as operand should output the same result", prop.ForAll( + func(a testPairElement) bool { + + b := a.element + + b.Sqrt(&a.element) + a.element.Sqrt(&a.element) + return a.element.Equal(&b) + }, + genA, + )) + + properties.Property("Sqrt: operation result must match big.Int result", 
prop.ForAll( + func(a testPairElement) bool { + var c Element + c.Sqrt(&a.element) + + var d, e big.Int + d.ModSqrt(&a.bigint, Modulus()) + + return c.FromMont().ToBigInt(&e).Cmp(&d) == 0 + }, + genA, + )) + + properties.Property("Sqrt: operation result must be smaller than modulus", prop.ForAll( + func(a testPairElement) bool { + var c Element + c.Sqrt(&a.element) + return !c.biggerOrEqualModulus() + }, + genA, + )) + + specialValueTest := func() { + // test special values + testValues := make([]Element, len(staticTestValues)) + copy(testValues, staticTestValues) + + for _, a := range testValues { + var aBig big.Int + a.ToBigIntRegular(&aBig) + var c Element + c.Sqrt(&a) + + var d, e big.Int + d.ModSqrt(&aBig, Modulus()) + + if c.FromMont().ToBigInt(&e).Cmp(&d) != 0 { + t.Fatal("Sqrt failed special test values") + } + } + } + + properties.TestingRun(t, gopter.ConsoleReporter(false)) + specialValueTest() +} + +func TestElementDouble(t *testing.T) { + t.Parallel() + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := gen() + + properties.Property("Double: having the receiver as operand should output the same result", prop.ForAll( + func(a testPairElement) bool { + + var b Element + + b.Double(&a.element) + a.element.Double(&a.element) + return a.element.Equal(&b) + }, + genA, + )) + + properties.Property("Double: operation result must match big.Int result", prop.ForAll( + func(a testPairElement) bool { + var c Element + c.Double(&a.element) + + var d, e big.Int + d.Lsh(&a.bigint, 1).Mod(&d, Modulus()) + + return c.FromMont().ToBigInt(&e).Cmp(&d) == 0 + }, + genA, + )) + + properties.Property("Double: operation result must be smaller than modulus", prop.ForAll( + func(a testPairElement) bool { + var c Element + c.Double(&a.element) + return !c.biggerOrEqualModulus() + }, + genA, + )) + + 
properties.Property("Double: assembly implementation must be consistent with generic one", prop.ForAll( + func(a testPairElement) bool { + var c, d Element + c.Double(&a.element) + _doubleGeneric(&d, &a.element) + return c.Equal(&d) + }, + genA, + )) + + specialValueTest := func() { + // test special values + testValues := make([]Element, len(staticTestValues)) + copy(testValues, staticTestValues) + + for _, a := range testValues { + var aBig big.Int + a.ToBigIntRegular(&aBig) + var c Element + c.Double(&a) + + var d, e big.Int + d.Lsh(&aBig, 1).Mod(&d, Modulus()) + + // checking asm against generic impl + var cGeneric Element + _doubleGeneric(&cGeneric, &a) + if !cGeneric.Equal(&c) { + t.Fatal("Double failed special test values: asm and generic impl don't match") + } + + if c.FromMont().ToBigInt(&e).Cmp(&d) != 0 { + t.Fatal("Double failed special test values") + } + } + } + + properties.TestingRun(t, gopter.ConsoleReporter(false)) + specialValueTest() +} + +func TestElementNeg(t *testing.T) { + t.Parallel() + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := gen() + + properties.Property("Neg: having the receiver as operand should output the same result", prop.ForAll( + func(a testPairElement) bool { + + var b Element + + b.Neg(&a.element) + a.element.Neg(&a.element) + return a.element.Equal(&b) + }, + genA, + )) + + properties.Property("Neg: operation result must match big.Int result", prop.ForAll( + func(a testPairElement) bool { + var c Element + c.Neg(&a.element) + + var d, e big.Int + d.Neg(&a.bigint).Mod(&d, Modulus()) + + return c.FromMont().ToBigInt(&e).Cmp(&d) == 0 + }, + genA, + )) + + properties.Property("Neg: operation result must be smaller than modulus", prop.ForAll( + func(a testPairElement) bool { + var c Element + c.Neg(&a.element) + return !c.biggerOrEqualModulus() + 
}, + genA, + )) + + properties.Property("Neg: assembly implementation must be consistent with generic one", prop.ForAll( + func(a testPairElement) bool { + var c, d Element + c.Neg(&a.element) + _negGeneric(&d, &a.element) + return c.Equal(&d) + }, + genA, + )) + + specialValueTest := func() { + // test special values + testValues := make([]Element, len(staticTestValues)) + copy(testValues, staticTestValues) + + for _, a := range testValues { + var aBig big.Int + a.ToBigIntRegular(&aBig) + var c Element + c.Neg(&a) + + var d, e big.Int + d.Neg(&aBig).Mod(&d, Modulus()) + + // checking asm against generic impl + var cGeneric Element + _negGeneric(&cGeneric, &a) + if !cGeneric.Equal(&c) { + t.Fatal("Neg failed special test values: asm and generic impl don't match") + } + + if c.FromMont().ToBigInt(&e).Cmp(&d) != 0 { + t.Fatal("Neg failed special test values") + } + } + } + + properties.TestingRun(t, gopter.ConsoleReporter(false)) + specialValueTest() +} + +func TestElementFixedExp(t *testing.T) { + + t.Parallel() + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + var ( + _bLegendreExponentElement *big.Int + _bSqrtExponentElement *big.Int + ) + + _bLegendreExponentElement, _ = new(big.Int).SetString("2fadcf8", 16) + const sqrtExponentElement = "2fadcf" + _bSqrtExponentElement, _ = new(big.Int).SetString(sqrtExponentElement, 16) + + genA := gen() + + properties.Property(fmt.Sprintf("expBySqrtExp must match Exp(%s)", sqrtExponentElement), prop.ForAll( + func(a testPairElement) bool { + c := a.element + d := a.element + c.expBySqrtExp(c) + d.Exp(d, _bSqrtExponentElement) + return c.Equal(&d) + }, + genA, + )) + + properties.Property("expByLegendreExp must match Exp(2fadcf8)", prop.ForAll( + func(a testPairElement) bool { + c := a.element + d := a.element + c.expByLegendreExp(c) + d.Exp(d, 
_bLegendreExponentElement) + return c.Equal(&d) + }, + genA, + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) +} + +func TestElementHalve(t *testing.T) { + + t.Parallel() + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := gen() + var twoInv Element + twoInv.SetUint64(2) + twoInv.Inverse(&twoInv) + + properties.Property("z.Halve must match z / 2", prop.ForAll( + func(a testPairElement) bool { + c := a.element + d := a.element + c.Halve() + d.Mul(&d, &twoInv) + return c.Equal(&d) + }, + genA, + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) +} + +func combineSelectionArguments(c int64, z int8) int { + if z%3 == 0 { + return 0 + } + return int(c) +} + +func TestElementSelect(t *testing.T) { + t.Parallel() + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := genFull() + genB := genFull() + genC := ggen.Int64() //the condition + genZ := ggen.Int8() //to make zeros artificially more likely + + properties.Property("Select: must select correctly", prop.ForAll( + func(a, b Element, cond int64, z int8) bool { + condC := combineSelectionArguments(cond, z) + + var c Element + c.Select(condC, &a, &b) + + if condC == 0 { + return c.Equal(&a) + } + return c.Equal(&b) + }, + genA, + genB, + genC, + genZ, + )) + + properties.Property("Select: having the receiver as operand should output the same result", prop.ForAll( + func(a, b Element, cond int64, z int8) bool { + condC := combineSelectionArguments(cond, z) + + var c, d Element + d.Set(&a) + c.Select(condC, &a, &b) + a.Select(condC, &a, &b) + b.Select(condC, &d, &b) + return a.Equal(&b) && a.Equal(&c) && b.Equal(&c) + }, + genA, + genB, + 
genC, + genZ, + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) +} + +func TestElementSetInt64(t *testing.T) { + + t.Parallel() + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := gen() + + properties.Property("z.SetInt64 must match z.SetString", prop.ForAll( + func(a testPairElement, v int64) bool { + c := a.element + d := a.element + + c.SetInt64(v) + d.SetString(fmt.Sprintf("%v", v)) + + return c.Equal(&d) + }, + genA, ggen.Int64(), + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) +} + +func TestElementSetInterface(t *testing.T) { + + t.Parallel() + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := gen() + genInt := ggen.Int + genInt8 := ggen.Int8 + genInt16 := ggen.Int16 + genInt32 := ggen.Int32 + genInt64 := ggen.Int64 + + genUint := ggen.UInt + genUint8 := ggen.UInt8 + genUint16 := ggen.UInt16 + genUint32 := ggen.UInt32 + genUint64 := ggen.UInt64 + + properties.Property("z.SetInterface must match z.SetString with int8", prop.ForAll( + func(a testPairElement, v int8) bool { + c := a.element + d := a.element + + c.SetInterface(v) + d.SetString(fmt.Sprintf("%v", v)) + + return c.Equal(&d) + }, + genA, genInt8(), + )) + + properties.Property("z.SetInterface must match z.SetString with int16", prop.ForAll( + func(a testPairElement, v int16) bool { + c := a.element + d := a.element + + c.SetInterface(v) + d.SetString(fmt.Sprintf("%v", v)) + + return c.Equal(&d) + }, + genA, genInt16(), + )) + + properties.Property("z.SetInterface must match z.SetString with int32", prop.ForAll( + func(a testPairElement, v int32) bool { + c := a.element + d := a.element + + c.SetInterface(v) + 
d.SetString(fmt.Sprintf("%v", v)) + + return c.Equal(&d) + }, + genA, genInt32(), + )) + + properties.Property("z.SetInterface must match z.SetString with int64", prop.ForAll( + func(a testPairElement, v int64) bool { + c := a.element + d := a.element + + c.SetInterface(v) + d.SetString(fmt.Sprintf("%v", v)) + + return c.Equal(&d) + }, + genA, genInt64(), + )) + + properties.Property("z.SetInterface must match z.SetString with int", prop.ForAll( + func(a testPairElement, v int) bool { + c := a.element + d := a.element + + c.SetInterface(v) + d.SetString(fmt.Sprintf("%v", v)) + + return c.Equal(&d) + }, + genA, genInt(), + )) + + properties.Property("z.SetInterface must match z.SetString with uint8", prop.ForAll( + func(a testPairElement, v uint8) bool { + c := a.element + d := a.element + + c.SetInterface(v) + d.SetString(fmt.Sprintf("%v", v)) + + return c.Equal(&d) + }, + genA, genUint8(), + )) + + properties.Property("z.SetInterface must match z.SetString with uint16", prop.ForAll( + func(a testPairElement, v uint16) bool { + c := a.element + d := a.element + + c.SetInterface(v) + d.SetString(fmt.Sprintf("%v", v)) + + return c.Equal(&d) + }, + genA, genUint16(), + )) + + properties.Property("z.SetInterface must match z.SetString with uint32", prop.ForAll( + func(a testPairElement, v uint32) bool { + c := a.element + d := a.element + + c.SetInterface(v) + d.SetString(fmt.Sprintf("%v", v)) + + return c.Equal(&d) + }, + genA, genUint32(), + )) + + properties.Property("z.SetInterface must match z.SetString with uint64", prop.ForAll( + func(a testPairElement, v uint64) bool { + c := a.element + d := a.element + + c.SetInterface(v) + d.SetString(fmt.Sprintf("%v", v)) + + return c.Equal(&d) + }, + genA, genUint64(), + )) + + properties.Property("z.SetInterface must match z.SetString with uint", prop.ForAll( + func(a testPairElement, v uint) bool { + c := a.element + d := a.element + + c.SetInterface(v) + d.SetString(fmt.Sprintf("%v", v)) + + return c.Equal(&d) + }, + 
genA, genUint(), + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) +} + +func TestElementNegativeExp(t *testing.T) { + t.Parallel() + + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := gen() + + properties.Property("x⁻ᵏ == 1/xᵏ", prop.ForAll( + func(a, b testPairElement) bool { + + var nb, d, e big.Int + nb.Neg(&b.bigint) + + var c Element + c.Exp(a.element, &nb) + + d.Exp(&a.bigint, &nb, Modulus()) + + return c.FromMont().ToBigInt(&e).Cmp(&d) == 0 + }, + genA, genA, + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) +} + +func TestElementBatchInvert(t *testing.T) { + assert := require.New(t) + + t.Parallel() + + // ensure batchInvert([x]) == invert(x) + for i := int64(-1); i <= 2; i++ { + var e, eInv Element + e.SetInt64(i) + eInv.Inverse(&e) + + a := []Element{e} + aInv := BatchInvert(a) + + assert.True(aInv[0].Equal(&eInv), "batchInvert != invert") + + } + + // test x * x⁻¹ == 1 + tData := [][]int64{ + {-1, 1, 2, 3}, + {0, -1, 1, 2, 3, 0}, + {0, -1, 1, 0, 2, 3, 0}, + {-1, 1, 0, 2, 3}, + {0, 0, 1}, + {1, 0, 0}, + {0, 0, 0}, + } + + for _, t := range tData { + a := make([]Element, len(t)) + for i := 0; i < len(a); i++ { + a[i].SetInt64(t[i]) + } + + aInv := BatchInvert(a) + + assert.True(len(aInv) == len(a)) + + for i := 0; i < len(a); i++ { + if a[i].IsZero() { + assert.True(aInv[i].IsZero(), "0⁻¹ != 0") + } else { + assert.True(a[i].Mul(&a[i], &aInv[i]).IsOne(), "x * x⁻¹ != 1") + } + } + } + + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := gen() + + properties.Property("batchInvert --> x * x⁻¹ == 1", prop.ForAll( + func(tp testPairElement, r uint8) bool { + + a := 
make([]Element, r) + if r != 0 { + a[0] = tp.element + + } + one := One() + for i := 1; i < len(a); i++ { + a[i].Add(&a[i-1], &one) + } + + aInv := BatchInvert(a) + + assert.True(len(aInv) == len(a)) + + for i := 0; i < len(a); i++ { + if a[i].IsZero() { + if !aInv[i].IsZero() { + return false + } + } else { + if !a[i].Mul(&a[i], &aInv[i]).IsOne() { + return false + } + } + } + return true + }, + genA, ggen.UInt8(), + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) +} + +func TestElementFromMont(t *testing.T) { + + t.Parallel() + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := gen() + + properties.Property("Assembly implementation must be consistent with generic one", prop.ForAll( + func(a testPairElement) bool { + c := a.element + d := a.element + c.FromMont() + _fromMontGeneric(&d) + return c.Equal(&d) + }, + genA, + )) + + properties.Property("x.FromMont().ToMont() == x", prop.ForAll( + func(a testPairElement) bool { + c := a.element + c.FromMont().ToMont() + return c.Equal(&a.element) + }, + genA, + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) +} + +func TestElementJSON(t *testing.T) { + assert := require.New(t) + + type S struct { + A Element + B [3]Element + C *Element + D *Element + } + + // encode to JSON + var s S + s.A.SetString("-1") + s.B[2].SetUint64(42) + s.D = new(Element).SetUint64(8000) + + encoded, err := json.Marshal(&s) + assert.NoError(err) + // since our modulus is on 1 word, we may need to adjust "42" and "8000" values; + formatValue := func(v int64) string { + const maxUint16 = 65535 + var a, aNeg big.Int + a.SetInt64(v) + a.Mod(&a, Modulus()) + aNeg.Neg(&a).Mod(&aNeg, Modulus()) + fmt.Println("aNeg", aNeg.Text(10)) + if aNeg.Uint64() != 0 && aNeg.Uint64() <= maxUint16 { + return "-" + aNeg.Text(10) + } + return a.Text(10) + } 
+ expected := fmt.Sprintf("{\"A\":-1,\"B\":[0,0,%s],\"C\":null,\"D\":%s}", formatValue(42), formatValue(8000)) + assert.Equal(expected, string(encoded)) + + // decode valid + var decoded S + err = json.Unmarshal([]byte(expected), &decoded) + assert.NoError(err) + + assert.Equal(s, decoded, "element -> json -> element round trip failed") + + // decode hex and string values + withHexValues := "{\"A\":\"-1\",\"B\":[0,\"0x00000\",\"0x2A\"],\"C\":null,\"D\":\"8000\"}" + + var decodedS S + err = json.Unmarshal([]byte(withHexValues), &decodedS) + assert.NoError(err) + + assert.Equal(s, decodedS, " json with strings -> element failed") + +} + +type testPairElement struct { + element Element + bigint big.Int +} + +func (z *Element) biggerOrEqualModulus() bool { + + return z[0] >= qElement[0] +} + +func gen() gopter.Gen { + return func(genParams *gopter.GenParameters) *gopter.GenResult { + var g testPairElement + + g.element = Element{ + genParams.NextUint64(), + } + if qElement[0] != ^uint64(0) { + g.element[0] %= (qElement[0] + 1) + } + + for g.element.biggerOrEqualModulus() { + g.element = Element{ + genParams.NextUint64(), + } + if qElement[0] != ^uint64(0) { + g.element[0] %= (qElement[0] + 1) + } + } + + g.element.ToBigIntRegular(&g.bigint) + genResult := gopter.NewGenResult(g, gopter.NoShrinker) + return genResult + } +} + +func genFull() gopter.Gen { + return func(genParams *gopter.GenParameters) *gopter.GenResult { + + genRandomFq := func() Element { + var g Element + + g = Element{ + genParams.NextUint64(), + } + + if qElement[0] != ^uint64(0) { + g[0] %= (qElement[0] + 1) + } + + for g.biggerOrEqualModulus() { + g = Element{ + genParams.NextUint64(), + } + if qElement[0] != ^uint64(0) { + g[0] %= (qElement[0] + 1) + } + } + + return g + } + a := genRandomFq() + + var carry uint64 + a[0], _ = bits.Add64(a[0], qElement[0], carry) + + genResult := gopter.NewGenResult(a, gopter.NoShrinker) + return genResult + } +} diff --git a/ecc/bls12-39/fr/fft/doc.go 
b/ecc/bls12-39/fr/fft/doc.go new file mode 100644 index 0000000000..3c35170e8d --- /dev/null +++ b/ecc/bls12-39/fr/fft/doc.go @@ -0,0 +1,18 @@ +// Copyright 2020 ConsenSys Software Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by consensys/gnark-crypto DO NOT EDIT + +// Package fft provides in-place discrete Fourier transform. +package fft diff --git a/ecc/bls12-39/fr/fft/domain.go b/ecc/bls12-39/fr/fft/domain.go new file mode 100644 index 0000000000..4edd0cf714 --- /dev/null +++ b/ecc/bls12-39/fr/fft/domain.go @@ -0,0 +1,240 @@ +// Copyright 2020 ConsenSys Software Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Code generated by consensys/gnark-crypto DO NOT EDIT + +package fft + +import ( + "fmt" + "io" + "math/big" + "math/bits" + "runtime" + "sync" + + "github.com/consensys/gnark-crypto/ecc/bls12-39/fr" + + curve "github.com/consensys/gnark-crypto/ecc/bls12-39" + + "github.com/consensys/gnark-crypto/ecc" +) + +// Domain with a power of 2 cardinality +// compute a field element of order 2x and store it in FinerGenerator +// all other values can be derived from x, GeneratorSqrt +type Domain struct { + Cardinality uint64 + CardinalityInv fr.Element + Generator fr.Element + GeneratorInv fr.Element + FrMultiplicativeGen fr.Element // generator of Fr* + FrMultiplicativeGenInv fr.Element + + // the following slices are not serialized and are (re)computed through domain.preComputeTwiddles() + + // Twiddles factor for the FFT using Generator for each stage of the recursive FFT + Twiddles [][]fr.Element + + // Twiddles factor for the FFT using GeneratorInv for each stage of the recursive FFT + TwiddlesInv [][]fr.Element + + // we precompute these mostly to avoid the memory intensive bit reverse permutation in the groth16.Prover + + // CosetTable u*<1,g,..,g^(n-1)> + CosetTable []fr.Element + CosetTableReversed []fr.Element // optional, this is computed on demand at the creation of the domain + + // CosetTable[i][j] = domain.Generator(i-th)SqrtInv ^ j + CosetTableInv []fr.Element + CosetTableInvReversed []fr.Element // optional, this is computed on demand at the creation of the domain +} + +// NewDomain returns a subgroup with a power of 2 cardinality +// cardinality >= m +func NewDomain(m uint64) *Domain { + + domain := &Domain{} + x := ecc.NextPowerOfTwo(m) + domain.Cardinality = uint64(x) + + // generator of the largest 2-adic subgroup + var rootOfUnity fr.Element + + rootOfUnity.SetString("23751747") + const maxOrderRoot uint64 = 4 + domain.FrMultiplicativeGen.SetUint64(13) + + domain.FrMultiplicativeGenInv.Inverse(&domain.FrMultiplicativeGen) + + // find generator for 
Z/2^(log(m))Z + logx := uint64(bits.TrailingZeros64(x)) + if logx > maxOrderRoot { + panic(fmt.Sprintf("m (%d) is too big: the required root of unity does not exist", m)) + } + + // Generator = FinerGenerator^2 has order x + expo := uint64(1 << (maxOrderRoot - logx)) + domain.Generator.Exp(rootOfUnity, big.NewInt(int64(expo))) // order x + domain.GeneratorInv.Inverse(&domain.Generator) + domain.CardinalityInv.SetUint64(uint64(x)).Inverse(&domain.CardinalityInv) + + // twiddle factors + domain.preComputeTwiddles() + + // store the bit reversed coset tables + domain.reverseCosetTables() + + return domain +} + +func (d *Domain) reverseCosetTables() { + d.CosetTableReversed = make([]fr.Element, d.Cardinality) + d.CosetTableInvReversed = make([]fr.Element, d.Cardinality) + copy(d.CosetTableReversed, d.CosetTable) + copy(d.CosetTableInvReversed, d.CosetTableInv) + BitReverse(d.CosetTableReversed) + BitReverse(d.CosetTableInvReversed) +} + +func (d *Domain) preComputeTwiddles() { + + // nb fft stages + nbStages := uint64(bits.TrailingZeros64(d.Cardinality)) + + d.Twiddles = make([][]fr.Element, nbStages) + d.TwiddlesInv = make([][]fr.Element, nbStages) + d.CosetTable = make([]fr.Element, d.Cardinality) + d.CosetTableInv = make([]fr.Element, d.Cardinality) + + var wg sync.WaitGroup + + // for each fft stage, we pre compute the twiddle factors + twiddles := func(t [][]fr.Element, omega fr.Element) { + for i := uint64(0); i < nbStages; i++ { + t[i] = make([]fr.Element, 1+(1<<(nbStages-i-1))) + var w fr.Element + if i == 0 { + w = omega + } else { + w = t[i-1][2] + } + t[i][0] = fr.One() + t[i][1] = w + for j := 2; j < len(t[i]); j++ { + t[i][j].Mul(&t[i][j-1], &w) + } + } + wg.Done() + } + + expTable := func(sqrt fr.Element, t []fr.Element) { + t[0] = fr.One() + precomputeExpTable(sqrt, t) + wg.Done() + } + + wg.Add(4) + go twiddles(d.Twiddles, d.Generator) + go twiddles(d.TwiddlesInv, d.GeneratorInv) + go expTable(d.FrMultiplicativeGen, d.CosetTable) + go 
expTable(d.FrMultiplicativeGenInv, d.CosetTableInv) + + wg.Wait() + +} + +func precomputeExpTable(w fr.Element, table []fr.Element) { + n := len(table) + + // see if it makes sense to parallelize exp tables pre-computation + interval := 0 + if runtime.NumCPU() >= 4 { + interval = (n - 1) / (runtime.NumCPU() / 4) + } + + // this ratio roughly correspond to the number of multiplication one can do in place of a Exp operation + const ratioExpMul = 6000 / 17 + + if interval < ratioExpMul { + precomputeExpTableChunk(w, 1, table[1:]) + return + } + + // we parallelize + var wg sync.WaitGroup + for i := 1; i < n; i += interval { + start := i + end := i + interval + if end > n { + end = n + } + wg.Add(1) + go func() { + precomputeExpTableChunk(w, uint64(start), table[start:end]) + wg.Done() + }() + } + wg.Wait() +} + +func precomputeExpTableChunk(w fr.Element, power uint64, table []fr.Element) { + + // this condition ensures that creating a domain of size 1 with cosets don't fail + if len(table) > 0 { + table[0].Exp(w, new(big.Int).SetUint64(power)) + for i := 1; i < len(table); i++ { + table[i].Mul(&table[i-1], &w) + } + } +} + +// WriteTo writes a binary representation of the domain (without the precomputed twiddle factors) +// to the provided writer +func (d *Domain) WriteTo(w io.Writer) (int64, error) { + + enc := curve.NewEncoder(w) + + toEncode := []interface{}{d.Cardinality, &d.CardinalityInv, &d.Generator, &d.GeneratorInv, &d.FrMultiplicativeGen, &d.FrMultiplicativeGenInv} + + for _, v := range toEncode { + if err := enc.Encode(v); err != nil { + return enc.BytesWritten(), err + } + } + + return enc.BytesWritten(), nil +} + +// ReadFrom attempts to decode a domain from Reader +func (d *Domain) ReadFrom(r io.Reader) (int64, error) { + + dec := curve.NewDecoder(r) + + toDecode := []interface{}{&d.Cardinality, &d.CardinalityInv, &d.Generator, &d.GeneratorInv, &d.FrMultiplicativeGen, &d.FrMultiplicativeGenInv} + + for _, v := range toDecode { + if err := dec.Decode(v); 
err != nil { + return dec.BytesRead(), err + } + } + + // twiddle factors + d.preComputeTwiddles() + + // store the bit reversed coset tables if needed + d.reverseCosetTables() + + return dec.BytesRead(), nil +} diff --git a/ecc/bls12-39/fr/fft/domain_test.go b/ecc/bls12-39/fr/fft/domain_test.go new file mode 100644 index 0000000000..14d23dd992 --- /dev/null +++ b/ecc/bls12-39/fr/fft/domain_test.go @@ -0,0 +1,47 @@ +// Copyright 2020 ConsenSys Software Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by consensys/gnark-crypto DO NOT EDIT + +package fft + +import ( + "bytes" + "reflect" + "testing" +) + +func TestDomainSerialization(t *testing.T) { + + domain := NewDomain(1 << 6) + var reconstructed Domain + + var buf bytes.Buffer + written, err := domain.WriteTo(&buf) + if err != nil { + t.Fatal(err) + } + var read int64 + read, err = reconstructed.ReadFrom(&buf) + if err != nil { + t.Fatal(err) + } + + if written != read { + t.Fatal("didn't read as many bytes as we wrote") + } + if !reflect.DeepEqual(domain, &reconstructed) { + t.Fatal("Domain.SetBytes(Bytes()) failed") + } +} diff --git a/ecc/bls12-39/fr/fft/fft.go b/ecc/bls12-39/fr/fft/fft.go new file mode 100644 index 0000000000..ad156607f2 --- /dev/null +++ b/ecc/bls12-39/fr/fft/fft.go @@ -0,0 +1,299 @@ +// Copyright 2020 ConsenSys Software Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by consensys/gnark-crypto DO NOT EDIT + +package fft + +import ( + "math/bits" + "runtime" + + "github.com/consensys/gnark-crypto/ecc" + "github.com/consensys/gnark-crypto/internal/parallel" + + "github.com/consensys/gnark-crypto/ecc/bls12-39/fr" +) + +// Decimation is used in the FFT call to select decimation in time or in frequency +type Decimation uint8 + +const ( + DIT Decimation = iota + DIF +) + +// parallelize threshold for a single butterfly op, if the fft stage is not parallelized already +const butterflyThreshold = 16 + +// FFT computes (recursively) the discrete Fourier transform of a and stores the result in a +// if decimation == DIT (decimation in time), the input must be in bit-reversed order +// if decimation == DIF (decimation in frequency), the output will be in bit-reversed order +// if coset if set, the FFT(a) returns the evaluation of a on a coset. 
+func (domain *Domain) FFT(a []fr.Element, decimation Decimation, coset ...bool) { + + numCPU := uint64(runtime.NumCPU()) + + _coset := false + if len(coset) > 0 { + _coset = coset[0] + } + + // if coset != 0, scale by coset table + if _coset { + scale := func(cosetTable []fr.Element) { + parallel.Execute(len(a), func(start, end int) { + for i := start; i < end; i++ { + a[i].Mul(&a[i], &cosetTable[i]) + } + }) + } + if decimation == DIT { + scale(domain.CosetTableReversed) + + } else { + scale(domain.CosetTable) + } + } + + // find the stage where we should stop spawning go routines in our recursive calls + // (ie when we have as many go routines running as we have available CPUs) + maxSplits := bits.TrailingZeros64(ecc.NextPowerOfTwo(numCPU)) + if numCPU <= 1 { + maxSplits = -1 + } + + switch decimation { + case DIF: + difFFT(a, domain.Twiddles, 0, maxSplits, nil) + case DIT: + ditFFT(a, domain.Twiddles, 0, maxSplits, nil) + default: + panic("not implemented") + } +} + +// FFTInverse computes (recursively) the inverse discrete Fourier transform of a and stores the result in a +// if decimation == DIT (decimation in time), the input must be in bit-reversed order +// if decimation == DIF (decimation in frequency), the output will be in bit-reversed order +// coset sets the shift of the fft (0 = no shift, standard fft) +// len(a) must be a power of 2, and w must be a len(a)th root of unity in field F. 
+func (domain *Domain) FFTInverse(a []fr.Element, decimation Decimation, coset ...bool) { + + numCPU := uint64(runtime.NumCPU()) + + _coset := false + if len(coset) > 0 { + _coset = coset[0] + } + + // find the stage where we should stop spawning go routines in our recursive calls + // (ie when we have as many go routines running as we have available CPUs) + maxSplits := bits.TrailingZeros64(ecc.NextPowerOfTwo(numCPU)) + if numCPU <= 1 { + maxSplits = -1 + } + switch decimation { + case DIF: + difFFT(a, domain.TwiddlesInv, 0, maxSplits, nil) + case DIT: + ditFFT(a, domain.TwiddlesInv, 0, maxSplits, nil) + default: + panic("not implemented") + } + + // scale by CardinalityInv + if !_coset { + parallel.Execute(len(a), func(start, end int) { + for i := start; i < end; i++ { + a[i].Mul(&a[i], &domain.CardinalityInv) + } + }) + return + } + + scale := func(cosetTable []fr.Element) { + parallel.Execute(len(a), func(start, end int) { + for i := start; i < end; i++ { + a[i].Mul(&a[i], &cosetTable[i]). 
+ Mul(&a[i], &domain.CardinalityInv) + } + }) + } + if decimation == DIT { + scale(domain.CosetTableInv) + return + } + + // decimation == DIF + scale(domain.CosetTableInvReversed) + +} + +func difFFT(a []fr.Element, twiddles [][]fr.Element, stage, maxSplits int, chDone chan struct{}) { + if chDone != nil { + defer close(chDone) + } + + n := len(a) + if n == 1 { + return + } else if n == 8 { + kerDIF8(a, twiddles, stage) + return + } + m := n >> 1 + + // if stage < maxSplits, we parallelize this butterfly + // but we have only numCPU / stage cpus available + if (m > butterflyThreshold) && (stage < maxSplits) { + // 1 << stage == estimated used CPUs + numCPU := runtime.NumCPU() / (1 << (stage)) + parallel.Execute(m, func(start, end int) { + for i := start; i < end; i++ { + fr.Butterfly(&a[i], &a[i+m]) + a[i+m].Mul(&a[i+m], &twiddles[stage][i]) + } + }, numCPU) + } else { + // i == 0 + fr.Butterfly(&a[0], &a[m]) + for i := 1; i < m; i++ { + fr.Butterfly(&a[i], &a[i+m]) + a[i+m].Mul(&a[i+m], &twiddles[stage][i]) + } + } + + if m == 1 { + return + } + + nextStage := stage + 1 + if stage < maxSplits { + chDone := make(chan struct{}, 1) + go difFFT(a[m:n], twiddles, nextStage, maxSplits, chDone) + difFFT(a[0:m], twiddles, nextStage, maxSplits, nil) + <-chDone + } else { + difFFT(a[0:m], twiddles, nextStage, maxSplits, nil) + difFFT(a[m:n], twiddles, nextStage, maxSplits, nil) + } + +} + +func ditFFT(a []fr.Element, twiddles [][]fr.Element, stage, maxSplits int, chDone chan struct{}) { + if chDone != nil { + defer close(chDone) + } + n := len(a) + if n == 1 { + return + } else if n == 8 { + kerDIT8(a, twiddles, stage) + return + } + m := n >> 1 + + nextStage := stage + 1 + + if stage < maxSplits { + // that's the only time we fire go routines + chDone := make(chan struct{}, 1) + go ditFFT(a[m:], twiddles, nextStage, maxSplits, chDone) + ditFFT(a[0:m], twiddles, nextStage, maxSplits, nil) + <-chDone + } else { + ditFFT(a[0:m], twiddles, nextStage, maxSplits, nil) + 
ditFFT(a[m:n], twiddles, nextStage, maxSplits, nil) + + } + + // if stage < maxSplits, we parallelize this butterfly + // but we have only numCPU / stage cpus available + if (m > butterflyThreshold) && (stage < maxSplits) { + // 1 << stage == estimated used CPUs + numCPU := runtime.NumCPU() / (1 << (stage)) + parallel.Execute(m, func(start, end int) { + for k := start; k < end; k++ { + a[k+m].Mul(&a[k+m], &twiddles[stage][k]) + fr.Butterfly(&a[k], &a[k+m]) + } + }, numCPU) + + } else { + fr.Butterfly(&a[0], &a[m]) + for k := 1; k < m; k++ { + a[k+m].Mul(&a[k+m], &twiddles[stage][k]) + fr.Butterfly(&a[k], &a[k+m]) + } + } +} + +// BitReverse applies the bit-reversal permutation to a. +// len(a) must be a power of 2 (as in every single function in this file) +func BitReverse(a []fr.Element) { + n := uint64(len(a)) + nn := uint64(64 - bits.TrailingZeros64(n)) + + for i := uint64(0); i < n; i++ { + irev := bits.Reverse64(i) >> nn + if irev > i { + a[i], a[irev] = a[irev], a[i] + } + } +} + +// kerDIT8 is a kernel that process a FFT of size 8 +func kerDIT8(a []fr.Element, twiddles [][]fr.Element, stage int) { + + fr.Butterfly(&a[0], &a[1]) + fr.Butterfly(&a[2], &a[3]) + fr.Butterfly(&a[4], &a[5]) + fr.Butterfly(&a[6], &a[7]) + fr.Butterfly(&a[0], &a[2]) + a[3].Mul(&a[3], &twiddles[stage+1][1]) + fr.Butterfly(&a[1], &a[3]) + fr.Butterfly(&a[4], &a[6]) + a[7].Mul(&a[7], &twiddles[stage+1][1]) + fr.Butterfly(&a[5], &a[7]) + fr.Butterfly(&a[0], &a[4]) + a[5].Mul(&a[5], &twiddles[stage+0][1]) + fr.Butterfly(&a[1], &a[5]) + a[6].Mul(&a[6], &twiddles[stage+0][2]) + fr.Butterfly(&a[2], &a[6]) + a[7].Mul(&a[7], &twiddles[stage+0][3]) + fr.Butterfly(&a[3], &a[7]) +} + +// kerDIF8 is a kernel that process a FFT of size 8 +func kerDIF8(a []fr.Element, twiddles [][]fr.Element, stage int) { + + fr.Butterfly(&a[0], &a[4]) + fr.Butterfly(&a[1], &a[5]) + fr.Butterfly(&a[2], &a[6]) + fr.Butterfly(&a[3], &a[7]) + a[5].Mul(&a[5], &twiddles[stage+0][1]) + a[6].Mul(&a[6], 
&twiddles[stage+0][2]) + a[7].Mul(&a[7], &twiddles[stage+0][3]) + fr.Butterfly(&a[0], &a[2]) + fr.Butterfly(&a[1], &a[3]) + fr.Butterfly(&a[4], &a[6]) + fr.Butterfly(&a[5], &a[7]) + a[3].Mul(&a[3], &twiddles[stage+1][1]) + a[7].Mul(&a[7], &twiddles[stage+1][1]) + fr.Butterfly(&a[0], &a[1]) + fr.Butterfly(&a[2], &a[3]) + fr.Butterfly(&a[4], &a[5]) + fr.Butterfly(&a[6], &a[7]) +} diff --git a/ecc/bls12-39/fr/fft/fft_test.go b/ecc/bls12-39/fr/fft/fft_test.go new file mode 100644 index 0000000000..f3b2f7f81b --- /dev/null +++ b/ecc/bls12-39/fr/fft/fft_test.go @@ -0,0 +1,326 @@ +// Copyright 2020 ConsenSys Software Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Code generated by consensys/gnark-crypto DO NOT EDIT + +package fft + +import ( + "math/big" + "strconv" + "testing" + + "github.com/consensys/gnark-crypto/ecc/bls12-39/fr" + + "github.com/leanovate/gopter" + "github.com/leanovate/gopter/gen" + "github.com/leanovate/gopter/prop" +) + +func TestFFT(t *testing.T) { + const maxSize = 1 << 10 + + nbCosets := 3 + domainWithPrecompute := NewDomain(maxSize) + + parameters := gopter.DefaultTestParameters() + parameters.MinSuccessfulTests = 5 + + properties := gopter.NewProperties(parameters) + + properties.Property("DIF FFT should be consistent with dual basis", prop.ForAll( + + // checks that a random evaluation of a dual function eval(gen**ithpower) is consistent with the FFT result + func(ithpower int) bool { + + pol := make([]fr.Element, maxSize) + backupPol := make([]fr.Element, maxSize) + + for i := 0; i < maxSize; i++ { + pol[i].SetRandom() + } + copy(backupPol, pol) + + domainWithPrecompute.FFT(pol, DIF, false) + BitReverse(pol) + + sample := domainWithPrecompute.Generator + sample.Exp(sample, big.NewInt(int64(ithpower))) + + eval := evaluatePolynomial(backupPol, sample) + + return eval.Equal(&pol[ithpower]) + + }, + gen.IntRange(0, maxSize-1), + )) + + properties.Property("DIF FFT on cosets should be consistent with dual basis", prop.ForAll( + + // checks that a random evaluation of a dual function eval(gen**ithpower) is consistent with the FFT result + func(ithpower int) bool { + + pol := make([]fr.Element, maxSize) + backupPol := make([]fr.Element, maxSize) + + for i := 0; i < maxSize; i++ { + pol[i].SetRandom() + } + copy(backupPol, pol) + + domainWithPrecompute.FFT(pol, DIF, true) + BitReverse(pol) + + sample := domainWithPrecompute.Generator + sample.Exp(sample, big.NewInt(int64(ithpower))). 
+ Mul(&sample, &domainWithPrecompute.FrMultiplicativeGen) + + eval := evaluatePolynomial(backupPol, sample) + + return eval.Equal(&pol[ithpower]) + + }, + gen.IntRange(0, maxSize-1), + )) + + properties.Property("DIT FFT should be consistent with dual basis", prop.ForAll( + + // checks that a random evaluation of a dual function eval(gen**ithpower) is consistent with the FFT result + func(ithpower int) bool { + + pol := make([]fr.Element, maxSize) + backupPol := make([]fr.Element, maxSize) + + for i := 0; i < maxSize; i++ { + pol[i].SetRandom() + } + copy(backupPol, pol) + + BitReverse(pol) + domainWithPrecompute.FFT(pol, DIT, false) + + sample := domainWithPrecompute.Generator + sample.Exp(sample, big.NewInt(int64(ithpower))) + + eval := evaluatePolynomial(backupPol, sample) + + return eval.Equal(&pol[ithpower]) + + }, + gen.IntRange(0, maxSize-1), + )) + + properties.Property("bitReverse(DIF FFT(DIT FFT (bitReverse))))==id", prop.ForAll( + + func() bool { + + pol := make([]fr.Element, maxSize) + backupPol := make([]fr.Element, maxSize) + + for i := 0; i < maxSize; i++ { + pol[i].SetRandom() + } + copy(backupPol, pol) + + BitReverse(pol) + domainWithPrecompute.FFT(pol, DIT, false) + domainWithPrecompute.FFTInverse(pol, DIF, false) + BitReverse(pol) + + check := true + for i := 0; i < len(pol); i++ { + check = check && pol[i].Equal(&backupPol[i]) + } + return check + }, + )) + + properties.Property("bitReverse(DIF FFT(DIT FFT (bitReverse))))==id on cosets", prop.ForAll( + + func() bool { + + pol := make([]fr.Element, maxSize) + backupPol := make([]fr.Element, maxSize) + + for i := 0; i < maxSize; i++ { + pol[i].SetRandom() + } + copy(backupPol, pol) + + check := true + + for i := 1; i <= nbCosets; i++ { + + BitReverse(pol) + domainWithPrecompute.FFT(pol, DIT, true) + domainWithPrecompute.FFTInverse(pol, DIF, true) + BitReverse(pol) + + for i := 0; i < len(pol); i++ { + check = check && pol[i].Equal(&backupPol[i]) + } + } + + return check + }, + )) + + 
properties.Property("DIT FFT(DIF FFT)==id", prop.ForAll( + + func() bool { + + pol := make([]fr.Element, maxSize) + backupPol := make([]fr.Element, maxSize) + + for i := 0; i < maxSize; i++ { + pol[i].SetRandom() + } + copy(backupPol, pol) + + domainWithPrecompute.FFTInverse(pol, DIF, false) + domainWithPrecompute.FFT(pol, DIT, false) + + check := true + for i := 0; i < len(pol); i++ { + check = check && (pol[i] == backupPol[i]) + } + return check + }, + )) + + properties.Property("DIT FFT(DIF FFT)==id on cosets", prop.ForAll( + + func() bool { + + pol := make([]fr.Element, maxSize) + backupPol := make([]fr.Element, maxSize) + + for i := 0; i < maxSize; i++ { + pol[i].SetRandom() + } + copy(backupPol, pol) + + domainWithPrecompute.FFTInverse(pol, DIF, true) + domainWithPrecompute.FFT(pol, DIT, true) + + check := true + for i := 0; i < len(pol); i++ { + check = check && (pol[i] == backupPol[i]) + } + return check + }, + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) + +} + +// -------------------------------------------------------------------- +// benches +func BenchmarkBitReverse(b *testing.B) { + + const maxSize = 1 << 20 + + pol := make([]fr.Element, maxSize) + pol[0].SetRandom() + for i := 1; i < maxSize; i++ { + pol[i] = pol[i-1] + } + + for i := 8; i < 20; i++ { + b.Run("bit reversing 2**"+strconv.Itoa(i)+"bits", func(b *testing.B) { + b.ResetTimer() + for j := 0; j < b.N; j++ { + BitReverse(pol[:1< (1 << 15) { + size = 1 << 15 + } + paddedSize := ecc.NextPowerOfTwo(uint64(size)) + p1 := make([]fr.Element, paddedSize) + p2 := make([]fr.Element, paddedSize) + for i := 0; i < len(p1); i++ { + p1[i].SetRawBytes(r) + } + copy(p2, p1) + + // fft domain + domainWithPrecompute := NewDomain(paddedSize) + domainWOPrecompute := NewDomain(paddedSize) + + // bitReverse(DIF FFT(DIT FFT (bitReverse))))==id + // bitReverse(DIF FFT(DIT FFT (bitReverse))))==id + BitReverse(p1) + domainWithPrecompute.FFT(p1, DIT, true) + domainWOPrecompute.FFTInverse(p1, DIF, 
true) + BitReverse(p1) + + for i := 0; i < len(p1); i++ { + if !p1[i].Equal(&p2[i]) { + panic(fmt.Sprintf("bitReverse(DIF FFT(DIT FFT (bitReverse)))) != id, size %d", size)) + } + } + + return fuzzNormal +} diff --git a/ecc/bls12-39/fr/fft/fuzz_test.go b/ecc/bls12-39/fr/fft/fuzz_test.go new file mode 100644 index 0000000000..9890547c0e --- /dev/null +++ b/ecc/bls12-39/fr/fft/fuzz_test.go @@ -0,0 +1,56 @@ +//go:build gofuzz +// +build gofuzz + +// Copyright 2020 ConsenSys Software Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Code generated by consensys/gnark-crypto DO NOT EDIT + +package fft + +import ( + "encoding/hex" + "io" + "math/rand" + "runtime/debug" + "testing" + "time" +) + +func TestFuzz(t *testing.T) { + const maxBytes = 1 << 10 + const testCount = 7 + var bytes [maxBytes]byte + var i int + seed := time.Now().UnixNano() + defer func() { + if r := recover(); r != nil { + t.Error(r) + t.Error(string(debug.Stack())) + t.Fatal("test panicked", i, hex.EncodeToString(bytes[:i]), "seed", seed) + } + }() + r := rand.New(rand.NewSource(seed)) + + for i = 1; i < maxBytes; i++ { + for j := 0; j < testCount; j++ { + if _, err := io.ReadFull(r, bytes[:i]); err != nil { + t.Fatal("couldn't read random bytes", err) + } + + Fuzz(bytes[:i]) + } + } + +} diff --git a/ecc/bls12-39/fr/kzg/doc.go b/ecc/bls12-39/fr/kzg/doc.go new file mode 100644 index 0000000000..d8a77e8f64 --- /dev/null +++ b/ecc/bls12-39/fr/kzg/doc.go @@ -0,0 +1,18 @@ +// Copyright 2020 ConsenSys Software Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by consensys/gnark-crypto DO NOT EDIT + +// Package kzg provides a KZG commitment scheme. +package kzg diff --git a/ecc/bls12-39/fr/kzg/fuzz.go b/ecc/bls12-39/fr/kzg/fuzz.go new file mode 100644 index 0000000000..a08b788f72 --- /dev/null +++ b/ecc/bls12-39/fr/kzg/fuzz.go @@ -0,0 +1,84 @@ +//go:build gofuzz +// +build gofuzz + +// Copyright 2020 ConsenSys Software Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by consensys/gnark-crypto DO NOT EDIT + +package kzg + +import ( + "bytes" + "github.com/consensys/gnark-crypto/ecc/bls12-39/fr" + "github.com/consensys/gnark-crypto/ecc/bls12-39/fr/polynomial" +) + +const ( + fuzzInteresting = 1 + fuzzNormal = 0 + fuzzDiscard = -1 +) + +func Fuzz(data []byte) int { + if len(data) == 0 { + return fuzzDiscard + } + size := int(uint8(data[0])) + 2 // TODO fix min size in NewScheme + if size > (1 << 15) { + size = 1 << 15 + } + r := bytes.NewReader(data[1:]) + var alpha, point fr.Element + alpha.SetRawBytes(r) + point.SetRawBytes(r) + s := NewScheme(size, alpha) + + // create polynomials + f := make([]polynomial.Polynomial, size/2) + for i := 0; i < len(f); i++ { + f[i] = make(polynomial.Polynomial, size) + for j := 0; j < len(f[i]); j++ { + f[i][j].SetRawBytes(r) + } + } + + // commit the polynomials + digests := make([]Digest, size/2) + for i := 0; i < len(digests); i++ { + digests[i], _ = s.Commit(f[i]) + + } + + proof, err := s.BatchOpenSinglePoint(&point, digests, f) + if err != nil { + panic(err) + } + + // verify the claimed values + for i := 0; i < len(f); i++ { + expectedClaim := f[i].Eval(&point) + if !expectedClaim.Equal(&proof.ClaimedValues[i]) { + panic("inconsistant claimed values") + } + } + + // verify correct proof + err = s.BatchVerifySinglePoint(digests, &proof) + if err != nil { + panic(err) + } + + return fuzzNormal +} diff --git 
a/ecc/bls12-39/fr/kzg/fuzz_test.go b/ecc/bls12-39/fr/kzg/fuzz_test.go new file mode 100644 index 0000000000..8379a59c73 --- /dev/null +++ b/ecc/bls12-39/fr/kzg/fuzz_test.go @@ -0,0 +1,56 @@ +//go:build gofuzz +// +build gofuzz + +// Copyright 2020 ConsenSys Software Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by consensys/gnark-crypto DO NOT EDIT + +package kzg + +import ( + "encoding/hex" + "io" + "math/rand" + "runtime/debug" + "testing" + "time" +) + +func TestFuzz(t *testing.T) { + const maxBytes = 1 << 10 + const testCount = 7 + var bytes [maxBytes]byte + var i int + seed := time.Now().UnixNano() + defer func() { + if r := recover(); r != nil { + t.Error(r) + t.Error(string(debug.Stack())) + t.Fatal("test panicked", i, hex.EncodeToString(bytes[:i]), "seed", seed) + } + }() + r := rand.New(rand.NewSource(seed)) + + for i = 1; i < maxBytes; i++ { + for j := 0; j < testCount; j++ { + if _, err := io.ReadFull(r, bytes[:i]); err != nil { + t.Fatal("couldn't read random bytes", err) + } + + Fuzz(bytes[:i]) + } + } + +} diff --git a/ecc/bls12-39/fr/kzg/kzg.go b/ecc/bls12-39/fr/kzg/kzg.go new file mode 100644 index 0000000000..36da95825a --- /dev/null +++ b/ecc/bls12-39/fr/kzg/kzg.go @@ -0,0 +1,532 @@ +// Copyright 2020 ConsenSys Software Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by consensys/gnark-crypto DO NOT EDIT + +package kzg + +import ( + "errors" + "hash" + "math/big" + "sync" + + "github.com/consensys/gnark-crypto/ecc" + "github.com/consensys/gnark-crypto/ecc/bls12-39" + "github.com/consensys/gnark-crypto/ecc/bls12-39/fr" + "github.com/consensys/gnark-crypto/fiat-shamir" +) + +var ( + ErrInvalidNbDigests = errors.New("number of digests is not the same as the number of polynomials") + ErrInvalidPolynomialSize = errors.New("invalid polynomial size (larger than SRS or == 0)") + ErrVerifyOpeningProof = errors.New("can't verify opening proof") + ErrVerifyBatchOpeningSinglePoint = errors.New("can't verify batch opening proof at single point") + ErrMinSRSSize = errors.New("minimum srs size is 2") +) + +// Digest commitment of a polynomial. +type Digest = bls1239.G1Affine + +// SRS stores the result of the MPC +type SRS struct { + G1 []bls1239.G1Affine // [G₁ [α]G₁ , [α²]G₁, ... ] + G2 [2]bls1239.G2Affine // [G₂, [α]G₂ ] +} + +// eval returns p(point) where p is interpreted as a polynomial +// ∑_{i= 0; i-- { + res.Mul(&res, &point).Add(&res, &p[i]) + } + return res +} + +// NewSRS returns a new SRS using alpha as randomness source +// +// In production, a SRS generated through MPC should be used. 
+// +// implements io.ReaderFrom and io.WriterTo +func NewSRS(size uint64, bAlpha *big.Int) (*SRS, error) { + + if size < 2 { + return nil, ErrMinSRSSize + } + + var srs SRS + srs.G1 = make([]bls1239.G1Affine, size) + + var alpha fr.Element + alpha.SetBigInt(bAlpha) + + _, _, gen1Aff, gen2Aff := bls1239.Generators() + srs.G1[0] = gen1Aff + srs.G2[0] = gen2Aff + srs.G2[1].ScalarMultiplication(&gen2Aff, bAlpha) + + alphas := make([]fr.Element, size-1) + alphas[0] = alpha + for i := 1; i < len(alphas); i++ { + alphas[i].Mul(&alphas[i-1], &alpha) + } + for i := 0; i < len(alphas); i++ { + alphas[i].FromMont() + } + g1s := bls1239.BatchScalarMultiplicationG1(&gen1Aff, alphas) + copy(srs.G1[1:], g1s) + + return &srs, nil +} + +// OpeningProof KZG proof for opening at a single point. +// +// implements io.ReaderFrom and io.WriterTo +type OpeningProof struct { + // H quotient polynomial (f - f(z))/(x-z) + H bls1239.G1Affine + + // ClaimedValue purported value + ClaimedValue fr.Element +} + +// BatchOpeningProof opening proof for many polynomials at the same point +// +// implements io.ReaderFrom and io.WriterTo +type BatchOpeningProof struct { + // H quotient polynomial Sum_i gamma**i*(f - f(z))/(x-z) + H bls1239.G1Affine + + // ClaimedValues purported values + ClaimedValues []fr.Element +} + +// Commit commits to a polynomial using a multi exponentiation with the SRS. +// It is assumed that the polynomial is in canonical form, in Montgomery form. +func Commit(p []fr.Element, srs *SRS, nbTasks ...int) (Digest, error) { + + if len(p) == 0 || len(p) > len(srs.G1) { + return Digest{}, ErrInvalidPolynomialSize + } + + var res bls1239.G1Affine + + config := ecc.MultiExpConfig{ScalarsMont: true} + if len(nbTasks) > 0 { + config.NbTasks = nbTasks[0] + } + if _, err := res.MultiExp(srs.G1[:len(p)], p, config); err != nil { + return Digest{}, err + } + + return res, nil +} + +// Open computes an opening proof of polynomial p at given point. 
+// fft.Domain Cardinality must be larger than p.Degree() +func Open(p []fr.Element, point fr.Element, srs *SRS) (OpeningProof, error) { + if len(p) == 0 || len(p) > len(srs.G1) { + return OpeningProof{}, ErrInvalidPolynomialSize + } + + // build the proof + res := OpeningProof{ + ClaimedValue: eval(p, point), + } + + // compute H + _p := make([]fr.Element, len(p)) + copy(_p, p) + h := dividePolyByXminusA(_p, res.ClaimedValue, point) + + _p = nil // h re-use this memory + + // commit to H + hCommit, err := Commit(h, srs) + if err != nil { + return OpeningProof{}, err + } + res.H.Set(&hCommit) + + return res, nil +} + +// Verify verifies a KZG opening proof at a single point +func Verify(commitment *Digest, proof *OpeningProof, point fr.Element, srs *SRS) error { + + // [f(a)]G₁ + var claimedValueG1Aff bls1239.G1Affine + var claimedValueBigInt big.Int + proof.ClaimedValue.ToBigIntRegular(&claimedValueBigInt) + claimedValueG1Aff.ScalarMultiplication(&srs.G1[0], &claimedValueBigInt) + + // [f(α) - f(a)]G₁ + var fminusfaG1Jac, tmpG1Jac bls1239.G1Jac + fminusfaG1Jac.FromAffine(commitment) + tmpG1Jac.FromAffine(&claimedValueG1Aff) + fminusfaG1Jac.SubAssign(&tmpG1Jac) + + // [-H(α)]G₁ + var negH bls1239.G1Affine + negH.Neg(&proof.H) + + // [α-a]G₂ + var alphaMinusaG2Jac, genG2Jac, alphaG2Jac bls1239.G2Jac + var pointBigInt big.Int + point.ToBigIntRegular(&pointBigInt) + genG2Jac.FromAffine(&srs.G2[0]) + alphaG2Jac.FromAffine(&srs.G2[1]) + alphaMinusaG2Jac.ScalarMultiplication(&genG2Jac, &pointBigInt). + Neg(&alphaMinusaG2Jac). + AddAssign(&alphaG2Jac) + + // [α-a]G₂ + var xminusaG2Aff bls1239.G2Affine + xminusaG2Aff.FromJacobian(&alphaMinusaG2Jac) + + // [f(α) - f(a)]G₁ + var fminusfaG1Aff bls1239.G1Affine + fminusfaG1Aff.FromJacobian(&fminusfaG1Jac) + + // e([f(α) - f(a)]G₁, G₂).e([-H(α)]G₁, [α-a]G₂) ==? 
1 + check, err := bls1239.PairingCheck( + []bls1239.G1Affine{fminusfaG1Aff, negH}, + []bls1239.G2Affine{srs.G2[0], xminusaG2Aff}, + ) + if err != nil { + return err + } + if !check { + return ErrVerifyOpeningProof + } + return nil +} + +// BatchOpenSinglePoint creates a batch opening proof at point of a list of polynomials. +// It's an interactive protocol, made non interactive using Fiat Shamir. +// +// * point is the point at which the polynomials are opened. +// * digests is the list of committed polynomials to open, need to derive the challenge using Fiat Shamir. +// * polynomials is the list of polynomials to open, they are supposed to be of the same size. +func BatchOpenSinglePoint(polynomials [][]fr.Element, digests []Digest, point fr.Element, hf hash.Hash, srs *SRS) (BatchOpeningProof, error) { + + // check for invalid sizes + nbDigests := len(digests) + if nbDigests != len(polynomials) { + return BatchOpeningProof{}, ErrInvalidNbDigests + } + + // TODO ensure the polynomials are of the same size + largestPoly := -1 + for _, p := range polynomials { + if len(p) == 0 || len(p) > len(srs.G1) { + return BatchOpeningProof{}, ErrInvalidPolynomialSize + } + if len(p) > largestPoly { + largestPoly = len(p) + } + } + + var res BatchOpeningProof + + // compute the purported values + res.ClaimedValues = make([]fr.Element, len(polynomials)) + var wg sync.WaitGroup + wg.Add(len(polynomials)) + for i := 0; i < len(polynomials); i++ { + go func(_i int) { + res.ClaimedValues[_i] = eval(polynomials[_i], point) + wg.Done() + }(i) + } + + // derive the challenge γ, binded to the point and the commitments + gamma, err := deriveGamma(point, digests, hf) + if err != nil { + return BatchOpeningProof{}, err + } + + // ∑ᵢγⁱf(a) + var foldedEvaluations fr.Element + chSumGammai := make(chan struct{}, 1) + go func() { + // wait for polynomial evaluations to be completed (res.ClaimedValues) + wg.Wait() + foldedEvaluations = res.ClaimedValues[nbDigests-1] + for i := nbDigests - 2; i >= 
0; i-- { + foldedEvaluations.Mul(&foldedEvaluations, &gamma). + Add(&foldedEvaluations, &res.ClaimedValues[i]) + } + close(chSumGammai) + }() + + // compute ∑ᵢγⁱfᵢ + // note: if we are willing to paralellize that, we could clone the poly and scale them by + // gamma n in parallel, before reducing into foldedPolynomials + foldedPolynomials := make([]fr.Element, largestPoly) + copy(foldedPolynomials, polynomials[0]) + acc := gamma + var pj fr.Element + for i := 1; i < len(polynomials); i++ { + for j := 0; j < len(polynomials[i]); j++ { + pj.Mul(&polynomials[i][j], &acc) + foldedPolynomials[j].Add(&foldedPolynomials[j], &pj) + } + acc.Mul(&acc, &gamma) + } + + // compute H + <-chSumGammai + h := dividePolyByXminusA(foldedPolynomials, foldedEvaluations, point) + foldedPolynomials = nil // same memory as h + + res.H, err = Commit(h, srs) + if err != nil { + return BatchOpeningProof{}, err + } + + return res, nil +} + +// FoldProof fold the digests and the proofs in batchOpeningProof using Fiat Shamir +// to obtain an opening proof at a single point. 
+// +// * digests list of digests on which batchOpeningProof is based +// * batchOpeningProof opening proof of digests +// * returns the folded version of batchOpeningProof, Digest, the folded version of digests +func FoldProof(digests []Digest, batchOpeningProof *BatchOpeningProof, point fr.Element, hf hash.Hash) (OpeningProof, Digest, error) { + + nbDigests := len(digests) + + // check consistancy between numbers of claims vs number of digests + if nbDigests != len(batchOpeningProof.ClaimedValues) { + return OpeningProof{}, Digest{}, ErrInvalidNbDigests + } + + // derive the challenge γ, binded to the point and the commitments + gamma, err := deriveGamma(point, digests, hf) + if err != nil { + return OpeningProof{}, Digest{}, ErrInvalidNbDigests + } + + // fold the claimed values and digests + // gammai = [1,γ,γ²,..,γⁿ⁻¹] + gammai := make([]fr.Element, nbDigests) + gammai[0].SetOne() + for i := 1; i < nbDigests; i++ { + gammai[i].Mul(&gammai[i-1], &gamma) + } + + foldedDigests, foldedEvaluations, err := fold(digests, batchOpeningProof.ClaimedValues, gammai) + if err != nil { + return OpeningProof{}, Digest{}, err + } + + // create the folded opening proof + var res OpeningProof + res.ClaimedValue.Set(&foldedEvaluations) + res.H.Set(&batchOpeningProof.H) + + return res, foldedDigests, nil +} + +// BatchVerifySinglePoint verifies a batched opening proof at a single point of a list of polynomials. 
+// +// * digests list of digests on which opening proof is done +// * batchOpeningProof proof of correct opening on the digests +func BatchVerifySinglePoint(digests []Digest, batchOpeningProof *BatchOpeningProof, point fr.Element, hf hash.Hash, srs *SRS) error { + + // fold the proof + foldedProof, foldedDigest, err := FoldProof(digests, batchOpeningProof, point, hf) + if err != nil { + return err + } + + // verify the foldedProof againts the foldedDigest + err = Verify(&foldedDigest, &foldedProof, point, srs) + return err + +} + +// BatchVerifyMultiPoints batch verifies a list of opening proofs at different points. +// The purpose of the batching is to have only one pairing for verifying several proofs. +// +// * digests list of committed polynomials +// * proofs list of opening proofs, one for each digest +// * points the list of points at which the opening are done +func BatchVerifyMultiPoints(digests []Digest, proofs []OpeningProof, points []fr.Element, srs *SRS) error { + + // check consistancy nb proogs vs nb digests + if len(digests) != len(proofs) || len(digests) != len(points) { + return ErrInvalidNbDigests + } + + // if only one digest, call Verify + if len(digests) == 1 { + return Verify(&digests[0], &proofs[0], points[0], srs) + } + + // sample random numbers λᵢ for sampling + randomNumbers := make([]fr.Element, len(digests)) + randomNumbers[0].SetOne() + for i := 1; i < len(randomNumbers); i++ { + _, err := randomNumbers[i].SetRandom() + if err != nil { + return err + } + } + + // fold the committed quotients compute ∑ᵢλᵢ[Hᵢ(α)]G₁ + var foldedQuotients bls1239.G1Affine + quotients := make([]bls1239.G1Affine, len(proofs)) + for i := 0; i < len(randomNumbers); i++ { + quotients[i].Set(&proofs[i].H) + } + config := ecc.MultiExpConfig{ScalarsMont: true} + _, err := foldedQuotients.MultiExp(quotients, randomNumbers, config) + if err != nil { + return nil + } + + // fold digests and evals + evals := make([]fr.Element, len(digests)) + for i := 0; i < 
len(randomNumbers); i++ { + evals[i].Set(&proofs[i].ClaimedValue) + } + + // fold the digests: ∑ᵢλᵢ[f_i(α)]G₁ + // fold the evals : ∑ᵢλᵢfᵢ(aᵢ) + foldedDigests, foldedEvals, err := fold(digests, evals, randomNumbers) + if err != nil { + return err + } + + // compute commitment to folded Eval [∑ᵢλᵢfᵢ(aᵢ)]G₁ + var foldedEvalsCommit bls1239.G1Affine + var foldedEvalsBigInt big.Int + foldedEvals.ToBigIntRegular(&foldedEvalsBigInt) + foldedEvalsCommit.ScalarMultiplication(&srs.G1[0], &foldedEvalsBigInt) + + // compute foldedDigests = ∑ᵢλᵢ[fᵢ(α)]G₁ - [∑ᵢλᵢfᵢ(aᵢ)]G₁ + foldedDigests.Sub(&foldedDigests, &foldedEvalsCommit) + + // combien the points and the quotients using γᵢ + // ∑ᵢλᵢ[p_i]([Hᵢ(α)]G₁) + var foldedPointsQuotients bls1239.G1Affine + for i := 0; i < len(randomNumbers); i++ { + randomNumbers[i].Mul(&randomNumbers[i], &points[i]) + } + _, err = foldedPointsQuotients.MultiExp(quotients, randomNumbers, config) + if err != nil { + return err + } + + // ∑ᵢλᵢ[f_i(α)]G₁ - [∑ᵢλᵢfᵢ(aᵢ)]G₁ + ∑ᵢλᵢ[p_i]([Hᵢ(α)]G₁) + // = [∑ᵢλᵢf_i(α) - ∑ᵢλᵢfᵢ(aᵢ) + ∑ᵢλᵢpᵢHᵢ(α)]G₁ + foldedDigests.Add(&foldedDigests, &foldedPointsQuotients) + + // -∑ᵢλᵢ[Qᵢ(α)]G₁ + foldedQuotients.Neg(&foldedQuotients) + + // pairing check + // e([∑ᵢλᵢ(fᵢ(α) - fᵢ(pᵢ) + pᵢHᵢ(α))]G₁, G₂).e([-∑ᵢλᵢ[Hᵢ(α)]G₁), [α]G₂) + check, err := bls1239.PairingCheck( + []bls1239.G1Affine{foldedDigests, foldedQuotients}, + []bls1239.G2Affine{srs.G2[0], srs.G2[1]}, + ) + if err != nil { + return err + } + if !check { + return ErrVerifyOpeningProof + } + return nil + +} + +// fold folds digests and evaluations using the list of factors as random numbers. 
+// +// * digests list of digests to fold +// * evaluations list of evaluations to fold +// * factors list of multiplicative factors used for the folding (in Montgomery form) +// +// * Returns ∑ᵢcᵢdᵢ, ∑ᵢcᵢf(aᵢ) +func fold(di []Digest, fai []fr.Element, ci []fr.Element) (Digest, fr.Element, error) { + + // length inconsistancy between digests and evaluations should have been done before calling this function + nbDigests := len(di) + + // fold the claimed values ∑ᵢcᵢf(aᵢ) + var foldedEvaluations, tmp fr.Element + for i := 0; i < nbDigests; i++ { + tmp.Mul(&fai[i], &ci[i]) + foldedEvaluations.Add(&foldedEvaluations, &tmp) + } + + // fold the digests ∑ᵢ[cᵢ]([fᵢ(α)]G₁) + var foldedDigests Digest + _, err := foldedDigests.MultiExp(di, ci, ecc.MultiExpConfig{ScalarsMont: true}) + if err != nil { + return foldedDigests, foldedEvaluations, err + } + + // folding done + return foldedDigests, foldedEvaluations, nil + +} + +// deriveGamma derives a challenge using Fiat Shamir to fold proofs. +func deriveGamma(point fr.Element, digests []Digest, hf hash.Hash) (fr.Element, error) { + + // derive the challenge gamma, binded to the point and the commitments + fs := fiatshamir.NewTranscript(hf, "gamma") + if err := fs.Bind("gamma", point.Marshal()); err != nil { + return fr.Element{}, err + } + for i := 0; i < len(digests); i++ { + if err := fs.Bind("gamma", digests[i].Marshal()); err != nil { + return fr.Element{}, err + } + } + gammaByte, err := fs.ComputeChallenge("gamma") + if err != nil { + return fr.Element{}, err + } + var gamma fr.Element + gamma.SetBytes(gammaByte) + + return gamma, nil +} + +// dividePolyByXminusA computes (f-f(a))/(x-a), in canonical basis, in regular form +// f memory is re-used for the result +func dividePolyByXminusA(f []fr.Element, fa, a fr.Element) []fr.Element { + + // first we compute f-f(a) + f[0].Sub(&f[0], &fa) + + // now we use syntetic division to divide by x-a + var t fr.Element + for i := len(f) - 2; i >= 0; i-- { + t.Mul(&f[i+1], &a) + + 
f[i].Add(&f[i], &t) + } + + // the result is of degree deg(f)-1 + return f[1:] +} diff --git a/ecc/bls12-39/fr/kzg/kzg_test.go b/ecc/bls12-39/fr/kzg/kzg_test.go new file mode 100644 index 0000000000..59f46179c9 --- /dev/null +++ b/ecc/bls12-39/fr/kzg/kzg_test.go @@ -0,0 +1,481 @@ +// Copyright 2020 ConsenSys Software Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by consensys/gnark-crypto DO NOT EDIT + +package kzg + +import ( + "bytes" + "crypto/sha256" + "math/big" + "reflect" + "testing" + + "github.com/consensys/gnark-crypto/ecc" + "github.com/consensys/gnark-crypto/ecc/bls12-39" + "github.com/consensys/gnark-crypto/ecc/bls12-39/fr" +) + +// testSRS re-used accross tests of the KZG scheme +var testSRS *SRS + +func init() { + const srsSize = 230 + testSRS, _ = NewSRS(ecc.NextPowerOfTwo(srsSize), new(big.Int).SetInt64(42)) +} + +func TestDividePolyByXminusA(t *testing.T) { + + const pSize = 230 + + // build random polynomial + pol := make([]fr.Element, pSize) + pol[0].SetRandom() + for i := 1; i < pSize; i++ { + pol[i] = pol[i-1] + } + + // evaluate the polynomial at a random point + var point fr.Element + point.SetRandom() + evaluation := eval(pol, point) + + // probabilistic test (using Schwartz Zippel lemma, evaluation at one point is enough) + var randPoint, xminusa fr.Element + randPoint.SetRandom() + polRandpoint := eval(pol, randPoint) + polRandpoint.Sub(&polRandpoint, &evaluation) // f(rand)-f(point) + + // compute 
f-f(a)/x-a + h := dividePolyByXminusA(pol, evaluation, point) + pol = nil // h reuses this memory + + if len(h) != 229 { + t.Fatal("inconsistant size of quotient") + } + + hRandPoint := eval(h, randPoint) + xminusa.Sub(&randPoint, &point) // rand-point + + // f(rand)-f(point) ==? h(rand)*(rand-point) + hRandPoint.Mul(&hRandPoint, &xminusa) + + if !hRandPoint.Equal(&polRandpoint) { + t.Fatal("Error f-f(a)/x-a") + } +} + +func TestSerializationSRS(t *testing.T) { + + // create a SRS + srs, err := NewSRS(64, new(big.Int).SetInt64(42)) + if err != nil { + t.Fatal(err) + } + + // serialize it... + var buf bytes.Buffer + _, err = srs.WriteTo(&buf) + if err != nil { + t.Fatal(err) + } + + // reconstruct the SRS + var _srs SRS + _, err = _srs.ReadFrom(&buf) + if err != nil { + t.Fatal(err) + } + + // compare + if !reflect.DeepEqual(srs, &_srs) { + t.Fatal("scheme serialization failed") + } + +} + +func TestCommit(t *testing.T) { + + // create a polynomial + f := make([]fr.Element, 60) + for i := 0; i < 60; i++ { + f[i].SetRandom() + } + + // commit using the method from KZG + _kzgCommit, err := Commit(f, testSRS) + if err != nil { + t.Fatal(err) + } + var kzgCommit bls1239.G1Affine + kzgCommit.Unmarshal(_kzgCommit.Marshal()) + + // check commitment using manual commit + var x fr.Element + x.SetString("42") + fx := eval(f, x) + var fxbi big.Int + fx.ToBigIntRegular(&fxbi) + var manualCommit bls1239.G1Affine + manualCommit.Set(&testSRS.G1[0]) + manualCommit.ScalarMultiplication(&manualCommit, &fxbi) + + // compare both results + if !kzgCommit.Equal(&manualCommit) { + t.Fatal("error KZG commitment") + } + +} + +func TestVerifySinglePoint(t *testing.T) { + + // create a polynomial + f := randomPolynomial(60) + + // commit the polynomial + digest, err := Commit(f, testSRS) + if err != nil { + t.Fatal(err) + } + + // compute opening proof at a random point + var point fr.Element + point.SetString("4321") + proof, err := Open(f, point, testSRS) + if err != nil { + t.Fatal(err) + 
} + + // verify the claimed valued + expected := eval(f, point) + if !proof.ClaimedValue.Equal(&expected) { + t.Fatal("inconsistant claimed value") + } + + // verify correct proof + err = Verify(&digest, &proof, point, testSRS) + if err != nil { + t.Fatal(err) + } + + { + // verify wrong proof + proof.ClaimedValue.Double(&proof.ClaimedValue) + err = Verify(&digest, &proof, point, testSRS) + if err == nil { + t.Fatal("verifying wrong proof should have failed") + } + } + { + // verify wrong proof with quotient set to zero + // see https://cryptosubtlety.medium.com/00-8d4adcf4d255 + proof.H.X.SetZero() + proof.H.Y.SetZero() + err = Verify(&digest, &proof, point, testSRS) + if err == nil { + t.Fatal("verifying wrong proof should have failed") + } + } +} + +func TestBatchVerifySinglePoint(t *testing.T) { + + size := 40 + + // create polynomials + f := make([][]fr.Element, 10) + for i := 0; i < 10; i++ { + f[i] = randomPolynomial(size) + } + + // commit the polynomials + digests := make([]Digest, 10) + for i := 0; i < 10; i++ { + digests[i], _ = Commit(f[i], testSRS) + + } + + // pick a hash function + hf := sha256.New() + + // compute opening proof at a random point + var point fr.Element + point.SetString("4321") + proof, err := BatchOpenSinglePoint(f, digests, point, hf, testSRS) + if err != nil { + t.Fatal(err) + } + + // verify the claimed values + for i := 0; i < 10; i++ { + expectedClaim := eval(f[i], point) + if !expectedClaim.Equal(&proof.ClaimedValues[i]) { + t.Fatal("inconsistant claimed values") + } + } + + // verify correct proof + err = BatchVerifySinglePoint(digests, &proof, point, hf, testSRS) + if err != nil { + t.Fatal(err) + } + + { + // verify wrong proof + proof.ClaimedValues[0].Double(&proof.ClaimedValues[0]) + err = BatchVerifySinglePoint(digests, &proof, point, hf, testSRS) + if err == nil { + t.Fatal("verifying wrong proof should have failed") + } + } + { + // verify wrong proof with quotient set to zero + // see 
https://cryptosubtlety.medium.com/00-8d4adcf4d255 + proof.H.X.SetZero() + proof.H.Y.SetZero() + err = BatchVerifySinglePoint(digests, &proof, point, hf, testSRS) + if err == nil { + t.Fatal("verifying wrong proof should have failed") + } + } + +} + +func TestBatchVerifyMultiPoints(t *testing.T) { + + // create polynomials + f := make([][]fr.Element, 10) + for i := 0; i < 10; i++ { + f[i] = randomPolynomial(40) + } + + // commit the polynomials + digests := make([]Digest, 10) + for i := 0; i < 10; i++ { + digests[i], _ = Commit(f[i], testSRS) + } + + // pick a hash function + hf := sha256.New() + + // compute 2 batch opening proofs at 2 random points + points := make([]fr.Element, 2) + batchProofs := make([]BatchOpeningProof, 2) + points[0].SetRandom() + batchProofs[0], _ = BatchOpenSinglePoint(f[:5], digests[:5], points[0], hf, testSRS) + points[1].SetRandom() + batchProofs[1], _ = BatchOpenSinglePoint(f[5:], digests[5:], points[1], hf, testSRS) + + // fold the 2 batch opening proofs + proofs := make([]OpeningProof, 2) + foldedDigests := make([]Digest, 2) + proofs[0], foldedDigests[0], _ = FoldProof(digests[:5], &batchProofs[0], points[0], hf) + proofs[1], foldedDigests[1], _ = FoldProof(digests[5:], &batchProofs[1], points[1], hf) + + // check the the individual batch proofs are correct + err := Verify(&foldedDigests[0], &proofs[0], points[0], testSRS) + if err != nil { + t.Fatal(err) + } + err = Verify(&foldedDigests[1], &proofs[1], points[1], testSRS) + if err != nil { + t.Fatal(err) + } + + // batch verify correct folded proofs + err = BatchVerifyMultiPoints(foldedDigests, proofs, points, testSRS) + if err != nil { + t.Fatal(err) + } + + { + // batch verify tampered folded proofs + proofs[0].ClaimedValue.Double(&proofs[0].ClaimedValue) + + err = BatchVerifyMultiPoints(foldedDigests, proofs, points, testSRS) + if err == nil { + t.Fatal(err) + } + } + { + // batch verify tampered folded proofs with quotients set to infinity + // see 
https://cryptosubtlety.medium.com/00-8d4adcf4d255 + proofs[0].H.X.SetZero() + proofs[0].H.Y.SetZero() + proofs[1].H.X.SetZero() + proofs[1].H.Y.SetZero() + err = BatchVerifyMultiPoints(foldedDigests, proofs, points, testSRS) + if err == nil { + t.Fatal(err) + } + } + +} + +const benchSize = 1 << 16 + +func BenchmarkKZGCommit(b *testing.B) { + benchSRS, err := NewSRS(ecc.NextPowerOfTwo(benchSize), new(big.Int).SetInt64(42)) + if err != nil { + b.Fatal(err) + } + // random polynomial + p := randomPolynomial(benchSize / 2) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, _ = Commit(p, benchSRS) + } +} + +func BenchmarkDivideByXMinusA(b *testing.B) { + const pSize = 1 << 22 + + // build random polynomial + pol := make([]fr.Element, pSize) + pol[0].SetRandom() + for i := 1; i < pSize; i++ { + pol[i] = pol[i-1] + } + var a, fa fr.Element + a.SetRandom() + fa.SetRandom() + + b.ResetTimer() + for i := 0; i < b.N; i++ { + dividePolyByXminusA(pol, fa, a) + pol = pol[:pSize] + pol[pSize-1] = pol[0] + } +} + +func BenchmarkKZGOpen(b *testing.B) { + benchSRS, err := NewSRS(ecc.NextPowerOfTwo(benchSize), new(big.Int).SetInt64(42)) + if err != nil { + b.Fatal(err) + } + + // random polynomial + p := randomPolynomial(benchSize / 2) + var r fr.Element + r.SetRandom() + + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, _ = Open(p, r, benchSRS) + } +} + +func BenchmarkKZGVerify(b *testing.B) { + benchSRS, err := NewSRS(ecc.NextPowerOfTwo(benchSize), new(big.Int).SetInt64(42)) + if err != nil { + b.Fatal(err) + } + + // random polynomial + p := randomPolynomial(benchSize / 2) + var r fr.Element + r.SetRandom() + + // commit + comm, err := Commit(p, benchSRS) + if err != nil { + b.Fatal(err) + } + + // open + openingProof, err := Open(p, r, benchSRS) + if err != nil { + b.Fatal(err) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + Verify(&comm, &openingProof, r, benchSRS) + } +} + +func BenchmarkKZGBatchOpen10(b *testing.B) { + benchSRS, err := 
NewSRS(ecc.NextPowerOfTwo(benchSize), new(big.Int).SetInt64(42)) + if err != nil { + b.Fatal(err) + } + + // 10 random polynomials + var ps [10][]fr.Element + for i := 0; i < 10; i++ { + ps[i] = randomPolynomial(benchSize / 2) + } + + // commitments + var commitments [10]Digest + for i := 0; i < 10; i++ { + commitments[i], _ = Commit(ps[i], benchSRS) + } + + // pick a hash function + hf := sha256.New() + + var r fr.Element + r.SetRandom() + + b.ResetTimer() + for i := 0; i < b.N; i++ { + BatchOpenSinglePoint(ps[:], commitments[:], r, hf, benchSRS) + } +} + +func BenchmarkKZGBatchVerify10(b *testing.B) { + benchSRS, err := NewSRS(ecc.NextPowerOfTwo(benchSize), new(big.Int).SetInt64(42)) + if err != nil { + b.Fatal(err) + } + + // 10 random polynomials + var ps [10][]fr.Element + for i := 0; i < 10; i++ { + ps[i] = randomPolynomial(benchSize / 2) + } + + // commitments + var commitments [10]Digest + for i := 0; i < 10; i++ { + commitments[i], _ = Commit(ps[i], benchSRS) + } + + // pick a hash function + hf := sha256.New() + + var r fr.Element + r.SetRandom() + + proof, err := BatchOpenSinglePoint(ps[:], commitments[:], r, hf, benchSRS) + if err != nil { + b.Fatal(err) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + BatchVerifySinglePoint(commitments[:], &proof, r, hf, benchSRS) + } +} + +func randomPolynomial(size int) []fr.Element { + f := make([]fr.Element, size) + for i := 0; i < size; i++ { + f[i].SetRandom() + } + return f +} diff --git a/ecc/bls12-39/fr/kzg/marshal.go b/ecc/bls12-39/fr/kzg/marshal.go new file mode 100644 index 0000000000..5f813b8c8d --- /dev/null +++ b/ecc/bls12-39/fr/kzg/marshal.go @@ -0,0 +1,134 @@ +// Copyright 2020 ConsenSys Software Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by consensys/gnark-crypto DO NOT EDIT + +package kzg + +import ( + "github.com/consensys/gnark-crypto/ecc/bls12-39" + "io" +) + +// WriteTo writes binary encoding of the SRS +func (srs *SRS) WriteTo(w io.Writer) (int64, error) { + // encode the SRS + enc := bls1239.NewEncoder(w) + + toEncode := []interface{}{ + &srs.G2[0], + &srs.G2[1], + srs.G1, + } + + for _, v := range toEncode { + if err := enc.Encode(v); err != nil { + return enc.BytesWritten(), err + } + } + + return enc.BytesWritten(), nil +} + +// ReadFrom decodes SRS data from reader. +func (srs *SRS) ReadFrom(r io.Reader) (int64, error) { + // decode the SRS + dec := bls1239.NewDecoder(r) + + toDecode := []interface{}{ + &srs.G2[0], + &srs.G2[1], + &srs.G1, + } + + for _, v := range toDecode { + if err := dec.Decode(v); err != nil { + return dec.BytesRead(), err + } + } + + return dec.BytesRead(), nil +} + +// WriteTo writes binary encoding of a OpeningProof +func (proof *OpeningProof) WriteTo(w io.Writer) (int64, error) { + enc := bls1239.NewEncoder(w) + + toEncode := []interface{}{ + &proof.H, + &proof.ClaimedValue, + } + + for _, v := range toEncode { + if err := enc.Encode(v); err != nil { + return enc.BytesWritten(), err + } + } + + return enc.BytesWritten(), nil +} + +// ReadFrom decodes OpeningProof data from reader. 
+func (proof *OpeningProof) ReadFrom(r io.Reader) (int64, error) { + dec := bls1239.NewDecoder(r) + + toDecode := []interface{}{ + &proof.H, + &proof.ClaimedValue, + } + + for _, v := range toDecode { + if err := dec.Decode(v); err != nil { + return dec.BytesRead(), err + } + } + + return dec.BytesRead(), nil +} + +// WriteTo writes binary encoding of a BatchOpeningProof +func (proof *BatchOpeningProof) WriteTo(w io.Writer) (int64, error) { + enc := bls1239.NewEncoder(w) + + toEncode := []interface{}{ + &proof.H, + proof.ClaimedValues, + } + + for _, v := range toEncode { + if err := enc.Encode(v); err != nil { + return enc.BytesWritten(), err + } + } + + return enc.BytesWritten(), nil +} + +// ReadFrom decodes BatchOpeningProof data from reader. +func (proof *BatchOpeningProof) ReadFrom(r io.Reader) (int64, error) { + dec := bls1239.NewDecoder(r) + + toDecode := []interface{}{ + &proof.H, + &proof.ClaimedValues, + } + + for _, v := range toDecode { + if err := dec.Decode(v); err != nil { + return dec.BytesRead(), err + } + } + + return dec.BytesRead(), nil +} diff --git a/ecc/bls12-39/fr/mimc/doc.go b/ecc/bls12-39/fr/mimc/doc.go new file mode 100644 index 0000000000..497bd40a97 --- /dev/null +++ b/ecc/bls12-39/fr/mimc/doc.go @@ -0,0 +1,18 @@ +// Copyright 2020 ConsenSys Software Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Code generated by consensys/gnark-crypto DO NOT EDIT + +// Package mimc provides MiMC hash function using Miyaguchi–Preneel construction. +package mimc diff --git a/ecc/bls12-39/fr/mimc/fuzz.go b/ecc/bls12-39/fr/mimc/fuzz.go new file mode 100644 index 0000000000..6410e43473 --- /dev/null +++ b/ecc/bls12-39/fr/mimc/fuzz.go @@ -0,0 +1,34 @@ +//go:build gofuzz +// +build gofuzz + +// Copyright 2020 ConsenSys Software Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by consensys/gnark-crypto DO NOT EDIT + +package mimc + +const ( + fuzzInteresting = 1 + fuzzNormal = 0 + fuzzDiscard = -1 +) + +func Fuzz(data []byte) int { + var s []byte + h := NewMiMC() + h.Write(data) + h.Sum(s) + return fuzzNormal +} diff --git a/ecc/bls12-39/fr/mimc/mimc.go b/ecc/bls12-39/fr/mimc/mimc.go new file mode 100644 index 0000000000..9e1d0afa99 --- /dev/null +++ b/ecc/bls12-39/fr/mimc/mimc.go @@ -0,0 +1,184 @@ +// Copyright 2020 ConsenSys Software Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by consensys/gnark-crypto DO NOT EDIT + +package mimc + +import ( + "hash" + + "github.com/consensys/gnark-crypto/ecc/bls12-39/fr" + "golang.org/x/crypto/sha3" + "math/big" + "sync" +) + +const ( + mimcNbRounds = 91 + seed = "seed" // seed to derive the constants + BlockSize = fr.Bytes // BlockSize size that mimc consumes +) + +// Params constants for the mimc hash function +var ( + mimcConstants [mimcNbRounds]fr.Element + once sync.Once +) + +// digest represents the partial evaluation of the checksum +// along with the params of the mimc function +type digest struct { + h fr.Element + data []byte // data to hash +} + +// GetConstants exposed to be used in gnark +func GetConstants() []big.Int { + once.Do(initConstants) // init constants + res := make([]big.Int, mimcNbRounds) + for i := 0; i < mimcNbRounds; i++ { + mimcConstants[i].ToBigIntRegular(&res[i]) + } + return res +} + +// NewMiMC returns a MiMCImpl object, pure-go reference implementation +func NewMiMC() hash.Hash { + d := new(digest) + d.Reset() + return d +} + +// Reset resets the Hash to its initial state. +func (d *digest) Reset() { + d.data = nil + d.h.SetZero() +} + +// Sum appends the current hash to b and returns the resulting slice. +// It does not change the underlying hash state. +func (d *digest) Sum(b []byte) []byte { + buffer := d.checksum() + d.data = nil // flush the data already hashed + hash := buffer.Bytes() + b = append(b, hash[:]...) + return b +} + +// BlockSize returns the hash's underlying block size. +// The Write method must be able to accept any amount +// of data, but it may operate more efficiently if all writes +// are a multiple of the block size. +func (d *digest) Size() int { + return BlockSize +} + +// BlockSize returns the number of bytes Sum will return. 
+func (d *digest) BlockSize() int { + return BlockSize +} + +// Write (via the embedded io.Writer interface) adds more data to the running hash. +// It never returns an error. +func (d *digest) Write(p []byte) (n int, err error) { + n = len(p) + d.data = append(d.data, p...) + return +} + +// Hash hash using Miyaguchi–Preneel: +// https://en.wikipedia.org/wiki/One-way_compression_function +// The XOR operation is replaced by field addition, data is in Montgomery form +func (d *digest) checksum() fr.Element { + + var buffer [BlockSize]byte + var x fr.Element + + // if data size is not multiple of BlockSizes we padd: + // .. || 0xaf8 -> .. || 0x0000...0af8 + if len(d.data)%BlockSize != 0 { + q := len(d.data) / BlockSize + r := len(d.data) % BlockSize + sliceq := make([]byte, q*BlockSize) + copy(sliceq, d.data) + slicer := make([]byte, r) + copy(slicer, d.data[q*BlockSize:]) + sliceremainder := make([]byte, BlockSize-r) + d.data = append(sliceq, sliceremainder...) + d.data = append(d.data, slicer...) + } + + if len(d.data) == 0 { + d.data = make([]byte, 32) + } + + nbChunks := len(d.data) / BlockSize + + for i := 0; i < nbChunks; i++ { + copy(buffer[:], d.data[i*BlockSize:(i+1)*BlockSize]) + x.SetBytes(buffer[:]) + r := d.encrypt(x) + d.h.Add(&r, &d.h).Add(&d.h, &x) + } + + return d.h +} + +// plain execution of a mimc run +// m: message +// k: encryption key +func (d *digest) encrypt(m fr.Element) fr.Element { + once.Do(initConstants) // init constants + + for i := 0; i < mimcNbRounds; i++ { + // m = (m+k+c)^7 + var tmp fr.Element + tmp.Add(&m, &d.h).Add(&tmp, &mimcConstants[i]) + m.Square(&tmp). + Square(&m). + Square(&m). 
+ Mul(&m, &tmp) + } + m.Add(&m, &d.h) + return m +} + +// Sum computes the mimc hash of msg from seed +func Sum(msg []byte) ([]byte, error) { + var d digest + if _, err := d.Write(msg); err != nil { + return nil, err + } + h := d.checksum() + bytes := h.Bytes() + return bytes[:], nil +} + +func initConstants() { + bseed := ([]byte)(seed) + + hash := sha3.NewLegacyKeccak256() + _, _ = hash.Write(bseed) + rnd := hash.Sum(nil) // pre hash before use + hash.Reset() + _, _ = hash.Write(rnd) + + for i := 0; i < mimcNbRounds; i++ { + rnd = hash.Sum(nil) + mimcConstants[i].SetBytes(rnd) + hash.Reset() + _, _ = hash.Write(rnd) + } +} diff --git a/ecc/bls12-39/fr/permutation/doc.go b/ecc/bls12-39/fr/permutation/doc.go new file mode 100644 index 0000000000..bdf98e6ca9 --- /dev/null +++ b/ecc/bls12-39/fr/permutation/doc.go @@ -0,0 +1,18 @@ +// Copyright 2020 ConsenSys Software Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by consensys/gnark-crypto DO NOT EDIT + +// Package permutation provides an API to build permutation proofs. +package permutation diff --git a/ecc/bls12-39/fr/permutation/permutation.go b/ecc/bls12-39/fr/permutation/permutation.go new file mode 100644 index 0000000000..698144b7e7 --- /dev/null +++ b/ecc/bls12-39/fr/permutation/permutation.go @@ -0,0 +1,379 @@ +// Copyright 2020 ConsenSys Software Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by consensys/gnark-crypto DO NOT EDIT + +package permutation + +import ( + "crypto/sha256" + "errors" + "math/big" + "math/bits" + + "github.com/consensys/gnark-crypto/ecc/bls12-39" + "github.com/consensys/gnark-crypto/ecc/bls12-39/fr" + "github.com/consensys/gnark-crypto/ecc/bls12-39/fr/fft" + "github.com/consensys/gnark-crypto/ecc/bls12-39/fr/kzg" + fiatshamir "github.com/consensys/gnark-crypto/fiat-shamir" +) + +var ( + ErrIncompatibleSize = errors.New("t1 and t2 should be of the same size") + ErrSize = errors.New("t1 and t2 should be of size a power of 2") + ErrPermutationProof = errors.New("permutation proof verification failed") + ErrGenerator = errors.New("wrong generator") +) + +// Proof proof that the commitments of t1 and t2 come from +// the same vector but permuted. +type Proof struct { + + // size of the polynomials + size int + + // generator of the fft domain, used for shifting the evaluation point + g fr.Element + + // commitments of t1 & t2, the permuted vectors, and z, the accumulation + // polynomial + t1, t2, z kzg.Digest + + // commitment to the quotient polynomial + q kzg.Digest + + // opening proofs of t1, t2, z, q (in that order) + batchedProof kzg.BatchOpeningProof + + // shifted opening proof of z + shiftedProof kzg.OpeningProof +} + +// evaluateAccumulationPolynomialBitReversed returns the accumulation polynomial in Lagrange basis. 
+func evaluateAccumulationPolynomialBitReversed(lt1, lt2 []fr.Element, epsilon fr.Element) []fr.Element { + + s := len(lt1) + z := make([]fr.Element, s) + d := make([]fr.Element, s) + z[0].SetOne() + d[0].SetOne() + nn := uint64(64 - bits.TrailingZeros64(uint64(s))) + var t fr.Element + for i := 0; i < s-1; i++ { + _i := int(bits.Reverse64(uint64(i)) >> nn) + _ii := int(bits.Reverse64(uint64((i+1)%s)) >> nn) + z[_ii].Mul(&z[_i], t.Sub(&epsilon, <1[i])) + d[i+1].Mul(&d[i], t.Sub(&epsilon, <2[i])) + } + d = fr.BatchInvert(d) + for i := 0; i < s-1; i++ { + _ii := int(bits.Reverse64(uint64((i+1)%s)) >> nn) + z[_ii].Mul(&z[_ii], &d[i+1]) + } + + return z +} + +// evaluateFirstPartNumReverse computes lt2*z(gx) - lt1*z +func evaluateFirstPartNumReverse(lt1, lt2, lz []fr.Element, epsilon fr.Element) []fr.Element { + + s := len(lt1) + res := make([]fr.Element, s) + var a, b fr.Element + nn := uint64(64 - bits.TrailingZeros64(uint64(s))) + for i := 0; i < s; i++ { + _i := int(bits.Reverse64(uint64(i)) >> nn) + _ii := int(bits.Reverse64(uint64((i+1)%s)) >> nn) + a.Sub(&epsilon, <2[_i]) + a.Mul(&lz[_ii], &a) + b.Sub(&epsilon, <1[_i]) + b.Mul(&lz[_i], &b) + res[_i].Sub(&a, &b) + } + return res +} + +// evaluateSecondPartNumReverse computes L0 * (z-1) +func evaluateSecondPartNumReverse(lz []fr.Element, d *fft.Domain) []fr.Element { + + var tn, o, g fr.Element + o.SetOne() + tn.Exp(d.FrMultiplicativeGen, big.NewInt(int64(d.Cardinality))). + Sub(&tn, &o) + s := len(lz) + u := make([]fr.Element, s) + g.Set(&d.FrMultiplicativeGen) + for i := 0; i < s; i++ { + u[i].Sub(&g, &o) + g.Mul(&g, &d.Generator) + } + u = fr.BatchInvert(u) + res := make([]fr.Element, s) + nn := uint64(64 - bits.TrailingZeros64(uint64(s))) + for i := 0; i < s; i++ { + _i := int(bits.Reverse64(uint64(i)) >> nn) + res[_i].Sub(&lz[_i], &o). + Mul(&res[_i], &u[i]). + Mul(&res[_i], &tn) + } + return res +} + +// Prove generates a proof that t1 and t2 are the same but permuted. 
+// The size of t1 and t2 should be the same and a power of 2. +func Prove(srs *kzg.SRS, t1, t2 []fr.Element) (Proof, error) { + + // res + var proof Proof + var err error + + // size checking + if len(t1) != len(t2) { + return proof, ErrIncompatibleSize + } + + // create the domains + d := fft.NewDomain(uint64(len(t1))) + if d.Cardinality != uint64(len(t1)) { + return proof, ErrSize + } + s := int(d.Cardinality) + proof.size = s + proof.g.Set(&d.Generator) + + // hash function for Fiat Shamir + hFunc := sha256.New() + + // transcript to derive the challenge + fs := fiatshamir.NewTranscript(hFunc, "epsilon", "omega", "eta") + + // commit t1, t2 + ct1 := make([]fr.Element, s) + ct2 := make([]fr.Element, s) + copy(ct1, t1) + copy(ct2, t2) + d.FFTInverse(ct1, fft.DIF) + d.FFTInverse(ct2, fft.DIF) + fft.BitReverse(ct1) + fft.BitReverse(ct2) + proof.t1, err = kzg.Commit(ct1, srs) + if err != nil { + return proof, err + } + proof.t2, err = kzg.Commit(ct2, srs) + if err != nil { + return proof, err + } + + // derive challenge for z + epsilon, err := deriveRandomness(&fs, "epsilon", &proof.t1, &proof.t2) + if err != nil { + return proof, err + } + + // compute Z and commit it + cz := evaluateAccumulationPolynomialBitReversed(t1, t2, epsilon) + d.FFTInverse(cz, fft.DIT) + proof.z, err = kzg.Commit(cz, srs) + if err != nil { + return proof, err + } + lz := make([]fr.Element, s) + copy(lz, cz) + d.FFT(lz, fft.DIF, true) + + // compute the first part of the numerator + lt1 := make([]fr.Element, s) + lt2 := make([]fr.Element, s) + copy(lt1, ct1) + copy(lt2, ct2) + d.FFT(lt1, fft.DIF, true) + d.FFT(lt2, fft.DIF, true) + lsNumFirstPart := evaluateFirstPartNumReverse(lt1, lt2, lz, epsilon) + + // compute second part of the numerator + lsNum := evaluateSecondPartNumReverse(lz, d) + + // derive challenge used for the folding + omega, err := deriveRandomness(&fs, "omega", &proof.z) + if err != nil { + return proof, err + } + + // fold the numerator and divide it by x^n-1 + var t, one 
fr.Element + one.SetOne() + t.Exp(d.FrMultiplicativeGen, big.NewInt(int64(d.Cardinality))).Sub(&t, &one).Inverse(&t) + for i := 0; i < s; i++ { + lsNum[i].Mul(&omega, &lsNum[i]). + Add(&lsNum[i], &lsNumFirstPart[i]). + Mul(&lsNum[i], &t) + } + + // get the quotient and commit it + d.FFTInverse(lsNum, fft.DIT, true) + proof.q, err = kzg.Commit(lsNum, srs) + if err != nil { + return proof, err + } + + // derive the evaluation challenge + eta, err := deriveRandomness(&fs, "eta", &proof.q) + if err != nil { + return proof, err + } + + // compute the opening proofs + proof.batchedProof, err = kzg.BatchOpenSinglePoint( + [][]fr.Element{ + ct1, + ct2, + cz, + lsNum, + }, + []kzg.Digest{ + proof.t1, + proof.t2, + proof.z, + proof.q, + }, + eta, + hFunc, + srs, + ) + if err != nil { + return proof, err + } + + var shiftedEta fr.Element + shiftedEta.Mul(&eta, &d.Generator) + proof.shiftedProof, err = kzg.Open( + cz, + shiftedEta, + srs, + ) + if err != nil { + return proof, err + } + + // done + return proof, nil + +} + +// Verify verifies a permutation proof. +func Verify(srs *kzg.SRS, proof Proof) error { + + // hash function that is used for Fiat Shamir + hFunc := sha256.New() + + // transcript to derive the challenge + fs := fiatshamir.NewTranscript(hFunc, "epsilon", "omega", "eta") + + // derive the challenges + epsilon, err := deriveRandomness(&fs, "epsilon", &proof.t1, &proof.t2) + if err != nil { + return err + } + + omega, err := deriveRandomness(&fs, "omega", &proof.z) + if err != nil { + return err + } + + eta, err := deriveRandomness(&fs, "eta", &proof.q) + if err != nil { + return err + } + + // check the relation + bs := big.NewInt(int64(proof.size)) + var l0, a, b, one, rhs, lhs fr.Element + one.SetOne() + rhs.Exp(eta, bs). + Sub(&rhs, &one) + a.Sub(&eta, &one) + l0.Div(&rhs, &a) + rhs.Mul(&rhs, &proof.batchedProof.ClaimedValues[3]) + a.Sub(&epsilon, &proof.batchedProof.ClaimedValues[1]). 
+ Mul(&a, &proof.shiftedProof.ClaimedValue) + b.Sub(&epsilon, &proof.batchedProof.ClaimedValues[0]). + Mul(&b, &proof.batchedProof.ClaimedValues[2]) + lhs.Sub(&a, &b) + a.Sub(&proof.batchedProof.ClaimedValues[2], &one). + Mul(&a, &l0). + Mul(&a, &omega) + lhs.Add(&a, &lhs) + if !lhs.Equal(&rhs) { + return ErrPermutationProof + } + + // check the opening proofs + err = kzg.BatchVerifySinglePoint( + []kzg.Digest{ + proof.t1, + proof.t2, + proof.z, + proof.q, + }, + &proof.batchedProof, + eta, + hFunc, + srs, + ) + if err != nil { + return err + } + + var shiftedEta fr.Element + shiftedEta.Mul(&eta, &proof.g) + err = kzg.Verify(&proof.z, &proof.shiftedProof, shiftedEta, srs) + if err != nil { + return err + } + + // check the generator is correct + var checkOrder fr.Element + checkOrder.Exp(proof.g, big.NewInt(int64(proof.size/2))) + if checkOrder.Equal(&one) { + return ErrGenerator + } + checkOrder.Square(&checkOrder) + if !checkOrder.Equal(&one) { + return ErrGenerator + } + + return nil +} + +// TODO put that in fiat-shamir package +func deriveRandomness(fs *fiatshamir.Transcript, challenge string, points ...*bls1239.G1Affine) (fr.Element, error) { + + var buf [bls1239.SizeOfG1AffineUncompressed]byte + var r fr.Element + + for _, p := range points { + buf = p.RawBytes() + if err := fs.Bind(challenge, buf[:]); err != nil { + return r, err + } + } + + b, err := fs.ComputeChallenge(challenge) + if err != nil { + return r, err + } + r.SetBytes(b) + return r, nil +} diff --git a/ecc/bls12-39/fr/permutation/permutation_test.go b/ecc/bls12-39/fr/permutation/permutation_test.go new file mode 100644 index 0000000000..ca1fa2b3ba --- /dev/null +++ b/ecc/bls12-39/fr/permutation/permutation_test.go @@ -0,0 +1,94 @@ +// Copyright 2020 ConsenSys Software Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by consensys/gnark-crypto DO NOT EDIT + +package permutation + +import ( + "math/big" + "testing" + + "github.com/consensys/gnark-crypto/ecc/bls12-39/fr" + "github.com/consensys/gnark-crypto/ecc/bls12-39/fr/kzg" +) + +func TestProof(t *testing.T) { + + srs, err := kzg.NewSRS(64, big.NewInt(13)) + if err != nil { + t.Fatal(err) + } + + a := make([]fr.Element, 8) + b := make([]fr.Element, 8) + + for i := 0; i < 8; i++ { + a[i].SetUint64(uint64(4*i + 1)) + } + for i := 0; i < 8; i++ { + b[i].Set(&a[(5*i)%8]) + } + + // correct proof + { + proof, err := Prove(srs, a, b) + if err != nil { + t.Fatal(err) + } + + err = Verify(srs, proof) + if err != nil { + t.Fatal(err) + } + } + + // wrong proof + { + a[0].SetRandom() + proof, err := Prove(srs, a, b) + if err != nil { + t.Fatal(err) + } + + err = Verify(srs, proof) + if err == nil { + t.Fatal(err) + } + } + +} + +func BenchmarkProver(b *testing.B) { + + srsSize := 1 << 15 + polySize := 1 << 14 + + srs, _ := kzg.NewSRS(uint64(srsSize), big.NewInt(13)) + a := make([]fr.Element, polySize) + c := make([]fr.Element, polySize) + + for i := 0; i < polySize; i++ { + a[i].SetUint64(uint64(i)) + } + for i := 0; i < polySize; i++ { + c[i].Set(&a[(5*i)%(polySize)]) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + Prove(srs, a, c) + } + +} diff --git a/ecc/bls12-39/fr/plookup/doc.go b/ecc/bls12-39/fr/plookup/doc.go new file mode 100644 index 0000000000..ec4b912876 --- /dev/null +++ b/ecc/bls12-39/fr/plookup/doc.go @@ -0,0 +1,18 @@ +// Copyright 2020 ConsenSys Software Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by consensys/gnark-crypto DO NOT EDIT + +// Package plookup provides an API to build plookup proofs. +package plookup diff --git a/ecc/bls12-39/fr/plookup/plookup_test.go b/ecc/bls12-39/fr/plookup/plookup_test.go new file mode 100644 index 0000000000..0e12509c0f --- /dev/null +++ b/ecc/bls12-39/fr/plookup/plookup_test.go @@ -0,0 +1,139 @@ +// Copyright 2020 ConsenSys Software Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Code generated by consensys/gnark-crypto DO NOT EDIT + +package plookup + +import ( + "math/big" + "testing" + + "github.com/consensys/gnark-crypto/ecc/bls12-39/fr/kzg" +) + +func TestLookupVector(t *testing.T) { + + lookupVector := make(Table, 8) + fvector := make(Table, 7) + for i := 0; i < 8; i++ { + lookupVector[i].SetUint64(uint64(2 * i)) + } + for i := 0; i < 7; i++ { + fvector[i].Set(&lookupVector[(4*i+1)%8]) + } + + srs, err := kzg.NewSRS(64, big.NewInt(13)) + if err != nil { + t.Fatal(err) + } + + // correct proof vector + { + proof, err := ProveLookupVector(srs, fvector, lookupVector) + if err != nil { + t.Fatal(err) + } + + err = VerifyLookupVector(srs, proof) + if err != nil { + t.Fatal(err) + } + } + + // wrong proofs vector + { + fvector[0].SetRandom() + + proof, err := ProveLookupVector(srs, fvector, lookupVector) + if err != nil { + t.Fatal(err) + } + + err = VerifyLookupVector(srs, proof) + if err == nil { + t.Fatal(err) + } + } + +} + +func TestLookupTable(t *testing.T) { + + srs, err := kzg.NewSRS(64, big.NewInt(13)) + if err != nil { + t.Fatal(err) + } + + lookupTable := make([]Table, 3) + fTable := make([]Table, 3) + for i := 0; i < 3; i++ { + lookupTable[i] = make(Table, 8) + fTable[i] = make(Table, 7) + for j := 0; j < 8; j++ { + lookupTable[i][j].SetUint64(uint64(2*i + j)) + } + for j := 0; j < 7; j++ { + fTable[i][j].Set(&lookupTable[i][(4*j+1)%8]) + } + } + + // correct proof + { + proof, err := ProveLookupTables(srs, fTable, lookupTable) + if err != nil { + t.Fatal(err) + } + + err = VerifyLookupTables(srs, proof) + if err != nil { + t.Fatal(err) + } + } + + // wrong proof + { + fTable[0][0].SetRandom() + proof, err := ProveLookupTables(srs, fTable, lookupTable) + if err != nil { + t.Fatal(err) + } + + err = VerifyLookupTables(srs, proof) + if err == nil { + t.Fatal(err) + } + } + +} + +func BenchmarkPlookup(b *testing.B) { + + srsSize := 1 << 15 + polySize := 1 << 14 + + srs, _ := kzg.NewSRS(uint64(srsSize), big.NewInt(13)) + a := 
make(Table, polySize) + c := make(Table, polySize) + + for i := 0; i < 1<<14; i++ { + a[i].SetUint64(uint64(i)) + c[i].SetUint64(uint64((8 * i) % polySize)) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + ProveLookupVector(srs, a, c) + } +} diff --git a/ecc/bls12-39/fr/plookup/table.go b/ecc/bls12-39/fr/plookup/table.go new file mode 100644 index 0000000000..2cdb857385 --- /dev/null +++ b/ecc/bls12-39/fr/plookup/table.go @@ -0,0 +1,252 @@ +// Copyright 2020 ConsenSys Software Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Code generated by consensys/gnark-crypto DO NOT EDIT + +package plookup + +import ( + "crypto/sha256" + "errors" + "math/big" + "sort" + + bls1239 "github.com/consensys/gnark-crypto/ecc/bls12-39" + "github.com/consensys/gnark-crypto/ecc/bls12-39/fr" + "github.com/consensys/gnark-crypto/ecc/bls12-39/fr/fft" + "github.com/consensys/gnark-crypto/ecc/bls12-39/fr/kzg" + "github.com/consensys/gnark-crypto/ecc/bls12-39/fr/permutation" + fiatshamir "github.com/consensys/gnark-crypto/fiat-shamir" +) + +var ( + ErrIncompatibleSize = errors.New("the tables in f and t are not of the same size") + ErrFoldedCommitment = errors.New("the folded commitment is malformed") + ErrNumberDigests = errors.New("proof.ts and proof.fs are not of the same length") +) + +// ProofLookupTables proofs that a list of tables +type ProofLookupTables struct { + + // commitments to the rows f + fs []kzg.Digest + + // commitments to the rows of t + ts []kzg.Digest + + // lookup proof for the f and t folded + foldedProof ProofLookupVector + + // proof that the ts folded correspond to t in the folded proof + permutationProof permutation.Proof +} + +// ProveLookupTables generates a proof that f, seen as a multi dimensional table, +// consists of vectors that are in t. In other words for each i, f[:][i] must be one +// of the t[:][j]. +// +// For instance, if t is the truth table of the XOR function, t will be populated such +// that t[:][i] contains the i-th entry of the truth table, so t[0][i] XOR t[1][i] = t[2][i]. +// +// The Table in f and t are supposed to be of the same size constant size. 
+func ProveLookupTables(srs *kzg.SRS, f, t []Table) (ProofLookupTables, error) { + + // res + proof := ProofLookupTables{} + var err error + + // hash function used for Fiat Shamir + hFunc := sha256.New() + + // transcript to derive the challenge + fs := fiatshamir.NewTranscript(hFunc, "lambda") + + // check the sizes + if len(f) != len(t) { + return proof, ErrIncompatibleSize + } + s := len(f[0]) + for i := 1; i < len(f); i++ { + if len(f[i]) != s { + return proof, ErrIncompatibleSize + } + } + s = len(t[0]) + for i := 1; i < len(t); i++ { + if len(t[i]) != s { + return proof, ErrIncompatibleSize + } + } + + // commit to the tables in f and t + nbRows := len(t) + proof.fs = make([]kzg.Digest, nbRows) + proof.ts = make([]kzg.Digest, nbRows) + _nbColumns := len(f[0]) + 1 + if _nbColumns < len(t[0]) { + _nbColumns = len(t[0]) + } + d := fft.NewDomain(uint64(_nbColumns)) + nbColumns := d.Cardinality + lfs := make([][]fr.Element, nbRows) + cfs := make([][]fr.Element, nbRows) + lts := make([][]fr.Element, nbRows) + cts := make([][]fr.Element, nbRows) + + for i := 0; i < nbRows; i++ { + + cfs[i] = make([]fr.Element, nbColumns) + lfs[i] = make([]fr.Element, nbColumns) + copy(cfs[i], f[i]) + copy(lfs[i], f[i]) + for j := len(f[i]); j < int(nbColumns); j++ { + cfs[i][j] = f[i][len(f[i])-1] + lfs[i][j] = f[i][len(f[i])-1] + } + d.FFTInverse(cfs[i], fft.DIF) + fft.BitReverse(cfs[i]) + proof.fs[i], err = kzg.Commit(cfs[i], srs) + if err != nil { + return proof, err + } + + cts[i] = make([]fr.Element, nbColumns) + lts[i] = make([]fr.Element, nbColumns) + copy(cts[i], t[i]) + copy(lts[i], t[i]) + for j := len(t[i]); j < int(d.Cardinality); j++ { + cts[i][j] = t[i][len(t[i])-1] + lts[i][j] = t[i][len(t[i])-1] + } + d.FFTInverse(cts[i], fft.DIF) + fft.BitReverse(cts[i]) + proof.ts[i], err = kzg.Commit(cts[i], srs) + if err != nil { + return proof, err + } + } + + // fold f and t + comms := make([]*kzg.Digest, 2*nbRows) + for i := 0; i < nbRows; i++ { + comms[i] = new(kzg.Digest) + 
comms[i].Set(&proof.fs[i]) + comms[nbRows+i] = new(kzg.Digest) + comms[nbRows+i].Set(&proof.ts[i]) + } + lambda, err := deriveRandomness(&fs, "lambda", comms...) + if err != nil { + return proof, err + } + foldedf := make(Table, nbColumns) + foldedt := make(Table, nbColumns) + for i := 0; i < int(nbColumns); i++ { + for j := nbRows - 1; j >= 0; j-- { + foldedf[i].Mul(&foldedf[i], &lambda). + Add(&foldedf[i], &lfs[j][i]) + foldedt[i].Mul(&foldedt[i], &lambda). + Add(&foldedt[i], <s[j][i]) + } + } + + // generate a proof of permutation of the foldedt and sort(foldedt) + foldedtSorted := make(Table, nbColumns) + copy(foldedtSorted, foldedt) + sort.Sort(foldedtSorted) + proof.permutationProof, err = permutation.Prove(srs, foldedt, foldedtSorted) + if err != nil { + return proof, err + } + + // call plookupVector, on foldedf[:len(foldedf)-1] to ensure that the domain size + // in ProveLookupVector is the same as d's + proof.foldedProof, err = ProveLookupVector(srs, foldedf[:len(foldedf)-1], foldedt) + + return proof, err +} + +// VerifyLookupTables verifies that a ProofLookupTables proof is correct. +func VerifyLookupTables(srs *kzg.SRS, proof ProofLookupTables) error { + + // hash function used for Fiat Shamir + hFunc := sha256.New() + + // transcript to derive the challenge + fs := fiatshamir.NewTranscript(hFunc, "lambda") + + // check that the number of digests is the same + if len(proof.fs) != len(proof.ts) { + return ErrNumberDigests + } + + // fold the commitments fs and ts + nbRows := len(proof.fs) + comms := make([]*kzg.Digest, 2*nbRows) + for i := 0; i < nbRows; i++ { + comms[i] = &proof.fs[i] + comms[i+nbRows] = &proof.ts[i] + } + lambda, err := deriveRandomness(&fs, "lambda", comms...) 
+ if err != nil { + return err + } + + // fold the commitments of the rows of t and f + var comf, comt kzg.Digest + comf.Set(&proof.fs[nbRows-1]) + comt.Set(&proof.ts[nbRows-1]) + var blambda big.Int + lambda.ToBigIntRegular(&blambda) + for i := nbRows - 2; i >= 0; i-- { + comf.ScalarMultiplication(&comf, &blambda). + Add(&comf, &proof.fs[i]) + comt.ScalarMultiplication(&comt, &blambda). + Add(&comt, &proof.ts[i]) + } + + // check that the folded commitment of the fs correspond to foldedProof.f + if !comf.Equal(&proof.foldedProof.f) { + return ErrFoldedCommitment + } + + // check that the folded commitment of the ts is a permutation of proof.FoldedProof.t + err = permutation.Verify(srs, proof.permutationProof) + if err != nil { + return err + } + + // verify the inner proof + return VerifyLookupVector(srs, proof.foldedProof) +} + +// TODO put that in fiat-shamir package +func deriveRandomness(fs *fiatshamir.Transcript, challenge string, points ...*bls1239.G1Affine) (fr.Element, error) { + + var buf [bls1239.SizeOfG1AffineUncompressed]byte + var r fr.Element + + for _, p := range points { + buf = p.RawBytes() + if err := fs.Bind(challenge, buf[:]); err != nil { + return r, err + } + } + + b, err := fs.ComputeChallenge(challenge) + if err != nil { + return r, err + } + r.SetBytes(b) + return r, nil +} diff --git a/ecc/bls12-39/fr/plookup/vector.go b/ecc/bls12-39/fr/plookup/vector.go new file mode 100644 index 0000000000..8185f7802b --- /dev/null +++ b/ecc/bls12-39/fr/plookup/vector.go @@ -0,0 +1,735 @@ +// Copyright 2020 ConsenSys Software Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by consensys/gnark-crypto DO NOT EDIT + +package plookup + +import ( + "crypto/sha256" + "errors" + "math/big" + "math/bits" + "sort" + + "github.com/consensys/gnark-crypto/ecc/bls12-39/fr" + "github.com/consensys/gnark-crypto/ecc/bls12-39/fr/fft" + "github.com/consensys/gnark-crypto/ecc/bls12-39/fr/kzg" + fiatshamir "github.com/consensys/gnark-crypto/fiat-shamir" +) + +var ( + ErrNotInTable = errors.New("some value in the vector is not in the lookup table") + ErrPlookupVerification = errors.New("plookup verification failed") + ErrGenerator = errors.New("wrong generator") +) + +type Table []fr.Element + +// Len is the number of elements in the collection. +func (t Table) Len() int { + return len(t) +} + +// Less reports whether the element with +// index i should sort before the element with index j. +func (t Table) Less(i, j int) bool { + return t[i].Cmp(&t[j]) == -1 +} + +// Swap swaps the elements with indexes i and j. +func (t Table) Swap(i, j int) { + t[i], t[j] = t[j], t[i] +} + +// Proof Plookup proof, containing opening proofs +type ProofLookupVector struct { + + // size of the system + size uint64 + + // generator of the fft domain, used for shifting the evaluation point + g fr.Element + + // Commitments to h1, h2, t, z, f, h + h1, h2, t, z, f, h kzg.Digest + + // Batch opening proof of h1, h2, z, t + BatchedProof kzg.BatchOpeningProof + + // Batch opening proof of h1, h2, z shifted by g + BatchedProofShifted kzg.BatchOpeningProof +} + +// evaluateAccumulationPolynomial computes Z, in Lagrange basis. 
Z is the accumulation of the partial +// ratios of 2 fully split polynomials (cf https://eprint.iacr.org/2020/315.pdf) +// * lf is the list of values that should be in lt +// * lt is the lookup table +// * lh1, lh2 is lf sorted by lt split in 2 overlapping slices +// * beta, gamma are challenges (Schwartz-zippel: they are the random evaluations point) +func evaluateAccumulationPolynomial(lf, lt, lh1, lh2 []fr.Element, beta, gamma fr.Element) []fr.Element { + + z := make([]fr.Element, len(lt)) + + n := len(lt) + d := make([]fr.Element, n-1) + var u, c fr.Element + c.SetOne(). + Add(&c, &beta). + Mul(&c, &gamma) + for i := 0; i < n-1; i++ { + + d[i].Mul(&beta, &lh1[i+1]). + Add(&d[i], &lh1[i]). + Add(&d[i], &c) + + u.Mul(&beta, &lh2[i+1]). + Add(&u, &lh2[i]). + Add(&u, &c) + + d[i].Mul(&d[i], &u) + } + d = fr.BatchInvert(d) + + z[0].SetOne() + var a, b, e fr.Element + e.SetOne().Add(&e, &beta) + for i := 0; i < n-1; i++ { + + a.Add(&gamma, &lf[i]) + + b.Mul(&beta, <[i+1]). + Add(&b, <[i]). + Add(&b, &c) + + a.Mul(&a, &b). + Mul(&a, &e) + + z[i+1].Mul(&z[i], &a). 
+ Mul(&z[i+1], &d[i]) + } + + return z +} + +// evaluateNumBitReversed computes the evaluation (shifted, bit reversed) of h where +// h = (x-1)*z*(1+\beta)*(\gamma+f)*(\gamma(1+\beta) + t+ \beta*t(gX)) - +// (x-1)*z(gX)*(\gamma(1+\beta) + h_{1} + \beta*h_{1}(gX))*(\gamma(1+\beta) + h_{2} + \beta*h_{2}(gX) ) +// +// * cz, ch1, ch2, ct, cf are the polynomials z, h1, h2, t, f in canonical basis +// * _lz, _lh1, _lh2, _lt, _lf are the polynomials z, h1, h2, t, f in shifted Lagrange basis (domainBig) +// * beta, gamma are the challenges +// * it returns h in canonical basis +func evaluateNumBitReversed(_lz, _lh1, _lh2, _lt, _lf []fr.Element, beta, gamma fr.Element, domainBig *fft.Domain) []fr.Element { + + // result + s := int(domainBig.Cardinality) + num := make([]fr.Element, domainBig.Cardinality) + + var u, onePlusBeta, GammaTimesOnePlusBeta, m, n, one fr.Element + + one.SetOne() + onePlusBeta.Add(&one, &beta) + GammaTimesOnePlusBeta.Mul(&onePlusBeta, &gamma) + + g := make([]fr.Element, s) + g[0].Set(&domainBig.FrMultiplicativeGen) + for i := 1; i < s; i++ { + g[i].Mul(&g[i-1], &domainBig.Generator) + } + + var gg fr.Element + expo := big.NewInt(int64(domainBig.Cardinality>>1 - 1)) + gg.Square(&domainBig.Generator).Exp(gg, expo) + + nn := uint64(64 - bits.TrailingZeros64(domainBig.Cardinality)) + + for i := 0; i < s; i++ { + + _i := int(bits.Reverse64(uint64(i)) >> nn) + _is := int(bits.Reverse64(uint64((i+2)%s)) >> nn) + + // m = z*(1+\beta)*(\gamma+f)*(\gamma(1+\beta) + t+ \beta*t(gX)) + m.Mul(&onePlusBeta, &_lz[_i]) + u.Add(&gamma, &_lf[_i]) + m.Mul(&m, &u) + u.Mul(&beta, &_lt[_is]). + Add(&u, &_lt[_i]). + Add(&u, &GammaTimesOnePlusBeta) + m.Mul(&m, &u) + + // n = z(gX)*(\gamma(1+\beta) + h_{1} + \beta*h_{1}(gX))*(\gamma(1+\beta) + h_{2} + \beta*h_{2}(gX) + n.Mul(&beta, &_lh1[_is]). + Add(&n, &_lh1[_i]). + Add(&n, &GammaTimesOnePlusBeta) + u.Mul(&beta, &_lh2[_is]). + Add(&u, &_lh2[_i]). + Add(&u, &GammaTimesOnePlusBeta) + n.Mul(&n, &u). 
+ Mul(&n, &_lz[_is]) + + // (x-gg**(n-1))*(m-n) + num[_i].Sub(&m, &n) + u.Sub(&g[i], &gg) + num[_i].Mul(&num[_i], &u) + + } + + return num +} + +// evaluateXnMinusOneDomainBig returns the evaluation of (x^{n}-1) on FrMultiplicativeGen*< g > +func evaluateXnMinusOneDomainBig(domainBig *fft.Domain) [2]fr.Element { + + sizeDomainSmall := domainBig.Cardinality / 2 + + var one fr.Element + one.SetOne() + + // x^{n}-1 on FrMultiplicativeGen*< g > + var res [2]fr.Element + var shift fr.Element + shift.Exp(domainBig.FrMultiplicativeGen, big.NewInt(int64(sizeDomainSmall))) + res[0].Sub(&shift, &one) + res[1].Add(&shift, &one).Neg(&res[1]) + + return res + +} + +// evaluateL0DomainBig returns the evaluation of (x^{n}-1)/(x-1) on +// x^{n}-1 on FrMultiplicativeGen*< g > +func evaluateL0DomainBig(domainBig *fft.Domain) ([2]fr.Element, []fr.Element) { + + var one fr.Element + one.SetOne() + + // x^{n}-1 on FrMultiplicativeGen*< g > + xnMinusOne := evaluateXnMinusOneDomainBig(domainBig) + + // 1/(x-1) on FrMultiplicativeGen*< g > + var acc fr.Element + denL0 := make([]fr.Element, domainBig.Cardinality) + acc.Set(&domainBig.FrMultiplicativeGen) + for i := 0; i < int(domainBig.Cardinality); i++ { + denL0[i].Sub(&acc, &one) + acc.Mul(&acc, &domainBig.Generator) + } + denL0 = fr.BatchInvert(denL0) + + return xnMinusOne, denL0 +} + +// evaluationLnDomainBig returns the evaluation of (x^{n}-1)/(x-g^{n-1}) on +// x^{n}-1 on FrMultiplicativeGen*< g > +func evaluationLnDomainBig(domainBig *fft.Domain) ([2]fr.Element, []fr.Element) { + + sizeDomainSmall := domainBig.Cardinality / 2 + + var one fr.Element + one.SetOne() + + // x^{n}-1 on FrMultiplicativeGen*< g > + numLn := evaluateXnMinusOneDomainBig(domainBig) + + // 1/(x-g^{n-1}) on FrMultiplicativeGen*< g > + var gg, acc fr.Element + gg.Square(&domainBig.Generator).Exp(gg, big.NewInt(int64(sizeDomainSmall-1))) + denLn := make([]fr.Element, domainBig.Cardinality) + acc.Set(&domainBig.FrMultiplicativeGen) + for i := 0; i < 
int(domainBig.Cardinality); i++ { + denLn[i].Sub(&acc, &gg) + acc.Mul(&acc, &domainBig.Generator) + } + denLn = fr.BatchInvert(denLn) + + return numLn, denLn + +} + +// evaluateZStartsByOneBitReversed returns l0 * (z-1), in Lagrange basis and bit reversed order +func evaluateZStartsByOneBitReversed(lsZBitReversed []fr.Element, domainBig *fft.Domain) []fr.Element { + + var one fr.Element + one.SetOne() + + res := make([]fr.Element, domainBig.Cardinality) + + nn := uint64(64 - bits.TrailingZeros64(domainBig.Cardinality)) + + xnMinusOne, denL0 := evaluateL0DomainBig(domainBig) + + for i := 0; i < len(lsZBitReversed); i++ { + _i := int(bits.Reverse64(uint64(i)) >> nn) + res[_i].Sub(&lsZBitReversed[_i], &one). + Mul(&res[_i], &xnMinusOne[i%2]). + Mul(&res[_i], &denL0[i]) + } + + return res +} + +// evaluateZEndsByOneBitReversed returns ln * (z-1), in Lagrange basis and bit reversed order +func evaluateZEndsByOneBitReversed(lsZBitReversed []fr.Element, domainBig *fft.Domain) []fr.Element { + + var one fr.Element + one.SetOne() + + numLn, denLn := evaluationLnDomainBig(domainBig) + + res := make([]fr.Element, len(lsZBitReversed)) + nn := uint64(64 - bits.TrailingZeros64(domainBig.Cardinality)) + + for i := 0; i < len(lsZBitReversed); i++ { + _i := int(bits.Reverse64(uint64(i)) >> nn) + res[_i].Sub(&lsZBitReversed[_i], &one). + Mul(&res[_i], &numLn[i%2]). 
+ Mul(&res[_i], &denLn[i]) + } + + return res +} + +// evaluateOverlapH1h2BitReversed returns ln * (h1 - h2(g.x)), in Lagrange basis and bit reversed order +func evaluateOverlapH1h2BitReversed(_lh1, _lh2 []fr.Element, domainBig *fft.Domain) []fr.Element { + + var one fr.Element + one.SetOne() + + numLn, denLn := evaluationLnDomainBig(domainBig) + + res := make([]fr.Element, len(_lh1)) + nn := uint64(64 - bits.TrailingZeros64(domainBig.Cardinality)) + + s := len(_lh1) + for i := 0; i < s; i++ { + + _i := int(bits.Reverse64(uint64(i)) >> nn) + _is := int(bits.Reverse64(uint64((i+2)%s)) >> nn) + + res[_i].Sub(&_lh1[_i], &_lh2[_is]). + Mul(&res[_i], &numLn[i%2]). + Mul(&res[_i], &denLn[i]) + } + + return res +} + +// computeQuotientCanonical computes the full quotient of the plookup protocol. +// * alpha is the challenge to fold the numerator +// * lh, lh0, lhn, lh1h2 are the various pieces of the numerator (Lagrange shifted form, bit reversed order) +// * domainBig fft domain +// It returns the quotient, in canonical basis +func computeQuotientCanonical(alpha fr.Element, lh, lh0, lhn, lh1h2 []fr.Element, domainBig *fft.Domain) []fr.Element { + + sizeDomainBig := int(domainBig.Cardinality) + res := make([]fr.Element, sizeDomainBig) + + var one fr.Element + one.SetOne() + + numLn := evaluateXnMinusOneDomainBig(domainBig) + numLn[0].Inverse(&numLn[0]) + numLn[1].Inverse(&numLn[1]) + nn := uint64(64 - bits.TrailingZeros64(domainBig.Cardinality)) + + for i := 0; i < sizeDomainBig; i++ { + + _i := int(bits.Reverse64(uint64(i)) >> nn) + + res[_i].Mul(&lh1h2[_i], &alpha). + Add(&res[_i], &lhn[_i]). + Mul(&res[_i], &alpha). + Add(&res[_i], &lh0[_i]). + Mul(&res[_i], &alpha). + Add(&res[_i], &lh[_i]). + Mul(&res[_i], &numLn[i%2]) + } + + domainBig.FFTInverse(res, fft.DIT, true) + + return res +} + +// ProveLookupVector returns proof that the values in f are in t. 
+// +// /!\IMPORTANT/!\ +// +// If the table t is already commited somewhere (which is the normal workflow +// before generating a lookup proof), the commitment needs to be done on the +// table sorted. Otherwise the commitment in proof.t will not be the same as +// the public commitment: it will contain the same values, but permuted. +// +func ProveLookupVector(srs *kzg.SRS, f, t Table) (ProofLookupVector, error) { + + // res + var proof ProofLookupVector + var err error + + // hash function used for Fiat Shamir + hFunc := sha256.New() + + // transcript to derive the challenge + fs := fiatshamir.NewTranscript(hFunc, "beta", "gamma", "alpha", "nu") + + // create domains + var domainSmall *fft.Domain + if len(t) <= len(f) { + domainSmall = fft.NewDomain(uint64(len(f) + 1)) + } else { + domainSmall = fft.NewDomain(uint64(len(t))) + } + sizeDomainSmall := int(domainSmall.Cardinality) + + // set the size + proof.size = domainSmall.Cardinality + + // set the generator + proof.g.Set(&domainSmall.Generator) + + // resize f and t + // note: the last element of lf does not matter + lf := make([]fr.Element, sizeDomainSmall) + lt := make([]fr.Element, sizeDomainSmall) + cf := make([]fr.Element, sizeDomainSmall) + ct := make([]fr.Element, sizeDomainSmall) + copy(lt, t) + copy(lf, f) + for i := len(f); i < sizeDomainSmall; i++ { + lf[i] = f[len(f)-1] + } + for i := len(t); i < sizeDomainSmall; i++ { + lt[i] = t[len(t)-1] + } + sort.Sort(Table(lt)) + copy(ct, lt) + copy(cf, lf) + domainSmall.FFTInverse(ct, fft.DIF) + domainSmall.FFTInverse(cf, fft.DIF) + fft.BitReverse(ct) + fft.BitReverse(cf) + proof.t, err = kzg.Commit(ct, srs) + if err != nil { + return proof, err + } + proof.f, err = kzg.Commit(cf, srs) + if err != nil { + return proof, err + } + + // write f sorted by t + lfSortedByt := make(Table, 2*domainSmall.Cardinality-1) + copy(lfSortedByt, lt) + copy(lfSortedByt[domainSmall.Cardinality:], lf) + sort.Sort(lfSortedByt) + + // compute h1, h2, commit to them + lh1 := 
make([]fr.Element, sizeDomainSmall) + lh2 := make([]fr.Element, sizeDomainSmall) + ch1 := make([]fr.Element, sizeDomainSmall) + ch2 := make([]fr.Element, sizeDomainSmall) + copy(lh1, lfSortedByt[:sizeDomainSmall]) + copy(lh2, lfSortedByt[sizeDomainSmall-1:]) + + copy(ch1, lfSortedByt[:sizeDomainSmall]) + copy(ch2, lfSortedByt[sizeDomainSmall-1:]) + domainSmall.FFTInverse(ch1, fft.DIF) + domainSmall.FFTInverse(ch2, fft.DIF) + fft.BitReverse(ch1) + fft.BitReverse(ch2) + + proof.h1, err = kzg.Commit(ch1, srs) + if err != nil { + return proof, err + } + proof.h2, err = kzg.Commit(ch2, srs) + if err != nil { + return proof, err + } + + // derive beta, gamma + beta, err := deriveRandomness(&fs, "beta", &proof.t, &proof.f, &proof.h1, &proof.h2) + if err != nil { + return proof, err + } + gamma, err := deriveRandomness(&fs, "gamma") + if err != nil { + return proof, err + } + + // Compute to Z + lz := evaluateAccumulationPolynomial(lf, lt, lh1, lh2, beta, gamma) + cz := make([]fr.Element, len(lz)) + copy(cz, lz) + domainSmall.FFTInverse(cz, fft.DIF) + fft.BitReverse(cz) + proof.z, err = kzg.Commit(cz, srs) + if err != nil { + return proof, err + } + + // prepare data for computing the quotient + // compute the numerator + s := domainSmall.Cardinality + domainBig := fft.NewDomain(uint64(2 * s)) + + _lz := make([]fr.Element, 2*s) + _lh1 := make([]fr.Element, 2*s) + _lh2 := make([]fr.Element, 2*s) + _lt := make([]fr.Element, 2*s) + _lf := make([]fr.Element, 2*s) + copy(_lz, cz) + copy(_lh1, ch1) + copy(_lh2, ch2) + copy(_lt, ct) + copy(_lf, cf) + domainBig.FFT(_lz, fft.DIF, true) + domainBig.FFT(_lh1, fft.DIF, true) + domainBig.FFT(_lh2, fft.DIF, true) + domainBig.FFT(_lt, fft.DIF, true) + domainBig.FFT(_lf, fft.DIF, true) + + // compute h + lh := evaluateNumBitReversed(_lz, _lh1, _lh2, _lt, _lf, beta, gamma, domainBig) + + // compute l0*(z-1) + lh0 := evaluateZStartsByOneBitReversed(_lz, domainBig) + + // compute ln(z-1) + lhn := evaluateZEndsByOneBitReversed(_lz, domainBig) 
+ + // compute ln*(h1-h2(g*X)) + lh1h2 := evaluateOverlapH1h2BitReversed(_lh1, _lh2, domainBig) + + // compute the quotient + alpha, err := deriveRandomness(&fs, "alpha", &proof.z) + if err != nil { + return proof, err + } + ch := computeQuotientCanonical(alpha, lh, lh0, lhn, lh1h2, domainBig) + proof.h, err = kzg.Commit(ch, srs) + if err != nil { + return proof, err + } + + // build the opening proofs + nu, err := deriveRandomness(&fs, "nu", &proof.h) + if err != nil { + return proof, err + } + proof.BatchedProof, err = kzg.BatchOpenSinglePoint( + [][]fr.Element{ + ch1, + ch2, + ct, + cz, + cf, + ch, + }, + []kzg.Digest{ + proof.h1, + proof.h2, + proof.t, + proof.z, + proof.f, + proof.h, + }, + nu, + hFunc, + srs, + ) + if err != nil { + return proof, err + } + + nu.Mul(&nu, &domainSmall.Generator) + proof.BatchedProofShifted, err = kzg.BatchOpenSinglePoint( + [][]fr.Element{ + ch1, + ch2, + ct, + cz, + }, + []kzg.Digest{ + proof.h1, + proof.h2, + proof.t, + proof.z, + }, + nu, + hFunc, + srs, + ) + if err != nil { + return proof, err + } + + return proof, nil +} + +// VerifyLookupVector verifies that a ProofLookupVector proof is correct +func VerifyLookupVector(srs *kzg.SRS, proof ProofLookupVector) error { + + // hash function that is used for Fiat Shamir + hFunc := sha256.New() + + // transcript to derive the challenge + fs := fiatshamir.NewTranscript(hFunc, "beta", "gamma", "alpha", "nu") + + // derive the various challenges + beta, err := deriveRandomness(&fs, "beta", &proof.t, &proof.f, &proof.h1, &proof.h2) + if err != nil { + return err + } + + gamma, err := deriveRandomness(&fs, "gamma") + if err != nil { + return err + } + + alpha, err := deriveRandomness(&fs, "alpha", &proof.z) + if err != nil { + return err + } + + nu, err := deriveRandomness(&fs, "nu", &proof.h) + if err != nil { + return err + } + + // check opening proofs + err = kzg.BatchVerifySinglePoint( + []kzg.Digest{ + proof.h1, + proof.h2, + proof.t, + proof.z, + proof.f, + proof.h, + }, + 
&proof.BatchedProof, + nu, + hFunc, + srs, + ) + if err != nil { + return err + } + + // shift the point and verify shifted proof + var shiftedNu fr.Element + shiftedNu.Mul(&nu, &proof.g) + err = kzg.BatchVerifySinglePoint( + []kzg.Digest{ + proof.h1, + proof.h2, + proof.t, + proof.z, + }, + &proof.BatchedProofShifted, + shiftedNu, + hFunc, + srs, + ) + if err != nil { + return err + } + + // check the generator is correct + var checkOrder, one fr.Element + one.SetOne() + checkOrder.Exp(proof.g, big.NewInt(int64(proof.size/2))) + if checkOrder.Equal(&one) { + return ErrGenerator + } + checkOrder.Square(&checkOrder) + if !checkOrder.Equal(&one) { + return ErrGenerator + } + + // check polynomial relation using Schwartz Zippel + var lhs, rhs, nun, g, _g, a, v, w fr.Element + g.Exp(proof.g, big.NewInt(int64(proof.size-1))) + + v.Add(&one, &beta) + w.Mul(&v, &gamma) + + // h(ν) where + // h = (xⁿ⁻¹-1)*z*(1+β)*(γ+f)*(γ(1+β) + t+ β*t(gX)) - + // (xⁿ⁻¹-1)*z(gX)*(γ(1+β) + h₁ + β*h₁(gX))*(γ(1+β) + h₂ + β*h₂(gX) ) + lhs.Sub(&nu, &g). // (ν-gⁿ⁻¹) + Mul(&lhs, &proof.BatchedProof.ClaimedValues[3]). + Mul(&lhs, &v) + a.Add(&gamma, &proof.BatchedProof.ClaimedValues[4]) + lhs.Mul(&lhs, &a) + a.Mul(&beta, &proof.BatchedProofShifted.ClaimedValues[2]). + Add(&a, &proof.BatchedProof.ClaimedValues[2]). + Add(&a, &w) + lhs.Mul(&lhs, &a) + + rhs.Sub(&nu, &g). + Mul(&rhs, &proof.BatchedProofShifted.ClaimedValues[3]) + a.Mul(&beta, &proof.BatchedProofShifted.ClaimedValues[0]). + Add(&a, &proof.BatchedProof.ClaimedValues[0]). + Add(&a, &w) + rhs.Mul(&rhs, &a) + a.Mul(&beta, &proof.BatchedProofShifted.ClaimedValues[1]). + Add(&a, &proof.BatchedProof.ClaimedValues[1]). 
+ Add(&a, &w) + rhs.Mul(&rhs, &a) + + lhs.Sub(&lhs, &rhs) + + // check consistancy of bounds + var l0, ln, d1, d2 fr.Element + l0.Exp(nu, big.NewInt(int64(proof.size))).Sub(&l0, &one) + ln.Set(&l0) + d1.Sub(&nu, &one) + d2.Sub(&nu, &g) + l0.Div(&l0, &d1) // (νⁿ-1)/(ν-1) + ln.Div(&ln, &d2) // (νⁿ-1)/(ν-gⁿ⁻¹) + + // l₀*(z-1) = (νⁿ-1)/(ν-1)*(z-1) + var l0z fr.Element + l0z.Sub(&proof.BatchedProof.ClaimedValues[3], &one). + Mul(&l0z, &l0) + + // lₙ*(z-1) = (νⁿ-1)/(ν-gⁿ⁻¹)*(z-1) + var lnz fr.Element + lnz.Sub(&proof.BatchedProof.ClaimedValues[3], &one). + Mul(&ln, &lnz) + + // lₙ*(h1 - h₂(g.x)) + var lnh1h2 fr.Element + lnh1h2.Sub(&proof.BatchedProof.ClaimedValues[0], &proof.BatchedProofShifted.ClaimedValues[1]). + Mul(&lnh1h2, &ln) + + // fold the numerator + lnh1h2.Mul(&lnh1h2, &alpha). + Add(&lnh1h2, &lnz). + Mul(&lnh1h2, &alpha). + Add(&lnh1h2, &l0z). + Mul(&lnh1h2, &alpha). + Add(&lnh1h2, &lhs) + + // (xⁿ-1) * h(x) evaluated at ν + nun.Exp(nu, big.NewInt(int64(proof.size))) + _g.Sub(&nun, &one) + _g.Mul(&proof.BatchedProof.ClaimedValues[5], &_g) + if !lnh1h2.Equal(&_g) { + return ErrPlookupVerification + } + + return nil +} diff --git a/ecc/bls12-39/fr/polynomial/doc.go b/ecc/bls12-39/fr/polynomial/doc.go new file mode 100644 index 0000000000..83479b058b --- /dev/null +++ b/ecc/bls12-39/fr/polynomial/doc.go @@ -0,0 +1,18 @@ +// Copyright 2020 ConsenSys Software Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Code generated by consensys/gnark-crypto DO NOT EDIT + +// Package polynomial provides polynomial methods and commitment schemes. +package polynomial diff --git a/ecc/bls12-39/fr/polynomial/polynomial.go b/ecc/bls12-39/fr/polynomial/polynomial.go new file mode 100644 index 0000000000..d90940ede4 --- /dev/null +++ b/ecc/bls12-39/fr/polynomial/polynomial.go @@ -0,0 +1,123 @@ +// Copyright 2020 ConsenSys Software Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by consensys/gnark-crypto DO NOT EDIT + +package polynomial + +import ( + "github.com/consensys/gnark-crypto/ecc/bls12-39/fr" +) + +// Polynomial represented by its coefficients in the bls12-39 fr field. +type Polynomial []fr.Element + +// Degree returns the degree of the polynomial, which is len(p) - 1. 
+func (p *Polynomial) Degree() uint64 { + return uint64(len(*p) - 1) +} + +// Eval evaluates p at v +// returns a fr.Element +func (p *Polynomial) Eval(v *fr.Element) fr.Element { + + res := (*p)[len(*p)-1] + for i := len(*p) - 2; i >= 0; i-- { + res.Mul(&res, v) + res.Add(&res, &(*p)[i]) + } + + return res +} + +// Clone returns a copy of the polynomial +func (p *Polynomial) Clone() Polynomial { + _p := make(Polynomial, len(*p)) + copy(_p, *p) + return _p +} + +// AddConstantInPlace adds a constant to the polynomial, modifying p +func (p *Polynomial) AddConstantInPlace(c *fr.Element) { + for i := 0; i < len(*p); i++ { + (*p)[i].Add(&(*p)[i], c) + } +} + +// SubConstantInPlace subs a constant to the polynomial, modifying p +func (p *Polynomial) SubConstantInPlace(c *fr.Element) { + for i := 0; i < len(*p); i++ { + (*p)[i].Sub(&(*p)[i], c) + } +} + +// ScaleInPlace multiplies p by v, modifying p +func (p *Polynomial) ScaleInPlace(c *fr.Element) { + for i := 0; i < len(*p); i++ { + (*p)[i].Mul(&(*p)[i], c) + } +} + +// Add adds p1 to p2 +// This function allocates a new slice unless p == p1 or p == p2 +func (p *Polynomial) Add(p1, p2 Polynomial) *Polynomial { + + bigger := p1 + smaller := p2 + if len(bigger) < len(smaller) { + bigger, smaller = smaller, bigger + } + + if len(*p) == len(bigger) && (&(*p)[0] == &bigger[0]) { + for i := 0; i < len(smaller); i++ { + (*p)[i].Add(&(*p)[i], &smaller[i]) + } + return p + } + + if len(*p) == len(smaller) && (&(*p)[0] == &smaller[0]) { + for i := 0; i < len(smaller); i++ { + (*p)[i].Add(&(*p)[i], &bigger[i]) + } + *p = append(*p, bigger[len(smaller):]...) 
+ return p + } + + res := make(Polynomial, len(bigger)) + copy(res, bigger) + for i := 0; i < len(smaller); i++ { + res[i].Add(&res[i], &smaller[i]) + } + *p = res + return p +} + +// Equal checks equality between two polynomials +func (p *Polynomial) Equal(p1 Polynomial) bool { + if (*p == nil) != (p1 == nil) { + return false + } + + if len(*p) != len(p1) { + return false + } + + for i := range p1 { + if !(*p)[i].Equal(&p1[i]) { + return false + } + } + + return true +} diff --git a/ecc/bls12-39/fr/polynomial/polynomial_test.go b/ecc/bls12-39/fr/polynomial/polynomial_test.go new file mode 100644 index 0000000000..ba015ba7a0 --- /dev/null +++ b/ecc/bls12-39/fr/polynomial/polynomial_test.go @@ -0,0 +1,208 @@ +// Copyright 2020 ConsenSys Software Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by consensys/gnark-crypto DO NOT EDIT + +package polynomial + +import ( + "math/big" + "testing" + + "github.com/consensys/gnark-crypto/ecc/bls12-39/fr" +) + +func TestPolynomialEval(t *testing.T) { + + // build polynomial + f := make(Polynomial, 20) + for i := 0; i < 20; i++ { + f[i].SetOne() + } + + // random value + var point fr.Element + point.SetRandom() + + // compute manually f(val) + var expectedEval, one, den fr.Element + var expo big.Int + one.SetOne() + expo.SetUint64(20) + expectedEval.Exp(point, &expo). 
+ Sub(&expectedEval, &one) + den.Sub(&point, &one) + expectedEval.Div(&expectedEval, &den) + + // compute purported evaluation + purportedEval := f.Eval(&point) + + // check + if !purportedEval.Equal(&expectedEval) { + t.Fatal("polynomial evaluation failed") + } +} + +func TestPolynomialAddConstantInPlace(t *testing.T) { + + // build polynomial + f := make(Polynomial, 20) + for i := 0; i < 20; i++ { + f[i].SetOne() + } + + // constant to add + var c fr.Element + c.SetRandom() + + // add constant + f.AddConstantInPlace(&c) + + // check + var expectedCoeffs, one fr.Element + one.SetOne() + expectedCoeffs.Add(&one, &c) + for i := 0; i < 20; i++ { + if !f[i].Equal(&expectedCoeffs) { + t.Fatal("AddConstantInPlace failed") + } + } +} + +func TestPolynomialSubConstantInPlace(t *testing.T) { + + // build polynomial + f := make(Polynomial, 20) + for i := 0; i < 20; i++ { + f[i].SetOne() + } + + // constant to sub + var c fr.Element + c.SetRandom() + + // sub constant + f.SubConstantInPlace(&c) + + // check + var expectedCoeffs, one fr.Element + one.SetOne() + expectedCoeffs.Sub(&one, &c) + for i := 0; i < 20; i++ { + if !f[i].Equal(&expectedCoeffs) { + t.Fatal("SubConstantInPlace failed") + } + } +} + +func TestPolynomialScaleInPlace(t *testing.T) { + + // build polynomial + f := make(Polynomial, 20) + for i := 0; i < 20; i++ { + f[i].SetOne() + } + + // constant to scale by + var c fr.Element + c.SetRandom() + + // scale by constant + f.ScaleInPlace(&c) + + // check + for i := 0; i < 20; i++ { + if !f[i].Equal(&c) { + t.Fatal("ScaleInPlace failed") + } + } + +} + +func TestPolynomialAdd(t *testing.T) { + + // build unbalanced polynomials + f1 := make(Polynomial, 20) + f1Backup := make(Polynomial, 20) + for i := 0; i < 20; i++ { + f1[i].SetOne() + f1Backup[i].SetOne() + } + f2 := make(Polynomial, 10) + f2Backup := make(Polynomial, 10) + for i := 0; i < 10; i++ { + f2[i].SetOne() + f2Backup[i].SetOne() + } + + // expected result + var one, two fr.Element + one.SetOne() + 
two.Double(&one) + expectedSum := make(Polynomial, 20) + for i := 0; i < 10; i++ { + expectedSum[i].Set(&two) + } + for i := 10; i < 20; i++ { + expectedSum[i].Set(&one) + } + + // caller is empty + var g Polynomial + g.Add(f1, f2) + if !g.Equal(expectedSum) { + t.Fatal("add polynomials fails") + } + if !f1.Equal(f1Backup) { + t.Fatal("side effect, f1 should not have been modified") + } + if !f2.Equal(f2Backup) { + t.Fatal("side effect, f2 should not have been modified") + } + + // all operands are distincts + _f1 := f1.Clone() + _f1.Add(f1, f2) + if !_f1.Equal(expectedSum) { + t.Fatal("add polynomials fails") + } + if !f1.Equal(f1Backup) { + t.Fatal("side effect, f1 should not have been modified") + } + if !f2.Equal(f2Backup) { + t.Fatal("side effect, f2 should not have been modified") + } + + // first operand = caller + _f1 = f1.Clone() + _f2 := f2.Clone() + _f1.Add(_f1, _f2) + if !_f1.Equal(expectedSum) { + t.Fatal("add polynomials fails") + } + if !_f2.Equal(f2Backup) { + t.Fatal("side effect, _f2 should not have been modified") + } + + // second operand = caller + _f1 = f1.Clone() + _f2 = f2.Clone() + _f1.Add(_f2, _f1) + if !_f1.Equal(expectedSum) { + t.Fatal("add polynomials fails") + } + if !_f2.Equal(f2Backup) { + t.Fatal("side effect, _f2 should not have been modified") + } +} diff --git a/ecc/bls12-39/fuzz.go b/ecc/bls12-39/fuzz.go new file mode 100644 index 0000000000..bbc9817996 --- /dev/null +++ b/ecc/bls12-39/fuzz.go @@ -0,0 +1,76 @@ +//go:build gofuzz +// +build gofuzz + +// Copyright 2020 ConsenSys Software Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by consensys/gnark-crypto DO NOT EDIT + +package bls1239 + +import ( + "bytes" + "github.com/consensys/gnark-crypto/ecc/bls12-39/fp" + "github.com/consensys/gnark-crypto/ecc/bls12-39/fr" + "github.com/consensys/gnark-crypto/ecc/bls12-39/fr/mimc" + "math/big" +) + +const ( + fuzzInteresting = 1 + fuzzNormal = 0 + fuzzDiscard = -1 +) + +func Fuzz(data []byte) int { + // TODO separate in multiple FuzzXXX and update continuous fuzzer scripts + // else, we don't really benefits for fuzzer strategy. + fr.Fuzz(data) + fp.Fuzz(data) + mimc.Fuzz(data) + + // fuzz pairing + r := bytes.NewReader(data) + var e1, e2 fr.Element + e1.SetRawBytes(r) + e2.SetRawBytes(r) + + { + var r, r1, r2, r1r2, zero GT + var b1, b2, b1b2 big.Int + e1.ToBigIntRegular(&b1) + e2.ToBigIntRegular(&b2) + b1b2.Mul(&b1, &b2) + + var p1 G1Affine + var p2 G2Affine + + p1.ScalarMultiplication(&g1GenAff, &b1) + p2.ScalarMultiplication(&g2GenAff, &b2) + + r, _ = Pair([]G1Affine{g1GenAff}, []G2Affine{g2GenAff}) + r1, _ = Pair([]G1Affine{p1}, []G2Affine{g2GenAff}) + r2, _ = Pair([]G1Affine{g1GenAff}, []G2Affine{p2}) + + r1r2.Exp(&r, b1b2) + r1.Exp(&r1, b2) + r2.Exp(&r2, b1) + + if !(r1r2.Equal(&r1) && r1r2.Equal(&r2) && !r.Equal(&zero)) { + panic("pairing bilinearity check failed") + } + } + + return fuzzNormal +} diff --git a/ecc/bls12-39/fuzz_test.go b/ecc/bls12-39/fuzz_test.go new file mode 100644 index 0000000000..fe7b24a735 --- /dev/null +++ b/ecc/bls12-39/fuzz_test.go @@ -0,0 +1,56 @@ +//go:build gofuzz +// +build gofuzz + +// Copyright 2020 ConsenSys Software Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by consensys/gnark-crypto DO NOT EDIT + +package bls1239 + +import ( + "encoding/hex" + "io" + "math/rand" + "runtime/debug" + "testing" + "time" +) + +func TestFuzz(t *testing.T) { + const maxBytes = 1 << 10 + const testCount = 7 + var bytes [maxBytes]byte + var i int + seed := time.Now().UnixNano() + defer func() { + if r := recover(); r != nil { + t.Error(r) + t.Error(string(debug.Stack())) + t.Fatal("test panicked", i, hex.EncodeToString(bytes[:i]), "seed", seed) + } + }() + r := rand.New(rand.NewSource(seed)) + + for i = 1; i < maxBytes; i++ { + for j := 0; j < testCount; j++ { + if _, err := io.ReadFull(r, bytes[:i]); err != nil { + t.Fatal("couldn't read random bytes", err) + } + + Fuzz(bytes[:i]) + } + } + +} diff --git a/ecc/bls12-39/g1.go b/ecc/bls12-39/g1.go new file mode 100644 index 0000000000..5cb3adca7b --- /dev/null +++ b/ecc/bls12-39/g1.go @@ -0,0 +1,964 @@ +// Copyright 2020 ConsenSys Software Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by consensys/gnark-crypto DO NOT EDIT + +package bls1239 + +import ( + "math" + "math/big" + "runtime" + + "github.com/consensys/gnark-crypto/ecc" + "github.com/consensys/gnark-crypto/ecc/bls12-39/fp" + "github.com/consensys/gnark-crypto/ecc/bls12-39/fr" + "github.com/consensys/gnark-crypto/internal/parallel" +) + +// G1Affine point in affine coordinates +type G1Affine struct { + X, Y fp.Element +} + +// G1Jac is a point with fp.Element coordinates +type G1Jac struct { + X, Y, Z fp.Element +} + +// g1JacExtended parameterized jacobian coordinates (x=X/ZZ, y=Y/ZZZ, ZZ**3=ZZZ**2) +type g1JacExtended struct { + X, Y, ZZ, ZZZ fp.Element +} + +// ------------------------------------------------------------------------------------------------- +// Affine + +// Set sets p to the provided point +func (p *G1Affine) Set(a *G1Affine) *G1Affine { + p.X, p.Y = a.X, a.Y + return p +} + +// ScalarMultiplication computes and returns p = a*s +func (p *G1Affine) ScalarMultiplication(a *G1Affine, s *big.Int) *G1Affine { + var _p G1Jac + _p.FromAffine(a) + _p.mulGLV(&_p, s) + p.FromJacobian(&_p) + return p +} + +// Add adds two point in affine coordinates. +// This should rarely be used as it is very inneficient compared to Jacobian +// TODO implement affine addition formula +func (p *G1Affine) Add(a, b *G1Affine) *G1Affine { + var p1, p2 G1Jac + p1.FromAffine(a) + p2.FromAffine(b) + p1.AddAssign(&p2) + p.FromJacobian(&p1) + return p +} + +// Sub subs two point in affine coordinates. 
+// This should rarely be used as it is very inneficient compared to Jacobian +// TODO implement affine addition formula +func (p *G1Affine) Sub(a, b *G1Affine) *G1Affine { + var p1, p2 G1Jac + p1.FromAffine(a) + p2.FromAffine(b) + p1.SubAssign(&p2) + p.FromJacobian(&p1) + return p +} + +// Equal tests if two points (in Affine coordinates) are equal +func (p *G1Affine) Equal(a *G1Affine) bool { + return p.X.Equal(&a.X) && p.Y.Equal(&a.Y) +} + +// Neg computes -G +func (p *G1Affine) Neg(a *G1Affine) *G1Affine { + p.X = a.X + p.Y.Neg(&a.Y) + return p +} + +// FromJacobian rescale a point in Jacobian coord in z=1 plane +func (p *G1Affine) FromJacobian(p1 *G1Jac) *G1Affine { + + var a, b fp.Element + + if p1.Z.IsZero() { + p.X.SetZero() + p.Y.SetZero() + return p + } + + a.Inverse(&p1.Z) + b.Square(&a) + p.X.Mul(&p1.X, &b) + p.Y.Mul(&p1.Y, &b).Mul(&p.Y, &a) + + return p +} + +func (p *G1Affine) String() string { + var x, y fp.Element + x.Set(&p.X) + y.Set(&p.Y) + return "E([" + x.String() + "," + y.String() + "])," +} + +// IsInfinity checks if the point is infinity (in affine, it's encoded as (0,0)) +func (p *G1Affine) IsInfinity() bool { + return p.X.IsZero() && p.Y.IsZero() +} + +// IsOnCurve returns true if p in on the curve +func (p *G1Affine) IsOnCurve() bool { + var point G1Jac + point.FromAffine(p) + return point.IsOnCurve() // call this function to handle infinity point +} + +// IsInSubGroup returns true if p is in the correct subgroup, false otherwise +func (p *G1Affine) IsInSubGroup() bool { + var _p G1Jac + _p.FromAffine(p) + return _p.IsInSubGroup() +} + +// ------------------------------------------------------------------------------------------------- +// Jacobian + +// Set sets p to the provided point +func (p *G1Jac) Set(a *G1Jac) *G1Jac { + p.X, p.Y, p.Z = a.X, a.Y, a.Z + return p +} + +// Equal tests if two points (in Jacobian coordinates) are equal +func (p *G1Jac) Equal(a *G1Jac) bool { + + if p.Z.IsZero() && a.Z.IsZero() { + return true + } + _p 
:= G1Affine{} + _p.FromJacobian(p) + + _a := G1Affine{} + _a.FromJacobian(a) + + return _p.X.Equal(&_a.X) && _p.Y.Equal(&_a.Y) +} + +// Neg computes -G +func (p *G1Jac) Neg(a *G1Jac) *G1Jac { + *p = *a + p.Y.Neg(&a.Y) + return p +} + +// SubAssign subtracts two points on the curve +func (p *G1Jac) SubAssign(a *G1Jac) *G1Jac { + var tmp G1Jac + tmp.Set(a) + tmp.Y.Neg(&tmp.Y) + p.AddAssign(&tmp) + return p +} + +// AddAssign point addition in montgomery form +// https://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-3.html#addition-add-2007-bl +func (p *G1Jac) AddAssign(a *G1Jac) *G1Jac { + + // p is infinity, return a + if p.Z.IsZero() { + p.Set(a) + return p + } + + // a is infinity, return p + if a.Z.IsZero() { + return p + } + + var Z1Z1, Z2Z2, U1, U2, S1, S2, H, I, J, r, V fp.Element + Z1Z1.Square(&a.Z) + Z2Z2.Square(&p.Z) + U1.Mul(&a.X, &Z2Z2) + U2.Mul(&p.X, &Z1Z1) + S1.Mul(&a.Y, &p.Z). + Mul(&S1, &Z2Z2) + S2.Mul(&p.Y, &a.Z). + Mul(&S2, &Z1Z1) + + // if p == a, we double instead + if U1.Equal(&U2) && S1.Equal(&S2) { + return p.DoubleAssign() + } + + H.Sub(&U2, &U1) + I.Double(&H). + Square(&I) + J.Mul(&H, &I) + r.Sub(&S2, &S1).Double(&r) + V.Mul(&U1, &I) + p.X.Square(&r). + Sub(&p.X, &J). + Sub(&p.X, &V). + Sub(&p.X, &V) + p.Y.Sub(&V, &p.X). + Mul(&p.Y, &r) + S1.Mul(&S1, &J).Double(&S1) + p.Y.Sub(&p.Y, &S1) + p.Z.Add(&p.Z, &a.Z) + p.Z.Square(&p.Z). + Sub(&p.Z, &Z1Z1). + Sub(&p.Z, &Z2Z2). + Mul(&p.Z, &H) + + return p +} + +// AddMixed point addition +// http://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#addition-madd-2007-bl +func (p *G1Jac) AddMixed(a *G1Affine) *G1Jac { + + //if a is infinity return p + if a.X.IsZero() && a.Y.IsZero() { + return p + } + // p is infinity, return a + if p.Z.IsZero() { + p.X = a.X + p.Y = a.Y + p.Z.SetOne() + return p + } + + var Z1Z1, U2, S2, H, HH, I, J, r, V fp.Element + Z1Z1.Square(&p.Z) + U2.Mul(&a.X, &Z1Z1) + S2.Mul(&a.Y, &p.Z). 
+ Mul(&S2, &Z1Z1) + + // if p == a, we double instead + if U2.Equal(&p.X) && S2.Equal(&p.Y) { + return p.DoubleAssign() + } + + H.Sub(&U2, &p.X) + HH.Square(&H) + I.Double(&HH).Double(&I) + J.Mul(&H, &I) + r.Sub(&S2, &p.Y).Double(&r) + V.Mul(&p.X, &I) + p.X.Square(&r). + Sub(&p.X, &J). + Sub(&p.X, &V). + Sub(&p.X, &V) + J.Mul(&J, &p.Y).Double(&J) + p.Y.Sub(&V, &p.X). + Mul(&p.Y, &r) + p.Y.Sub(&p.Y, &J) + p.Z.Add(&p.Z, &H) + p.Z.Square(&p.Z). + Sub(&p.Z, &Z1Z1). + Sub(&p.Z, &HH) + + return p +} + +// Double doubles a point in Jacobian coordinates +// https://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-3.html#doubling-dbl-2007-bl +func (p *G1Jac) Double(q *G1Jac) *G1Jac { + p.Set(q) + p.DoubleAssign() + return p +} + +// DoubleAssign doubles a point in Jacobian coordinates +// https://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-3.html#doubling-dbl-2007-bl +func (p *G1Jac) DoubleAssign() *G1Jac { + + var XX, YY, YYYY, ZZ, S, M, T fp.Element + + XX.Square(&p.X) + YY.Square(&p.Y) + YYYY.Square(&YY) + ZZ.Square(&p.Z) + S.Add(&p.X, &YY) + S.Square(&S). + Sub(&S, &XX). + Sub(&S, &YYYY). + Double(&S) + M.Double(&XX).Add(&M, &XX) + p.Z.Add(&p.Z, &p.Y). + Square(&p.Z). + Sub(&p.Z, &YY). + Sub(&p.Z, &ZZ) + T.Square(&M) + p.X = T + T.Double(&S) + p.X.Sub(&p.X, &T) + p.Y.Sub(&S, &p.X). 
+ Mul(&p.Y, &M) + YYYY.Double(&YYYY).Double(&YYYY).Double(&YYYY) + p.Y.Sub(&p.Y, &YYYY) + + return p +} + +// ScalarMultiplication computes and returns p = a*s +// see https://www.iacr.org/archive/crypto2001/21390189.pdf +func (p *G1Jac) ScalarMultiplication(a *G1Jac, s *big.Int) *G1Jac { + return p.mulGLV(a, s) +} + +func (p *G1Jac) String() string { + if p.Z.IsZero() { + return "O" + } + _p := G1Affine{} + _p.FromJacobian(p) + return "E([" + _p.X.String() + "," + _p.Y.String() + "])," +} + +// FromAffine sets p = Q, p in Jacboian, Q in affine +func (p *G1Jac) FromAffine(Q *G1Affine) *G1Jac { + if Q.X.IsZero() && Q.Y.IsZero() { + p.Z.SetZero() + p.X.SetOne() + p.Y.SetOne() + return p + } + p.Z.SetOne() + p.X.Set(&Q.X) + p.Y.Set(&Q.Y) + return p +} + +// IsOnCurve returns true if p in on the curve +func (p *G1Jac) IsOnCurve() bool { + var left, right, tmp fp.Element + left.Square(&p.Y) + right.Square(&p.X).Mul(&right, &p.X) + tmp.Square(&p.Z). + Square(&tmp). + Mul(&tmp, &p.Z). + Mul(&tmp, &p.Z). + Mul(&tmp, &bCurveCoeff) + right.Add(&right, &tmp) + return left.Equal(&right) +} + +// IsInSubGroup returns true if p is on the r-torsion, false otherwise. +// Z[r,0]+Z[-lambdaG1Affine, 1] is the kernel +// of (u,v)->u+lambdaG1Affinev mod r. Expressing r, lambdaG1Affine as +// polynomials in x, a short vector of this Zmodule is +// 1, x**2. So we check that p+x**2*phi(p) +// is the infinity. +func (p *G1Jac) IsInSubGroup() bool { + + var res G1Jac + res.phi(p). + ScalarMultiplication(&res, &xGen). + ScalarMultiplication(&res, &xGen). 
+ AddAssign(p) + + return res.IsOnCurve() && res.Z.IsZero() + +} + +// mulWindowed 2-bits windowed exponentiation +func (p *G1Jac) mulWindowed(a *G1Jac, s *big.Int) *G1Jac { + + var res G1Jac + var ops [3]G1Jac + + res.Set(&g1Infinity) + ops[0].Set(a) + ops[1].Double(&ops[0]) + ops[2].Set(&ops[0]).AddAssign(&ops[1]) + + b := s.Bytes() + for i := range b { + w := b[i] + mask := byte(0xc0) + for j := 0; j < 4; j++ { + res.DoubleAssign().DoubleAssign() + c := (w & mask) >> (6 - 2*j) + if c != 0 { + res.AddAssign(&ops[c-1]) + } + mask = mask >> 2 + } + } + p.Set(&res) + + return p + +} + +// phi assigns p to phi(a) where phi: (x,y)->(ux,y), and returns p +func (p *G1Jac) phi(a *G1Jac) *G1Jac { + p.Set(a) + p.X.Mul(&p.X, &thirdRootOneG1) + return p +} + +// mulGLV performs scalar multiplication using GLV +// see https://www.iacr.org/archive/crypto2001/21390189.pdf +func (p *G1Jac) mulGLV(a *G1Jac, s *big.Int) *G1Jac { + + var table [15]G1Jac + var res G1Jac + var k1, k2 fr.Element + + res.Set(&g1Infinity) + + // table[b3b2b1b0-1] = b3b2*phi(a) + b1b0*a + table[0].Set(a) + table[3].phi(a) + + // split the scalar, modifies +-a, phi(a) accordingly + k := ecc.SplitScalar(s, &glvBasis) + + if k[0].Sign() == -1 { + k[0].Neg(&k[0]) + table[0].Neg(&table[0]) + } + if k[1].Sign() == -1 { + k[1].Neg(&k[1]) + table[3].Neg(&table[3]) + } + + // precompute table (2 bits sliding window) + // table[b3b2b1b0-1] = b3b2*phi(a) + b1b0*a if b3b2b1b0 != 0 + table[1].Double(&table[0]) + table[2].Set(&table[1]).AddAssign(&table[0]) + table[4].Set(&table[3]).AddAssign(&table[0]) + table[5].Set(&table[3]).AddAssign(&table[1]) + table[6].Set(&table[3]).AddAssign(&table[2]) + table[7].Double(&table[3]) + table[8].Set(&table[7]).AddAssign(&table[0]) + table[9].Set(&table[7]).AddAssign(&table[1]) + table[10].Set(&table[7]).AddAssign(&table[2]) + table[11].Set(&table[7]).AddAssign(&table[3]) + table[12].Set(&table[11]).AddAssign(&table[0]) + table[13].Set(&table[11]).AddAssign(&table[1]) + 
table[14].Set(&table[11]).AddAssign(&table[2]) + + // bounds on the lattice base vectors guarantee that k1, k2 are len(r)/2 bits long max + k1.SetBigInt(&k[0]).FromMont() + k2.SetBigInt(&k[1]).FromMont() + + // loop starts from len(k1)/2 due to the bounds + for i := int(math.Ceil(fr.Limbs/2. - 1)); i >= 0; i-- { + mask := uint64(3) << 62 + for j := 0; j < 32; j++ { + res.Double(&res).Double(&res) + b1 := (k1[i] & mask) >> (62 - 2*j) + b2 := (k2[i] & mask) >> (62 - 2*j) + if b1|b2 != 0 { + s := (b2<<2 | b1) + res.AddAssign(&table[s-1]) + } + mask = mask >> 2 + } + } + + p.Set(&res) + return p +} + +// ClearCofactor maps a point in curve to r-torsion +func (p *G1Affine) ClearCofactor(a *G1Affine) *G1Affine { + var _p G1Jac + _p.FromAffine(a) + _p.ClearCofactor(&_p) + p.FromJacobian(&_p) + return p +} + +// ClearCofactor maps a point in E(Fp) to E(Fp)[r] +func (p *G1Jac) ClearCofactor(a *G1Jac) *G1Jac { + // cf https://eprint.iacr.org/2019/403.pdf, 5 + var res G1Jac + res.ScalarMultiplication(a, &xGen).Neg(&res).AddAssign(a) + p.Set(&res) + return p + +} + +// ------------------------------------------------------------------------------------------------- +// Jacobian extended + +// Set sets p to the provided point +func (p *g1JacExtended) Set(a *g1JacExtended) *g1JacExtended { + p.X, p.Y, p.ZZ, p.ZZZ = a.X, a.Y, a.ZZ, a.ZZZ + return p +} + +// setInfinity sets p to O +func (p *g1JacExtended) setInfinity() *g1JacExtended { + p.X.SetOne() + p.Y.SetOne() + p.ZZ = fp.Element{} + p.ZZZ = fp.Element{} + return p +} + +// fromJacExtended sets Q in affine coords +func (p *G1Affine) fromJacExtended(Q *g1JacExtended) *G1Affine { + if Q.ZZ.IsZero() { + p.X = fp.Element{} + p.Y = fp.Element{} + return p + } + p.X.Inverse(&Q.ZZ).Mul(&p.X, &Q.X) + p.Y.Inverse(&Q.ZZZ).Mul(&p.Y, &Q.Y) + return p +} + +// fromJacExtended sets Q in Jacobian coords +func (p *G1Jac) fromJacExtended(Q *g1JacExtended) *G1Jac { + if Q.ZZ.IsZero() { + p.Set(&g1Infinity) + return p + } + p.X.Mul(&Q.ZZ, 
&Q.X).Mul(&p.X, &Q.ZZ) + p.Y.Mul(&Q.ZZZ, &Q.Y).Mul(&p.Y, &Q.ZZZ) + p.Z.Set(&Q.ZZZ) + return p +} + +// unsafeFromJacExtended sets p in jacobian coords, but don't check for infinity +func (p *G1Jac) unsafeFromJacExtended(Q *g1JacExtended) *G1Jac { + p.X.Square(&Q.ZZ).Mul(&p.X, &Q.X) + p.Y.Square(&Q.ZZZ).Mul(&p.Y, &Q.Y) + p.Z = Q.ZZZ + return p +} + +// add point in ZZ coords +// https://www.hyperelliptic.org/EFD/g1p/auto-shortw-xyzz.html#addition-add-2008-s +func (p *g1JacExtended) add(q *g1JacExtended) *g1JacExtended { + //if q is infinity return p + if q.ZZ.IsZero() { + return p + } + // p is infinity, return q + if p.ZZ.IsZero() { + p.Set(q) + return p + } + + var A, B, X1ZZ2, X2ZZ1, Y1ZZZ2, Y2ZZZ1 fp.Element + + // p2: q, p1: p + X2ZZ1.Mul(&q.X, &p.ZZ) + X1ZZ2.Mul(&p.X, &q.ZZ) + A.Sub(&X2ZZ1, &X1ZZ2) + Y2ZZZ1.Mul(&q.Y, &p.ZZZ) + Y1ZZZ2.Mul(&p.Y, &q.ZZZ) + B.Sub(&Y2ZZZ1, &Y1ZZZ2) + + if A.IsZero() { + if B.IsZero() { + return p.double(q) + + } + p.ZZ = fp.Element{} + p.ZZZ = fp.Element{} + return p + } + + var U1, U2, S1, S2, P, R, PP, PPP, Q, V fp.Element + U1.Mul(&p.X, &q.ZZ) + U2.Mul(&q.X, &p.ZZ) + S1.Mul(&p.Y, &q.ZZZ) + S2.Mul(&q.Y, &p.ZZZ) + P.Sub(&U2, &U1) + R.Sub(&S2, &S1) + PP.Square(&P) + PPP.Mul(&P, &PP) + Q.Mul(&U1, &PP) + V.Mul(&S1, &PPP) + + p.X.Square(&R). + Sub(&p.X, &PPP). + Sub(&p.X, &Q). + Sub(&p.X, &Q) + p.Y.Sub(&Q, &p.X). + Mul(&p.Y, &R). + Sub(&p.Y, &V) + p.ZZ.Mul(&p.ZZ, &q.ZZ). + Mul(&p.ZZ, &PP) + p.ZZZ.Mul(&p.ZZZ, &q.ZZZ). + Mul(&p.ZZZ, &PPP) + + return p +} + +// double point in ZZ coords +// http://www.hyperelliptic.org/EFD/g1p/auto-shortw-xyzz.html#doubling-dbl-2008-s-1 +func (p *g1JacExtended) double(q *g1JacExtended) *g1JacExtended { + var U, V, W, S, XX, M fp.Element + + U.Double(&q.Y) + V.Square(&U) + W.Mul(&U, &V) + S.Mul(&q.X, &V) + XX.Square(&q.X) + M.Double(&XX). + Add(&M, &XX) // -> + a, but a=0 here + U.Mul(&W, &q.Y) + + p.X.Square(&M). + Sub(&p.X, &S). + Sub(&p.X, &S) + p.Y.Sub(&S, &p.X). + Mul(&p.Y, &M). 
+ Sub(&p.Y, &U) + p.ZZ.Mul(&V, &q.ZZ) + p.ZZZ.Mul(&W, &q.ZZZ) + + return p +} + +// subMixed same as addMixed, but will negate a.Y +// http://www.hyperelliptic.org/EFD/g1p/auto-shortw-xyzz.html#addition-madd-2008-s +func (p *g1JacExtended) subMixed(a *G1Affine) *g1JacExtended { + + //if a is infinity return p + if a.X.IsZero() && a.Y.IsZero() { + return p + } + // p is infinity, return a + if p.ZZ.IsZero() { + p.X = a.X + p.Y.Neg(&a.Y) + p.ZZ.SetOne() + p.ZZZ.SetOne() + return p + } + + var P, R fp.Element + + // p2: a, p1: p + P.Mul(&a.X, &p.ZZ) + P.Sub(&P, &p.X) + + R.Mul(&a.Y, &p.ZZZ) + R.Neg(&R) + R.Sub(&R, &p.Y) + + if P.IsZero() { + if R.IsZero() { + return p.doubleNegMixed(a) + + } + p.ZZ = fp.Element{} + p.ZZZ = fp.Element{} + return p + } + + var PP, PPP, Q, Q2, RR, X3, Y3 fp.Element + + PP.Square(&P) + PPP.Mul(&P, &PP) + Q.Mul(&p.X, &PP) + RR.Square(&R) + X3.Sub(&RR, &PPP) + Q2.Double(&Q) + p.X.Sub(&X3, &Q2) + Y3.Sub(&Q, &p.X).Mul(&Y3, &R) + R.Mul(&p.Y, &PPP) + p.Y.Sub(&Y3, &R) + p.ZZ.Mul(&p.ZZ, &PP) + p.ZZZ.Mul(&p.ZZZ, &PPP) + + return p + +} + +// addMixed +// http://www.hyperelliptic.org/EFD/g1p/auto-shortw-xyzz.html#addition-madd-2008-s +func (p *g1JacExtended) addMixed(a *G1Affine) *g1JacExtended { + + //if a is infinity return p + if a.X.IsZero() && a.Y.IsZero() { + return p + } + // p is infinity, return a + if p.ZZ.IsZero() { + p.X = a.X + p.Y = a.Y + p.ZZ.SetOne() + p.ZZZ.SetOne() + return p + } + + var P, R fp.Element + + // p2: a, p1: p + P.Mul(&a.X, &p.ZZ) + P.Sub(&P, &p.X) + + R.Mul(&a.Y, &p.ZZZ) + R.Sub(&R, &p.Y) + + if P.IsZero() { + if R.IsZero() { + return p.doubleMixed(a) + + } + p.ZZ = fp.Element{} + p.ZZZ = fp.Element{} + return p + } + + var PP, PPP, Q, Q2, RR, X3, Y3 fp.Element + + PP.Square(&P) + PPP.Mul(&P, &PP) + Q.Mul(&p.X, &PP) + RR.Square(&R) + X3.Sub(&RR, &PPP) + Q2.Double(&Q) + p.X.Sub(&X3, &Q2) + Y3.Sub(&Q, &p.X).Mul(&Y3, &R) + R.Mul(&p.Y, &PPP) + p.Y.Sub(&Y3, &R) + p.ZZ.Mul(&p.ZZ, &PP) + p.ZZZ.Mul(&p.ZZZ, &PPP) + + return p 
+ +} + +// doubleNegMixed same as double, but will negate q.Y +func (p *g1JacExtended) doubleNegMixed(q *G1Affine) *g1JacExtended { + + var U, V, W, S, XX, M, S2, L fp.Element + + U.Double(&q.Y) + U.Neg(&U) + V.Square(&U) + W.Mul(&U, &V) + S.Mul(&q.X, &V) + XX.Square(&q.X) + M.Double(&XX). + Add(&M, &XX) // -> + a, but a=0 here + S2.Double(&S) + L.Mul(&W, &q.Y) + + p.X.Square(&M). + Sub(&p.X, &S2) + p.Y.Sub(&S, &p.X). + Mul(&p.Y, &M). + Add(&p.Y, &L) + p.ZZ.Set(&V) + p.ZZZ.Set(&W) + + return p +} + +// doubleMixed point in ZZ coords +// http://www.hyperelliptic.org/EFD/g1p/auto-shortw-xyzz.html#doubling-dbl-2008-s-1 +func (p *g1JacExtended) doubleMixed(q *G1Affine) *g1JacExtended { + + var U, V, W, S, XX, M, S2, L fp.Element + + U.Double(&q.Y) + V.Square(&U) + W.Mul(&U, &V) + S.Mul(&q.X, &V) + XX.Square(&q.X) + M.Double(&XX). + Add(&M, &XX) // -> + a, but a=0 here + S2.Double(&S) + L.Mul(&W, &q.Y) + + p.X.Square(&M). + Sub(&p.X, &S2) + p.Y.Sub(&S, &p.X). + Mul(&p.Y, &M). + Sub(&p.Y, &L) + p.ZZ.Set(&V) + p.ZZZ.Set(&W) + + return p +} + +// BatchJacobianToAffineG1 converts points in Jacobian coordinates to Affine coordinates +// performing a single field inversion (Montgomery batch inversion trick) +// result must be allocated with len(result) == len(points) +func BatchJacobianToAffineG1(points []G1Jac, result []G1Affine) { + zeroes := make([]bool, len(points)) + accumulator := fp.One() + + // batch invert all points[].Z coordinates with Montgomery batch inversion trick + // (stores points[].Z^-1 in result[i].X to avoid allocating a slice of fr.Elements) + for i := 0; i < len(points); i++ { + if points[i].Z.IsZero() { + zeroes[i] = true + continue + } + result[i].X = accumulator + accumulator.Mul(&accumulator, &points[i].Z) + } + + var accInverse fp.Element + accInverse.Inverse(&accumulator) + + for i := len(points) - 1; i >= 0; i-- { + if zeroes[i] { + // do nothing, X and Y are zeroes in affine. 
+ continue + } + result[i].X.Mul(&result[i].X, &accInverse) + accInverse.Mul(&accInverse, &points[i].Z) + } + + // batch convert to affine. + parallel.Execute(len(points), func(start, end int) { + for i := start; i < end; i++ { + if zeroes[i] { + // do nothing, X and Y are zeroes in affine. + continue + } + var a, b fp.Element + a = result[i].X + b.Square(&a) + result[i].X.Mul(&points[i].X, &b) + result[i].Y.Mul(&points[i].Y, &b). + Mul(&result[i].Y, &a) + } + }) + +} + +// BatchScalarMultiplicationG1 multiplies the same base (generator) by all scalars +// and return resulting points in affine coordinates +// uses a simple windowed-NAF like exponentiation algorithm +func BatchScalarMultiplicationG1(base *G1Affine, scalars []fr.Element) []G1Affine { + + // approximate cost in group ops is + // cost = 2^{c-1} + n(scalar.nbBits+nbChunks) + + nbPoints := uint64(len(scalars)) + min := ^uint64(0) + bestC := 0 + for c := 2; c < 18; c++ { + cost := uint64(1 << (c - 1)) + nbChunks := uint64(fr.Limbs * 64 / c) + if (fr.Limbs*64)%c != 0 { + nbChunks++ + } + cost += nbPoints * ((fr.Limbs * 64) + nbChunks) + if cost < min { + min = cost + bestC = c + } + } + c := uint64(bestC) // window size + nbChunks := int(fr.Limbs * 64 / c) + if (fr.Limbs*64)%c != 0 { + nbChunks++ + } + mask := uint64((1 << c) - 1) // low c bits are 1 + msbWindow := uint64(1 << (c - 1)) + + // precompute all powers of base for our window + // note here that if performance is critical, we can implement as in the msmX methods + // this allocation to be on the stack + baseTable := make([]G1Jac, (1 << (c - 1))) + baseTable[0].Set(&g1Infinity) + baseTable[0].AddMixed(base) + for i := 1; i < len(baseTable); i++ { + baseTable[i] = baseTable[i-1] + baseTable[i].AddMixed(base) + } + + pScalars, _ := partitionScalars(scalars, c, false, runtime.NumCPU()) + + // compute offset and word selector / shift to select the right bits of our windows + selectors := make([]selector, nbChunks) + for chunk := 0; chunk < nbChunks; 
chunk++ { + jc := uint64(uint64(chunk) * c) + d := selector{} + d.index = jc / 64 + d.shift = jc - (d.index * 64) + d.mask = mask << d.shift + d.multiWordSelect = (64%c) != 0 && d.shift > (64-c) && d.index < (fr.Limbs-1) + if d.multiWordSelect { + nbBitsHigh := d.shift - uint64(64-c) + d.maskHigh = (1 << nbBitsHigh) - 1 + d.shiftHigh = (c - nbBitsHigh) + } + selectors[chunk] = d + } + // convert our base exp table into affine to use AddMixed + baseTableAff := make([]G1Affine, (1 << (c - 1))) + BatchJacobianToAffineG1(baseTable, baseTableAff) + toReturn := make([]G1Jac, len(scalars)) + + // for each digit, take value in the base table, double it c time, voila. + parallel.Execute(len(pScalars), func(start, end int) { + var p G1Jac + for i := start; i < end; i++ { + p.Set(&g1Infinity) + for chunk := nbChunks - 1; chunk >= 0; chunk-- { + s := selectors[chunk] + if chunk != nbChunks-1 { + for j := uint64(0); j < c; j++ { + p.DoubleAssign() + } + } + + bits := (pScalars[i][s.index] & s.mask) >> s.shift + if s.multiWordSelect { + bits += (pScalars[i][s.index+1] & s.maskHigh) << s.shiftHigh + } + + if bits == 0 { + continue + } + + // if msbWindow bit is set, we need to substract + if bits&msbWindow == 0 { + // add + p.AddMixed(&baseTableAff[bits-1]) + } else { + // sub + t := baseTableAff[bits & ^msbWindow] + t.Neg(&t) + p.AddMixed(&t) + } + } + + // set our result point + toReturn[i] = p + + } + }) + toReturnAff := make([]G1Affine, len(scalars)) + BatchJacobianToAffineG1(toReturn, toReturnAff) + return toReturnAff +} diff --git a/ecc/bls12-39/g1_test.go b/ecc/bls12-39/g1_test.go new file mode 100644 index 0000000000..f6a7662ea4 --- /dev/null +++ b/ecc/bls12-39/g1_test.go @@ -0,0 +1,701 @@ +// Copyright 2020 ConsenSys Software Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by consensys/gnark-crypto DO NOT EDIT + +package bls1239 + +import ( + "fmt" + "math/big" + "testing" + + "github.com/consensys/gnark-crypto/ecc/bls12-39/fp" + + "github.com/consensys/gnark-crypto/ecc/bls12-39/fr" + "github.com/leanovate/gopter" + "github.com/leanovate/gopter/prop" +) + +func TestG1AffineEndomorphism(t *testing.T) { + t.Parallel() + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + properties.Property("[BLS12-39] check that phi(P) = lambdaGLV * P", prop.ForAll( + func(a fp.Element) bool { + var p, res1, res2 G1Jac + g := MapToCurveG1Svdw(a) + p.FromAffine(&g) + res1.phi(&p) + res2.mulWindowed(&p, &lambdaGLV) + + return p.IsInSubGroup() && res1.Equal(&res2) + }, + GenFp(), + )) + + properties.Property("[BLS12-39] check that phi^2(P) + phi(P) + P = 0", prop.ForAll( + func(a fp.Element) bool { + var p, res, tmp G1Jac + g := MapToCurveG1Svdw(a) + p.FromAffine(&g) + tmp.phi(&p) + res.phi(&tmp). + AddAssign(&tmp). 
+ AddAssign(&p) + + return res.Z.IsZero() + }, + GenFp(), + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) +} + +func TestMapToCurveG1(t *testing.T) { + t.Parallel() + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + properties.Property("[G1] Svsw mapping should output point on the curve", prop.ForAll( + func(a fp.Element) bool { + g := MapToCurveG1Svdw(a) + return g.IsInSubGroup() + }, + GenFp(), + )) + + properties.Property("[G1] Svsw mapping should be deterministic", prop.ForAll( + func(a fp.Element) bool { + g1 := MapToCurveG1Svdw(a) + g2 := MapToCurveG1Svdw(a) + return g1.Equal(&g2) + }, + GenFp(), + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) +} + +func TestG1AffineIsOnCurve(t *testing.T) { + t.Parallel() + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + properties.Property("[BLS12-39] g1Gen (affine) should be on the curve", prop.ForAll( + func(a fp.Element) bool { + var op1, op2 G1Affine + op1.FromJacobian(&g1Gen) + op2.FromJacobian(&g1Gen) + op2.Y.Mul(&op2.Y, &a) + return op1.IsOnCurve() && !op2.IsOnCurve() + }, + GenFp(), + )) + + properties.Property("[BLS12-39] g1Gen (Jacobian) should be on the curve", prop.ForAll( + func(a fp.Element) bool { + var op1, op2, op3 G1Jac + op1.Set(&g1Gen) + op3.Set(&g1Gen) + + op2 = fuzzJacobianG1Affine(&g1Gen, a) + op3.Y.Mul(&op3.Y, &a) + return op1.IsOnCurve() && op2.IsOnCurve() && !op3.IsOnCurve() + }, + GenFp(), + )) + + properties.Property("[BLS12-39] IsInSubGroup and MulBy subgroup order should be the same", prop.ForAll( + func(a fp.Element) bool { + var op1, op2 G1Jac + op1 = fuzzJacobianG1Affine(&g1Gen, a) + _r := fr.Modulus() + 
op2.ScalarMultiplication(&op1, _r) + return op1.IsInSubGroup() && op2.Z.IsZero() + }, + GenFp(), + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) +} + +func TestG1AffineConversions(t *testing.T) { + t.Parallel() + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + properties.Property("[BLS12-39] Affine representation should be independent of the Jacobian representative", prop.ForAll( + func(a fp.Element) bool { + g := fuzzJacobianG1Affine(&g1Gen, a) + var op1 G1Affine + op1.FromJacobian(&g) + return op1.X.Equal(&g1Gen.X) && op1.Y.Equal(&g1Gen.Y) + }, + GenFp(), + )) + + properties.Property("[BLS12-39] Affine representation should be independent of a Extended Jacobian representative", prop.ForAll( + func(a fp.Element) bool { + var g g1JacExtended + g.X.Set(&g1Gen.X) + g.Y.Set(&g1Gen.Y) + g.ZZ.Set(&g1Gen.Z) + g.ZZZ.Set(&g1Gen.Z) + gfuzz := fuzzExtendedJacobianG1Affine(&g, a) + + var op1 G1Affine + op1.fromJacExtended(&gfuzz) + return op1.X.Equal(&g1Gen.X) && op1.Y.Equal(&g1Gen.Y) + }, + GenFp(), + )) + + properties.Property("[BLS12-39] Jacobian representation should be the same as the affine representative", prop.ForAll( + func(a fp.Element) bool { + var g G1Jac + var op1 G1Affine + op1.X.Set(&g1Gen.X) + op1.Y.Set(&g1Gen.Y) + + var one fp.Element + one.SetOne() + + g.FromAffine(&op1) + + return g.X.Equal(&g1Gen.X) && g.Y.Equal(&g1Gen.Y) && g.Z.Equal(&one) + }, + GenFp(), + )) + + properties.Property("[BLS12-39] Converting affine symbol for infinity to Jacobian should output correct infinity in Jacobian", prop.ForAll( + func() bool { + var g G1Affine + g.X.SetZero() + g.Y.SetZero() + var op1 G1Jac + op1.FromAffine(&g) + var one, zero fp.Element + one.SetOne() + return op1.X.Equal(&one) && op1.Y.Equal(&one) && op1.Z.Equal(&zero) + }, + )) + + properties.Property("[BLS12-39] 
Converting infinity in extended Jacobian to affine should output infinity symbol in Affine", prop.ForAll( + func() bool { + var g G1Affine + var op1 g1JacExtended + var zero fp.Element + op1.X.Set(&g1Gen.X) + op1.Y.Set(&g1Gen.Y) + g.fromJacExtended(&op1) + return g.X.Equal(&zero) && g.Y.Equal(&zero) + }, + )) + + properties.Property("[BLS12-39] Converting infinity in extended Jacobian to Jacobian should output infinity in Jacobian", prop.ForAll( + func() bool { + var g G1Jac + var op1 g1JacExtended + var zero, one fp.Element + one.SetOne() + op1.X.Set(&g1Gen.X) + op1.Y.Set(&g1Gen.Y) + g.fromJacExtended(&op1) + return g.X.Equal(&one) && g.Y.Equal(&one) && g.Z.Equal(&zero) + }, + )) + + properties.Property("[BLS12-39] [Jacobian] Two representatives of the same class should be equal", prop.ForAll( + func(a, b fp.Element) bool { + op1 := fuzzJacobianG1Affine(&g1Gen, a) + op2 := fuzzJacobianG1Affine(&g1Gen, b) + return op1.Equal(&op2) + }, + GenFp(), + GenFp(), + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) +} + +func TestG1AffineOps(t *testing.T) { + t.Parallel() + parameters := gopter.DefaultTestParameters() + parameters.MinSuccessfulTests = 10 + + properties := gopter.NewProperties(parameters) + + genScalar := GenFr() + + properties.Property("[BLS12-39] [Jacobian] Add should call double when having adding the same point", prop.ForAll( + func(a, b fp.Element) bool { + fop1 := fuzzJacobianG1Affine(&g1Gen, a) + fop2 := fuzzJacobianG1Affine(&g1Gen, b) + var op1, op2 G1Jac + op1.Set(&fop1).AddAssign(&fop2) + op2.Double(&fop2) + return op1.Equal(&op2) + }, + GenFp(), + GenFp(), + )) + + properties.Property("[BLS12-39] [Jacobian] Adding the opposite of a point to itself should output inf", prop.ForAll( + func(a, b fp.Element) bool { + fop1 := fuzzJacobianG1Affine(&g1Gen, a) + fop2 := fuzzJacobianG1Affine(&g1Gen, b) + fop2.Neg(&fop2) + fop1.AddAssign(&fop2) + return fop1.Equal(&g1Infinity) + }, + GenFp(), + GenFp(), + )) + + properties.Property("[BLS12-39] 
[Jacobian] Adding the inf to a point should not modify the point", prop.ForAll( + func(a fp.Element) bool { + fop1 := fuzzJacobianG1Affine(&g1Gen, a) + fop1.AddAssign(&g1Infinity) + var op2 G1Jac + op2.Set(&g1Infinity) + op2.AddAssign(&g1Gen) + return fop1.Equal(&g1Gen) && op2.Equal(&g1Gen) + }, + GenFp(), + )) + + properties.Property("[BLS12-39] [Jacobian Extended] addMixed (-G) should equal subMixed(G)", prop.ForAll( + func(a fp.Element) bool { + fop1 := fuzzJacobianG1Affine(&g1Gen, a) + var p1, p1Neg G1Affine + p1.FromJacobian(&fop1) + p1Neg = p1 + p1Neg.Y.Neg(&p1Neg.Y) + var o1, o2 g1JacExtended + o1.addMixed(&p1Neg) + o2.subMixed(&p1) + + return o1.X.Equal(&o2.X) && + o1.Y.Equal(&o2.Y) && + o1.ZZ.Equal(&o2.ZZ) && + o1.ZZZ.Equal(&o2.ZZZ) + }, + GenFp(), + )) + + properties.Property("[BLS12-39] [Jacobian Extended] doubleMixed (-G) should equal doubleNegMixed(G)", prop.ForAll( + func(a fp.Element) bool { + fop1 := fuzzJacobianG1Affine(&g1Gen, a) + var p1, p1Neg G1Affine + p1.FromJacobian(&fop1) + p1Neg = p1 + p1Neg.Y.Neg(&p1Neg.Y) + var o1, o2 g1JacExtended + o1.doubleMixed(&p1Neg) + o2.doubleNegMixed(&p1) + + return o1.X.Equal(&o2.X) && + o1.Y.Equal(&o2.Y) && + o1.ZZ.Equal(&o2.ZZ) && + o1.ZZZ.Equal(&o2.ZZZ) + }, + GenFp(), + )) + + properties.Property("[BLS12-39] [Jacobian] Addmix the negation to itself should output 0", prop.ForAll( + func(a fp.Element) bool { + fop1 := fuzzJacobianG1Affine(&g1Gen, a) + fop1.Neg(&fop1) + var op2 G1Affine + op2.FromJacobian(&g1Gen) + fop1.AddMixed(&op2) + return fop1.Equal(&g1Infinity) + }, + GenFp(), + )) + + properties.Property("[BLS12-39] scalar multiplication (double and add) should depend only on the scalar mod r", prop.ForAll( + func(s fr.Element) bool { + + r := fr.Modulus() + var g G1Jac + g.mulGLV(&g1Gen, r) + + var scalar, blindedScalar, rminusone big.Int + var op1, op2, op3, gneg G1Jac + rminusone.SetUint64(1).Sub(r, &rminusone) + op3.mulWindowed(&g1Gen, &rminusone) + gneg.Neg(&g1Gen) + s.ToBigIntRegular(&scalar) + 
blindedScalar.Mul(&scalar, r).Add(&blindedScalar, &scalar) + op1.mulWindowed(&g1Gen, &scalar) + op2.mulWindowed(&g1Gen, &blindedScalar) + + return op1.Equal(&op2) && g.Equal(&g1Infinity) && !op1.Equal(&g1Infinity) && gneg.Equal(&op3) + + }, + genScalar, + )) + + properties.Property("[BLS12-39] scalar multiplication (GLV) should depend only on the scalar mod r", prop.ForAll( + func(s fr.Element) bool { + + r := fr.Modulus() + var g G1Jac + g.mulGLV(&g1Gen, r) + + var scalar, blindedScalar, rminusone big.Int + var op1, op2, op3, gneg G1Jac + rminusone.SetUint64(1).Sub(r, &rminusone) + op3.ScalarMultiplication(&g1Gen, &rminusone) + gneg.Neg(&g1Gen) + s.ToBigIntRegular(&scalar) + blindedScalar.Mul(&scalar, r).Add(&blindedScalar, &scalar) + op1.ScalarMultiplication(&g1Gen, &scalar) + op2.ScalarMultiplication(&g1Gen, &blindedScalar) + + return op1.Equal(&op2) && g.Equal(&g1Infinity) && !op1.Equal(&g1Infinity) && gneg.Equal(&op3) + + }, + genScalar, + )) + + properties.Property("[BLS12-39] GLV and Double and Add should output the same result", prop.ForAll( + func(s fr.Element) bool { + + var r big.Int + var op1, op2 G1Jac + s.ToBigIntRegular(&r) + op1.mulWindowed(&g1Gen, &r) + op2.mulGLV(&g1Gen, &r) + return op1.Equal(&op2) && !op1.Equal(&g1Infinity) + + }, + genScalar, + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) +} + +func TestG1AffineCofactorCleaning(t *testing.T) { + t.Parallel() + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + properties.Property("[BLS12-39] Clearing the cofactor of a random point should set it in the r-torsion", prop.ForAll( + func() bool { + var a, x, b fp.Element + a.SetRandom() + + x.Square(&a).Mul(&x, &a).Add(&x, &bCurveCoeff) + + for x.Legendre() != 1 { + a.SetRandom() + + x.Square(&a).Mul(&x, &a).Add(&x, &bCurveCoeff) + + } + + b.Sqrt(&x) + var 
point, pointCleared, infinity G1Jac + point.X.Set(&a) + point.Y.Set(&b) + point.Z.SetOne() + pointCleared.ClearCofactor(&point) + infinity.Set(&g1Infinity) + return point.IsOnCurve() && pointCleared.IsInSubGroup() && !pointCleared.Equal(&infinity) + }, + )) + properties.TestingRun(t, gopter.ConsoleReporter(false)) + +} + +func TestG1AffineBatchScalarMultiplication(t *testing.T) { + + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzzShort + } + + properties := gopter.NewProperties(parameters) + + genScalar := GenFr() + + // size of the multiExps + const nbSamples = 10 + + properties.Property("[BLS12-39] BatchScalarMultiplication should be consistant with individual scalar multiplications", prop.ForAll( + func(mixer fr.Element) bool { + // mixer ensures that all the words of a fpElement are set + var sampleScalars [nbSamples]fr.Element + + for i := 1; i <= nbSamples; i++ { + sampleScalars[i-1].SetUint64(uint64(i)). + Mul(&sampleScalars[i-1], &mixer). 
+ FromMont() + } + + result := BatchScalarMultiplicationG1(&g1GenAff, sampleScalars[:]) + + if len(result) != len(sampleScalars) { + return false + } + + for i := 0; i < len(result); i++ { + var expectedJac G1Jac + var expected G1Affine + var b big.Int + expectedJac.mulGLV(&g1Gen, sampleScalars[i].ToBigInt(&b)) + expected.FromJacobian(&expectedJac) + if !result[i].Equal(&expected) { + return false + } + } + return true + }, + genScalar, + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) +} + +// ------------------------------------------------------------ +// benches + +func BenchmarkG1JacIsInSubGroup(b *testing.B) { + var a G1Jac + a.Set(&g1Gen) + b.ResetTimer() + for i := 0; i < b.N; i++ { + a.IsInSubGroup() + } + +} + +func BenchmarkG1AffineBatchScalarMul(b *testing.B) { + // ensure every words of the scalars are filled + var mixer fr.Element + mixer.SetString("7716837800905789770901243404444209691916730933998574719964609384059111546487") + + const pow = 15 + const nbSamples = 1 << pow + + var sampleScalars [nbSamples]fr.Element + + for i := 1; i <= nbSamples; i++ { + sampleScalars[i-1].SetUint64(uint64(i)). + Mul(&sampleScalars[i-1], &mixer). 
+ FromMont() + } + + for i := 5; i <= pow; i++ { + using := 1 << i + + b.Run(fmt.Sprintf("%d points", using), func(b *testing.B) { + b.ResetTimer() + for j := 0; j < b.N; j++ { + _ = BatchScalarMultiplicationG1(&g1GenAff, sampleScalars[:using]) + } + }) + } +} + +func BenchmarkG1JacScalarMul(b *testing.B) { + + var scalar big.Int + r := fr.Modulus() + scalar.SetString("5243587517512619047944770508185965837690552500527637822603658699938581184513", 10) + scalar.Add(&scalar, r) + + var doubleAndAdd G1Jac + + b.Run("double and add", func(b *testing.B) { + b.ResetTimer() + for j := 0; j < b.N; j++ { + doubleAndAdd.mulWindowed(&g1Gen, &scalar) + } + }) + + var glv G1Jac + b.Run("GLV", func(b *testing.B) { + b.ResetTimer() + for j := 0; j < b.N; j++ { + glv.mulGLV(&g1Gen, &scalar) + } + }) + +} + +func BenchmarkG1AffineCofactorClearing(b *testing.B) { + var a G1Jac + a.Set(&g1Gen) + for i := 0; i < b.N; i++ { + a.ClearCofactor(&a) + } +} + +func BenchmarkG1JacAdd(b *testing.B) { + var a G1Jac + a.Double(&g1Gen) + b.ResetTimer() + for i := 0; i < b.N; i++ { + a.AddAssign(&g1Gen) + } +} + +func BenchmarkG1JacAddMixed(b *testing.B) { + var a G1Jac + a.Double(&g1Gen) + + var c G1Affine + c.FromJacobian(&g1Gen) + b.ResetTimer() + for i := 0; i < b.N; i++ { + a.AddMixed(&c) + } + +} + +func BenchmarkG1JacDouble(b *testing.B) { + var a G1Jac + a.Set(&g1Gen) + b.ResetTimer() + for i := 0; i < b.N; i++ { + a.DoubleAssign() + } + +} + +func BenchmarkG1JacExtAddMixed(b *testing.B) { + var a g1JacExtended + a.doubleMixed(&g1GenAff) + + var c G1Affine + c.FromJacobian(&g1Gen) + b.ResetTimer() + for i := 0; i < b.N; i++ { + a.addMixed(&c) + } +} + +func BenchmarkG1JacExtSubMixed(b *testing.B) { + var a g1JacExtended + a.doubleMixed(&g1GenAff) + + var c G1Affine + c.FromJacobian(&g1Gen) + b.ResetTimer() + for i := 0; i < b.N; i++ { + a.subMixed(&c) + } +} + +func BenchmarkG1JacExtDoubleMixed(b *testing.B) { + var a g1JacExtended + a.doubleMixed(&g1GenAff) + + var c G1Affine + 
c.FromJacobian(&g1Gen) + b.ResetTimer() + for i := 0; i < b.N; i++ { + a.doubleMixed(&c) + } +} + +func BenchmarkG1JacExtDoubleNegMixed(b *testing.B) { + var a g1JacExtended + a.doubleMixed(&g1GenAff) + + var c G1Affine + c.FromJacobian(&g1Gen) + b.ResetTimer() + for i := 0; i < b.N; i++ { + a.doubleNegMixed(&c) + } +} + +func BenchmarkG1JacExtAdd(b *testing.B) { + var a, c g1JacExtended + a.doubleMixed(&g1GenAff) + c.double(&a) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + a.add(&c) + } +} + +func BenchmarkG1JacExtDouble(b *testing.B) { + var a g1JacExtended + a.doubleMixed(&g1GenAff) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + a.double(&a) + } +} + +func fuzzJacobianG1Affine(p *G1Jac, f fp.Element) G1Jac { + var res G1Jac + res.X.Mul(&p.X, &f).Mul(&res.X, &f) + res.Y.Mul(&p.Y, &f).Mul(&res.Y, &f).Mul(&res.Y, &f) + res.Z.Mul(&p.Z, &f) + return res +} + +func fuzzExtendedJacobianG1Affine(p *g1JacExtended, f fp.Element) g1JacExtended { + var res g1JacExtended + var ff, fff fp.Element + ff.Square(&f) + fff.Mul(&ff, &f) + res.X.Mul(&p.X, &ff) + res.Y.Mul(&p.Y, &fff) + res.ZZ.Mul(&p.ZZ, &ff) + res.ZZZ.Mul(&p.ZZZ, &fff) + return res +} diff --git a/ecc/bls12-39/g2.go b/ecc/bls12-39/g2.go new file mode 100644 index 0000000000..ee0a94fb94 --- /dev/null +++ b/ecc/bls12-39/g2.go @@ -0,0 +1,978 @@ +// Copyright 2020 ConsenSys Software Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Code generated by consensys/gnark-crypto DO NOT EDIT + +package bls1239 + +import ( + "math" + "math/big" + "runtime" + + "github.com/consensys/gnark-crypto/ecc" + "github.com/consensys/gnark-crypto/ecc/bls12-39/fr" + "github.com/consensys/gnark-crypto/ecc/bls12-39/internal/fptower" + "github.com/consensys/gnark-crypto/internal/parallel" +) + +// G2Affine point in affine coordinates +type G2Affine struct { + X, Y fptower.E2 +} + +// G2Jac is a point with fptower.E2 coordinates +type G2Jac struct { + X, Y, Z fptower.E2 +} + +// g2JacExtended parameterized jacobian coordinates (x=X/ZZ, y=Y/ZZZ, ZZ**3=ZZZ**2) +type g2JacExtended struct { + X, Y, ZZ, ZZZ fptower.E2 +} + +// g2Proj point in projective coordinates +type g2Proj struct { + x, y, z fptower.E2 +} + +// ------------------------------------------------------------------------------------------------- +// Affine + +// Set sets p to the provided point +func (p *G2Affine) Set(a *G2Affine) *G2Affine { + p.X, p.Y = a.X, a.Y + return p +} + +// ScalarMultiplication computes and returns p = a*s +func (p *G2Affine) ScalarMultiplication(a *G2Affine, s *big.Int) *G2Affine { + var _p G2Jac + _p.FromAffine(a) + _p.mulGLV(&_p, s) + p.FromJacobian(&_p) + return p +} + +// Add adds two point in affine coordinates. +// This should rarely be used as it is very inneficient compared to Jacobian +// TODO implement affine addition formula +func (p *G2Affine) Add(a, b *G2Affine) *G2Affine { + var p1, p2 G2Jac + p1.FromAffine(a) + p2.FromAffine(b) + p1.AddAssign(&p2) + p.FromJacobian(&p1) + return p +} + +// Sub subs two point in affine coordinates. 
+// This should rarely be used as it is very inneficient compared to Jacobian +// TODO implement affine addition formula +func (p *G2Affine) Sub(a, b *G2Affine) *G2Affine { + var p1, p2 G2Jac + p1.FromAffine(a) + p2.FromAffine(b) + p1.SubAssign(&p2) + p.FromJacobian(&p1) + return p +} + +// Equal tests if two points (in Affine coordinates) are equal +func (p *G2Affine) Equal(a *G2Affine) bool { + return p.X.Equal(&a.X) && p.Y.Equal(&a.Y) +} + +// Neg computes -G +func (p *G2Affine) Neg(a *G2Affine) *G2Affine { + p.X = a.X + p.Y.Neg(&a.Y) + return p +} + +// FromJacobian rescale a point in Jacobian coord in z=1 plane +func (p *G2Affine) FromJacobian(p1 *G2Jac) *G2Affine { + + var a, b fptower.E2 + + if p1.Z.IsZero() { + p.X.SetZero() + p.Y.SetZero() + return p + } + + a.Inverse(&p1.Z) + b.Square(&a) + p.X.Mul(&p1.X, &b) + p.Y.Mul(&p1.Y, &b).Mul(&p.Y, &a) + + return p +} + +func (p *G2Affine) String() string { + var x, y fptower.E2 + x.Set(&p.X) + y.Set(&p.Y) + return "E([" + x.String() + "," + y.String() + "])," +} + +// IsInfinity checks if the point is infinity (in affine, it's encoded as (0,0)) +func (p *G2Affine) IsInfinity() bool { + return p.X.IsZero() && p.Y.IsZero() +} + +// IsOnCurve returns true if p in on the curve +func (p *G2Affine) IsOnCurve() bool { + var point G2Jac + point.FromAffine(p) + return point.IsOnCurve() // call this function to handle infinity point +} + +// IsInSubGroup returns true if p is in the correct subgroup, false otherwise +func (p *G2Affine) IsInSubGroup() bool { + var _p G2Jac + _p.FromAffine(p) + return _p.IsInSubGroup() +} + +// ------------------------------------------------------------------------------------------------- +// Jacobian + +// Set sets p to the provided point +func (p *G2Jac) Set(a *G2Jac) *G2Jac { + p.X, p.Y, p.Z = a.X, a.Y, a.Z + return p +} + +// Equal tests if two points (in Jacobian coordinates) are equal +func (p *G2Jac) Equal(a *G2Jac) bool { + + if p.Z.IsZero() && a.Z.IsZero() { + return true + } + _p 
:= G2Affine{} + _p.FromJacobian(p) + + _a := G2Affine{} + _a.FromJacobian(a) + + return _p.X.Equal(&_a.X) && _p.Y.Equal(&_a.Y) +} + +// Neg computes -G +func (p *G2Jac) Neg(a *G2Jac) *G2Jac { + *p = *a + p.Y.Neg(&a.Y) + return p +} + +// SubAssign subtracts two points on the curve +func (p *G2Jac) SubAssign(a *G2Jac) *G2Jac { + var tmp G2Jac + tmp.Set(a) + tmp.Y.Neg(&tmp.Y) + p.AddAssign(&tmp) + return p +} + +// AddAssign point addition in montgomery form +// https://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-3.html#addition-add-2007-bl +func (p *G2Jac) AddAssign(a *G2Jac) *G2Jac { + + // p is infinity, return a + if p.Z.IsZero() { + p.Set(a) + return p + } + + // a is infinity, return p + if a.Z.IsZero() { + return p + } + + var Z1Z1, Z2Z2, U1, U2, S1, S2, H, I, J, r, V fptower.E2 + Z1Z1.Square(&a.Z) + Z2Z2.Square(&p.Z) + U1.Mul(&a.X, &Z2Z2) + U2.Mul(&p.X, &Z1Z1) + S1.Mul(&a.Y, &p.Z). + Mul(&S1, &Z2Z2) + S2.Mul(&p.Y, &a.Z). + Mul(&S2, &Z1Z1) + + // if p == a, we double instead + if U1.Equal(&U2) && S1.Equal(&S2) { + return p.DoubleAssign() + } + + H.Sub(&U2, &U1) + I.Double(&H). + Square(&I) + J.Mul(&H, &I) + r.Sub(&S2, &S1).Double(&r) + V.Mul(&U1, &I) + p.X.Square(&r). + Sub(&p.X, &J). + Sub(&p.X, &V). + Sub(&p.X, &V) + p.Y.Sub(&V, &p.X). + Mul(&p.Y, &r) + S1.Mul(&S1, &J).Double(&S1) + p.Y.Sub(&p.Y, &S1) + p.Z.Add(&p.Z, &a.Z) + p.Z.Square(&p.Z). + Sub(&p.Z, &Z1Z1). + Sub(&p.Z, &Z2Z2). + Mul(&p.Z, &H) + + return p +} + +// AddMixed point addition +// http://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#addition-madd-2007-bl +func (p *G2Jac) AddMixed(a *G2Affine) *G2Jac { + + //if a is infinity return p + if a.X.IsZero() && a.Y.IsZero() { + return p + } + // p is infinity, return a + if p.Z.IsZero() { + p.X = a.X + p.Y = a.Y + p.Z.SetOne() + return p + } + + var Z1Z1, U2, S2, H, HH, I, J, r, V fptower.E2 + Z1Z1.Square(&p.Z) + U2.Mul(&a.X, &Z1Z1) + S2.Mul(&a.Y, &p.Z). 
+ Mul(&S2, &Z1Z1) + + // if p == a, we double instead + if U2.Equal(&p.X) && S2.Equal(&p.Y) { + return p.DoubleAssign() + } + + H.Sub(&U2, &p.X) + HH.Square(&H) + I.Double(&HH).Double(&I) + J.Mul(&H, &I) + r.Sub(&S2, &p.Y).Double(&r) + V.Mul(&p.X, &I) + p.X.Square(&r). + Sub(&p.X, &J). + Sub(&p.X, &V). + Sub(&p.X, &V) + J.Mul(&J, &p.Y).Double(&J) + p.Y.Sub(&V, &p.X). + Mul(&p.Y, &r) + p.Y.Sub(&p.Y, &J) + p.Z.Add(&p.Z, &H) + p.Z.Square(&p.Z). + Sub(&p.Z, &Z1Z1). + Sub(&p.Z, &HH) + + return p +} + +// Double doubles a point in Jacobian coordinates +// https://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-3.html#doubling-dbl-2007-bl +func (p *G2Jac) Double(q *G2Jac) *G2Jac { + p.Set(q) + p.DoubleAssign() + return p +} + +// DoubleAssign doubles a point in Jacobian coordinates +// https://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-3.html#doubling-dbl-2007-bl +func (p *G2Jac) DoubleAssign() *G2Jac { + + var XX, YY, YYYY, ZZ, S, M, T fptower.E2 + + XX.Square(&p.X) + YY.Square(&p.Y) + YYYY.Square(&YY) + ZZ.Square(&p.Z) + S.Add(&p.X, &YY) + S.Square(&S). + Sub(&S, &XX). + Sub(&S, &YYYY). + Double(&S) + M.Double(&XX).Add(&M, &XX) + p.Z.Add(&p.Z, &p.Y). + Square(&p.Z). + Sub(&p.Z, &YY). + Sub(&p.Z, &ZZ) + T.Square(&M) + p.X = T + T.Double(&S) + p.X.Sub(&p.X, &T) + p.Y.Sub(&S, &p.X). 
+ Mul(&p.Y, &M) + YYYY.Double(&YYYY).Double(&YYYY).Double(&YYYY) + p.Y.Sub(&p.Y, &YYYY) + + return p +} + +// ScalarMultiplication computes and returns p = a*s +// see https://www.iacr.org/archive/crypto2001/21390189.pdf +func (p *G2Jac) ScalarMultiplication(a *G2Jac, s *big.Int) *G2Jac { + return p.mulGLV(a, s) +} + +func (p *G2Jac) String() string { + if p.Z.IsZero() { + return "O" + } + _p := G2Affine{} + _p.FromJacobian(p) + return "E([" + _p.X.String() + "," + _p.Y.String() + "])," +} + +// FromAffine sets p = Q, p in Jacboian, Q in affine +func (p *G2Jac) FromAffine(Q *G2Affine) *G2Jac { + if Q.X.IsZero() && Q.Y.IsZero() { + p.Z.SetZero() + p.X.SetOne() + p.Y.SetOne() + return p + } + p.Z.SetOne() + p.X.Set(&Q.X) + p.Y.Set(&Q.Y) + return p +} + +// IsOnCurve returns true if p in on the curve +func (p *G2Jac) IsOnCurve() bool { + var left, right, tmp fptower.E2 + left.Square(&p.Y) + right.Square(&p.X).Mul(&right, &p.X) + tmp.Square(&p.Z). + Square(&tmp). + Mul(&tmp, &p.Z). + Mul(&tmp, &p.Z). + Mul(&tmp, &bTwistCurveCoeff) + right.Add(&right, &tmp) + return left.Equal(&right) +} + +// https://eprint.iacr.org/2021/1130.pdf, sec.4 +// psi(p) = u*P +func (p *G2Jac) IsInSubGroup() bool { + var res, tmp G2Jac + tmp.psi(p) + res.ScalarMultiplication(p, &xGen). 
+ SubAssign(&tmp) + + return res.IsOnCurve() && res.Z.IsZero() +} + +// mulWindowed 2-bits windowed exponentiation +func (p *G2Jac) mulWindowed(a *G2Jac, s *big.Int) *G2Jac { + + var res G2Jac + var ops [3]G2Jac + + res.Set(&g2Infinity) + ops[0].Set(a) + ops[1].Double(&ops[0]) + ops[2].Set(&ops[0]).AddAssign(&ops[1]) + + b := s.Bytes() + for i := range b { + w := b[i] + mask := byte(0xc0) + for j := 0; j < 4; j++ { + res.DoubleAssign().DoubleAssign() + c := (w & mask) >> (6 - 2*j) + if c != 0 { + res.AddAssign(&ops[c-1]) + } + mask = mask >> 2 + } + } + p.Set(&res) + + return p + +} + +// psi(p) = u o frob o u**-1 where u:E'->E iso from the twist to E +func (p *G2Jac) psi(a *G2Jac) *G2Jac { + p.Set(a) + p.X.Conjugate(&p.X).Mul(&p.X, &endo.u) + p.Y.Conjugate(&p.Y).Mul(&p.Y, &endo.v) + p.Z.Conjugate(&p.Z) + return p +} + +// phi assigns p to phi(a) where phi: (x,y)->(ux,y), and returns p +func (p *G2Jac) phi(a *G2Jac) *G2Jac { + p.Set(a) + p.X.MulByElement(&p.X, &thirdRootOneG2) + return p +} + +// mulGLV performs scalar multiplication using GLV +// see https://www.iacr.org/archive/crypto2001/21390189.pdf +func (p *G2Jac) mulGLV(a *G2Jac, s *big.Int) *G2Jac { + + var table [15]G2Jac + var res G2Jac + var k1, k2 fr.Element + + res.Set(&g2Infinity) + + // table[b3b2b1b0-1] = b3b2*phi(a) + b1b0*a + table[0].Set(a) + table[3].phi(a) + + // split the scalar, modifies +-a, phi(a) accordingly + k := ecc.SplitScalar(s, &glvBasis) + + if k[0].Sign() == -1 { + k[0].Neg(&k[0]) + table[0].Neg(&table[0]) + } + if k[1].Sign() == -1 { + k[1].Neg(&k[1]) + table[3].Neg(&table[3]) + } + + // precompute table (2 bits sliding window) + // table[b3b2b1b0-1] = b3b2*phi(a) + b1b0*a if b3b2b1b0 != 0 + table[1].Double(&table[0]) + table[2].Set(&table[1]).AddAssign(&table[0]) + table[4].Set(&table[3]).AddAssign(&table[0]) + table[5].Set(&table[3]).AddAssign(&table[1]) + table[6].Set(&table[3]).AddAssign(&table[2]) + table[7].Double(&table[3]) + table[8].Set(&table[7]).AddAssign(&table[0]) + 
table[9].Set(&table[7]).AddAssign(&table[1]) + table[10].Set(&table[7]).AddAssign(&table[2]) + table[11].Set(&table[7]).AddAssign(&table[3]) + table[12].Set(&table[11]).AddAssign(&table[0]) + table[13].Set(&table[11]).AddAssign(&table[1]) + table[14].Set(&table[11]).AddAssign(&table[2]) + + // bounds on the lattice base vectors guarantee that k1, k2 are len(r)/2 bits long max + k1.SetBigInt(&k[0]).FromMont() + k2.SetBigInt(&k[1]).FromMont() + + // loop starts from len(k1)/2 due to the bounds + for i := int(math.Ceil(fr.Limbs/2. - 1)); i >= 0; i-- { + mask := uint64(3) << 62 + for j := 0; j < 32; j++ { + res.Double(&res).Double(&res) + b1 := (k1[i] & mask) >> (62 - 2*j) + b2 := (k2[i] & mask) >> (62 - 2*j) + if b1|b2 != 0 { + s := (b2<<2 | b1) + res.AddAssign(&table[s-1]) + } + mask = mask >> 2 + } + } + + p.Set(&res) + return p +} + +// ClearCofactor maps a point in curve to r-torsion +func (p *G2Affine) ClearCofactor(a *G2Affine) *G2Affine { + var _p G2Jac + _p.FromAffine(a) + _p.ClearCofactor(&_p) + p.FromJacobian(&_p) + return p +} + +// ClearCofactor maps a point in curve to r-torsion +func (p *G2Jac) ClearCofactor(a *G2Jac) *G2Jac { + // https://eprint.iacr.org/2017/419.pdf, 4.1 + var xg, xxg, res, t G2Jac + xg.ScalarMultiplication(a, &xGen) + xxg.ScalarMultiplication(&xg, &xGen) + + res.Set(&xxg). + SubAssign(&xg). + SubAssign(a) + + t.Set(&xg). + SubAssign(a). 
+ psi(&t) + + res.AddAssign(&t) + + t.Double(a) + t.X.MulByElement(&t.X, &thirdRootOneG1) + + res.SubAssign(&t) + + p.Set(&res) + + return p + +} + +// ------------------------------------------------------------------------------------------------- +// Jacobian extended + +// Set sets p to the provided point +func (p *g2JacExtended) Set(a *g2JacExtended) *g2JacExtended { + p.X, p.Y, p.ZZ, p.ZZZ = a.X, a.Y, a.ZZ, a.ZZZ + return p +} + +// setInfinity sets p to O +func (p *g2JacExtended) setInfinity() *g2JacExtended { + p.X.SetOne() + p.Y.SetOne() + p.ZZ = fptower.E2{} + p.ZZZ = fptower.E2{} + return p +} + +// fromJacExtended sets p in affine coords from the extended Jacobian point Q (infinity maps to (0,0)) +func (p *G2Affine) fromJacExtended(Q *g2JacExtended) *G2Affine { + if Q.ZZ.IsZero() { + p.X = fptower.E2{} + p.Y = fptower.E2{} + return p + } + p.X.Inverse(&Q.ZZ).Mul(&p.X, &Q.X) + p.Y.Inverse(&Q.ZZZ).Mul(&p.Y, &Q.Y) + return p +} + +// fromJacExtended sets p in Jacobian coords from the extended Jacobian point Q +func (p *G2Jac) fromJacExtended(Q *g2JacExtended) *G2Jac { + if Q.ZZ.IsZero() { + p.Set(&g2Infinity) + return p + } + p.X.Mul(&Q.ZZ, &Q.X).Mul(&p.X, &Q.ZZ) + p.Y.Mul(&Q.ZZZ, &Q.Y).Mul(&p.Y, &Q.ZZZ) + p.Z.Set(&Q.ZZZ) + return p +} + +// unsafeFromJacExtended sets p in jacobian coords, but don't check for infinity +func (p *G2Jac) unsafeFromJacExtended(Q *g2JacExtended) *G2Jac { + p.X.Square(&Q.ZZ).Mul(&p.X, &Q.X) + p.Y.Square(&Q.ZZZ).Mul(&p.Y, &Q.Y) + p.Z = Q.ZZZ + return p +} + +// add point in ZZ coords +// https://www.hyperelliptic.org/EFD/g1p/auto-shortw-xyzz.html#addition-add-2008-s +func (p *g2JacExtended) add(q *g2JacExtended) *g2JacExtended { + //if q is infinity return p + if q.ZZ.IsZero() { + return p + } + // p is infinity, return q + if p.ZZ.IsZero() { + p.Set(q) + return p + } + + var A, B, X1ZZ2, X2ZZ1, Y1ZZZ2, Y2ZZZ1 fptower.E2 + + // p2: q, p1: p + X2ZZ1.Mul(&q.X, &p.ZZ) + X1ZZ2.Mul(&p.X, &q.ZZ) + A.Sub(&X2ZZ1, &X1ZZ2) + Y2ZZZ1.Mul(&q.Y, &p.ZZZ) + Y1ZZZ2.Mul(&p.Y, &q.ZZZ) + B.Sub(&Y2ZZZ1, &Y1ZZZ2) + + if A.IsZero() { + if
B.IsZero() { + return p.double(q) + + } + p.ZZ = fptower.E2{} + p.ZZZ = fptower.E2{} + return p + } + + var U1, U2, S1, S2, P, R, PP, PPP, Q, V fptower.E2 + U1.Mul(&p.X, &q.ZZ) + U2.Mul(&q.X, &p.ZZ) + S1.Mul(&p.Y, &q.ZZZ) + S2.Mul(&q.Y, &p.ZZZ) + P.Sub(&U2, &U1) + R.Sub(&S2, &S1) + PP.Square(&P) + PPP.Mul(&P, &PP) + Q.Mul(&U1, &PP) + V.Mul(&S1, &PPP) + + p.X.Square(&R). + Sub(&p.X, &PPP). + Sub(&p.X, &Q). + Sub(&p.X, &Q) + p.Y.Sub(&Q, &p.X). + Mul(&p.Y, &R). + Sub(&p.Y, &V) + p.ZZ.Mul(&p.ZZ, &q.ZZ). + Mul(&p.ZZ, &PP) + p.ZZZ.Mul(&p.ZZZ, &q.ZZZ). + Mul(&p.ZZZ, &PPP) + + return p +} + +// double point in ZZ coords +// http://www.hyperelliptic.org/EFD/g1p/auto-shortw-xyzz.html#doubling-dbl-2008-s-1 +func (p *g2JacExtended) double(q *g2JacExtended) *g2JacExtended { + var U, V, W, S, XX, M fptower.E2 + + U.Double(&q.Y) + V.Square(&U) + W.Mul(&U, &V) + S.Mul(&q.X, &V) + XX.Square(&q.X) + M.Double(&XX). + Add(&M, &XX) // -> + a, but a=0 here + U.Mul(&W, &q.Y) + + p.X.Square(&M). + Sub(&p.X, &S). + Sub(&p.X, &S) + p.Y.Sub(&S, &p.X). + Mul(&p.Y, &M). 
+ Sub(&p.Y, &U) + p.ZZ.Mul(&V, &q.ZZ) + p.ZZZ.Mul(&W, &q.ZZZ) + + return p +} + +// subMixed same as addMixed, but will negate a.Y +// http://www.hyperelliptic.org/EFD/g1p/auto-shortw-xyzz.html#addition-madd-2008-s +func (p *g2JacExtended) subMixed(a *G2Affine) *g2JacExtended { + + //if a is infinity return p + if a.X.IsZero() && a.Y.IsZero() { + return p + } + // p is infinity, return a + if p.ZZ.IsZero() { + p.X = a.X + p.Y.Neg(&a.Y) + p.ZZ.SetOne() + p.ZZZ.SetOne() + return p + } + + var P, R fptower.E2 + + // p2: a, p1: p + P.Mul(&a.X, &p.ZZ) + P.Sub(&P, &p.X) + + R.Mul(&a.Y, &p.ZZZ) + R.Neg(&R) + R.Sub(&R, &p.Y) + + if P.IsZero() { + if R.IsZero() { + return p.doubleNegMixed(a) + + } + p.ZZ = fptower.E2{} + p.ZZZ = fptower.E2{} + return p + } + + var PP, PPP, Q, Q2, RR, X3, Y3 fptower.E2 + + PP.Square(&P) + PPP.Mul(&P, &PP) + Q.Mul(&p.X, &PP) + RR.Square(&R) + X3.Sub(&RR, &PPP) + Q2.Double(&Q) + p.X.Sub(&X3, &Q2) + Y3.Sub(&Q, &p.X).Mul(&Y3, &R) + R.Mul(&p.Y, &PPP) + p.Y.Sub(&Y3, &R) + p.ZZ.Mul(&p.ZZ, &PP) + p.ZZZ.Mul(&p.ZZZ, &PPP) + + return p + +} + +// addMixed +// http://www.hyperelliptic.org/EFD/g1p/auto-shortw-xyzz.html#addition-madd-2008-s +func (p *g2JacExtended) addMixed(a *G2Affine) *g2JacExtended { + + //if a is infinity return p + if a.X.IsZero() && a.Y.IsZero() { + return p + } + // p is infinity, return a + if p.ZZ.IsZero() { + p.X = a.X + p.Y = a.Y + p.ZZ.SetOne() + p.ZZZ.SetOne() + return p + } + + var P, R fptower.E2 + + // p2: a, p1: p + P.Mul(&a.X, &p.ZZ) + P.Sub(&P, &p.X) + + R.Mul(&a.Y, &p.ZZZ) + R.Sub(&R, &p.Y) + + if P.IsZero() { + if R.IsZero() { + return p.doubleMixed(a) + + } + p.ZZ = fptower.E2{} + p.ZZZ = fptower.E2{} + return p + } + + var PP, PPP, Q, Q2, RR, X3, Y3 fptower.E2 + + PP.Square(&P) + PPP.Mul(&P, &PP) + Q.Mul(&p.X, &PP) + RR.Square(&R) + X3.Sub(&RR, &PPP) + Q2.Double(&Q) + p.X.Sub(&X3, &Q2) + Y3.Sub(&Q, &p.X).Mul(&Y3, &R) + R.Mul(&p.Y, &PPP) + p.Y.Sub(&Y3, &R) + p.ZZ.Mul(&p.ZZ, &PP) + p.ZZZ.Mul(&p.ZZZ, &PPP) + + return p 
+ +} + +// doubleNegMixed same as double, but will negate q.Y +func (p *g2JacExtended) doubleNegMixed(q *G2Affine) *g2JacExtended { + + var U, V, W, S, XX, M, S2, L fptower.E2 + + U.Double(&q.Y) + U.Neg(&U) + V.Square(&U) + W.Mul(&U, &V) + S.Mul(&q.X, &V) + XX.Square(&q.X) + M.Double(&XX). + Add(&M, &XX) // -> + a, but a=0 here + S2.Double(&S) + L.Mul(&W, &q.Y) + + p.X.Square(&M). + Sub(&p.X, &S2) + p.Y.Sub(&S, &p.X). + Mul(&p.Y, &M). + Add(&p.Y, &L) + p.ZZ.Set(&V) + p.ZZZ.Set(&W) + + return p +} + +// doubleMixed point in ZZ coords +// http://www.hyperelliptic.org/EFD/g1p/auto-shortw-xyzz.html#doubling-dbl-2008-s-1 +func (p *g2JacExtended) doubleMixed(q *G2Affine) *g2JacExtended { + + var U, V, W, S, XX, M, S2, L fptower.E2 + + U.Double(&q.Y) + V.Square(&U) + W.Mul(&U, &V) + S.Mul(&q.X, &V) + XX.Square(&q.X) + M.Double(&XX). + Add(&M, &XX) // -> + a, but a=0 here + S2.Double(&S) + L.Mul(&W, &q.Y) + + p.X.Square(&M). + Sub(&p.X, &S2) + p.Y.Sub(&S, &p.X). + Mul(&p.Y, &M). + Sub(&p.Y, &L) + p.ZZ.Set(&V) + p.ZZZ.Set(&W) + + return p +} + +// ------------------------------------------------------------------------------------------------- +// Homogenous projective + +// Set sets p to the provided point +func (p *g2Proj) Set(a *g2Proj) *g2Proj { + p.x, p.y, p.z = a.x, a.y, a.z + return p +} + +// Neg computes -G +func (p *g2Proj) Neg(a *g2Proj) *g2Proj { + *p = *a + p.y.Neg(&a.y) + return p +} + +// FromJacobian converts a point from Jacobian to projective coordinates +func (p *g2Proj) FromJacobian(Q *G2Jac) *g2Proj { + var buf fptower.E2 + buf.Square(&Q.Z) + + p.x.Mul(&Q.X, &Q.Z) + p.y.Set(&Q.Y) + p.z.Mul(&Q.Z, &buf) + + return p +} + +// FromAffine sets p = Q, p in homogenous projective, Q in affine +func (p *g2Proj) FromAffine(Q *G2Affine) *g2Proj { + if Q.X.IsZero() && Q.Y.IsZero() { + p.z.SetZero() + p.x.SetOne() + p.y.SetOne() + return p + } + p.z.SetOne() + p.x.Set(&Q.X) + p.y.Set(&Q.Y) + return p +} + +// BatchScalarMultiplicationG2 multiplies the same base 
(generator) by all scalars +// and return resulting points in affine coordinates +// uses a simple windowed-NAF like exponentiation algorithm +func BatchScalarMultiplicationG2(base *G2Affine, scalars []fr.Element) []G2Affine { + + // approximate cost in group ops is + // cost = 2^{c-1} + n(scalar.nbBits+nbChunks) + + nbPoints := uint64(len(scalars)) + min := ^uint64(0) + bestC := 0 + for c := 2; c < 18; c++ { + cost := uint64(1 << (c - 1)) + nbChunks := uint64(fr.Limbs * 64 / c) + if (fr.Limbs*64)%c != 0 { + nbChunks++ + } + cost += nbPoints * ((fr.Limbs * 64) + nbChunks) + if cost < min { + min = cost + bestC = c + } + } + c := uint64(bestC) // window size + nbChunks := int(fr.Limbs * 64 / c) + if (fr.Limbs*64)%c != 0 { + nbChunks++ + } + mask := uint64((1 << c) - 1) // low c bits are 1 + msbWindow := uint64(1 << (c - 1)) + + // precompute all powers of base for our window + // note here that if performance is critical, we can implement as in the msmX methods + // this allocation to be on the stack + baseTable := make([]G2Jac, (1 << (c - 1))) + baseTable[0].Set(&g2Infinity) + baseTable[0].AddMixed(base) + for i := 1; i < len(baseTable); i++ { + baseTable[i] = baseTable[i-1] + baseTable[i].AddMixed(base) + } + + pScalars, _ := partitionScalars(scalars, c, false, runtime.NumCPU()) + + // compute offset and word selector / shift to select the right bits of our windows + selectors := make([]selector, nbChunks) + for chunk := 0; chunk < nbChunks; chunk++ { + jc := uint64(uint64(chunk) * c) + d := selector{} + d.index = jc / 64 + d.shift = jc - (d.index * 64) + d.mask = mask << d.shift + d.multiWordSelect = (64%c) != 0 && d.shift > (64-c) && d.index < (fr.Limbs-1) + if d.multiWordSelect { + nbBitsHigh := d.shift - uint64(64-c) + d.maskHigh = (1 << nbBitsHigh) - 1 + d.shiftHigh = (c - nbBitsHigh) + } + selectors[chunk] = d + } + toReturn := make([]G2Affine, len(scalars)) + + // for each digit, take value in the base table, double it c time, voila. 
+ parallel.Execute(len(pScalars), func(start, end int) { + var p G2Jac + for i := start; i < end; i++ { + p.Set(&g2Infinity) + for chunk := nbChunks - 1; chunk >= 0; chunk-- { + s := selectors[chunk] + if chunk != nbChunks-1 { + for j := uint64(0); j < c; j++ { + p.DoubleAssign() + } + } + + bits := (pScalars[i][s.index] & s.mask) >> s.shift + if s.multiWordSelect { + bits += (pScalars[i][s.index+1] & s.maskHigh) << s.shiftHigh + } + + if bits == 0 { + continue + } + + // if msbWindow bit is set, we need to subtract + if bits&msbWindow == 0 { + // add + p.AddAssign(&baseTable[bits-1]) + } else { + // sub + t := baseTable[bits & ^msbWindow] + t.Neg(&t) + p.AddAssign(&t) + } + } + + // set our result point + toReturn[i].FromJacobian(&p) + + } + }) + return toReturn +} diff --git a/ecc/bls12-39/g2_test.go b/ecc/bls12-39/g2_test.go new file mode 100644 index 0000000000..4d76d97df6 --- /dev/null +++ b/ecc/bls12-39/g2_test.go @@ -0,0 +1,720 @@ +// Copyright 2020 ConsenSys Software Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
+ +// Code generated by consensys/gnark-crypto DO NOT EDIT + +package bls1239 + +import ( + "fmt" + "math/big" + "testing" + + "github.com/consensys/gnark-crypto/ecc/bls12-39/internal/fptower" + + "github.com/consensys/gnark-crypto/ecc/bls12-39/fr" + "github.com/leanovate/gopter" + "github.com/leanovate/gopter/prop" +) + +func TestG2AffineEndomorphism(t *testing.T) { + t.Parallel() + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + properties.Property("[BLS12-39] check that phi(P) = lambdaGLV * P", prop.ForAll( + func(a fptower.E2) bool { + var p, res1, res2 G2Jac + g := MapToCurveG2Svdw(a) + p.FromAffine(&g) + res1.phi(&p) + res2.mulWindowed(&p, &lambdaGLV) + + return p.IsInSubGroup() && res1.Equal(&res2) + }, + GenE2(), + )) + + properties.Property("[BLS12-39] check that phi^2(P) + phi(P) + P = 0", prop.ForAll( + func(a fptower.E2) bool { + var p, res, tmp G2Jac + g := MapToCurveG2Svdw(a) + p.FromAffine(&g) + tmp.phi(&p) + res.phi(&tmp). + AddAssign(&tmp). 
+ AddAssign(&p) + + return res.Z.IsZero() + }, + GenE2(), + )) + + properties.Property("[BLS12-39] check that psi^2(P) = -phi(P)", prop.ForAll( + func(a fptower.E2) bool { + var p, res1, res2 G2Jac + g := MapToCurveG2Svdw(a) + p.FromAffine(&g) + res1.psi(&p).psi(&res1).Neg(&res1) + res2.Set(&p) + res2.X.MulByElement(&res2.X, &thirdRootOneG1) + + return p.IsInSubGroup() && res1.Equal(&res2) + }, + GenE2(), + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) +} + +func TestMapToCurveG2(t *testing.T) { + t.Parallel() + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + properties.Property("[G2] Svsw mapping should output point on the curve", prop.ForAll( + func(a fptower.E2) bool { + g := MapToCurveG2Svdw(a) + return g.IsInSubGroup() + }, + GenE2(), + )) + + properties.Property("[G2] Svsw mapping should be deterministic", prop.ForAll( + func(a fptower.E2) bool { + g1 := MapToCurveG2Svdw(a) + g2 := MapToCurveG2Svdw(a) + return g1.Equal(&g2) + }, + GenE2(), + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) +} + +func TestG2AffineIsOnCurve(t *testing.T) { + t.Parallel() + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + properties.Property("[BLS12-39] g2Gen (affine) should be on the curve", prop.ForAll( + func(a fptower.E2) bool { + var op1, op2 G2Affine + op1.FromJacobian(&g2Gen) + op2.FromJacobian(&g2Gen) + op2.Y.Mul(&op2.Y, &a) + return op1.IsOnCurve() && !op2.IsOnCurve() + }, + GenE2(), + )) + + properties.Property("[BLS12-39] g2Gen (Jacobian) should be on the curve", prop.ForAll( + func(a fptower.E2) bool { + var op1, op2, op3 G2Jac + op1.Set(&g2Gen) + op3.Set(&g2Gen) + + op2 = 
fuzzJacobianG2Affine(&g2Gen, a) + op3.Y.Mul(&op3.Y, &a) + return op1.IsOnCurve() && op2.IsOnCurve() && !op3.IsOnCurve() + }, + GenE2(), + )) + + properties.Property("[BLS12-39] IsInSubGroup and MulBy subgroup order should be the same", prop.ForAll( + func(a fptower.E2) bool { + var op1, op2 G2Jac + op1 = fuzzJacobianG2Affine(&g2Gen, a) + _r := fr.Modulus() + op2.ScalarMultiplication(&op1, _r) + return op1.IsInSubGroup() && op2.Z.IsZero() + }, + GenE2(), + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) +} + +func TestG2AffineConversions(t *testing.T) { + t.Parallel() + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + properties.Property("[BLS12-39] Affine representation should be independent of the Jacobian representative", prop.ForAll( + func(a fptower.E2) bool { + g := fuzzJacobianG2Affine(&g2Gen, a) + var op1 G2Affine + op1.FromJacobian(&g) + return op1.X.Equal(&g2Gen.X) && op1.Y.Equal(&g2Gen.Y) + }, + GenE2(), + )) + + properties.Property("[BLS12-39] Affine representation should be independent of a Extended Jacobian representative", prop.ForAll( + func(a fptower.E2) bool { + var g g2JacExtended + g.X.Set(&g2Gen.X) + g.Y.Set(&g2Gen.Y) + g.ZZ.Set(&g2Gen.Z) + g.ZZZ.Set(&g2Gen.Z) + gfuzz := fuzzExtendedJacobianG2Affine(&g, a) + + var op1 G2Affine + op1.fromJacExtended(&gfuzz) + return op1.X.Equal(&g2Gen.X) && op1.Y.Equal(&g2Gen.Y) + }, + GenE2(), + )) + + properties.Property("[BLS12-39] Jacobian representation should be the same as the affine representative", prop.ForAll( + func(a fptower.E2) bool { + var g G2Jac + var op1 G2Affine + op1.X.Set(&g2Gen.X) + op1.Y.Set(&g2Gen.Y) + + var one fptower.E2 + one.SetOne() + + g.FromAffine(&op1) + + return g.X.Equal(&g2Gen.X) && g.Y.Equal(&g2Gen.Y) && g.Z.Equal(&one) + }, + GenE2(), + )) + + properties.Property("[BLS12-39] Converting 
affine symbol for infinity to Jacobian should output correct infinity in Jacobian", prop.ForAll( + func() bool { + var g G2Affine + g.X.SetZero() + g.Y.SetZero() + var op1 G2Jac + op1.FromAffine(&g) + var one, zero fptower.E2 + one.SetOne() + return op1.X.Equal(&one) && op1.Y.Equal(&one) && op1.Z.Equal(&zero) + }, + )) + + properties.Property("[BLS12-39] Converting infinity in extended Jacobian to affine should output infinity symbol in Affine", prop.ForAll( + func() bool { + var g G2Affine + var op1 g2JacExtended + var zero fptower.E2 + op1.X.Set(&g2Gen.X) + op1.Y.Set(&g2Gen.Y) + g.fromJacExtended(&op1) + return g.X.Equal(&zero) && g.Y.Equal(&zero) + }, + )) + + properties.Property("[BLS12-39] Converting infinity in extended Jacobian to Jacobian should output infinity in Jacobian", prop.ForAll( + func() bool { + var g G2Jac + var op1 g2JacExtended + var zero, one fptower.E2 + one.SetOne() + op1.X.Set(&g2Gen.X) + op1.Y.Set(&g2Gen.Y) + g.fromJacExtended(&op1) + return g.X.Equal(&one) && g.Y.Equal(&one) && g.Z.Equal(&zero) + }, + )) + + properties.Property("[BLS12-39] [Jacobian] Two representatives of the same class should be equal", prop.ForAll( + func(a, b fptower.E2) bool { + op1 := fuzzJacobianG2Affine(&g2Gen, a) + op2 := fuzzJacobianG2Affine(&g2Gen, b) + return op1.Equal(&op2) + }, + GenE2(), + GenE2(), + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) +} + +func TestG2AffineOps(t *testing.T) { + t.Parallel() + parameters := gopter.DefaultTestParameters() + parameters.MinSuccessfulTests = 10 + + properties := gopter.NewProperties(parameters) + + genScalar := GenFr() + + properties.Property("[BLS12-39] [Jacobian] Add should call double when having adding the same point", prop.ForAll( + func(a, b fptower.E2) bool { + fop1 := fuzzJacobianG2Affine(&g2Gen, a) + fop2 := fuzzJacobianG2Affine(&g2Gen, b) + var op1, op2 G2Jac + op1.Set(&fop1).AddAssign(&fop2) + op2.Double(&fop2) + return op1.Equal(&op2) + }, + GenE2(), + GenE2(), + )) + + 
properties.Property("[BLS12-39] [Jacobian] Adding the opposite of a point to itself should output inf", prop.ForAll( + func(a, b fptower.E2) bool { + fop1 := fuzzJacobianG2Affine(&g2Gen, a) + fop2 := fuzzJacobianG2Affine(&g2Gen, b) + fop2.Neg(&fop2) + fop1.AddAssign(&fop2) + return fop1.Equal(&g2Infinity) + }, + GenE2(), + GenE2(), + )) + + properties.Property("[BLS12-39] [Jacobian] Adding the inf to a point should not modify the point", prop.ForAll( + func(a fptower.E2) bool { + fop1 := fuzzJacobianG2Affine(&g2Gen, a) + fop1.AddAssign(&g2Infinity) + var op2 G2Jac + op2.Set(&g2Infinity) + op2.AddAssign(&g2Gen) + return fop1.Equal(&g2Gen) && op2.Equal(&g2Gen) + }, + GenE2(), + )) + + properties.Property("[BLS12-39] [Jacobian Extended] addMixed (-G) should equal subMixed(G)", prop.ForAll( + func(a fptower.E2) bool { + fop1 := fuzzJacobianG2Affine(&g2Gen, a) + var p1, p1Neg G2Affine + p1.FromJacobian(&fop1) + p1Neg = p1 + p1Neg.Y.Neg(&p1Neg.Y) + var o1, o2 g2JacExtended + o1.addMixed(&p1Neg) + o2.subMixed(&p1) + + return o1.X.Equal(&o2.X) && + o1.Y.Equal(&o2.Y) && + o1.ZZ.Equal(&o2.ZZ) && + o1.ZZZ.Equal(&o2.ZZZ) + }, + GenE2(), + )) + + properties.Property("[BLS12-39] [Jacobian Extended] doubleMixed (-G) should equal doubleNegMixed(G)", prop.ForAll( + func(a fptower.E2) bool { + fop1 := fuzzJacobianG2Affine(&g2Gen, a) + var p1, p1Neg G2Affine + p1.FromJacobian(&fop1) + p1Neg = p1 + p1Neg.Y.Neg(&p1Neg.Y) + var o1, o2 g2JacExtended + o1.doubleMixed(&p1Neg) + o2.doubleNegMixed(&p1) + + return o1.X.Equal(&o2.X) && + o1.Y.Equal(&o2.Y) && + o1.ZZ.Equal(&o2.ZZ) && + o1.ZZZ.Equal(&o2.ZZZ) + }, + GenE2(), + )) + + properties.Property("[BLS12-39] [Jacobian] Addmix the negation to itself should output 0", prop.ForAll( + func(a fptower.E2) bool { + fop1 := fuzzJacobianG2Affine(&g2Gen, a) + fop1.Neg(&fop1) + var op2 G2Affine + op2.FromJacobian(&g2Gen) + fop1.AddMixed(&op2) + return fop1.Equal(&g2Infinity) + }, + GenE2(), + )) + + properties.Property("[BLS12-39] scalar 
multiplication (double and add) should depend only on the scalar mod r", prop.ForAll( + func(s fr.Element) bool { + + r := fr.Modulus() + var g G2Jac + g.mulGLV(&g2Gen, r) + + var scalar, blindedScalar, rminusone big.Int + var op1, op2, op3, gneg G2Jac + rminusone.SetUint64(1).Sub(r, &rminusone) + op3.mulWindowed(&g2Gen, &rminusone) + gneg.Neg(&g2Gen) + s.ToBigIntRegular(&scalar) + blindedScalar.Mul(&scalar, r).Add(&blindedScalar, &scalar) + op1.mulWindowed(&g2Gen, &scalar) + op2.mulWindowed(&g2Gen, &blindedScalar) + + return op1.Equal(&op2) && g.Equal(&g2Infinity) && !op1.Equal(&g2Infinity) && gneg.Equal(&op3) + + }, + genScalar, + )) + + properties.Property("[BLS12-39] psi should map points from E' to itself", prop.ForAll( + func() bool { + var a G2Jac + a.psi(&g2Gen) + return a.IsOnCurve() && !a.Equal(&g2Gen) + }, + )) + + properties.Property("[BLS12-39] scalar multiplication (GLV) should depend only on the scalar mod r", prop.ForAll( + func(s fr.Element) bool { + + r := fr.Modulus() + var g G2Jac + g.mulGLV(&g2Gen, r) + + var scalar, blindedScalar, rminusone big.Int + var op1, op2, op3, gneg G2Jac + rminusone.SetUint64(1).Sub(r, &rminusone) + op3.ScalarMultiplication(&g2Gen, &rminusone) + gneg.Neg(&g2Gen) + s.ToBigIntRegular(&scalar) + blindedScalar.Mul(&scalar, r).Add(&blindedScalar, &scalar) + op1.ScalarMultiplication(&g2Gen, &scalar) + op2.ScalarMultiplication(&g2Gen, &blindedScalar) + + return op1.Equal(&op2) && g.Equal(&g2Infinity) && !op1.Equal(&g2Infinity) && gneg.Equal(&op3) + + }, + genScalar, + )) + + properties.Property("[BLS12-39] GLV and Double and Add should output the same result", prop.ForAll( + func(s fr.Element) bool { + + var r big.Int + var op1, op2 G2Jac + s.ToBigIntRegular(&r) + op1.mulWindowed(&g2Gen, &r) + op2.mulGLV(&g2Gen, &r) + return op1.Equal(&op2) && !op1.Equal(&g2Infinity) + + }, + genScalar, + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) +} + +func TestG2AffineCofactorCleaning(t *testing.T) { + t.Parallel() + 
parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + properties.Property("[BLS12-39] Clearing the cofactor of a random point should set it in the r-torsion", prop.ForAll( + func() bool { + var a, x, b fptower.E2 + a.SetRandom() + + x.Square(&a).Mul(&x, &a).Add(&x, &bTwistCurveCoeff) + for x.Legendre() != 1 { + a.SetRandom() + x.Square(&a).Mul(&x, &a).Add(&x, &bTwistCurveCoeff) + } + + b.Sqrt(&x) + var point, pointCleared, infinity G2Jac + point.X.Set(&a) + point.Y.Set(&b) + point.Z.SetOne() + pointCleared.ClearCofactor(&point) + infinity.Set(&g2Infinity) + return point.IsOnCurve() && pointCleared.IsInSubGroup() && !pointCleared.Equal(&infinity) + }, + )) + properties.TestingRun(t, gopter.ConsoleReporter(false)) + +} + +func TestG2AffineBatchScalarMultiplication(t *testing.T) { + + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzzShort + } + + properties := gopter.NewProperties(parameters) + + genScalar := GenFr() + + // size of the multiExps + const nbSamples = 10 + + properties.Property("[BLS12-39] BatchScalarMultiplication should be consistant with individual scalar multiplications", prop.ForAll( + func(mixer fr.Element) bool { + // mixer ensures that all the words of a fpElement are set + var sampleScalars [nbSamples]fr.Element + + for i := 1; i <= nbSamples; i++ { + sampleScalars[i-1].SetUint64(uint64(i)). + Mul(&sampleScalars[i-1], &mixer). 
+ FromMont() + } + + result := BatchScalarMultiplicationG2(&g2GenAff, sampleScalars[:]) + + if len(result) != len(sampleScalars) { + return false + } + + for i := 0; i < len(result); i++ { + var expectedJac G2Jac + var expected G2Affine + var b big.Int + expectedJac.mulGLV(&g2Gen, sampleScalars[i].ToBigInt(&b)) + expected.FromJacobian(&expectedJac) + if !result[i].Equal(&expected) { + return false + } + } + return true + }, + genScalar, + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) +} + +// ------------------------------------------------------------ +// benches + +func BenchmarkG2JacIsInSubGroup(b *testing.B) { + var a G2Jac + a.Set(&g2Gen) + b.ResetTimer() + for i := 0; i < b.N; i++ { + a.IsInSubGroup() + } + +} + +func BenchmarkG2AffineBatchScalarMul(b *testing.B) { + // ensure every words of the scalars are filled + var mixer fr.Element + mixer.SetString("7716837800905789770901243404444209691916730933998574719964609384059111546487") + + const pow = 15 + const nbSamples = 1 << pow + + var sampleScalars [nbSamples]fr.Element + + for i := 1; i <= nbSamples; i++ { + sampleScalars[i-1].SetUint64(uint64(i)). + Mul(&sampleScalars[i-1], &mixer). 
+ FromMont() + } + + for i := 5; i <= pow; i++ { + using := 1 << i + + b.Run(fmt.Sprintf("%d points", using), func(b *testing.B) { + b.ResetTimer() + for j := 0; j < b.N; j++ { + _ = BatchScalarMultiplicationG2(&g2GenAff, sampleScalars[:using]) + } + }) + } +} + +func BenchmarkG2JacScalarMul(b *testing.B) { + + var scalar big.Int + r := fr.Modulus() + scalar.SetString("5243587517512619047944770508185965837690552500527637822603658699938581184513", 10) + scalar.Add(&scalar, r) + + var doubleAndAdd G2Jac + + b.Run("double and add", func(b *testing.B) { + b.ResetTimer() + for j := 0; j < b.N; j++ { + doubleAndAdd.mulWindowed(&g2Gen, &scalar) + } + }) + + var glv G2Jac + b.Run("GLV", func(b *testing.B) { + b.ResetTimer() + for j := 0; j < b.N; j++ { + glv.mulGLV(&g2Gen, &scalar) + } + }) + +} + +func BenchmarkG2AffineCofactorClearing(b *testing.B) { + var a G2Jac + a.Set(&g2Gen) + for i := 0; i < b.N; i++ { + a.ClearCofactor(&a) + } +} + +func BenchmarkG2JacAdd(b *testing.B) { + var a G2Jac + a.Double(&g2Gen) + b.ResetTimer() + for i := 0; i < b.N; i++ { + a.AddAssign(&g2Gen) + } +} + +func BenchmarkG2JacAddMixed(b *testing.B) { + var a G2Jac + a.Double(&g2Gen) + + var c G2Affine + c.FromJacobian(&g2Gen) + b.ResetTimer() + for i := 0; i < b.N; i++ { + a.AddMixed(&c) + } + +} + +func BenchmarkG2JacDouble(b *testing.B) { + var a G2Jac + a.Set(&g2Gen) + b.ResetTimer() + for i := 0; i < b.N; i++ { + a.DoubleAssign() + } + +} + +func BenchmarkG2JacExtAddMixed(b *testing.B) { + var a g2JacExtended + a.doubleMixed(&g2GenAff) + + var c G2Affine + c.FromJacobian(&g2Gen) + b.ResetTimer() + for i := 0; i < b.N; i++ { + a.addMixed(&c) + } +} + +func BenchmarkG2JacExtSubMixed(b *testing.B) { + var a g2JacExtended + a.doubleMixed(&g2GenAff) + + var c G2Affine + c.FromJacobian(&g2Gen) + b.ResetTimer() + for i := 0; i < b.N; i++ { + a.subMixed(&c) + } +} + +func BenchmarkG2JacExtDoubleMixed(b *testing.B) { + var a g2JacExtended + a.doubleMixed(&g2GenAff) + + var c G2Affine + 
c.FromJacobian(&g2Gen) + b.ResetTimer() + for i := 0; i < b.N; i++ { + a.doubleMixed(&c) + } +} + +func BenchmarkG2JacExtDoubleNegMixed(b *testing.B) { + var a g2JacExtended + a.doubleMixed(&g2GenAff) + + var c G2Affine + c.FromJacobian(&g2Gen) + b.ResetTimer() + for i := 0; i < b.N; i++ { + a.doubleNegMixed(&c) + } +} + +func BenchmarkG2JacExtAdd(b *testing.B) { + var a, c g2JacExtended + a.doubleMixed(&g2GenAff) + c.double(&a) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + a.add(&c) + } +} + +func BenchmarkG2JacExtDouble(b *testing.B) { + var a g2JacExtended + a.doubleMixed(&g2GenAff) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + a.double(&a) + } +} + +func fuzzJacobianG2Affine(p *G2Jac, f fptower.E2) G2Jac { + var res G2Jac + res.X.Mul(&p.X, &f).Mul(&res.X, &f) + res.Y.Mul(&p.Y, &f).Mul(&res.Y, &f).Mul(&res.Y, &f) + res.Z.Mul(&p.Z, &f) + return res +} + +func fuzzExtendedJacobianG2Affine(p *g2JacExtended, f fptower.E2) g2JacExtended { + var res g2JacExtended + var ff, fff fptower.E2 + ff.Square(&f) + fff.Mul(&ff, &f) + res.X.Mul(&p.X, &ff) + res.Y.Mul(&p.Y, &fff) + res.ZZ.Mul(&p.ZZ, &ff) + res.ZZZ.Mul(&p.ZZZ, &fff) + return res +} diff --git a/ecc/bls12-39/hash_to_curve.go b/ecc/bls12-39/hash_to_curve.go new file mode 100644 index 0000000000..11b4e3aaf1 --- /dev/null +++ b/ecc/bls12-39/hash_to_curve.go @@ -0,0 +1,276 @@ +// Copyright 2020 ConsenSys AG +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package bls1239 + +import ( + "math/big" + + "github.com/consensys/gnark-crypto/ecc" + "github.com/consensys/gnark-crypto/ecc/bls12-39/fp" + "github.com/consensys/gnark-crypto/ecc/bls12-39/internal/fptower" +) + +// hashToFp hashes msg to count prime field elements. +// https://tools.ietf.org/html/draft-irtf-cfrg-hash-to-curve-06#section-5.2 +func hashToFp(msg, dst []byte, count int) ([]fp.Element, error) { + + // 128 bits of security + // L = ceil((ceil(log2(p)) + k) / 8), where k is the security parameter = 128 + L := 64 + + lenInBytes := count * L + pseudoRandomBytes, err := ecc.ExpandMsgXmd(msg, dst, lenInBytes) + if err != nil { + return nil, err + } + + res := make([]fp.Element, count) + for i := 0; i < count; i++ { + res[i].SetBytes(pseudoRandomBytes[i*L : (i+1)*L]) + } + return res, nil +} + +// returns false if u>-u when seen as a bigInt +func sign0(u fp.Element) bool { + var a, b big.Int + u.ToBigIntRegular(&a) + u.Neg(&u) + u.ToBigIntRegular(&b) + return a.Cmp(&b) <= 0 +} + +// ---------------------------------------------------------------------------------------- +// G1Affine + +// https://tools.ietf.org/html/draft-irtf-cfrg-hash-to-curve-06#section-4.1 +// Shallue and van de Woestijne method, works for any elliptic curve in Weierstrass curve +func svdwMapG1(u fp.Element) G1Affine { + + var res G1Affine + + // constants + // sage script to find z: https://tools.ietf.org/html/draft-irtf-cfrg-hash-to-curve-06#appendix-E.1 + var z, c1, c2, c3, c4 fp.Element + z.SetOne().Neg(&z) + c1.SetOne() + c2.SetString("163333666684") + c3.SetString("19405999803") + c4.SetString("108889111121") + + var tv1, tv2, tv3, tv4, one, x1, gx1, x2, gx2, x3, x, gx, y fp.Element + one.SetOne() + tv1.Square(&u).Mul(&tv1, &c1) + tv2.Add(&one, &tv1) + tv1.Sub(&one, &tv1) + tv3.Mul(&tv2, &tv1).Inverse(&tv3) + tv4.Mul(&u, &tv1) + tv4.Mul(&tv4, &tv3) + tv4.Mul(&tv4, &c3) + x1.Sub(&c2, &tv4) + gx1.Square(&x1) + // 12. 
gx1 = gx1 + A + gx1.Mul(&gx1, &x1) + gx1.Add(&gx1, &bCurveCoeff) + e1 := gx1.Legendre() + x2.Add(&c2, &tv4) + gx2.Square(&x2) + // 18. gx2 = gx2 + A + gx2.Mul(&gx2, &x2) + gx2.Add(&gx2, &bCurveCoeff) + e2 := gx2.Legendre() - e1 // 2 if is_square(gx2) AND NOT e1 + x3.Square(&tv2) + x3.Mul(&x3, &tv3) + x3.Square(&x3) + x3.Mul(&x3, &c4) + x3.Add(&x3, &z) + if e1 == 1 { + x.Set(&x1) + } else { + x.Set(&x3) + } + if e2 == 2 { + x.Set(&x2) + } + gx.Square(&x) + // gx = gx + A + gx.Mul(&gx, &x) + gx.Add(&gx, &bCurveCoeff) + y.Sqrt(&gx) + e3 := sign0(u) && sign0(y) + if !e3 { + y.Neg(&y) + } + res.X.Set(&x) + res.Y.Set(&y) + + return res +} + +// MapToCurveG1Svdw maps an fp.Element to a point on the curve using the Shallue and van de Woestijne map +// https://tools.ietf.org/html/draft-irtf-cfrg-hash-to-curve-06#section-2.2.1 +func MapToCurveG1Svdw(t fp.Element) G1Affine { + res := svdwMapG1(t) + res.ClearCofactor(&res) + return res +} + +// EncodeToCurveG1Svdw maps an fp.Element to a point on the curve using the Shallue and van de Woestijne map +// https://tools.ietf.org/html/draft-irtf-cfrg-hash-to-curve-06#section-2.2.2 +func EncodeToCurveG1Svdw(msg, dst []byte) (G1Affine, error) { + var res G1Affine + t, err := hashToFp(msg, dst, 1) + if err != nil { + return res, err + } + res = MapToCurveG1Svdw(t[0]) + return res, nil +} + +// HashToCurveG1Svdw maps an fp.Element to a point on the curve using the Shallue and van de Woestijne map +// https://tools.ietf.org/html/draft-irtf-cfrg-hash-to-curve-06#section-3 +func HashToCurveG1Svdw(msg, dst []byte) (G1Affine, error) { + var res G1Affine + u, err := hashToFp(msg, dst, 2) + if err != nil { + return res, err + } + Q0 := MapToCurveG1Svdw(u[0]) + Q1 := MapToCurveG1Svdw(u[1]) + var _Q0, _Q1, _res G1Jac + _Q0.FromAffine(&Q0) + _Q1.FromAffine(&Q1) + _res.Set(&_Q1).AddAssign(&_Q0) + res.FromJacobian(&_res) + return res, nil +} + +// ---------------------------------------------------------------------------------------- +// G2Affine 
+ +// https://tools.ietf.org/html/draft-irtf-cfrg-hash-to-curve-06#section-4.1 +// Shallue and van de Woestijne method, works for any elliptic curve in Weierstrass curve +func svdwMapG2(u fptower.E2) G2Affine { + + var res G2Affine + + // constants + // sage script to find z: https://tools.ietf.org/html/draft-irtf-cfrg-hash-to-curve-06#appendix-E.1 + var z, c1, c2, c3, c4 fptower.E2 + z.A0.SetZero() + z.A1.SetOne() + c1.A0.SetString("326667333366") + c1.A1.SetString("4") + c2.A0.SetZero() + c2.A1.SetString("163333666683") + c3.A0.SetString("260978509113") + c3.A1.SetString("249832021055") + c4.A0.SetString("181481851871") + c4.A1.SetString("254074592617") + + var tv1, tv2, tv3, tv4, one, x1, gx1, x2, gx2, x3, x, gx, y fptower.E2 + one.SetOne() + tv1.Square(&u).Mul(&tv1, &c1) + tv2.Add(&one, &tv1) + tv1.Sub(&one, &tv1) + tv3.Mul(&tv2, &tv1).Inverse(&tv3) + tv4.Mul(&u, &tv1) + tv4.Mul(&tv4, &tv3) + tv4.Mul(&tv4, &c3) + x1.Sub(&c2, &tv4) + gx1.Square(&x1) + // 12. gx1 = gx1 + A + gx1.Mul(&gx1, &x1) + gx1.Add(&gx1, &bTwistCurveCoeff) + e1 := gx1.Legendre() + x2.Add(&c2, &tv4) + gx2.Square(&x2) + // 18. 
gx2 = gx2 + A + gx2.Mul(&gx2, &x2) + gx2.Add(&gx2, &bTwistCurveCoeff) + e2 := gx2.Legendre() - e1 // 2 if is_square(gx2) AND NOT e1 + x3.Square(&tv2) + x3.Mul(&x3, &tv3) + x3.Square(&x3) + x3.Mul(&x3, &c4) + x3.Add(&x3, &z) + if e1 == 1 { + x.Set(&x1) + } else { + x.Set(&x3) + } + if e2 == 2 { + x.Set(&x2) + } + gx.Square(&x) + // gx = gx + A + gx.Mul(&gx, &x) + gx.Add(&gx, &bTwistCurveCoeff) + y.Sqrt(&gx) + e3 := sign0(u.A0) && sign0(y.A0) + if !e3 { + y.Neg(&y) + } + res.X.Set(&x) + res.Y.Set(&y) + + return res +} + +// MapToCurveG2Svdw maps an fp.Element to a point on the curve using the Shallue and van de Woestijne map +// https://tools.ietf.org/html/draft-irtf-cfrg-hash-to-curve-06#section-2.2.1 +func MapToCurveG2Svdw(t fptower.E2) G2Affine { + res := svdwMapG2(t) + res.ClearCofactor(&res) + return res +} + +// EncodeToCurveG2Svdw maps an fp.Element to a point on the curve using the Shallue and van de Woestijne map +// https://tools.ietf.org/html/draft-irtf-cfrg-hash-to-curve-06#section-2.2.2 +func EncodeToCurveG2Svdw(msg, dst []byte) (G2Affine, error) { + var res G2Affine + _t, err := hashToFp(msg, dst, 2) + if err != nil { + return res, err + } + var t fptower.E2 + t.A0.Set(&_t[0]) + t.A1.Set(&_t[1]) + res = MapToCurveG2Svdw(t) + return res, nil +} + +// HashToCurveG2Svdw maps an fp.Element to a point on the curve using the Shallue and van de Woestijne map +// https://tools.ietf.org/html/draft-irtf-cfrg-hash-to-curve-06#section-3 +func HashToCurveG2Svdw(msg, dst []byte) (G2Affine, error) { + var res G2Affine + u, err := hashToFp(msg, dst, 4) + if err != nil { + return res, err + } + var u0, u1 fptower.E2 + u0.A0.Set(&u[0]) + u0.A1.Set(&u[1]) + u1.A0.Set(&u[2]) + u1.A1.Set(&u[3]) + Q0 := MapToCurveG2Svdw(u0) + Q1 := MapToCurveG2Svdw(u1) + var _Q0, _Q1, _res G2Jac + _Q0.FromAffine(&Q0) + _Q1.FromAffine(&Q1) + _res.Set(&_Q1).AddAssign(&_Q0) + res.FromJacobian(&_res) + return res, nil +} diff --git a/ecc/bls12-39/internal/fptower/asm.go 
b/ecc/bls12-39/internal/fptower/asm.go new file mode 100644 index 0000000000..0ec192019d --- /dev/null +++ b/ecc/bls12-39/internal/fptower/asm.go @@ -0,0 +1,28 @@ +//go:build !noadx +// +build !noadx + +// Copyright 2020 ConsenSys Software Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by consensys/gnark-crypto DO NOT EDIT + +package fptower + +import "golang.org/x/sys/cpu" + +// supportAdx will be set only on amd64 that has MULX and ADDX instructions +var ( + supportAdx = cpu.X86.HasADX && cpu.X86.HasBMI2 + _ = supportAdx // used in asm +) diff --git a/ecc/bls12-39/internal/fptower/asm_noadx.go b/ecc/bls12-39/internal/fptower/asm_noadx.go new file mode 100644 index 0000000000..6a09c11c49 --- /dev/null +++ b/ecc/bls12-39/internal/fptower/asm_noadx.go @@ -0,0 +1,25 @@ +//go:build noadx +// +build noadx + +// Copyright 2020 ConsenSys Software Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Code generated by consensys/gnark-crypto DO NOT EDIT + +package fptower + +// note: this is needed for test purposes, as dynamically changing supportAdx doesn't flag +// certain errors (like fatal error: missing stackmap) +// this ensures we test all asm path. +var supportAdx = false diff --git a/ecc/bls12-39/internal/fptower/e12.go b/ecc/bls12-39/internal/fptower/e12.go new file mode 100644 index 0000000000..64ae043c64 --- /dev/null +++ b/ecc/bls12-39/internal/fptower/e12.go @@ -0,0 +1,516 @@ +// Copyright 2020 ConsenSys Software Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Code generated by consensys/gnark-crypto DO NOT EDIT + +package fptower + +import ( + "encoding/binary" + "errors" + "github.com/consensys/gnark-crypto/ecc/bls12-39/fp" + "math/big" +) + +// E12 is a degree two finite field extension of fp6 +type E12 struct { + C0, C1 E6 +} + +// Equal returns true if z equals x, fasle otherwise +func (z *E12) Equal(x *E12) bool { + return z.C0.Equal(&x.C0) && z.C1.Equal(&x.C1) +} + +// String puts E12 in string form +func (z *E12) String() string { + return (z.C0.String() + "+(" + z.C1.String() + ")*w") +} + +// SetString sets a E12 from string +func (z *E12) SetString(s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11 string) *E12 { + z.C0.SetString(s0, s1, s2, s3, s4, s5) + z.C1.SetString(s6, s7, s8, s9, s10, s11) + return z +} + +// Set copies x into z and returns z +func (z *E12) Set(x *E12) *E12 { + z.C0 = x.C0 + z.C1 = x.C1 + return z +} + +// SetOne sets z to 1 in Montgomery form and returns z +func (z *E12) SetOne() *E12 { + *z = E12{} + z.C0.B0.A0.SetOne() + return z +} + +// ToMont converts to Mont form +func (z *E12) ToMont() *E12 { + z.C0.ToMont() + z.C1.ToMont() + return z +} + +// FromMont converts from Mont form +func (z *E12) FromMont() *E12 { + z.C0.FromMont() + z.C1.FromMont() + return z +} + +// Add set z=x+y in E12 and return z +func (z *E12) Add(x, y *E12) *E12 { + z.C0.Add(&x.C0, &y.C0) + z.C1.Add(&x.C1, &y.C1) + return z +} + +// Sub sets z to x sub y and return z +func (z *E12) Sub(x, y *E12) *E12 { + z.C0.Sub(&x.C0, &y.C0) + z.C1.Sub(&x.C1, &y.C1) + return z +} + +// Double sets z=2*x and returns z +func (z *E12) Double(x *E12) *E12 { + z.C0.Double(&x.C0) + z.C1.Double(&x.C1) + return z +} + +// SetRandom used only in tests +func (z *E12) SetRandom() (*E12, error) { + if _, err := z.C0.SetRandom(); err != nil { + return nil, err + } + if _, err := z.C1.SetRandom(); err != nil { + return nil, err + } + return z, nil +} + +// Mul set z=x*y in E12 and return z +func (z *E12) Mul(x, y *E12) *E12 { + var a, 
b, c E6 + a.Add(&x.C0, &x.C1) + b.Add(&y.C0, &y.C1) + a.Mul(&a, &b) + b.Mul(&x.C0, &y.C0) + c.Mul(&x.C1, &y.C1) + z.C1.Sub(&a, &b).Sub(&z.C1, &c) + z.C0.MulByNonResidue(&c).Add(&z.C0, &b) + return z +} + +// Square set z=x*x in E12 and return z +func (z *E12) Square(x *E12) *E12 { + + //Algorithm 22 from https://eprint.iacr.org/2010/354.pdf + var c0, c2, c3 E6 + c0.Sub(&x.C0, &x.C1) + c3.MulByNonResidue(&x.C1).Neg(&c3).Add(&x.C0, &c3) + c2.Mul(&x.C0, &x.C1) + c0.Mul(&c0, &c3).Add(&c0, &c2) + z.C1.Double(&c2) + c2.MulByNonResidue(&c2) + z.C0.Add(&c0, &c2) + + return z +} + +// Karabina's compressed cyclotomic square +// https://eprint.iacr.org/2010/542.pdf +// Th. 3.2 with minor modifications to fit our tower +func (z *E12) CyclotomicSquareCompressed(x *E12) *E12 { + + var t [7]E2 + + // t0 = g1^2 + t[0].Square(&x.C0.B1) + // t1 = g5^2 + t[1].Square(&x.C1.B2) + // t5 = g1 + g5 + t[5].Add(&x.C0.B1, &x.C1.B2) + // t2 = (g1 + g5)^2 + t[2].Square(&t[5]) + + // t3 = g1^2 + g5^2 + t[3].Add(&t[0], &t[1]) + // t5 = 2 * g1 * g5 + t[5].Sub(&t[2], &t[3]) + + // t6 = g3 + g2 + t[6].Add(&x.C1.B0, &x.C0.B2) + // t3 = (g3 + g2)^2 + t[3].Square(&t[6]) + // t2 = g3^2 + t[2].Square(&x.C1.B0) + + // t6 = 2 * nr * g1 * g5 + t[6].MulByNonResidue(&t[5]) + // t5 = 4 * nr * g1 * g5 + 2 * g3 + t[5].Add(&t[6], &x.C1.B0). 
+ Double(&t[5]) + // z3 = 6 * nr * g1 * g5 + 2 * g3 + z.C1.B0.Add(&t[5], &t[6]) + + // t4 = nr * g5^2 + t[4].MulByNonResidue(&t[1]) + // t5 = nr * g5^2 + g1^2 + t[5].Add(&t[0], &t[4]) + // t6 = nr * g5^2 + g1^2 - g2 + t[6].Sub(&t[5], &x.C0.B2) + + // t1 = g2^2 + t[1].Square(&x.C0.B2) + + // t6 = 2 * nr * g5^2 + 2 * g1^2 - 2*g2 + t[6].Double(&t[6]) + // z2 = 3 * nr * g5^2 + 3 * g1^2 - 2*g2 + z.C0.B2.Add(&t[6], &t[5]) + + // t4 = nr * g2^2 + t[4].MulByNonResidue(&t[1]) + // t5 = g3^2 + nr * g2^2 + t[5].Add(&t[2], &t[4]) + // t6 = g3^2 + nr * g2^2 - g1 + t[6].Sub(&t[5], &x.C0.B1) + // t6 = 2 * g3^2 + 2 * nr * g2^2 - 2 * g1 + t[6].Double(&t[6]) + // z1 = 3 * g3^2 + 3 * nr * g2^2 - 2 * g1 + z.C0.B1.Add(&t[6], &t[5]) + + // t0 = g2^2 + g3^2 + t[0].Add(&t[2], &t[1]) + // t5 = 2 * g3 * g2 + t[5].Sub(&t[3], &t[0]) + // t6 = 2 * g3 * g2 + g5 + t[6].Add(&t[5], &x.C1.B2) + // t6 = 4 * g3 * g2 + 2 * g5 + t[6].Double(&t[6]) + // z5 = 6 * g3 * g2 + 2 * g5 + z.C1.B2.Add(&t[5], &t[6]) + + return z +} + +// Decompress Karabina's cyclotomic square result +func (z *E12) Decompress(x *E12) *E12 { + + var t [3]E2 + var one E2 + one.SetOne() + + // t0 = g1^2 + t[0].Square(&x.C0.B1) + // t1 = 3 * g1^2 - 2 * g2 + t[1].Sub(&t[0], &x.C0.B2). + Double(&t[1]). + Add(&t[1], &t[0]) + // t0 = E * g5^2 + t1 + t[2].Square(&x.C1.B2) + t[0].MulByNonResidue(&t[2]). + Add(&t[0], &t[1]) + // t1 = 1/(4 * g3) + t[1].Double(&x.C1.B0). + Double(&t[1]). + Inverse(&t[1]) // costly + // z4 = g4 + z.C1.B1.Mul(&t[0], &t[1]) + + // t1 = g2 * g1 + t[1].Mul(&x.C0.B2, &x.C0.B1) + // t2 = 2 * g4^2 - 3 * g2 * g1 + t[2].Square(&z.C1.B1). + Sub(&t[2], &t[1]). + Double(&t[2]). + Sub(&t[2], &t[1]) + // t1 = g3 * g5 + t[1].Mul(&x.C1.B0, &x.C1.B2) + // c_0 = E * (2 * g4^2 + g3 * g5 - 3 * g2 * g1) + 1 + t[2].Add(&t[2], &t[1]) + z.C0.B0.MulByNonResidue(&t[2]). 
+ Add(&z.C0.B0, &one) + + z.C0.B1.Set(&x.C0.B1) + z.C0.B2.Set(&x.C0.B2) + z.C1.B0.Set(&x.C1.B0) + z.C1.B2.Set(&x.C1.B2) + + return z +} + +// BatchDecompress multiple Karabina's cyclotomic square results +func BatchDecompress(x []E12) []E12 { + + n := len(x) + if n == 0 { + return x + } + + t0 := make([]E2, n) + t1 := make([]E2, n) + t2 := make([]E2, n) + + var one E2 + one.SetOne() + + for i := 0; i < n; i++ { + // t0 = g1^2 + t0[i].Square(&x[i].C0.B1) + // t1 = 3 * g1^2 - 2 * g2 + t1[i].Sub(&t0[i], &x[i].C0.B2). + Double(&t1[i]). + Add(&t1[i], &t0[i]) + // t0 = E * g5^2 + t1 + t2[i].Square(&x[i].C1.B2) + t0[i].MulByNonResidue(&t2[i]). + Add(&t0[i], &t1[i]) + // t1 = 4 * g3 + t1[i].Double(&x[i].C1.B0). + Double(&t1[i]) + } + + t1 = BatchInvert(t1) // costs 1 inverse + + for i := 0; i < n; i++ { + // z4 = g4 + x[i].C1.B1.Mul(&t0[i], &t1[i]) + + // t1 = g2 * g1 + t1[i].Mul(&x[i].C0.B2, &x[i].C0.B1) + // t2 = 2 * g4^2 - 3 * g2 * g1 + t2[i].Square(&x[i].C1.B1) + t2[i].Sub(&t2[i], &t1[i]) + t2[i].Double(&t2[i]) + t2[i].Sub(&t2[i], &t1[i]) + + // t1 = g3 * g5 + t1[i].Mul(&x[i].C1.B0, &x[i].C1.B2) + // z0 = E * (2 * g4^2 + g3 * g5 - 3 * g2 * g1) + 1 + t2[i].Add(&t2[i], &t1[i]) + x[i].C0.B0.MulByNonResidue(&t2[i]). 
+ Add(&x[i].C0.B0, &one) + } + + return x +} + +// Granger-Scott's cyclotomic square +// https://eprint.iacr.org/2009/565.pdf, 3.2 +func (z *E12) CyclotomicSquare(x *E12) *E12 { + + // x=(x0,x1,x2,x3,x4,x5,x6,x7) in E2^6 + // cyclosquare(x)=(3*x4^2*u + 3*x0^2 - 2*x0, + // 3*x2^2*u + 3*x3^2 - 2*x1, + // 3*x5^2*u + 3*x1^2 - 2*x2, + // 6*x1*x5*u + 2*x3, + // 6*x0*x4 + 2*x4, + // 6*x2*x3 + 2*x5) + + var t [9]E2 + + t[0].Square(&x.C1.B1) + t[1].Square(&x.C0.B0) + t[6].Add(&x.C1.B1, &x.C0.B0).Square(&t[6]).Sub(&t[6], &t[0]).Sub(&t[6], &t[1]) // 2*x4*x0 + t[2].Square(&x.C0.B2) + t[3].Square(&x.C1.B0) + t[7].Add(&x.C0.B2, &x.C1.B0).Square(&t[7]).Sub(&t[7], &t[2]).Sub(&t[7], &t[3]) // 2*x2*x3 + t[4].Square(&x.C1.B2) + t[5].Square(&x.C0.B1) + t[8].Add(&x.C1.B2, &x.C0.B1).Square(&t[8]).Sub(&t[8], &t[4]).Sub(&t[8], &t[5]).MulByNonResidue(&t[8]) // 2*x5*x1*u + + t[0].MulByNonResidue(&t[0]).Add(&t[0], &t[1]) // x4^2*u + x0^2 + t[2].MulByNonResidue(&t[2]).Add(&t[2], &t[3]) // x2^2*u + x3^2 + t[4].MulByNonResidue(&t[4]).Add(&t[4], &t[5]) // x5^2*u + x1^2 + + z.C0.B0.Sub(&t[0], &x.C0.B0).Double(&z.C0.B0).Add(&z.C0.B0, &t[0]) + z.C0.B1.Sub(&t[2], &x.C0.B1).Double(&z.C0.B1).Add(&z.C0.B1, &t[2]) + z.C0.B2.Sub(&t[4], &x.C0.B2).Double(&z.C0.B2).Add(&z.C0.B2, &t[4]) + + z.C1.B0.Add(&t[8], &x.C1.B0).Double(&z.C1.B0).Add(&z.C1.B0, &t[8]) + z.C1.B1.Add(&t[6], &x.C1.B1).Double(&z.C1.B1).Add(&z.C1.B1, &t[6]) + z.C1.B2.Add(&t[7], &x.C1.B2).Double(&z.C1.B2).Add(&z.C1.B2, &t[7]) + + return z +} + +// Inverse set z to the inverse of x in E12 and return z +func (z *E12) Inverse(x *E12) *E12 { + // Algorithm 23 from https://eprint.iacr.org/2010/354.pdf + + var t0, t1, tmp E6 + t0.Square(&x.C0) + t1.Square(&x.C1) + tmp.MulByNonResidue(&t1) + t0.Sub(&t0, &tmp) + t1.Inverse(&t0) + z.C0.Mul(&x.C0, &t1) + z.C1.Mul(&x.C1, &t1).Neg(&z.C1) + + return z +} + +// Exp sets z=x**e and returns it +func (z *E12) Exp(x *E12, e big.Int) *E12 { + var res E12 + res.SetOne() + b := e.Bytes() + for i := range b { + w 
:= b[i] + mask := byte(0x80) + for j := 7; j >= 0; j-- { + res.Square(&res) + if (w&mask)>>j != 0 { + res.Mul(&res, x) + } + mask = mask >> 1 + } + } + z.Set(&res) + return z +} + +// InverseUnitary inverse a unitary element +func (z *E12) InverseUnitary(x *E12) *E12 { + return z.Conjugate(x) +} + +// Conjugate set z to x conjugated and return z +func (z *E12) Conjugate(x *E12) *E12 { + *z = *x + z.C1.Neg(&z.C1) + return z +} + +// SizeOfGT represents the size in bytes that a GT element need in binary form +const SizeOfGT = 8 * 12 + +// Marshal converts z to a byte slice +func (z *E12) Marshal() []byte { + b := z.Bytes() + return b[:] +} + +// Unmarshal is an allias to SetBytes() +func (z *E12) Unmarshal(buf []byte) error { + return z.SetBytes(buf) +} + +// Bytes returns the regular (non montgomery) value +// of z as a big-endian byte array. +// z.C1.B2.A1 | z.C1.B2.A0 | z.C1.B1.A1 | ... +func (z *E12) Bytes() (r [SizeOfGT]byte) { + _z := *z + _z.FromMont() + binary.BigEndian.PutUint64(r[88:96], _z.C0.B0.A0[0]) + + binary.BigEndian.PutUint64(r[80:88], _z.C0.B0.A1[0]) + + binary.BigEndian.PutUint64(r[72:80], _z.C0.B1.A0[0]) + + binary.BigEndian.PutUint64(r[64:72], _z.C0.B1.A1[0]) + + binary.BigEndian.PutUint64(r[56:64], _z.C0.B2.A0[0]) + + binary.BigEndian.PutUint64(r[48:56], _z.C0.B2.A1[0]) + + binary.BigEndian.PutUint64(r[40:48], _z.C1.B0.A0[0]) + + binary.BigEndian.PutUint64(r[32:40], _z.C1.B0.A1[0]) + + binary.BigEndian.PutUint64(r[24:32], _z.C1.B1.A0[0]) + + binary.BigEndian.PutUint64(r[16:24], _z.C1.B1.A1[0]) + + binary.BigEndian.PutUint64(r[8:16], _z.C1.B2.A0[0]) + + binary.BigEndian.PutUint64(r[0:8], _z.C1.B2.A1[0]) + + return +} + +// SetBytes interprets e as the bytes of a big-endian GT +// sets z to that value (in Montgomery form), and returns z. +// size(e) == 8 * 12 +// z.C1.B2.A1 | z.C1.B2.A0 | z.C1.B1.A1 | ... 
+func (z *E12) SetBytes(e []byte) error { + if len(e) != SizeOfGT { + return errors.New("invalid buffer size") + } + z.C0.B0.A0.SetBytes(e[88 : 88+fp.Bytes]) + + z.C0.B0.A1.SetBytes(e[80 : 80+fp.Bytes]) + + z.C0.B1.A0.SetBytes(e[72 : 72+fp.Bytes]) + + z.C0.B1.A1.SetBytes(e[64 : 64+fp.Bytes]) + + z.C0.B2.A0.SetBytes(e[56 : 56+fp.Bytes]) + + z.C0.B2.A1.SetBytes(e[48 : 48+fp.Bytes]) + + z.C1.B0.A0.SetBytes(e[40 : 40+fp.Bytes]) + + z.C1.B0.A1.SetBytes(e[32 : 32+fp.Bytes]) + + z.C1.B1.A0.SetBytes(e[24 : 24+fp.Bytes]) + + z.C1.B1.A1.SetBytes(e[16 : 16+fp.Bytes]) + + z.C1.B2.A0.SetBytes(e[8 : 8+fp.Bytes]) + + z.C1.B2.A1.SetBytes(e[0 : 0+fp.Bytes]) + + return nil +} + +// IsInSubGroup ensures GT/E12 is in correct sugroup +func (z *E12) IsInSubGroup() bool { + var a, b E12 + + // check z^(Phi_k(p)) == 1 + a.FrobeniusSquare(z) + b.FrobeniusSquare(&a).Mul(&b, z) + + if !a.Equal(&b) { + return false + } + + // check z^(p+1-t) == 1 + a.Frobenius(z) + b.Expt(z) + + return a.Equal(&b) +} + +func (z *E12) Select(cond int, caseZ *E12, caseNz *E12) *E12 { + //Might be able to save a nanosecond or two by an aggregate implementation + + z.C0.Select(cond, &caseZ.C0, &caseNz.C0) + z.C1.Select(cond, &caseZ.C1, &caseNz.C1) + + return z +} + +func (z *E12) Div(x *E12, y *E12) *E12 { + var r E12 + r.Inverse(y).Mul(x, &r) + return z.Set(&r) +} diff --git a/ecc/bls12-39/internal/fptower/e12_pairing.go b/ecc/bls12-39/internal/fptower/e12_pairing.go new file mode 100644 index 0000000000..70daafd7cd --- /dev/null +++ b/ecc/bls12-39/internal/fptower/e12_pairing.go @@ -0,0 +1,49 @@ +package fptower + +func (z *E12) nSquareCompressed(n int) { + for i := 0; i < n; i++ { + z.CyclotomicSquareCompressed(z) + } +} + +func (z *E12) nSquare(n int) { + for i := 0; i < n; i++ { + z.CyclotomicSquare(z) + } +} + +// Expt set z to x^t in E12 and return z +func (z *E12) Expt(x *E12) *E12 { + + var result E12 + + result.Set(x) + result.nSquare(1) + result.Mul(&result, x) + result.nSquare(3) + result.Mul(&result, 
x) + result.nSquare(2) + + z.Set(&result) + return z +} + +// MulBy034 multiplication by sparse element (c0,0,0,c3,c4,0) +func (z *E12) MulBy034(c0, c3, c4 *E2) *E12 { + + var a, b, d E6 + + a.MulByE2(&z.C0, c0) + + b.Set(&z.C1) + b.MulBy01(c3, c4) + + c0.Add(c0, c3) + d.Add(&z.C0, &z.C1) + d.MulBy01(c0, c4) + + z.C1.Add(&a, &b).Neg(&z.C1).Add(&z.C1, &d) + z.C0.MulByNonResidue(&b).Add(&z.C0, &a) + + return z +} diff --git a/ecc/bls12-39/internal/fptower/e12_test.go b/ecc/bls12-39/internal/fptower/e12_test.go new file mode 100644 index 0000000000..87101dfcbd --- /dev/null +++ b/ecc/bls12-39/internal/fptower/e12_test.go @@ -0,0 +1,519 @@ +// Copyright 2020 ConsenSys Software Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Code generated by consensys/gnark-crypto DO NOT EDIT + +package fptower + +import ( + "testing" + + "github.com/consensys/gnark-crypto/ecc/bls12-39/fp" + "github.com/leanovate/gopter" + "github.com/leanovate/gopter/prop" +) + +// ------------------------------------------------------------ +// tests + +func TestE12Serialization(t *testing.T) { + + t.Parallel() + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := GenE12() + + properties.Property("[BLS12-39] SetBytes(Bytes()) should stay constant", prop.ForAll( + func(a *E12) bool { + var b E12 + buf := a.Bytes() + if err := b.SetBytes(buf[:]); err != nil { + return false + } + return a.Equal(&b) + }, + genA, + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) +} + +func TestE12ReceiverIsOperand(t *testing.T) { + + parameters := gopter.DefaultTestParameters() + parameters.MinSuccessfulTests = 100 + + properties := gopter.NewProperties(parameters) + + genA := GenE12() + genB := GenE12() + + properties.Property("[BLS12-39] Having the receiver as operand (addition) should output the same result", prop.ForAll( + func(a, b *E12) bool { + var c, d E12 + d.Set(a) + c.Add(a, b) + a.Add(a, b) + b.Add(&d, b) + return a.Equal(b) && a.Equal(&c) && b.Equal(&c) + }, + genA, + genB, + )) + + properties.Property("[BLS12-39] Having the receiver as operand (sub) should output the same result", prop.ForAll( + func(a, b *E12) bool { + var c, d E12 + d.Set(a) + c.Sub(a, b) + a.Sub(a, b) + b.Sub(&d, b) + return a.Equal(b) && a.Equal(&c) && b.Equal(&c) + }, + genA, + genB, + )) + + properties.Property("[BLS12-39] Having the receiver as operand (mul) should output the same result", prop.ForAll( + func(a, b *E12) bool { + var c, d E12 + d.Set(a) + c.Mul(a, b) + a.Mul(a, b) + b.Mul(&d, b) + return a.Equal(b) && a.Equal(&c) && b.Equal(&c) + }, + 
genA, + genB, + )) + + properties.Property("[BLS12-39] Having the receiver as operand (square) should output the same result", prop.ForAll( + func(a *E12) bool { + var b E12 + b.Square(a) + a.Square(a) + return a.Equal(&b) + }, + genA, + )) + + properties.Property("[BLS12-39] Having the receiver as operand (double) should output the same result", prop.ForAll( + func(a *E12) bool { + var b E12 + b.Double(a) + a.Double(a) + return a.Equal(&b) + }, + genA, + )) + + properties.Property("[BLS12-39] Having the receiver as operand (Inverse) should output the same result", prop.ForAll( + func(a *E12) bool { + var b E12 + b.Inverse(a) + a.Inverse(a) + return a.Equal(&b) + }, + genA, + )) + + properties.Property("[BLS12-39] Having the receiver as operand (Cyclotomic square) should output the same result", prop.ForAll( + func(a *E12) bool { + var b E12 + b.CyclotomicSquare(a) + a.CyclotomicSquare(a) + return a.Equal(&b) + }, + genA, + )) + + properties.Property("[BLS12-39] Having the receiver as operand (Conjugate) should output the same result", prop.ForAll( + func(a *E12) bool { + var b E12 + b.Conjugate(a) + a.Conjugate(a) + return a.Equal(&b) + }, + genA, + )) + + properties.Property("[BLS12-39] Having the receiver as operand (Frobenius) should output the same result", prop.ForAll( + func(a *E12) bool { + var b E12 + b.Frobenius(a) + a.Frobenius(a) + return a.Equal(&b) + }, + genA, + )) + + properties.Property("[BLS12-39] Having the receiver as operand (FrobeniusSquare) should output the same result", prop.ForAll( + func(a *E12) bool { + var b E12 + b.FrobeniusSquare(a) + a.FrobeniusSquare(a) + return a.Equal(&b) + }, + genA, + )) + + properties.Property("[BLS12-39] Having the receiver as operand (FrobeniusCube) should output the same result", prop.ForAll( + func(a *E12) bool { + var b E12 + b.FrobeniusCube(a) + a.FrobeniusCube(a) + return a.Equal(&b) + }, + genA, + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) +} + +func TestE12Ops(t *testing.T) { + + 
parameters := gopter.DefaultTestParameters() + parameters.MinSuccessfulTests = 100 + + properties := gopter.NewProperties(parameters) + + genA := GenE12() + genB := GenE12() + + properties.Property("[BLS12-39] sub & add should leave an element invariant", prop.ForAll( + func(a, b *E12) bool { + var c E12 + c.Set(a) + c.Add(&c, b).Sub(&c, b) + return c.Equal(a) + }, + genA, + genB, + )) + + properties.Property("[BLS12-39] mul & inverse should leave an element invariant", prop.ForAll( + func(a, b *E12) bool { + var c, d E12 + d.Inverse(b) + c.Set(a) + c.Mul(&c, b).Mul(&c, &d) + return c.Equal(a) + }, + genA, + genB, + )) + + properties.Property("[BLS12-39] inverse twice should leave an element invariant", prop.ForAll( + func(a *E12) bool { + var b E12 + b.Inverse(a).Inverse(&b) + return a.Equal(&b) + }, + genA, + )) + + properties.Property("[BLS12-39] square and mul should output the same result", prop.ForAll( + func(a *E12) bool { + var b, c E12 + b.Mul(a, a) + c.Square(a) + return b.Equal(&c) + }, + genA, + )) + + properties.Property("[BLS12-39] a + pi(a), a-pi(a) should be real", prop.ForAll( + func(a *E12) bool { + var b, c, d E12 + var e, f, g E6 + b.Conjugate(a) + c.Add(a, &b) + d.Sub(a, &b) + e.Double(&a.C0) + f.Double(&a.C1) + return c.C1.Equal(&g) && d.C0.Equal(&g) && e.Equal(&c.C0) && f.Equal(&d.C1) + }, + genA, + )) + + properties.Property("[BLS12-39] pi**12=id", prop.ForAll( + func(a *E12) bool { + var b E12 + b.Frobenius(a). + Frobenius(&b). + Frobenius(&b). + Frobenius(&b). + Frobenius(&b). + Frobenius(&b). + Frobenius(&b). + Frobenius(&b). + Frobenius(&b). + Frobenius(&b). + Frobenius(&b). + Frobenius(&b) + return b.Equal(a) + }, + genA, + )) + + properties.Property("[BLS12-39] (pi**2)**6=id", prop.ForAll( + func(a *E12) bool { + var b E12 + b.FrobeniusSquare(a). + FrobeniusSquare(&b). + FrobeniusSquare(&b). + FrobeniusSquare(&b). + FrobeniusSquare(&b). 
+ FrobeniusSquare(&b) + return b.Equal(a) + }, + genA, + )) + + properties.Property("[BLS12-39] (pi**3)**4=id", prop.ForAll( + func(a *E12) bool { + var b E12 + b.FrobeniusCube(a). + FrobeniusCube(&b). + FrobeniusCube(&b). + FrobeniusCube(&b) + return b.Equal(a) + }, + genA, + )) + + properties.Property("[BLS12-39] cyclotomic square (Granger-Scott) and square should be the same in the cyclotomic subgroup", prop.ForAll( + func(a *E12) bool { + var b, c, d E12 + b.Conjugate(a) + a.Inverse(a) + b.Mul(&b, a) + a.FrobeniusSquare(&b).Mul(a, &b) + c.Square(a) + d.CyclotomicSquare(a) + return c.Equal(&d) + }, + genA, + )) + + properties.Property("[BLS12-39] compressed cyclotomic square (Karabina) and square should be the same in the cyclotomic subgroup", prop.ForAll( + func(a *E12) bool { + var b, c, d E12 + b.Conjugate(a) + a.Inverse(a) + b.Mul(&b, a) + a.FrobeniusSquare(&b).Mul(a, &b) + c.Square(a) + d.CyclotomicSquareCompressed(a).Decompress(&d) + return c.Equal(&d) + }, + genA, + )) + + properties.Property("[BLS12-39] batch decompress and individual decompress (Karabina) should be the same", prop.ForAll( + func(a *E12) bool { + var b E12 + // put in the cyclotomic subgroup + b.Conjugate(a) + a.Inverse(a) + b.Mul(&b, a) + a.FrobeniusSquare(&b).Mul(a, &b) + + var a2, a4, a17 E12 + a2.Set(a) + a4.Set(a) + a17.Set(a) + a2.nSquareCompressed(2) + a4.nSquareCompressed(4) + a17.nSquareCompressed(17) + batch := BatchDecompress([]E12{a2, a4, a17}) + a2.Decompress(&a2) + a4.Decompress(&a4) + a17.Decompress(&a17) + + return a2.Equal(&batch[0]) && a4.Equal(&batch[1]) && a17.Equal(&batch[2]) + }, + genA, + )) + + properties.Property("[BLS12-39] Frobenius of x in E12 should be equal to x^q", prop.ForAll( + func(a *E12) bool { + var b, c E12 + q := fp.Modulus() + b.Frobenius(a) + c.Exp(a, *q) + return c.Equal(&b) + }, + genA, + )) + + properties.Property("[BLS12-39] FrobeniusSquare of x in E12 should be equal to x^(q^2)", prop.ForAll( + func(a *E12) bool { + var b, c E12 + q := 
fp.Modulus() + b.FrobeniusSquare(a) + c.Exp(a, *q).Exp(&c, *q) + return c.Equal(&b) + }, + genA, + )) + + properties.Property("[BLS12-39] FrobeniusCube of x in E12 should be equal to x^(q^3)", prop.ForAll( + func(a *E12) bool { + var b, c E12 + q := fp.Modulus() + b.FrobeniusCube(a) + c.Exp(a, *q).Exp(&c, *q).Exp(&c, *q) + return c.Equal(&b) + }, + genA, + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) + +} + +// ------------------------------------------------------------ +// benches + +func BenchmarkE12Add(b *testing.B) { + var a, c E12 + a.SetRandom() + c.SetRandom() + b.ResetTimer() + for i := 0; i < b.N; i++ { + a.Add(&a, &c) + } +} + +func BenchmarkE12Sub(b *testing.B) { + var a, c E12 + a.SetRandom() + c.SetRandom() + b.ResetTimer() + for i := 0; i < b.N; i++ { + a.Sub(&a, &c) + } +} + +func BenchmarkE12Mul(b *testing.B) { + var a, c E12 + a.SetRandom() + c.SetRandom() + b.ResetTimer() + for i := 0; i < b.N; i++ { + a.Mul(&a, &c) + } +} + +func BenchmarkE12Cyclosquare(b *testing.B) { + var a E12 + a.SetRandom() + b.ResetTimer() + for i := 0; i < b.N; i++ { + a.CyclotomicSquare(&a) + } +} + +func BenchmarkE12Square(b *testing.B) { + var a E12 + a.SetRandom() + b.ResetTimer() + for i := 0; i < b.N; i++ { + a.Square(&a) + } +} + +func BenchmarkE12Inverse(b *testing.B) { + var a E12 + a.SetRandom() + b.ResetTimer() + for i := 0; i < b.N; i++ { + a.Inverse(&a) + } +} + +func BenchmarkE12Conjugate(b *testing.B) { + var a E12 + a.SetRandom() + b.ResetTimer() + for i := 0; i < b.N; i++ { + a.Conjugate(&a) + } +} + +func BenchmarkE12Frobenius(b *testing.B) { + var a E12 + a.SetRandom() + b.ResetTimer() + for i := 0; i < b.N; i++ { + a.Frobenius(&a) + } +} + +func BenchmarkE12FrobeniusSquare(b *testing.B) { + var a E12 + a.SetRandom() + b.ResetTimer() + for i := 0; i < b.N; i++ { + a.FrobeniusSquare(&a) + } +} + +func BenchmarkE12FrobeniusCube(b *testing.B) { + var a E12 + a.SetRandom() + b.ResetTimer() + for i := 0; i < b.N; i++ { + 
a.FrobeniusCube(&a) + } +} + +func BenchmarkE12Expt(b *testing.B) { + var a E12 + a.SetRandom() + b.ResetTimer() + for i := 0; i < b.N; i++ { + a.Expt(&a) + } +} + +func TestE12Div(t *testing.T) { + + parameters := gopter.DefaultTestParameters() + properties := gopter.NewProperties(parameters) + + genA := GenE12() + genB := GenE12() + + properties.Property("[BLS12-39] dividing then multiplying by the same element does nothing", prop.ForAll( + func(a, b *E12) bool { + var c E12 + c.Div(a, b) + c.Mul(&c, b) + return c.Equal(a) + }, + genA, + genB, + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) +} diff --git a/ecc/bls12-39/internal/fptower/e2.go b/ecc/bls12-39/internal/fptower/e2.go new file mode 100644 index 0000000000..4e2c2da218 --- /dev/null +++ b/ecc/bls12-39/internal/fptower/e2.go @@ -0,0 +1,278 @@ +// Copyright 2020 ConsenSys Software Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Code generated by consensys/gnark-crypto DO NOT EDIT + +package fptower + +import ( + "github.com/consensys/gnark-crypto/ecc/bls12-39/fp" + "math/big" +) + +// E2 is a degree two finite field extension of fp.Element +type E2 struct { + A0, A1 fp.Element +} + +// Equal returns true if z equals x, false otherwise +func (z *E2) Equal(x *E2) bool { + return z.A0.Equal(&x.A0) && z.A1.Equal(&x.A1) +} + +// Cmp compares (lexicographic order) z and x and returns: +// +// -1 if z < x +// 0 if z == x +// +1 if z > x +// +func (z *E2) Cmp(x *E2) int { + if a1 := z.A1.Cmp(&x.A1); a1 != 0 { + return a1 + } + return z.A0.Cmp(&x.A0) +} + +// LexicographicallyLargest returns true if this element is strictly lexicographically +// larger than its negation, false otherwise +func (z *E2) LexicographicallyLargest() bool { + // adapted from github.com/zkcrypto/bls12_381 + if z.A1.IsZero() { + return z.A0.LexicographicallyLargest() + } + return z.A1.LexicographicallyLargest() +} + +// SetString sets a E2 element from strings +func (z *E2) SetString(s1, s2 string) *E2 { + z.A0.SetString(s1) + z.A1.SetString(s2) + return z +} + +// SetZero sets an E2 elmt to zero +func (z *E2) SetZero() *E2 { + z.A0.SetZero() + z.A1.SetZero() + return z +} + +// Set sets an E2 from x +func (z *E2) Set(x *E2) *E2 { + z.A0 = x.A0 + z.A1 = x.A1 + return z +} + +// SetOne sets z to 1 in Montgomery form and returns z +func (z *E2) SetOne() *E2 { + z.A0.SetOne() + z.A1.SetZero() + return z +} + +// SetRandom sets a0 and a1 to random values +func (z *E2) SetRandom() (*E2, error) { + if _, err := z.A0.SetRandom(); err != nil { + return nil, err + } + if _, err := z.A1.SetRandom(); err != nil { + return nil, err + } + return z, nil +} + +// IsZero returns true if the two elements are equal, false otherwise +func (z *E2) IsZero() bool { + return z.A0.IsZero() && z.A1.IsZero() +} + +// Add adds two elements of E2 +func (z *E2) Add(x, y *E2) *E2 { + addE2(z, x, y) + return z +} + +// Sub two elements of E2 +func 
(z *E2) Sub(x, y *E2) *E2 { + subE2(z, x, y) + return z +} + +// Double doubles an E2 element +func (z *E2) Double(x *E2) *E2 { + doubleE2(z, x) + return z +} + +// Neg negates an E2 element +func (z *E2) Neg(x *E2) *E2 { + negE2(z, x) + return z +} + +// String implements Stringer interface for fancy printing +func (z *E2) String() string { + return z.A0.String() + "+" + z.A1.String() + "*u" +} + +// ToMont converts to mont form +func (z *E2) ToMont() *E2 { + z.A0.ToMont() + z.A1.ToMont() + return z +} + +// FromMont converts from mont form +func (z *E2) FromMont() *E2 { + z.A0.FromMont() + z.A1.FromMont() + return z +} + +// MulByElement multiplies an element in E2 by an element in fp +func (z *E2) MulByElement(x *E2, y *fp.Element) *E2 { + var yCopy fp.Element + yCopy.Set(y) + z.A0.Mul(&x.A0, &yCopy) + z.A1.Mul(&x.A1, &yCopy) + return z +} + +// Conjugate conjugates an element in E2 +func (z *E2) Conjugate(x *E2) *E2 { + z.A0 = x.A0 + z.A1.Neg(&x.A1) + return z +} + +// Halve sets z = z / 2 +func (z *E2) Halve() { + z.A0.Halve() + z.A1.Halve() +} + +// Legendre returns the Legendre symbol of z +func (z *E2) Legendre() int { + var n fp.Element + z.norm(&n) + return n.Legendre() +} + +// Exp sets z=x**e and returns it +func (z *E2) Exp(x E2, exponent *big.Int) *E2 { + z.SetOne() + b := exponent.Bytes() + for i := 0; i < len(b); i++ { + w := b[i] + for j := 0; j < 8; j++ { + z.Square(z) + if (w & (0b10000000 >> j)) != 0 { + z.Mul(z, &x) + } + } + } + + return z +} + +func init() { + q := fp.Modulus() + tmp := big.NewInt(3) + sqrtExp1.Set(q).Sub(&sqrtExp1, tmp).Rsh(&sqrtExp1, 2) + + tmp.SetUint64(1) + sqrtExp2.Set(q).Sub(&sqrtExp2, tmp).Rsh(&sqrtExp2, 1) +} + +var sqrtExp1, sqrtExp2 big.Int + +// Sqrt sets z to the square root of and returns z +// The function does not test wether the square root +// exists or not, it's up to the caller to call +// Legendre beforehand. 
+// cf https://eprint.iacr.org/2012/685.pdf (algo 9) +func (z *E2) Sqrt(x *E2) *E2 { + + var a1, alpha, b, x0, minusone E2 + + minusone.SetOne().Neg(&minusone) + + a1.Exp(*x, &sqrtExp1) + alpha.Square(&a1). + Mul(&alpha, x) + x0.Mul(x, &a1) + if alpha.Equal(&minusone) { + var c fp.Element + c.Set(&x0.A0) + z.A0.Neg(&x0.A1) + z.A1.Set(&c) + return z + } + a1.SetOne() + b.Add(&a1, &alpha) + + b.Exp(b, &sqrtExp2).Mul(&x0, &b) + z.Set(&b) + return z +} + +// BatchInvert returns a new slice with every element inverted. +// Uses Montgomery batch inversion trick +func BatchInvert(a []E2) []E2 { + res := make([]E2, len(a)) + if len(a) == 0 { + return res + } + + zeroes := make([]bool, len(a)) + var accumulator E2 + accumulator.SetOne() + + for i := 0; i < len(a); i++ { + if a[i].IsZero() { + zeroes[i] = true + continue + } + res[i].Set(&accumulator) + accumulator.Mul(&accumulator, &a[i]) + } + + accumulator.Inverse(&accumulator) + + for i := len(a) - 1; i >= 0; i-- { + if zeroes[i] { + continue + } + res[i].Mul(&res[i], &accumulator) + accumulator.Mul(&accumulator, &a[i]) + } + + return res +} + +func (z *E2) Select(cond int, caseZ *E2, caseNz *E2) *E2 { + //Might be able to save a nanosecond or two by an aggregate implementation + + z.A0.Select(cond, &caseZ.A0, &caseNz.A0) + z.A1.Select(cond, &caseZ.A1, &caseNz.A1) + + return z +} + +func (z *E2) Div(x *E2, y *E2) *E2 { + var r E2 + r.Inverse(y).Mul(x, &r) + return z.Set(&r) +} diff --git a/ecc/bls12-39/internal/fptower/e2_amd64.go b/ecc/bls12-39/internal/fptower/e2_amd64.go new file mode 100644 index 0000000000..027fac2b6b --- /dev/null +++ b/ecc/bls12-39/internal/fptower/e2_amd64.go @@ -0,0 +1,40 @@ +// Copyright 2020 ConsenSys Software Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by consensys/gnark-crypto DO NOT EDIT + +package fptower + +// q (modulus) +var qE2 = [1]uint64{ + 326667333367, +} + +// q'[0], see montgommery multiplication algorithm +var ( + qE2Inv0 uint64 = 14763286405836492089 + _ = qE2Inv0 // used in asm +) + +//go:noescape +func addE2(res, x, y *E2) + +//go:noescape +func subE2(res, x, y *E2) + +//go:noescape +func doubleE2(res, x *E2) + +//go:noescape +func negE2(res, x *E2) diff --git a/ecc/bls12-39/internal/fptower/e2_amd64.s b/ecc/bls12-39/internal/fptower/e2_amd64.s new file mode 100644 index 0000000000..fc8177c5b7 --- /dev/null +++ b/ecc/bls12-39/internal/fptower/e2_amd64.s @@ -0,0 +1,120 @@ +// Copyright 2020 ConsenSys Software Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "textflag.h" +#include "funcdata.h" + +// modulus q +DATA q<>+0(SB)/8, $0x0000004c0ee3eef7 +GLOBL q<>(SB), (RODATA+NOPTR), $8 + +// qInv0 q'[0] +DATA qInv0<>(SB)/8, $0xcce1bac4513ccd39 +GLOBL qInv0<>(SB), (RODATA+NOPTR), $8 + +#define REDUCE(ra0, rb0) \ + MOVQ ra0, rb0; \ + SUBQ q<>(SB), ra0; \ + CMOVQCS rb0, ra0; \ + +TEXT ·addE2(SB), NOSPLIT, $0-24 + MOVQ x+8(FP), AX + MOVQ 0(AX), BX + MOVQ y+16(FP), DX + ADDQ 0(DX), BX + + // reduce element(BX) using temp registers (SI) + REDUCE(BX,SI) + + MOVQ res+0(FP), CX + MOVQ BX, 0(CX) + MOVQ 8(AX), BX + ADDQ 8(DX), BX + + // reduce element(BX) using temp registers (DI) + REDUCE(BX,DI) + + MOVQ BX, 8(CX) + RET + +TEXT ·doubleE2(SB), NOSPLIT, $0-16 + MOVQ res+0(FP), DX + MOVQ x+8(FP), AX + MOVQ 0(AX), CX + ADDQ CX, CX + + // reduce element(CX) using temp registers (BX) + REDUCE(CX,BX) + + MOVQ CX, 0(DX) + MOVQ 8(AX), CX + ADDQ CX, CX + + // reduce element(CX) using temp registers (SI) + REDUCE(CX,SI) + + MOVQ CX, 8(DX) + RET + +TEXT ·subE2(SB), NOSPLIT, $0-24 + XORQ CX, CX + MOVQ x+8(FP), DX + MOVQ 0(DX), AX + MOVQ y+16(FP), DX + SUBQ 0(DX), AX + MOVQ x+8(FP), DX + MOVQ $0x0000004c0ee3eef7, BX + CMOVQCC CX, BX + ADDQ BX, AX + MOVQ res+0(FP), SI + MOVQ AX, 0(SI) + MOVQ 8(DX), AX + MOVQ y+16(FP), DX + SUBQ 8(DX), AX + MOVQ $0x0000004c0ee3eef7, DI + CMOVQCC CX, DI + ADDQ DI, AX + MOVQ res+0(FP), DX + MOVQ AX, 8(DX) + RET + +TEXT ·negE2(SB), NOSPLIT, $0-16 + MOVQ res+0(FP), DX + MOVQ x+8(FP), AX + MOVQ 0(AX), BX + MOVQ BX, AX + TESTQ AX, AX + JNE l1 + MOVQ AX, 0(DX) + JMP l3 + +l1: + MOVQ $0x0000004c0ee3eef7, CX + SUBQ BX, CX + MOVQ CX, 0(DX) + +l3: + MOVQ x+8(FP), AX + MOVQ 8(AX), BX + MOVQ BX, AX + TESTQ AX, AX + JNE l2 + MOVQ AX, 8(DX) + RET + +l2: + MOVQ $0x0000004c0ee3eef7, CX + SUBQ BX, CX + MOVQ CX, 8(DX) + RET diff --git a/ecc/bls12-39/internal/fptower/e2_bls39.go b/ecc/bls12-39/internal/fptower/e2_bls39.go new file mode 100644 index 0000000000..3449602080 --- /dev/null +++ 
b/ecc/bls12-39/internal/fptower/e2_bls39.go @@ -0,0 +1,109 @@ +// Copyright 2020 ConsenSys AG +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fptower + +import ( + "github.com/consensys/gnark-crypto/ecc/bls12-39/fp" +) + +// Mul sets z to the E2-product of x,y, returns z +func (z *E2) Mul(x, y *E2) *E2 { + var a, b, c fp.Element + a.Add(&x.A0, &x.A1) + b.Add(&y.A0, &y.A1) + a.Mul(&a, &b) + b.Mul(&x.A0, &y.A0) + c.Mul(&x.A1, &y.A1) + z.A1.Sub(&a, &b).Sub(&z.A1, &c) + fp.MulBy3(&c) + z.A0.Add(&b, &c) + return z +} + +// Square sets z to the E2-product of x,x returns z +func (z *E2) Square(x *E2) *E2 { + //algo 22 https://eprint.iacr.org/2010/354.pdf + var c0, c1, c2, c3 fp.Element + c0.Sub(&x.A0, &x.A1) + c1.Neg(&x.A1) + fp.MulBy3(&c1) + c3.Add(&x.A0, &c1) + c2.Mul(&x.A0, &x.A1) + c0.Mul(&c0, &c3). 
+ Add(&c0, &c2) + z.A1.Double(&c2) + fp.MulBy3(&c2) + z.A0.Add(&c0, &c2) + + return z +} + +// MulByNonResidue multiplies a E2 by (1,1) +func (z *E2) MulByNonResidue(x *E2) *E2 { + var res E2 + res.A0.Set(&x.A1) + fp.MulBy3(&res.A0) + res.A0.Add(&x.A0, &res.A0) + res.A1.Add(&x.A0, &x.A1) + + z.Set(&res) + return z +} + +// Inverse sets z to the E2-inverse of x, returns z +func (z *E2) Inverse(x *E2) *E2 { + // Algorithm 8 from https://eprint.iacr.org/2010/354.pdf + //var a, b, t0, t1, tmp fp.Element + var t0, t1, tmp fp.Element + a := &x.A0 // creating the buffers a, b is faster than querying &x.A0, &x.A1 in the functions call below + b := &x.A1 + t0.Square(a) + t1.Square(b) + tmp.Set(&t1) + fp.MulBy3(&tmp) + t0.Sub(&t0, &tmp) + t1.Inverse(&t0) + z.A0.Mul(a, &t1) + z.A1.Mul(b, &t1).Neg(&z.A1) + + return z +} + +// norm sets x to the norm of z +func (z *E2) norm(x *fp.Element) { + var tmp fp.Element + x.Square(&z.A1) + tmp.Set(x) + fp.MulBy3(&tmp) + x.Square(&z.A0).Sub(x, &tmp) +} + +// MulByNonResidueInv multiplies a E2 by (1,1)^{-1} +func (z *E2) MulByNonResidueInv(x *E2) *E2 { + var a E2 + a.A0.SetString("163333666683") + a.A1.SetString("163333666684") + + z.Mul(x, &a) + return z +} + +// MulBybTwistCurveCoeff multiplies by 2/(1,1) +func (z *E2) MulBybTwistCurveCoeff(x *E2) *E2 { + + z.MulByNonResidueInv(x).Double(z) + + return z +} diff --git a/ecc/bls12-39/internal/fptower/e2_fallback.go b/ecc/bls12-39/internal/fptower/e2_fallback.go new file mode 100644 index 0000000000..0ce4d83334 --- /dev/null +++ b/ecc/bls12-39/internal/fptower/e2_fallback.go @@ -0,0 +1,40 @@ +//go:build !amd64 +// +build !amd64 + +// Copyright 2020 ConsenSys Software Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by consensys/gnark-crypto DO NOT EDIT + +package fptower + +func addE2(z, x, y *E2) { + z.A0.Add(&x.A0, &y.A0) + z.A1.Add(&x.A1, &y.A1) +} + +func subE2(z, x, y *E2) { + z.A0.Sub(&x.A0, &y.A0) + z.A1.Sub(&x.A1, &y.A1) +} + +func doubleE2(z, x *E2) { + z.A0.Double(&x.A0) + z.A1.Double(&x.A1) +} + +func negE2(z, x *E2) { + z.A0.Neg(&x.A0) + z.A1.Neg(&x.A1) +} diff --git a/ecc/bls12-39/internal/fptower/e2_test.go b/ecc/bls12-39/internal/fptower/e2_test.go new file mode 100644 index 0000000000..7df8f17454 --- /dev/null +++ b/ecc/bls12-39/internal/fptower/e2_test.go @@ -0,0 +1,538 @@ +// Copyright 2020 ConsenSys Software Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Code generated by consensys/gnark-crypto DO NOT EDIT + +package fptower + +import ( + "crypto/rand" + "testing" + + "github.com/consensys/gnark-crypto/ecc/bls12-39/fp" + "github.com/leanovate/gopter" + "github.com/leanovate/gopter/prop" +) + +// ------------------------------------------------------------ +// tests + +const ( + nbFuzzShort = 20 + nbFuzz = 100 +) + +func TestE2ReceiverIsOperand(t *testing.T) { + + t.Parallel() + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := GenE2() + genB := GenE2() + genfp := GenFp() + + properties.Property("[BLS12-39] Having the receiver as operand (addition) should output the same result", prop.ForAll( + func(a, b *E2) bool { + var c, d E2 + d.Set(a) + c.Add(a, b) + a.Add(a, b) + b.Add(&d, b) + return a.Equal(b) && a.Equal(&c) && b.Equal(&c) + }, + genA, + genB, + )) + + properties.Property("[BLS12-39] Having the receiver as operand (sub) should output the same result", prop.ForAll( + func(a, b *E2) bool { + var c, d E2 + d.Set(a) + c.Sub(a, b) + a.Sub(a, b) + b.Sub(&d, b) + return a.Equal(b) && a.Equal(&c) && b.Equal(&c) + }, + genA, + genB, + )) + + properties.Property("[BLS12-39] Having the receiver as operand (mul) should output the same result", prop.ForAll( + func(a, b *E2) bool { + var c, d E2 + d.Set(a) + c.Mul(a, b) + a.Mul(a, b) + b.Mul(&d, b) + return a.Equal(b) && a.Equal(&c) && b.Equal(&c) + }, + genA, + genB, + )) + + properties.Property("[BLS12-39] Having the receiver as operand (square) should output the same result", prop.ForAll( + func(a *E2) bool { + var b E2 + b.Square(a) + a.Square(a) + return a.Equal(&b) + }, + genA, + )) + + properties.Property("[BLS12-39] Having the receiver as operand (neg) should output the same result", prop.ForAll( + func(a *E2) bool { + var b E2 + b.Neg(a) + a.Neg(a) + return a.Equal(&b) + }, + 
genA, + )) + + properties.Property("[BLS12-39] Having the receiver as operand (double) should output the same result", prop.ForAll( + func(a *E2) bool { + var b E2 + b.Double(a) + a.Double(a) + return a.Equal(&b) + }, + genA, + )) + + properties.Property("[BLS12-39] Having the receiver as operand (mul by non residue) should output the same result", prop.ForAll( + func(a *E2) bool { + var b E2 + b.MulByNonResidue(a) + a.MulByNonResidue(a) + return a.Equal(&b) + }, + genA, + )) + + properties.Property("[BLS12-39] Having the receiver as operand (mul by non residue inverse) should output the same result", prop.ForAll( + func(a *E2) bool { + var b E2 + b.MulByNonResidueInv(a) + a.MulByNonResidueInv(a) + return a.Equal(&b) + }, + genA, + )) + + properties.Property("[BLS12-39] Having the receiver as operand (Inverse) should output the same result", prop.ForAll( + func(a *E2) bool { + var b E2 + b.Inverse(a) + a.Inverse(a) + return a.Equal(&b) + }, + genA, + )) + + properties.Property("[BLS12-39] Having the receiver as operand (Conjugate) should output the same result", prop.ForAll( + func(a *E2) bool { + var b E2 + b.Conjugate(a) + a.Conjugate(a) + return a.Equal(&b) + }, + genA, + )) + + properties.Property("[BLS12-39] Having the receiver as operand (mul by element) should output the same result", prop.ForAll( + func(a *E2, b fp.Element) bool { + var c E2 + c.MulByElement(a, &b) + a.MulByElement(a, &b) + return a.Equal(&c) + }, + genA, + genfp, + )) + + properties.Property("[BLS12-39] Having the receiver as operand (Sqrt) should output the same result", prop.ForAll( + func(a *E2) bool { + var b, c, d, s E2 + + s.Square(a) + a.Set(&s) + b.Set(&s) + + a.Sqrt(a) + b.Sqrt(&b) + + c.Square(a) + d.Square(&b) + return c.Equal(&d) + }, + genA, + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) + + if supportAdx { + t.Log("disabling ADX") + supportAdx = false + properties.TestingRun(t, gopter.ConsoleReporter(false)) + supportAdx = true + } +} + +func 
TestE2MulMaxed(t *testing.T) { + // let's pick a and b, with maxed A0 and A1 + var a, b E2 + fpMaxValue := fp.Element{ + 326667333367, + } + fpMaxValue[0]-- + + a.A0 = fpMaxValue + a.A1 = fpMaxValue + b.A0 = fpMaxValue + b.A1 = fpMaxValue + + var c, d E2 + d.Inverse(&b) + c.Set(&a) + c.Mul(&c, &b).Mul(&c, &d) + if !c.Equal(&a) { + t.Fatal("mul with max fp failed") + } +} + +func TestE2Ops(t *testing.T) { + + t.Parallel() + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := GenE2() + genB := GenE2() + genfp := GenFp() + + properties.Property("[BLS12-39] sub & add should leave an element invariant", prop.ForAll( + func(a, b *E2) bool { + var c E2 + c.Set(a) + c.Add(&c, b).Sub(&c, b) + return c.Equal(a) + }, + genA, + genB, + )) + + properties.Property("[BLS12-39] mul & inverse should leave an element invariant", prop.ForAll( + func(a, b *E2) bool { + var c, d E2 + d.Inverse(b) + c.Set(a) + c.Mul(&c, b).Mul(&c, &d) + return c.Equal(a) + }, + genA, + genB, + )) + + properties.Property("[BLS12-39] BatchInvert should output the same result as Inverse", prop.ForAll( + func(a, b, c *E2) bool { + + batch := BatchInvert([]E2{*a, *b, *c}) + a.Inverse(a) + b.Inverse(b) + c.Inverse(c) + return a.Equal(&batch[0]) && b.Equal(&batch[1]) && c.Equal(&batch[2]) + }, + genA, + genA, + genA, + )) + + properties.Property("[BLS12-39] inverse twice should leave an element invariant", prop.ForAll( + func(a *E2) bool { + var b E2 + b.Inverse(a).Inverse(&b) + return a.Equal(&b) + }, + genA, + )) + + properties.Property("[BLS12-39] neg twice should leave an element invariant", prop.ForAll( + func(a *E2) bool { + var b E2 + b.Neg(a).Neg(&b) + return a.Equal(&b) + }, + genA, + )) + + properties.Property("[BLS12-39] square and mul should output the same result", prop.ForAll( + func(a *E2) bool { + var b, c E2 + 
b.Mul(a, a) + c.Square(a) + return b.Equal(&c) + }, + genA, + )) + + properties.Property("[BLS12-39] MulByElement MulByElement inverse should leave an element invariant", prop.ForAll( + func(a *E2, b fp.Element) bool { + var c E2 + var d fp.Element + d.Inverse(&b) + c.MulByElement(a, &b).MulByElement(&c, &d) + return c.Equal(a) + }, + genA, + genfp, + )) + + properties.Property("[BLS12-39] Double and mul by 2 should output the same result", prop.ForAll( + func(a *E2) bool { + var b E2 + var c fp.Element + c.SetUint64(2) + b.Double(a) + a.MulByElement(a, &c) + return a.Equal(&b) + }, + genA, + )) + + properties.Property("[BLS12-39] Mulbynonres mulbynonresinv should leave the element invariant", prop.ForAll( + func(a *E2) bool { + var b E2 + b.MulByNonResidue(a).MulByNonResidueInv(&b) + return a.Equal(&b) + }, + genA, + )) + + properties.Property("[BLS12-39] a + pi(a), a-pi(a) should be real", prop.ForAll( + func(a *E2) bool { + var b, c, d E2 + var e, f fp.Element + b.Conjugate(a) + c.Add(a, &b) + d.Sub(a, &b) + e.Double(&a.A0) + f.Double(&a.A1) + return c.A1.IsZero() && d.A0.IsZero() && e.Equal(&c.A0) && f.Equal(&d.A1) + }, + genA, + )) + + properties.Property("[BLS12-39] Legendre on square should output 1", prop.ForAll( + func(a *E2) bool { + var b E2 + b.Square(a) + c := b.Legendre() + return c == 1 + }, + genA, + )) + + properties.Property("[BLS12-39] square(sqrt) should leave an element invariant", prop.ForAll( + func(a *E2) bool { + var b, c, d, e E2 + b.Square(a) + c.Sqrt(&b) + d.Square(&c) + e.Neg(a) + return (c.Equal(a) || c.Equal(&e)) && d.Equal(&b) + }, + genA, + )) + + properties.Property("[BLS12-39] neg(E2) == neg(E2.A0, E2.A1)", prop.ForAll( + func(a *E2) bool { + var b, c E2 + b.Neg(a) + c.A0.Neg(&a.A0) + c.A1.Neg(&a.A1) + return c.Equal(&b) + }, + genA, + )) + + properties.Property("[BLS12-39] Cmp and LexicographicallyLargest should be consistant", prop.ForAll( + func(a *E2) bool { + var negA E2 + negA.Neg(a) + cmpResult := a.Cmp(&negA) + lResult := 
a.LexicographicallyLargest() + if lResult && cmpResult == 1 { + return true + } + if !lResult && cmpResult != 1 { + return true + } + return false + }, + genA, + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) + + if supportAdx { + t.Log("disabling ADX") + supportAdx = false + properties.TestingRun(t, gopter.ConsoleReporter(false)) + supportAdx = true + } +} + +// ------------------------------------------------------------ +// benches + +func BenchmarkE2Add(b *testing.B) { + var a, c E2 + a.SetRandom() + c.SetRandom() + b.ResetTimer() + for i := 0; i < b.N; i++ { + a.Add(&a, &c) + } +} + +func BenchmarkE2Sub(b *testing.B) { + var a, c E2 + a.SetRandom() + c.SetRandom() + b.ResetTimer() + for i := 0; i < b.N; i++ { + a.Sub(&a, &c) + } +} + +func BenchmarkE2Mul(b *testing.B) { + var a, c E2 + a.SetRandom() + c.SetRandom() + b.ResetTimer() + for i := 0; i < b.N; i++ { + a.Mul(&a, &c) + } +} + +func BenchmarkE2MulByElement(b *testing.B) { + var a E2 + var c fp.Element + c.SetRandom() + a.SetRandom() + b.ResetTimer() + for i := 0; i < b.N; i++ { + a.MulByElement(&a, &c) + } +} + +func BenchmarkE2Square(b *testing.B) { + var a E2 + a.SetRandom() + b.ResetTimer() + for i := 0; i < b.N; i++ { + a.Square(&a) + } +} + +func BenchmarkE2Sqrt(b *testing.B) { + var a E2 + a.SetRandom() + b.ResetTimer() + for i := 0; i < b.N; i++ { + a.Sqrt(&a) + } +} + +func BenchmarkE2Exp(b *testing.B) { + var x E2 + x.SetRandom() + b1, _ := rand.Int(rand.Reader, fp.Modulus()) + b.ResetTimer() + for i := 0; i < b.N; i++ { + x.Exp(x, b1) + } +} + +func BenchmarkE2Inverse(b *testing.B) { + var a E2 + a.SetRandom() + b.ResetTimer() + for i := 0; i < b.N; i++ { + a.Inverse(&a) + } +} + +func BenchmarkE2MulNonRes(b *testing.B) { + var a E2 + a.SetRandom() + b.ResetTimer() + for i := 0; i < b.N; i++ { + a.MulByNonResidue(&a) + } +} + +func BenchmarkE2MulNonResInv(b *testing.B) { + var a E2 + a.SetRandom() + b.ResetTimer() + for i := 0; i < b.N; i++ { + a.MulByNonResidueInv(&a) + } +} 
+ +func BenchmarkE2Conjugate(b *testing.B) { + var a E2 + a.SetRandom() + b.ResetTimer() + for i := 0; i < b.N; i++ { + a.Conjugate(&a) + } +} + +func TestE2Div(t *testing.T) { + + parameters := gopter.DefaultTestParameters() + properties := gopter.NewProperties(parameters) + + genA := GenE2() + genB := GenE2() + + properties.Property("[BLS12-39] dividing then multiplying by the same element does nothing", prop.ForAll( + func(a, b *E2) bool { + var c E2 + c.Div(a, b) + c.Mul(&c, b) + return c.Equal(a) + }, + genA, + genB, + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) +} diff --git a/ecc/bls12-39/internal/fptower/e6.go b/ecc/bls12-39/internal/fptower/e6.go new file mode 100644 index 0000000000..2ef96c129b --- /dev/null +++ b/ecc/bls12-39/internal/fptower/e6.go @@ -0,0 +1,280 @@ +// Copyright 2020 ConsenSys Software Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Code generated by consensys/gnark-crypto DO NOT EDIT + +package fptower + +// E6 is a degree three finite field extension of fp2 +type E6 struct { + B0, B1, B2 E2 +} + +// Equal returns true if z equals x, fasle otherwise +func (z *E6) Equal(x *E6) bool { + return z.B0.Equal(&x.B0) && z.B1.Equal(&x.B1) && z.B2.Equal(&x.B2) +} + +// SetString sets a E6 elmt from stringf +func (z *E6) SetString(s1, s2, s3, s4, s5, s6 string) *E6 { + z.B0.SetString(s1, s2) + z.B1.SetString(s3, s4) + z.B2.SetString(s5, s6) + return z +} + +// Set Sets a E6 elmt form another E6 elmt +func (z *E6) Set(x *E6) *E6 { + z.B0 = x.B0 + z.B1 = x.B1 + z.B2 = x.B2 + return z +} + +// SetOne sets z to 1 in Montgomery form and returns z +func (z *E6) SetOne() *E6 { + *z = E6{} + z.B0.A0.SetOne() + return z +} + +// SetRandom set z to a random elmt +func (z *E6) SetRandom() (*E6, error) { + if _, err := z.B0.SetRandom(); err != nil { + return nil, err + } + if _, err := z.B1.SetRandom(); err != nil { + return nil, err + } + if _, err := z.B2.SetRandom(); err != nil { + return nil, err + } + return z, nil +} + +// ToMont converts to Mont form +func (z *E6) ToMont() *E6 { + z.B0.ToMont() + z.B1.ToMont() + z.B2.ToMont() + return z +} + +// FromMont converts from Mont form +func (z *E6) FromMont() *E6 { + z.B0.FromMont() + z.B1.FromMont() + z.B2.FromMont() + return z +} + +// Add adds two elements of E6 +func (z *E6) Add(x, y *E6) *E6 { + z.B0.Add(&x.B0, &y.B0) + z.B1.Add(&x.B1, &y.B1) + z.B2.Add(&x.B2, &y.B2) + return z +} + +// Neg negates the E6 number +func (z *E6) Neg(x *E6) *E6 { + z.B0.Neg(&x.B0) + z.B1.Neg(&x.B1) + z.B2.Neg(&x.B2) + return z +} + +// Sub two elements of E6 +func (z *E6) Sub(x, y *E6) *E6 { + z.B0.Sub(&x.B0, &y.B0) + z.B1.Sub(&x.B1, &y.B1) + z.B2.Sub(&x.B2, &y.B2) + return z +} + +// Double doubles an element in E6 +func (z *E6) Double(x *E6) *E6 { + z.B0.Double(&x.B0) + z.B1.Double(&x.B1) + z.B2.Double(&x.B2) + return z +} + +// String puts E6 elmt in string form +func (z 
*E6) String() string { + return (z.B0.String() + "+(" + z.B1.String() + ")*v+(" + z.B2.String() + ")*v**2") +} + +// MulByNonResidue mul x by (0,1,0) +func (z *E6) MulByNonResidue(x *E6) *E6 { + z.B2, z.B1, z.B0 = x.B1, x.B0, x.B2 + z.B0.MulByNonResidue(&z.B0) + return z +} + +// MulByE2 multiplies an element in E6 by an element in E2 +func (z *E6) MulByE2(x *E6, y *E2) *E6 { + var yCopy E2 + yCopy.Set(y) + z.B0.Mul(&x.B0, &yCopy) + z.B1.Mul(&x.B1, &yCopy) + z.B2.Mul(&x.B2, &yCopy) + return z +} + +// MulBy01 multiplication by sparse element (c0,c1,0) +func (z *E6) MulBy01(c0, c1 *E2) *E6 { + + var a, b, tmp, t0, t1, t2 E2 + + a.Mul(&z.B0, c0) + b.Mul(&z.B1, c1) + + tmp.Add(&z.B1, &z.B2) + t0.Mul(c1, &tmp) + t0.Sub(&t0, &b) + t0.MulByNonResidue(&t0) + t0.Add(&t0, &a) + + tmp.Add(&z.B0, &z.B2) + t2.Mul(c0, &tmp) + t2.Sub(&t2, &a) + t2.Add(&t2, &b) + + t1.Add(c0, c1) + tmp.Add(&z.B0, &z.B1) + t1.Mul(&t1, &tmp) + t1.Sub(&t1, &a) + t1.Sub(&t1, &b) + + z.B0.Set(&t0) + z.B1.Set(&t1) + z.B2.Set(&t2) + + return z +} + +// MulBy1 multiplication of E6 by sparse element (0, c1, 0) +func (z *E6) MulBy1(c1 *E2) *E6 { + + var b, tmp, t0, t1 E2 + b.Mul(&z.B1, c1) + + tmp.Add(&z.B1, &z.B2) + t0.Mul(c1, &tmp) + t0.Sub(&t0, &b) + t0.MulByNonResidue(&t0) + + tmp.Add(&z.B0, &z.B1) + t1.Mul(c1, &tmp) + t1.Sub(&t1, &b) + + z.B0.Set(&t0) + z.B1.Set(&t1) + z.B2.Set(&b) + + return z +} + +// Mul sets z to the E6 product of x,y, returns z +func (z *E6) Mul(x, y *E6) *E6 { + // Algorithm 13 from https://eprint.iacr.org/2010/354.pdf + var t0, t1, t2, c0, c1, c2, tmp E2 + t0.Mul(&x.B0, &y.B0) + t1.Mul(&x.B1, &y.B1) + t2.Mul(&x.B2, &y.B2) + + c0.Add(&x.B1, &x.B2) + tmp.Add(&y.B1, &y.B2) + c0.Mul(&c0, &tmp).Sub(&c0, &t1).Sub(&c0, &t2).MulByNonResidue(&c0).Add(&c0, &t0) + + c1.Add(&x.B0, &x.B1) + tmp.Add(&y.B0, &y.B1) + c1.Mul(&c1, &tmp).Sub(&c1, &t0).Sub(&c1, &t1) + tmp.MulByNonResidue(&t2) + c1.Add(&c1, &tmp) + + tmp.Add(&x.B0, &x.B2) + c2.Add(&y.B0, &y.B2).Mul(&c2, &tmp).Sub(&c2, &t0).Sub(&c2, 
&t2).Add(&c2, &t1) + + z.B0.Set(&c0) + z.B1.Set(&c1) + z.B2.Set(&c2) + + return z +} + +// Square sets z to the E6 product of x,x, returns z +func (z *E6) Square(x *E6) *E6 { + + // Algorithm 16 from https://eprint.iacr.org/2010/354.pdf + var c4, c5, c1, c2, c3, c0 E2 + c4.Mul(&x.B0, &x.B1).Double(&c4) + c5.Square(&x.B2) + c1.MulByNonResidue(&c5).Add(&c1, &c4) + c2.Sub(&c4, &c5) + c3.Square(&x.B0) + c4.Sub(&x.B0, &x.B1).Add(&c4, &x.B2) + c5.Mul(&x.B1, &x.B2).Double(&c5) + c4.Square(&c4) + c0.MulByNonResidue(&c5).Add(&c0, &c3) + z.B2.Add(&c2, &c4).Add(&z.B2, &c5).Sub(&z.B2, &c3) + z.B0.Set(&c0) + z.B1.Set(&c1) + + return z +} + +// Inverse an element in E6 +func (z *E6) Inverse(x *E6) *E6 { + // Algorithm 17 from https://eprint.iacr.org/2010/354.pdf + // step 9 is wrong in the paper it's t1-t4 + var t0, t1, t2, t3, t4, t5, t6, c0, c1, c2, d1, d2 E2 + t0.Square(&x.B0) + t1.Square(&x.B1) + t2.Square(&x.B2) + t3.Mul(&x.B0, &x.B1) + t4.Mul(&x.B0, &x.B2) + t5.Mul(&x.B1, &x.B2) + c0.MulByNonResidue(&t5).Neg(&c0).Add(&c0, &t0) + c1.MulByNonResidue(&t2).Sub(&c1, &t3) + c2.Sub(&t1, &t4) + t6.Mul(&x.B0, &c0) + d1.Mul(&x.B2, &c1) + d2.Mul(&x.B1, &c2) + d1.Add(&d1, &d2).MulByNonResidue(&d1) + t6.Add(&t6, &d1) + t6.Inverse(&t6) + z.B0.Mul(&c0, &t6) + z.B1.Mul(&c1, &t6) + z.B2.Mul(&c2, &t6) + + return z +} + +func (z *E6) Select(cond int, caseZ *E6, caseNz *E6) *E6 { + //Might be able to save a nanosecond or two by an aggregate implementation + + z.B0.Select(cond, &caseZ.B0, &caseNz.B0) + z.B1.Select(cond, &caseZ.B1, &caseNz.B1) + z.B2.Select(cond, &caseZ.B2, &caseNz.B2) + + return z +} + +func (z *E6) Div(x *E6, y *E6) *E6 { + var r E6 + r.Inverse(y).Mul(x, &r) + return z.Set(&r) +} diff --git a/ecc/bls12-39/internal/fptower/e6_test.go b/ecc/bls12-39/internal/fptower/e6_test.go new file mode 100644 index 0000000000..27affcf50b --- /dev/null +++ b/ecc/bls12-39/internal/fptower/e6_test.go @@ -0,0 +1,349 @@ +// Copyright 2020 ConsenSys Software Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by consensys/gnark-crypto DO NOT EDIT + +package fptower + +import ( + "testing" + + "github.com/leanovate/gopter" + "github.com/leanovate/gopter/prop" +) + +// ------------------------------------------------------------ +// tests + +func TestE6ReceiverIsOperand(t *testing.T) { + + t.Parallel() + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := GenE6() + genB := GenE6() + genE2 := GenE2() + + properties.Property("[BLS12-39] Having the receiver as operand (addition) should output the same result", prop.ForAll( + func(a, b *E6) bool { + var c, d E6 + d.Set(a) + c.Add(a, b) + a.Add(a, b) + b.Add(&d, b) + return a.Equal(b) && a.Equal(&c) && b.Equal(&c) + }, + genA, + genB, + )) + + properties.Property("[BLS12-39] Having the receiver as operand (sub) should output the same result", prop.ForAll( + func(a, b *E6) bool { + var c, d E6 + d.Set(a) + c.Sub(a, b) + a.Sub(a, b) + b.Sub(&d, b) + return a.Equal(b) && a.Equal(&c) && b.Equal(&c) + }, + genA, + genB, + )) + + properties.Property("[BLS12-39] Having the receiver as operand (mul) should output the same result", prop.ForAll( + func(a, b *E6) bool { + var c, d E6 + d.Set(a) + c.Mul(a, b) + a.Mul(a, b) + b.Mul(&d, b) + return a.Equal(b) && a.Equal(&c) && 
b.Equal(&c) + }, + genA, + genB, + )) + + properties.Property("[BLS12-39] Having the receiver as operand (square) should output the same result", prop.ForAll( + func(a *E6) bool { + var b E6 + b.Square(a) + a.Square(a) + return a.Equal(&b) + }, + genA, + )) + + properties.Property("[BLS12-39] Having the receiver as operand (neg) should output the same result", prop.ForAll( + func(a *E6) bool { + var b E6 + b.Neg(a) + a.Neg(a) + return a.Equal(&b) + }, + genA, + )) + + properties.Property("[BLS12-39] Having the receiver as operand (double) should output the same result", prop.ForAll( + func(a *E6) bool { + var b E6 + b.Double(a) + a.Double(a) + return a.Equal(&b) + }, + genA, + )) + + properties.Property("[BLS12-39] Having the receiver as operand (mul by non residue) should output the same result", prop.ForAll( + func(a *E6) bool { + var b E6 + b.MulByNonResidue(a) + a.MulByNonResidue(a) + return a.Equal(&b) + }, + genA, + )) + + properties.Property("[BLS12-39] Having the receiver as operand (Inverse) should output the same result", prop.ForAll( + func(a *E6) bool { + var b E6 + b.Inverse(a) + a.Inverse(a) + return a.Equal(&b) + }, + genA, + )) + + properties.Property("[BLS12-39] Having the receiver as operand (mul by E2) should output the same result", prop.ForAll( + func(a *E6, b *E2) bool { + var c E6 + c.MulByE2(a, b) + a.MulByE2(a, b) + return a.Equal(&c) + }, + genA, + genE2, + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) +} + +func TestE6Ops(t *testing.T) { + + t.Parallel() + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := GenE6() + genB := GenE6() + genE2 := GenE2() + + properties.Property("[BLS12-39] sub & add should leave an element invariant", prop.ForAll( + func(a, b *E6) bool { + var c E6 + c.Set(a) + c.Add(&c, b).Sub(&c, b) + return c.Equal(a) + }, + 
genA, + genB, + )) + + properties.Property("[BLS12-39] mul & inverse should leave an element invariant", prop.ForAll( + func(a, b *E6) bool { + var c, d E6 + d.Inverse(b) + c.Set(a) + c.Mul(&c, b).Mul(&c, &d) + return c.Equal(a) + }, + genA, + genB, + )) + + properties.Property("[BLS12-39] inverse twice should leave an element invariant", prop.ForAll( + func(a *E6) bool { + var b E6 + b.Inverse(a).Inverse(&b) + return a.Equal(&b) + }, + genA, + )) + + properties.Property("[BLS12-39] neg twice should leave an element invariant", prop.ForAll( + func(a *E6) bool { + var b E6 + b.Neg(a).Neg(&b) + return a.Equal(&b) + }, + genA, + )) + + properties.Property("[BLS12-39] square and mul should output the same result", prop.ForAll( + func(a *E6) bool { + var b, c E6 + b.Mul(a, a) + c.Square(a) + return b.Equal(&c) + }, + genA, + )) + + properties.Property("[BLS12-39] Double and add twice should output the same result", prop.ForAll( + func(a *E6) bool { + var b E6 + b.Add(a, a) + a.Double(a) + return a.Equal(&b) + }, + genA, + )) + + properties.Property("[BLS12-39] Mul by non residue should be the same as multiplying by (0,1,0)", prop.ForAll( + func(a *E6) bool { + var b, c E6 + b.B1.A0.SetOne() + c.Mul(a, &b) + a.MulByNonResidue(a) + return a.Equal(&c) + }, + genA, + )) + + properties.Property("[BLS12-39] MulByE2 MulByE2 inverse should leave an element invariant", prop.ForAll( + func(a *E6, b *E2) bool { + var c E6 + var d E2 + d.Inverse(b) + c.MulByE2(a, b).MulByE2(&c, &d) + return c.Equal(a) + }, + genA, + genE2, + )) + + properties.Property("[BLS12-39] Mul and MulBy01 should output the same result", prop.ForAll( + func(a *E6, c0, c1 *E2) bool { + var b E6 + b.B0.Set(c0) + b.B1.Set(c1) + b.Mul(&b, a) + a.MulBy01(c0, c1) + return b.Equal(a) + }, + genA, + genE2, + genE2, + )) + + properties.Property("[BLS12-39] Mul and MulBy1 should output the same result", prop.ForAll( + func(a *E6, c1 *E2) bool { + var b E6 + b.B1.Set(c1) + b.Mul(&b, a) + a.MulBy1(c1) + return b.Equal(a) 
+ }, + genA, + genE2, + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) + +} + +// ------------------------------------------------------------ +// benches + +func BenchmarkE6Add(b *testing.B) { + var a, c E6 + a.SetRandom() + c.SetRandom() + b.ResetTimer() + for i := 0; i < b.N; i++ { + a.Add(&a, &c) + } +} + +func BenchmarkE6Sub(b *testing.B) { + var a, c E6 + a.SetRandom() + c.SetRandom() + b.ResetTimer() + for i := 0; i < b.N; i++ { + a.Sub(&a, &c) + } +} + +func BenchmarkE6Mul(b *testing.B) { + var a, c E6 + a.SetRandom() + c.SetRandom() + b.ResetTimer() + for i := 0; i < b.N; i++ { + a.Mul(&a, &c) + } +} + +func BenchmarkE6Square(b *testing.B) { + var a E6 + a.SetRandom() + b.ResetTimer() + for i := 0; i < b.N; i++ { + a.Square(&a) + } +} + +func BenchmarkE6Inverse(b *testing.B) { + var a E6 + a.SetRandom() + b.ResetTimer() + for i := 0; i < b.N; i++ { + a.Inverse(&a) + } +} + +func TestE6Div(t *testing.T) { + + parameters := gopter.DefaultTestParameters() + properties := gopter.NewProperties(parameters) + + genA := GenE6() + genB := GenE6() + + properties.Property("[BLS12-39] dividing then multiplying by the same element does nothing", prop.ForAll( + func(a, b *E6) bool { + var c E6 + c.Div(a, b) + c.Mul(&c, b) + return c.Equal(a) + }, + genA, + genB, + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) +} diff --git a/ecc/bls12-39/internal/fptower/frobenius.go b/ecc/bls12-39/internal/fptower/frobenius.go new file mode 100644 index 0000000000..767f9fde4a --- /dev/null +++ b/ecc/bls12-39/internal/fptower/frobenius.go @@ -0,0 +1,247 @@ +// Copyright 2020 ConsenSys AG +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fptower + +import ( + "github.com/consensys/gnark-crypto/ecc/bls12-39/fp" +) + +// Frobenius set z to Frobenius(x), return z +func (z *E12) Frobenius(x *E12) *E12 { + // Algorithm 28 from https://eprint.iacr.org/2010/354.pdf (beware typos!) + var t [6]E2 + + // Frobenius acts on fp2 by conjugation + t[0].Conjugate(&x.C0.B0) + t[1].Conjugate(&x.C0.B1) + t[2].Conjugate(&x.C0.B2) + t[3].Conjugate(&x.C1.B0) + t[4].Conjugate(&x.C1.B1) + t[5].Conjugate(&x.C1.B2) + + t[1].MulByNonResidue1Power2(&t[1]) + t[2].MulByNonResidue1Power4(&t[2]) + t[3].MulByNonResidue1Power1(&t[3]) + t[4].MulByNonResidue1Power3(&t[4]) + t[5].MulByNonResidue1Power5(&t[5]) + + z.C0.B0 = t[0] + z.C0.B1 = t[1] + z.C0.B2 = t[2] + z.C1.B0 = t[3] + z.C1.B1 = t[4] + z.C1.B2 = t[5] + + return z +} + +// FrobeniusSquare set z to Frobenius^2(x), and return z +func (z *E12) FrobeniusSquare(x *E12) *E12 { + // Algorithm 29 from https://eprint.iacr.org/2010/354.pdf (beware typos!) + var t [6]E2 + + t[1].MulByNonResidue2Power2(&x.C0.B1) + t[2].MulByNonResidue2Power4(&x.C0.B2) + t[3].MulByNonResidue2Power1(&x.C1.B0) + t[4].MulByNonResidue2Power3(&x.C1.B1) + t[5].MulByNonResidue2Power5(&x.C1.B2) + + z.C0.B0 = x.C0.B0 + z.C0.B1 = t[1] + z.C0.B2 = t[2] + z.C1.B0 = t[3] + z.C1.B1 = t[4] + z.C1.B2 = t[5] + + return z +} + +// FrobeniusCube set z to Frobenius^3(x), return z +func (z *E12) FrobeniusCube(x *E12) *E12 { + // Algorithm 30 from https://eprint.iacr.org/2010/354.pdf (beware typos!) 
+ var t [6]E2 + + // Frobenius^3 acts on fp2 by conjugation + t[0].Conjugate(&x.C0.B0) + t[1].Conjugate(&x.C0.B1) + t[2].Conjugate(&x.C0.B2) + t[3].Conjugate(&x.C1.B0) + t[4].Conjugate(&x.C1.B1) + t[5].Conjugate(&x.C1.B2) + + t[1].MulByNonResidue3Power2(&t[1]) + t[2].MulByNonResidue3Power4(&t[2]) + t[3].MulByNonResidue3Power1(&t[3]) + t[4].MulByNonResidue3Power3(&t[4]) + t[5].MulByNonResidue3Power5(&t[5]) + + z.C0.B0 = t[0] + z.C0.B1 = t[1] + z.C0.B2 = t[2] + z.C1.B0 = t[3] + z.C1.B1 = t[4] + z.C1.B2 = t[5] + + return z +} + +// MulByNonResidue1Power1 set z=x*(1,1)^(1*(p^1-1)/6) and return z +func (z *E2) MulByNonResidue1Power1(x *E2) *E2 { + // 171574396630*u + 199624070845 + var b E2 + b.A0 = fp.Element{186886073064} + b.A1 = fp.Element{75254894234} + z.Mul(x, &b) + return z +} + +// MulByNonResidue1Power2 set z=x*(1,1)^(2*(p^1-1)/6) and return z +func (z *E2) MulByNonResidue1Power2(x *E2) *E2 { + // 182009477101*u + 159768345029 + var b E2 + b.A0 = fp.Element{56732333561} + b.A1 = fp.Element{75375024703} + z.Mul(x, &b) + return z +} + +// MulByNonResidue1Power3 set z=x*(1,1)^(3*(p^1-1)/6) and return z +func (z *E2) MulByNonResidue1Power3(x *E2) *E2 { + // 228828781692*u + 293515655025 + var b E2 + b.A0 = fp.Element{320901648360} + b.A1 = fp.Element{219700117247} + z.Mul(x, &b) + return z +} + +// MulByNonResidue1Power4 set z=x*(1,1)^(4*(p^1-1)/6) and return z +func (z *E2) MulByNonResidue1Power4(x *E2) *E2 { + // 66088819674*u + 34127110868 + var b E2 + b.A0 = fp.Element{228884275100} + b.A1 = fp.Element{278145123361} + z.Mul(x, &b) + return z +} + +// MulByNonResidue1Power5 set z=x*(1,1)^(5*(p^1-1)/6) and return z +func (z *E2) MulByNonResidue1Power5(x *E2) *E2 { + // 214835958777*u + 36448585468 + var b E2 + b.A0 = fp.Element{195402511428} + b.A1 = fp.Element{227508494027} + z.Mul(x, &b) + return z +} + +// MulByNonResidue2Power1 set z=x*(1,1)^(1*(p^2-1)/6) and return z +func (z *E2) MulByNonResidue2Power1(x *E2) *E2 { + // 9702999902 + b := 
fp.Element{93717443168} + z.A0.Mul(&x.A0, &b) + z.A1.Mul(&x.A1, &b) + return z +} + +// MulByNonResidue2Power2 set z=x*(1,1)^(2*(p^2-1)/6) and return z +func (z *E2) MulByNonResidue2Power2(x *E2) *E2 { + // 9702999901 + b := fp.Element{268249031722} + z.A0.Mul(&x.A0, &b) + z.A1.Mul(&x.A1, &b) + + return z +} + +// MulByNonResidue2Power3 set z=x*(1,1)^(3*(p^2-1)/6) and return z +func (z *E2) MulByNonResidue2Power3(x *E2) *E2 { + // 326667333366 + b := fp.Element{174531588554} + z.A0.Mul(&x.A0, &b) + z.A1.Mul(&x.A1, &b) + + return z +} + +// MulByNonResidue2Power4 set z=x*(1,1)^(4*(p^2-1)/6) and return z +func (z *E2) MulByNonResidue2Power4(x *E2) *E2 { + // 316964333465 + b := fp.Element{232949890199} + z.A0.Mul(&x.A0, &b) + z.A1.Mul(&x.A1, &b) + + return z +} + +// MulByNonResidue2Power5 set z=x*(1,1)^(5*(p^2-1)/6) and return z +func (z *E2) MulByNonResidue2Power5(x *E2) *E2 { + // 316964333466 + b := fp.Element{58418301645} + z.A0.Mul(&x.A0, &b) + z.A1.Mul(&x.A1, &b) + + return z +} + +// MulByNonResidue3Power1 set z=x*(1,1)^(1*(p^3-1)/6) and return z +func (z *E2) MulByNonResidue3Power1(x *E2) *E2 { + // 32121226975*u + 263694451378 + var b E2 + b.A0.SetString("263694451378") + b.A1.SetString("32121226975") + z.Mul(x, &b) + return z +} + +// MulByNonResidue3Power2 set z=x*(1,1)^(2*(p^3-1)/6) and return z +func (z *E2) MulByNonResidue3Power2(x *E2) *E2 { + // 305020439417*u + 106752335729 + var b E2 + b.A0.SetString("106752335729") + b.A1.SetString("305020439417") + z.Mul(x, &b) + return z +} + +// MulByNonResidue3Power3 set z=x*(1,1)^(3*(p^3-1)/6) and return z +func (z *E2) MulByNonResidue3Power3(x *E2) *E2 { + // 97838551675*u + 33151678342 + var b E2 + b.A0.SetString("33151678342") + b.A1.SetString("97838551675") + z.Mul(x, &b) + return z +} + +// MulByNonResidue3Power4 set z=x*(1,1)^(4*(p^3-1)/6) and return z +func (z *E2) MulByNonResidue3Power4(x *E2) *E2 { + // 63458547829*u + 178103343759 + var b E2 + b.A0.SetString("178103343759") + 
b.A1.SetString("63458547829") + z.Mul(x, &b) + return z +} + +// MulByNonResidue3Power5 set z=x*(1,1)^(5*(p^3-1)/6) and return z +func (z *E2) MulByNonResidue3Power5(x *E2) *E2 { + // 325397761406*u + 297085250314 + var b E2 + b.A0.SetString("297085250314") + b.A1.SetString("325397761406") + z.Mul(x, &b) + return z +} diff --git a/ecc/bls12-39/internal/fptower/generators_test.go b/ecc/bls12-39/internal/fptower/generators_test.go new file mode 100644 index 0000000000..54fdce7131 --- /dev/null +++ b/ecc/bls12-39/internal/fptower/generators_test.go @@ -0,0 +1,51 @@ +package fptower + +import ( + "crypto/rand" + + "github.com/consensys/gnark-crypto/ecc/bls12-39/fp" + "github.com/leanovate/gopter" +) + +// Fp generates an Fp element +func GenFp() gopter.Gen { + return func(genParams *gopter.GenParameters) *gopter.GenResult { + var elmt fp.Element + var b [fp.Bytes]byte + rand.Read(b[:]) + elmt.SetBytes(b[:]) + genResult := gopter.NewGenResult(elmt, gopter.NoShrinker) + return genResult + } +} + +// E2 generates an E2 elmt +func GenE2() gopter.Gen { + return gopter.CombineGens( + GenFp(), + GenFp(), + ).Map(func(values []interface{}) *E2 { + return &E2{A0: values[0].(fp.Element), A1: values[1].(fp.Element)} + }) +} + +// E6 generates an E6 elmt +func GenE6() gopter.Gen { + return gopter.CombineGens( + GenE2(), + GenE2(), + GenE2(), + ).Map(func(values []interface{}) *E6 { + return &E6{B0: *values[0].(*E2), B1: *values[1].(*E2), B2: *values[2].(*E2)} + }) +} + +// E12 generates an E6 elmt +func GenE12() gopter.Gen { + return gopter.CombineGens( + GenE6(), + GenE6(), + ).Map(func(values []interface{}) *E12 { + return &E12{C0: *values[0].(*E6), C1: *values[1].(*E6)} + }) +} diff --git a/ecc/bls12-39/marshal.go b/ecc/bls12-39/marshal.go new file mode 100644 index 0000000000..e39a3c4691 --- /dev/null +++ b/ecc/bls12-39/marshal.go @@ -0,0 +1,1115 @@ +// Copyright 2020 ConsenSys Software Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by consensys/gnark-crypto DO NOT EDIT + +package bls1239 + +import ( + "encoding/binary" + "errors" + "io" + "reflect" + "sync/atomic" + + "github.com/consensys/gnark-crypto/ecc/bls12-39/fp" + "github.com/consensys/gnark-crypto/ecc/bls12-39/fr" + "github.com/consensys/gnark-crypto/ecc/bls12-39/internal/fptower" + "github.com/consensys/gnark-crypto/internal/parallel" +) + +// To encode G1Affine and G2Affine points, we mask the most significant bits with these bits to specify without ambiguity +// metadata needed for point (de)compression +// we follow the BLS12-381 style encoding as specified in ZCash and now IETF +// The most significant bit, when set, indicates that the point is in compressed form. Otherwise, the point is in uncompressed form. +// The second-most significant bit indicates that the point is at infinity. If this bit is set, the remaining bits of the group element's encoding should be set to zero. +// The third-most significant bit is set if (and only if) this point is in compressed form and it is not the point at infinity and its y-coordinate is the lexicographically largest of the two associated with the encoded x-coordinate. 
+const ( + mMask byte = 0b111 << 5 + mUncompressed byte = 0b000 << 5 + mUncompressedInfinity byte = 0b010 << 5 + mCompressedSmallest byte = 0b100 << 5 + mCompressedLargest byte = 0b101 << 5 + mCompressedInfinity byte = 0b110 << 5 +) + +// SizeOfGT represents the size in bytes that a GT element need in binary form +const SizeOfGT = fptower.SizeOfGT + +// Encoder writes bls12-39 object values to an output stream +type Encoder struct { + w io.Writer + n int64 // written bytes + raw bool // raw vs compressed encoding +} + +// Decoder reads bls12-39 object values from an inbound stream +type Decoder struct { + r io.Reader + n int64 // read bytes + subGroupCheck bool // default to true +} + +// NewDecoder returns a binary decoder supporting curve bls12-39 objects in both +// compressed and uncompressed (raw) forms +func NewDecoder(r io.Reader, options ...func(*Decoder)) *Decoder { + d := &Decoder{r: r, subGroupCheck: true} + + for _, o := range options { + o(d) + } + + return d +} + +// Decode reads the binary encoding of v from the stream +// type must be *uint64, *fr.Element, *fp.Element, *G1Affine, *G2Affine, *[]G1Affine or *[]G2Affine +func (dec *Decoder) Decode(v interface{}) (err error) { + rv := reflect.ValueOf(v) + if rv.Kind() != reflect.Ptr || rv.IsNil() || !rv.Elem().CanSet() { + return errors.New("bls12-39 decoder: unsupported type, need pointer") + } + + // implementation note: code is a bit verbose (abusing code generation), but minimize allocations on the heap + // in particular, careful attention must be given to usage of Bytes() method on Elements and Points + // that return an array (not a slice) of bytes. Using this is beneficial to minimize memallocs + // in very large (de)serialization upstream in gnark. 
+ // (but detrimental to code lisibility here) + // TODO double check memory usage and factorize this + + var buf [SizeOfG2AffineUncompressed]byte + var read int + + switch t := v.(type) { + case *fr.Element: + read, err = io.ReadFull(dec.r, buf[:fr.Bytes]) + dec.n += int64(read) + if err != nil { + return + } + t.SetBytes(buf[:fr.Bytes]) + return + case *fp.Element: + read, err = io.ReadFull(dec.r, buf[:fp.Bytes]) + dec.n += int64(read) + if err != nil { + return + } + t.SetBytes(buf[:fp.Bytes]) + return + case *[]fr.Element: + var sliceLen uint32 + sliceLen, err = dec.readUint32() + if err != nil { + return + } + if len(*t) != int(sliceLen) { + *t = make([]fr.Element, sliceLen) + } + + for i := 0; i < len(*t); i++ { + read, err = io.ReadFull(dec.r, buf[:fr.Bytes]) + dec.n += int64(read) + if err != nil { + return + } + (*t)[i].SetBytes(buf[:fr.Bytes]) + } + return + case *[]fp.Element: + var sliceLen uint32 + sliceLen, err = dec.readUint32() + if err != nil { + return + } + if len(*t) != int(sliceLen) { + *t = make([]fp.Element, sliceLen) + } + + for i := 0; i < len(*t); i++ { + read, err = io.ReadFull(dec.r, buf[:fp.Bytes]) + dec.n += int64(read) + if err != nil { + return + } + (*t)[i].SetBytes(buf[:fp.Bytes]) + } + return + case *G1Affine: + // we start by reading compressed point size, if metadata tells us it is uncompressed, we read more. + read, err = io.ReadFull(dec.r, buf[:SizeOfG1AffineCompressed]) + dec.n += int64(read) + if err != nil { + return + } + nbBytes := SizeOfG1AffineCompressed + // most significant byte contains metadata + if !isCompressed(buf[0]) { + nbBytes = SizeOfG1AffineUncompressed + // we read more. + read, err = io.ReadFull(dec.r, buf[SizeOfG1AffineCompressed:SizeOfG1AffineUncompressed]) + dec.n += int64(read) + if err != nil { + return + } + } + _, err = t.setBytes(buf[:nbBytes], dec.subGroupCheck) + return + case *G2Affine: + // we start by reading compressed point size, if metadata tells us it is uncompressed, we read more. 
+ read, err = io.ReadFull(dec.r, buf[:SizeOfG2AffineCompressed]) + dec.n += int64(read) + if err != nil { + return + } + nbBytes := SizeOfG2AffineCompressed + // most significant byte contains metadata + if !isCompressed(buf[0]) { + nbBytes = SizeOfG2AffineUncompressed + // we read more. + read, err = io.ReadFull(dec.r, buf[SizeOfG2AffineCompressed:SizeOfG2AffineUncompressed]) + dec.n += int64(read) + if err != nil { + return + } + } + _, err = t.setBytes(buf[:nbBytes], dec.subGroupCheck) + return + case *[]G1Affine: + var sliceLen uint32 + sliceLen, err = dec.readUint32() + if err != nil { + return + } + if len(*t) != int(sliceLen) { + *t = make([]G1Affine, sliceLen) + } + compressed := make([]bool, sliceLen) + for i := 0; i < len(*t); i++ { + + // we start by reading compressed point size, if metadata tells us it is uncompressed, we read more. + read, err = io.ReadFull(dec.r, buf[:SizeOfG1AffineCompressed]) + dec.n += int64(read) + if err != nil { + return + } + nbBytes := SizeOfG1AffineCompressed + // most significant byte contains metadata + if !isCompressed(buf[0]) { + nbBytes = SizeOfG1AffineUncompressed + // we read more. 
+ read, err = io.ReadFull(dec.r, buf[SizeOfG1AffineCompressed:SizeOfG1AffineUncompressed]) + dec.n += int64(read) + if err != nil { + return + } + _, err = (*t)[i].setBytes(buf[:nbBytes], false) + if err != nil { + return + } + } else { + compressed[i] = !((*t)[i].unsafeSetCompressedBytes(buf[:nbBytes])) + } + } + var nbErrs uint64 + parallel.Execute(len(compressed), func(start, end int) { + for i := start; i < end; i++ { + if compressed[i] { + if err := (*t)[i].unsafeComputeY(dec.subGroupCheck); err != nil { + atomic.AddUint64(&nbErrs, 1) + } + } else if dec.subGroupCheck { + if !(*t)[i].IsInSubGroup() { + atomic.AddUint64(&nbErrs, 1) + } + } + } + }) + if nbErrs != 0 { + return errors.New("point decompression failed") + } + + return nil + case *[]G2Affine: + var sliceLen uint32 + sliceLen, err = dec.readUint32() + if err != nil { + return + } + if len(*t) != int(sliceLen) { + *t = make([]G2Affine, sliceLen) + } + compressed := make([]bool, sliceLen) + for i := 0; i < len(*t); i++ { + + // we start by reading compressed point size, if metadata tells us it is uncompressed, we read more. + read, err = io.ReadFull(dec.r, buf[:SizeOfG2AffineCompressed]) + dec.n += int64(read) + if err != nil { + return + } + nbBytes := SizeOfG2AffineCompressed + // most significant byte contains metadata + if !isCompressed(buf[0]) { + nbBytes = SizeOfG2AffineUncompressed + // we read more. 
+ read, err = io.ReadFull(dec.r, buf[SizeOfG2AffineCompressed:SizeOfG2AffineUncompressed]) + dec.n += int64(read) + if err != nil { + return + } + _, err = (*t)[i].setBytes(buf[:nbBytes], false) + if err != nil { + return + } + } else { + compressed[i] = !((*t)[i].unsafeSetCompressedBytes(buf[:nbBytes])) + } + } + var nbErrs uint64 + parallel.Execute(len(compressed), func(start, end int) { + for i := start; i < end; i++ { + if compressed[i] { + if err := (*t)[i].unsafeComputeY(dec.subGroupCheck); err != nil { + atomic.AddUint64(&nbErrs, 1) + } + } else if dec.subGroupCheck { + if !(*t)[i].IsInSubGroup() { + atomic.AddUint64(&nbErrs, 1) + } + } + } + }) + if nbErrs != 0 { + return errors.New("point decompression failed") + } + + return nil + default: + n := binary.Size(t) + if n == -1 { + return errors.New("bls12-39 encoder: unsupported type") + } + err = binary.Read(dec.r, binary.BigEndian, t) + if err == nil { + dec.n += int64(n) + } + return + } +} + +// BytesRead return total bytes read from reader +func (dec *Decoder) BytesRead() int64 { + return dec.n +} + +func (dec *Decoder) readUint32() (r uint32, err error) { + var read int + var buf [4]byte + read, err = io.ReadFull(dec.r, buf[:4]) + dec.n += int64(read) + if err != nil { + return + } + r = binary.BigEndian.Uint32(buf[:4]) + return +} + +func isCompressed(msb byte) bool { + mData := msb & mMask + return !((mData == mUncompressed) || (mData == mUncompressedInfinity)) +} + +// NewEncoder returns a binary encoder supporting curve bls12-39 objects +func NewEncoder(w io.Writer, options ...func(*Encoder)) *Encoder { + // default settings + enc := &Encoder{ + w: w, + n: 0, + raw: false, + } + + // handle options + for _, option := range options { + option(enc) + } + + return enc +} + +// Encode writes the binary encoding of v to the stream +// type must be uint64, *fr.Element, *fp.Element, *G1Affine, *G2Affine, []G1Affine or []G2Affine +func (enc *Encoder) Encode(v interface{}) (err error) { + if enc.raw { + 
return enc.encodeRaw(v) + } + return enc.encode(v) +} + +// BytesWritten return total bytes written on writer +func (enc *Encoder) BytesWritten() int64 { + return enc.n +} + +// RawEncoding returns an option to use in NewEncoder(...) which sets raw encoding mode to true +// points will not be compressed using this option +func RawEncoding() func(*Encoder) { + return func(enc *Encoder) { + enc.raw = true + } +} + +// NoSubgroupChecks returns an option to use in NewDecoder(...) which disable subgroup checks on the points +// the decoder will read. Use with caution, as crafted points from an untrusted source can lead to crypto-attacks. +func NoSubgroupChecks() func(*Decoder) { + return func(dec *Decoder) { + dec.subGroupCheck = false + } +} + +func (enc *Encoder) encode(v interface{}) (err error) { + + // implementation note: code is a bit verbose (abusing code generation), but minimize allocations on the heap + // TODO double check memory usage and factorize this + + var written int + switch t := v.(type) { + case *fr.Element: + buf := t.Bytes() + written, err = enc.w.Write(buf[:]) + enc.n += int64(written) + return + case *fp.Element: + buf := t.Bytes() + written, err = enc.w.Write(buf[:]) + enc.n += int64(written) + return + case *G1Affine: + buf := t.Bytes() + written, err = enc.w.Write(buf[:]) + enc.n += int64(written) + return + case *G2Affine: + buf := t.Bytes() + written, err = enc.w.Write(buf[:]) + enc.n += int64(written) + return + case []fr.Element: + // write slice length + err = binary.Write(enc.w, binary.BigEndian, uint32(len(t))) + if err != nil { + return + } + enc.n += 4 + var buf [fr.Bytes]byte + for i := 0; i < len(t); i++ { + buf = t[i].Bytes() + written, err = enc.w.Write(buf[:]) + enc.n += int64(written) + if err != nil { + return + } + } + return nil + case []fp.Element: + // write slice length + err = binary.Write(enc.w, binary.BigEndian, uint32(len(t))) + if err != nil { + return + } + enc.n += 4 + var buf [fp.Bytes]byte + for i := 0; i < 
len(t); i++ { + buf = t[i].Bytes() + written, err = enc.w.Write(buf[:]) + enc.n += int64(written) + if err != nil { + return + } + } + return nil + + case []G1Affine: + // write slice length + err = binary.Write(enc.w, binary.BigEndian, uint32(len(t))) + if err != nil { + return + } + enc.n += 4 + + var buf [SizeOfG1AffineCompressed]byte + + for i := 0; i < len(t); i++ { + buf = t[i].Bytes() + written, err = enc.w.Write(buf[:]) + enc.n += int64(written) + if err != nil { + return + } + } + return nil + case []G2Affine: + // write slice length + err = binary.Write(enc.w, binary.BigEndian, uint32(len(t))) + if err != nil { + return + } + enc.n += 4 + + var buf [SizeOfG2AffineCompressed]byte + + for i := 0; i < len(t); i++ { + buf = t[i].Bytes() + written, err = enc.w.Write(buf[:]) + enc.n += int64(written) + if err != nil { + return + } + } + return nil + default: + n := binary.Size(t) + if n == -1 { + return errors.New(" encoder: unsupported type") + } + err = binary.Write(enc.w, binary.BigEndian, t) + enc.n += int64(n) + return + } +} + +func (enc *Encoder) encodeRaw(v interface{}) (err error) { + + // implementation note: code is a bit verbose (abusing code generation), but minimize allocations on the heap + // TODO double check memory usage and factorize this + + var written int + switch t := v.(type) { + case *fr.Element: + buf := t.Bytes() + written, err = enc.w.Write(buf[:]) + enc.n += int64(written) + return + case *fp.Element: + buf := t.Bytes() + written, err = enc.w.Write(buf[:]) + enc.n += int64(written) + return + case *G1Affine: + buf := t.RawBytes() + written, err = enc.w.Write(buf[:]) + enc.n += int64(written) + return + case *G2Affine: + buf := t.RawBytes() + written, err = enc.w.Write(buf[:]) + enc.n += int64(written) + return + case []fr.Element: + // write slice length + err = binary.Write(enc.w, binary.BigEndian, uint32(len(t))) + if err != nil { + return + } + enc.n += 4 + var buf [fr.Bytes]byte + for i := 0; i < len(t); i++ { + buf = 
t[i].Bytes() + written, err = enc.w.Write(buf[:]) + enc.n += int64(written) + if err != nil { + return + } + } + return nil + case []fp.Element: + // write slice length + err = binary.Write(enc.w, binary.BigEndian, uint32(len(t))) + if err != nil { + return + } + enc.n += 4 + var buf [fp.Bytes]byte + for i := 0; i < len(t); i++ { + buf = t[i].Bytes() + written, err = enc.w.Write(buf[:]) + enc.n += int64(written) + if err != nil { + return + } + } + return nil + + case []G1Affine: + // write slice length + err = binary.Write(enc.w, binary.BigEndian, uint32(len(t))) + if err != nil { + return + } + enc.n += 4 + + var buf [SizeOfG1AffineUncompressed]byte + + for i := 0; i < len(t); i++ { + buf = t[i].RawBytes() + written, err = enc.w.Write(buf[:]) + enc.n += int64(written) + if err != nil { + return + } + } + return nil + case []G2Affine: + // write slice length + err = binary.Write(enc.w, binary.BigEndian, uint32(len(t))) + if err != nil { + return + } + enc.n += 4 + + var buf [SizeOfG2AffineUncompressed]byte + + for i := 0; i < len(t); i++ { + buf = t[i].RawBytes() + written, err = enc.w.Write(buf[:]) + enc.n += int64(written) + if err != nil { + return + } + } + return nil + default: + n := binary.Size(t) + if n == -1 { + return errors.New(" encoder: unsupported type") + } + err = binary.Write(enc.w, binary.BigEndian, t) + enc.n += int64(n) + return + } +} + +// SizeOfG1AffineCompressed represents the size in bytes that a G1Affine need in binary form, compressed +const SizeOfG1AffineCompressed = 8 + +// SizeOfG1AffineUncompressed represents the size in bytes that a G1Affine need in binary form, uncompressed +const SizeOfG1AffineUncompressed = SizeOfG1AffineCompressed * 2 + +// Marshal converts p to a byte slice (without point compression) +func (p *G1Affine) Marshal() []byte { + b := p.RawBytes() + return b[:] +} + +// Unmarshal is an allias to SetBytes() +func (p *G1Affine) Unmarshal(buf []byte) error { + _, err := p.SetBytes(buf) + return err +} + +// Bytes 
returns binary representation of p +// will store X coordinate in regular form and a parity bit +// we follow the BLS12-381 style encoding as specified in ZCash and now IETF +// The most significant bit, when set, indicates that the point is in compressed form. Otherwise, the point is in uncompressed form. +// The second-most significant bit indicates that the point is at infinity. If this bit is set, the remaining bits of the group element's encoding should be set to zero. +// The third-most significant bit is set if (and only if) this point is in compressed form and it is not the point at infinity and its y-coordinate is the lexicographically largest of the two associated with the encoded x-coordinate. +func (p *G1Affine) Bytes() (res [SizeOfG1AffineCompressed]byte) { + + // check if p is infinity point + if p.X.IsZero() && p.Y.IsZero() { + res[0] = mCompressedInfinity + return + } + + // tmp is used to convert from montgomery representation to regular + var tmp fp.Element + + msbMask := mCompressedSmallest + // compressed, we need to know if Y is lexicographically bigger than -Y + // if p.Y ">" -p.Y + if p.Y.LexicographicallyLargest() { + msbMask = mCompressedLargest + } + + // we store X and mask the most significant word with our metadata mask + tmp = p.X + tmp.FromMont() + binary.BigEndian.PutUint64(res[0:8], tmp[0]) + + res[0] |= msbMask + + return +} + +// RawBytes returns binary representation of p (stores X and Y coordinate) +// see Bytes() for a compressed representation +func (p *G1Affine) RawBytes() (res [SizeOfG1AffineUncompressed]byte) { + + // check if p is infinity point + if p.X.IsZero() && p.Y.IsZero() { + + res[0] = mUncompressedInfinity + + return + } + + // tmp is used to convert from montgomery representation to regular + var tmp fp.Element + + // not compressed + // we store the Y coordinate + tmp = p.Y + tmp.FromMont() + binary.BigEndian.PutUint64(res[8:16], tmp[0]) + + // we store X and mask the most significant word with our metadata mask 
+ tmp = p.X + tmp.FromMont() + binary.BigEndian.PutUint64(res[0:8], tmp[0]) + + res[0] |= mUncompressed + + return +} + +// SetBytes sets p from binary representation in buf and returns number of consumed bytes +// bytes in buf must match either RawBytes() or Bytes() output +// if buf is too short io.ErrShortBuffer is returned +// if buf contains compressed representation (output from Bytes()) and we're unable to compute +// the Y coordinate (i.e the square root doesn't exist) this function retunrs an error +// this check if the resulting point is on the curve and in the correct subgroup +func (p *G1Affine) SetBytes(buf []byte) (int, error) { + return p.setBytes(buf, true) +} + +func (p *G1Affine) setBytes(buf []byte, subGroupCheck bool) (int, error) { + if len(buf) < SizeOfG1AffineCompressed { + return 0, io.ErrShortBuffer + } + + // most significant byte + mData := buf[0] & mMask + + // check buffer size + if (mData == mUncompressed) || (mData == mUncompressedInfinity) { + if len(buf) < SizeOfG1AffineUncompressed { + return 0, io.ErrShortBuffer + } + } + + // if infinity is encoded in the metadata, we don't need to read the buffer + if mData == mCompressedInfinity { + p.X.SetZero() + p.Y.SetZero() + return SizeOfG1AffineCompressed, nil + } + if mData == mUncompressedInfinity { + p.X.SetZero() + p.Y.SetZero() + return SizeOfG1AffineUncompressed, nil + } + + // uncompressed point + if mData == mUncompressed { + // read X and Y coordinates + p.X.SetBytes(buf[:fp.Bytes]) + p.Y.SetBytes(buf[fp.Bytes : fp.Bytes*2]) + + // subgroup check + if subGroupCheck && !p.IsInSubGroup() { + return 0, errors.New("invalid point: subgroup check failed") + } + + return SizeOfG1AffineUncompressed, nil + } + + // we have a compressed coordinate + // we need to + // 1. copy the buffer (to keep this method thread safe) + // 2. 
we need to solve the curve equation to compute Y + + var bufX [fp.Bytes]byte + copy(bufX[:fp.Bytes], buf[:fp.Bytes]) + bufX[0] &= ^mMask + + // read X coordinate + p.X.SetBytes(bufX[:fp.Bytes]) + + var YSquared, Y fp.Element + + YSquared.Square(&p.X).Mul(&YSquared, &p.X) + YSquared.Add(&YSquared, &bCurveCoeff) + if Y.Sqrt(&YSquared) == nil { + return 0, errors.New("invalid compressed coordinate: square root doesn't exist") + } + + if Y.LexicographicallyLargest() { + // Y ">" -Y + if mData == mCompressedSmallest { + Y.Neg(&Y) + } + } else { + // Y "<=" -Y + if mData == mCompressedLargest { + Y.Neg(&Y) + } + } + + p.Y = Y + + // subgroup check + if subGroupCheck && !p.IsInSubGroup() { + return 0, errors.New("invalid point: subgroup check failed") + } + + return SizeOfG1AffineCompressed, nil +} + +// unsafeComputeY called by Decoder when processing slices of compressed point in parallel (step 2) +// it computes the Y coordinate from the already set X coordinate and is compute intensive +func (p *G1Affine) unsafeComputeY(subGroupCheck bool) error { + // stored in unsafeSetCompressedBytes + + mData := byte(p.Y[0]) + + // we have a compressed coordinate, we need to solve the curve equation to compute Y + var YSquared, Y fp.Element + + YSquared.Square(&p.X).Mul(&YSquared, &p.X) + YSquared.Add(&YSquared, &bCurveCoeff) + if Y.Sqrt(&YSquared) == nil { + return errors.New("invalid compressed coordinate: square root doesn't exist") + } + + if Y.LexicographicallyLargest() { + // Y ">" -Y + if mData == mCompressedSmallest { + Y.Neg(&Y) + } + } else { + // Y "<=" -Y + if mData == mCompressedLargest { + Y.Neg(&Y) + } + } + + p.Y = Y + + // subgroup check + if subGroupCheck && !p.IsInSubGroup() { + return errors.New("invalid point: subgroup check failed") + } + + return nil +} + +// unsafeSetCompressedBytes is called by Decoder when processing slices of compressed point in parallel (step 1) +// assumes buf[:8] mask is set to compressed +// returns true if point is infinity and need 
no further processing +// it sets X coordinate and uses Y for scratch space to store decompression metadata +func (p *G1Affine) unsafeSetCompressedBytes(buf []byte) (isInfinity bool) { + + // read the most significant byte + mData := buf[0] & mMask + + if mData == mCompressedInfinity { + p.X.SetZero() + p.Y.SetZero() + isInfinity = true + return + } + + // we need to copy the input buffer (to keep this method thread safe) + var bufX [fp.Bytes]byte + copy(bufX[:fp.Bytes], buf[:fp.Bytes]) + bufX[0] &= ^mMask + + // read X coordinate + p.X.SetBytes(bufX[:fp.Bytes]) + // store mData in p.Y[0] + p.Y[0] = uint64(mData) + + // recomputing Y will be done asynchronously + return +} + +// SizeOfG2AffineCompressed represents the size in bytes that a G2Affine need in binary form, compressed +const SizeOfG2AffineCompressed = 8 * 2 + +// SizeOfG2AffineUncompressed represents the size in bytes that a G2Affine need in binary form, uncompressed +const SizeOfG2AffineUncompressed = SizeOfG2AffineCompressed * 2 + +// Marshal converts p to a byte slice (without point compression) +func (p *G2Affine) Marshal() []byte { + b := p.RawBytes() + return b[:] +} + +// Unmarshal is an allias to SetBytes() +func (p *G2Affine) Unmarshal(buf []byte) error { + _, err := p.SetBytes(buf) + return err +} + +// Bytes returns binary representation of p +// will store X coordinate in regular form and a parity bit +// we follow the BLS12-381 style encoding as specified in ZCash and now IETF +// The most significant bit, when set, indicates that the point is in compressed form. Otherwise, the point is in uncompressed form. +// The second-most significant bit indicates that the point is at infinity. If this bit is set, the remaining bits of the group element's encoding should be set to zero. 
+// The third-most significant bit is set if (and only if) this point is in compressed form and it is not the point at infinity and its y-coordinate is the lexicographically largest of the two associated with the encoded x-coordinate. +func (p *G2Affine) Bytes() (res [SizeOfG2AffineCompressed]byte) { + + // check if p is infinity point + if p.X.IsZero() && p.Y.IsZero() { + res[0] = mCompressedInfinity + return + } + + // tmp is used to convert from montgomery representation to regular + var tmp fp.Element + + msbMask := mCompressedSmallest + // compressed, we need to know if Y is lexicographically bigger than -Y + // if p.Y ">" -p.Y + if p.Y.LexicographicallyLargest() { + msbMask = mCompressedLargest + } + + // we store X and mask the most significant word with our metadata mask + // p.X.A1 | p.X.A0 + tmp = p.X.A0 + tmp.FromMont() + binary.BigEndian.PutUint64(res[8:16], tmp[0]) + + tmp = p.X.A1 + tmp.FromMont() + binary.BigEndian.PutUint64(res[0:8], tmp[0]) + + res[0] |= msbMask + + return +} + +// RawBytes returns binary representation of p (stores X and Y coordinate) +// see Bytes() for a compressed representation +func (p *G2Affine) RawBytes() (res [SizeOfG2AffineUncompressed]byte) { + + // check if p is infinity point + if p.X.IsZero() && p.Y.IsZero() { + + res[0] = mUncompressedInfinity + + return + } + + // tmp is used to convert from montgomery representation to regular + var tmp fp.Element + + // not compressed + // we store the Y coordinate + // p.Y.A1 | p.Y.A0 + tmp = p.Y.A0 + tmp.FromMont() + binary.BigEndian.PutUint64(res[24:32], tmp[0]) + + tmp = p.Y.A1 + tmp.FromMont() + binary.BigEndian.PutUint64(res[16:24], tmp[0]) + + // we store X and mask the most significant word with our metadata mask + // p.X.A1 | p.X.A0 + tmp = p.X.A1 + tmp.FromMont() + binary.BigEndian.PutUint64(res[0:8], tmp[0]) + + tmp = p.X.A0 + tmp.FromMont() + binary.BigEndian.PutUint64(res[8:16], tmp[0]) + + res[0] |= mUncompressed + + return +} + +// SetBytes sets p from binary 
representation in buf and returns number of consumed bytes +// bytes in buf must match either RawBytes() or Bytes() output +// if buf is too short io.ErrShortBuffer is returned +// if buf contains compressed representation (output from Bytes()) and we're unable to compute +// the Y coordinate (i.e the square root doesn't exist) this function retunrs an error +// this check if the resulting point is on the curve and in the correct subgroup +func (p *G2Affine) SetBytes(buf []byte) (int, error) { + return p.setBytes(buf, true) +} + +func (p *G2Affine) setBytes(buf []byte, subGroupCheck bool) (int, error) { + if len(buf) < SizeOfG2AffineCompressed { + return 0, io.ErrShortBuffer + } + + // most significant byte + mData := buf[0] & mMask + + // check buffer size + if (mData == mUncompressed) || (mData == mUncompressedInfinity) { + if len(buf) < SizeOfG2AffineUncompressed { + return 0, io.ErrShortBuffer + } + } + + // if infinity is encoded in the metadata, we don't need to read the buffer + if mData == mCompressedInfinity { + p.X.SetZero() + p.Y.SetZero() + return SizeOfG2AffineCompressed, nil + } + if mData == mUncompressedInfinity { + p.X.SetZero() + p.Y.SetZero() + return SizeOfG2AffineUncompressed, nil + } + + // uncompressed point + if mData == mUncompressed { + // read X and Y coordinates + // p.X.A1 | p.X.A0 + p.X.A1.SetBytes(buf[:fp.Bytes]) + p.X.A0.SetBytes(buf[fp.Bytes : fp.Bytes*2]) + // p.Y.A1 | p.Y.A0 + p.Y.A1.SetBytes(buf[fp.Bytes*2 : fp.Bytes*3]) + p.Y.A0.SetBytes(buf[fp.Bytes*3 : fp.Bytes*4]) + + // subgroup check + if subGroupCheck && !p.IsInSubGroup() { + return 0, errors.New("invalid point: subgroup check failed") + } + + return SizeOfG2AffineUncompressed, nil + } + + // we have a compressed coordinate + // we need to + // 1. copy the buffer (to keep this method thread safe) + // 2. 
we need to solve the curve equation to compute Y + + var bufX [fp.Bytes]byte + copy(bufX[:fp.Bytes], buf[:fp.Bytes]) + bufX[0] &= ^mMask + + // read X coordinate + // p.X.A1 | p.X.A0 + p.X.A1.SetBytes(bufX[:fp.Bytes]) + p.X.A0.SetBytes(buf[fp.Bytes : fp.Bytes*2]) + + var YSquared, Y fptower.E2 + + YSquared.Square(&p.X).Mul(&YSquared, &p.X) + YSquared.Add(&YSquared, &bTwistCurveCoeff) + if YSquared.Legendre() == -1 { + return 0, errors.New("invalid compressed coordinate: square root doesn't exist") + } + Y.Sqrt(&YSquared) + + if Y.LexicographicallyLargest() { + // Y ">" -Y + if mData == mCompressedSmallest { + Y.Neg(&Y) + } + } else { + // Y "<=" -Y + if mData == mCompressedLargest { + Y.Neg(&Y) + } + } + + p.Y = Y + + // subgroup check + if subGroupCheck && !p.IsInSubGroup() { + return 0, errors.New("invalid point: subgroup check failed") + } + + return SizeOfG2AffineCompressed, nil +} + +// unsafeComputeY called by Decoder when processing slices of compressed point in parallel (step 2) +// it computes the Y coordinate from the already set X coordinate and is compute intensive +func (p *G2Affine) unsafeComputeY(subGroupCheck bool) error { + // stored in unsafeSetCompressedBytes + + mData := byte(p.Y.A0[0]) + + // we have a compressed coordinate, we need to solve the curve equation to compute Y + var YSquared, Y fptower.E2 + + YSquared.Square(&p.X).Mul(&YSquared, &p.X) + YSquared.Add(&YSquared, &bTwistCurveCoeff) + if YSquared.Legendre() == -1 { + return errors.New("invalid compressed coordinate: square root doesn't exist") + } + Y.Sqrt(&YSquared) + + if Y.LexicographicallyLargest() { + // Y ">" -Y + if mData == mCompressedSmallest { + Y.Neg(&Y) + } + } else { + // Y "<=" -Y + if mData == mCompressedLargest { + Y.Neg(&Y) + } + } + + p.Y = Y + + // subgroup check + if subGroupCheck && !p.IsInSubGroup() { + return errors.New("invalid point: subgroup check failed") + } + + return nil +} + +// unsafeSetCompressedBytes is called by Decoder when processing slices of 
compressed point in parallel (step 1) +// assumes buf[:8] mask is set to compressed +// returns true if point is infinity and need no further processing +// it sets X coordinate and uses Y for scratch space to store decompression metadata +func (p *G2Affine) unsafeSetCompressedBytes(buf []byte) (isInfinity bool) { + + // read the most significant byte + mData := buf[0] & mMask + + if mData == mCompressedInfinity { + p.X.SetZero() + p.Y.SetZero() + isInfinity = true + return + } + + // we need to copy the input buffer (to keep this method thread safe) + var bufX [fp.Bytes]byte + copy(bufX[:fp.Bytes], buf[:fp.Bytes]) + bufX[0] &= ^mMask + + // read X coordinate + // p.X.A1 | p.X.A0 + p.X.A1.SetBytes(bufX[:fp.Bytes]) + p.X.A0.SetBytes(buf[fp.Bytes : fp.Bytes*2]) + + // store mData in p.Y.A0[0] + p.Y.A0[0] = uint64(mData) + + // recomputing Y will be done asynchronously + return +} diff --git a/ecc/bls12-39/marshal_test.go b/ecc/bls12-39/marshal_test.go new file mode 100644 index 0000000000..2a5f77b338 --- /dev/null +++ b/ecc/bls12-39/marshal_test.go @@ -0,0 +1,473 @@ +// Copyright 2020 ConsenSys Software Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Code generated by consensys/gnark-crypto DO NOT EDIT + +package bls1239 + +import ( + "bytes" + "io" + "math/big" + "math/rand" + "testing" + + "github.com/leanovate/gopter" + "github.com/leanovate/gopter/prop" + + "github.com/consensys/gnark-crypto/ecc/bls12-39/fp" + "github.com/consensys/gnark-crypto/ecc/bls12-39/fr" + "github.com/consensys/gnark-crypto/ecc/bls12-39/internal/fptower" +) + +const ( + nbFuzzShort = 10 + nbFuzz = 100 +) + +func TestEncoder(t *testing.T) { + t.Parallel() + // TODO need proper fuzz testing here + + var inA uint64 + var inB fr.Element + var inC fp.Element + var inD G1Affine + var inE G1Affine + var inF G2Affine + var inG []G1Affine + var inH []G2Affine + var inI []fp.Element + var inJ []fr.Element + + // set values of inputs + inA = rand.Uint64() + inB.SetRandom() + inC.SetRandom() + inD.ScalarMultiplication(&g1GenAff, new(big.Int).SetUint64(rand.Uint64())) + // inE --> infinity + inF.ScalarMultiplication(&g2GenAff, new(big.Int).SetUint64(rand.Uint64())) + inG = make([]G1Affine, 2) + inH = make([]G2Affine, 0) + inG[1] = inD + inI = make([]fp.Element, 3) + inI[2] = inD.X + inJ = make([]fr.Element, 0) + + // encode them, compressed and raw + var buf, bufRaw bytes.Buffer + enc := NewEncoder(&buf) + encRaw := NewEncoder(&bufRaw, RawEncoding()) + toEncode := []interface{}{inA, &inB, &inC, &inD, &inE, &inF, inG, inH, inI, inJ} + for _, v := range toEncode { + if err := enc.Encode(v); err != nil { + t.Fatal(err) + } + if err := encRaw.Encode(v); err != nil { + t.Fatal(err) + } + } + + testDecode := func(t *testing.T, r io.Reader, n int64) { + dec := NewDecoder(r) + var outA uint64 + var outB fr.Element + var outC fp.Element + var outD G1Affine + var outE G1Affine + outE.X.SetOne() + outE.Y.SetUint64(42) + var outF G2Affine + var outG []G1Affine + var outH []G2Affine + var outI []fp.Element + var outJ []fr.Element + + toDecode := []interface{}{&outA, &outB, &outC, &outD, &outE, &outF, &outG, &outH, &outI, &outJ} + for _, v := range 
toDecode { + if err := dec.Decode(v); err != nil { + t.Fatal(err) + } + } + + // compare values + if inA != outA { + t.Fatal("didn't encode/decode uint64 value properly") + } + + if !inB.Equal(&outB) || !inC.Equal(&outC) { + t.Fatal("decode(encode(Element) failed") + } + if !inD.Equal(&outD) || !inE.Equal(&outE) { + t.Fatal("decode(encode(G1Affine) failed") + } + if !inF.Equal(&outF) { + t.Fatal("decode(encode(G2Affine) failed") + } + if (len(inG) != len(outG)) || (len(inH) != len(outH)) { + t.Fatal("decode(encode(slice(points))) failed") + } + for i := 0; i < len(inG); i++ { + if !inG[i].Equal(&outG[i]) { + t.Fatal("decode(encode(slice(points))) failed") + } + } + if (len(inI) != len(outI)) || (len(inJ) != len(outJ)) { + t.Fatal("decode(encode(slice(elements))) failed") + } + for i := 0; i < len(inI); i++ { + if !inI[i].Equal(&outI[i]) { + t.Fatal("decode(encode(slice(elements))) failed") + } + } + if n != dec.BytesRead() { + t.Fatal("bytes read don't match bytes written") + } + } + + // decode them + testDecode(t, &buf, enc.BytesWritten()) + testDecode(t, &bufRaw, encRaw.BytesWritten()) + +} + +func TestIsCompressed(t *testing.T) { + t.Parallel() + var g1Inf, g1 G1Affine + var g2Inf, g2 G2Affine + + g1 = g1GenAff + g2 = g2GenAff + + { + b := g1Inf.Bytes() + if !isCompressed(b[0]) { + t.Fatal("g1Inf.Bytes() should be compressed") + } + } + + { + b := g1Inf.RawBytes() + if isCompressed(b[0]) { + t.Fatal("g1Inf.RawBytes() should be uncompressed") + } + } + + { + b := g1.Bytes() + if !isCompressed(b[0]) { + t.Fatal("g1.Bytes() should be compressed") + } + } + + { + b := g1.RawBytes() + if isCompressed(b[0]) { + t.Fatal("g1.RawBytes() should be uncompressed") + } + } + + { + b := g2Inf.Bytes() + if !isCompressed(b[0]) { + t.Fatal("g2Inf.Bytes() should be compressed") + } + } + + { + b := g2Inf.RawBytes() + if isCompressed(b[0]) { + t.Fatal("g2Inf.RawBytes() should be uncompressed") + } + } + + { + b := g2.Bytes() + if !isCompressed(b[0]) { + t.Fatal("g2.Bytes() should 
be compressed") + } + } + + { + b := g2.RawBytes() + if isCompressed(b[0]) { + t.Fatal("g2.RawBytes() should be uncompressed") + } + } + +} + +func TestG1AffineSerialization(t *testing.T) { + t.Parallel() + // test round trip serialization of infinity + { + // compressed + { + var p1, p2 G1Affine + p2.X.SetRandom() + p2.Y.SetRandom() + buf := p1.Bytes() + n, err := p2.SetBytes(buf[:]) + if err != nil { + t.Fatal(err) + } + if n != SizeOfG1AffineCompressed { + t.Fatal("invalid number of bytes consumed in buffer") + } + if !(p2.X.IsZero() && p2.Y.IsZero()) { + t.Fatal("deserialization of uncompressed infinity point is not infinity") + } + } + + // uncompressed + { + var p1, p2 G1Affine + p2.X.SetRandom() + p2.Y.SetRandom() + buf := p1.RawBytes() + n, err := p2.SetBytes(buf[:]) + if err != nil { + t.Fatal(err) + } + if n != SizeOfG1AffineUncompressed { + t.Fatal("invalid number of bytes consumed in buffer") + } + if !(p2.X.IsZero() && p2.Y.IsZero()) { + t.Fatal("deserialization of uncompressed infinity point is not infinity") + } + } + } + + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + properties.Property("[G1] Affine SetBytes(RawBytes) should stay the same", prop.ForAll( + func(a fp.Element) bool { + var start, end G1Affine + var ab big.Int + a.ToBigIntRegular(&ab) + start.ScalarMultiplication(&g1GenAff, &ab) + + buf := start.RawBytes() + n, err := end.SetBytes(buf[:]) + if err != nil { + return false + } + if n != SizeOfG1AffineUncompressed { + return false + } + return start.X.Equal(&end.X) && start.Y.Equal(&end.Y) + }, + GenFp(), + )) + + properties.Property("[G1] Affine SetBytes(Bytes()) should stay the same", prop.ForAll( + func(a fp.Element) bool { + var start, end G1Affine + var ab big.Int + a.ToBigIntRegular(&ab) + start.ScalarMultiplication(&g1GenAff, &ab) + + buf := start.Bytes() 
+ n, err := end.SetBytes(buf[:]) + if err != nil { + return false + } + if n != SizeOfG1AffineCompressed { + return false + } + return start.X.Equal(&end.X) && start.Y.Equal(&end.Y) + }, + GenFp(), + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) +} + +func TestG2AffineSerialization(t *testing.T) { + t.Parallel() + // test round trip serialization of infinity + { + // compressed + { + var p1, p2 G2Affine + p2.X.SetRandom() + p2.Y.SetRandom() + buf := p1.Bytes() + n, err := p2.SetBytes(buf[:]) + if err != nil { + t.Fatal(err) + } + if n != SizeOfG2AffineCompressed { + t.Fatal("invalid number of bytes consumed in buffer") + } + if !(p2.X.IsZero() && p2.Y.IsZero()) { + t.Fatal("deserialization of uncompressed infinity point is not infinity") + } + } + + // uncompressed + { + var p1, p2 G2Affine + p2.X.SetRandom() + p2.Y.SetRandom() + buf := p1.RawBytes() + n, err := p2.SetBytes(buf[:]) + if err != nil { + t.Fatal(err) + } + if n != SizeOfG2AffineUncompressed { + t.Fatal("invalid number of bytes consumed in buffer") + } + if !(p2.X.IsZero() && p2.Y.IsZero()) { + t.Fatal("deserialization of uncompressed infinity point is not infinity") + } + } + } + + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + properties.Property("[G2] Affine SetBytes(RawBytes) should stay the same", prop.ForAll( + func(a fp.Element) bool { + var start, end G2Affine + var ab big.Int + a.ToBigIntRegular(&ab) + start.ScalarMultiplication(&g2GenAff, &ab) + + buf := start.RawBytes() + n, err := end.SetBytes(buf[:]) + if err != nil { + return false + } + if n != SizeOfG2AffineUncompressed { + return false + } + return start.X.Equal(&end.X) && start.Y.Equal(&end.Y) + }, + GenFp(), + )) + + properties.Property("[G2] Affine SetBytes(Bytes()) should stay the same", prop.ForAll( + func(a fp.Element) bool { + var start, 
end G2Affine + var ab big.Int + a.ToBigIntRegular(&ab) + start.ScalarMultiplication(&g2GenAff, &ab) + + buf := start.Bytes() + n, err := end.SetBytes(buf[:]) + if err != nil { + return false + } + if n != SizeOfG2AffineCompressed { + return false + } + return start.X.Equal(&end.X) && start.Y.Equal(&end.Y) + }, + GenFp(), + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) +} + +// define Gopters generators + +// GenFr generates an Fr element +func GenFr() gopter.Gen { + return func(genParams *gopter.GenParameters) *gopter.GenResult { + var elmt fr.Element + var b [fr.Bytes]byte + _, err := rand.Read(b[:]) + if err != nil { + panic(err) + } + elmt.SetBytes(b[:]) + genResult := gopter.NewGenResult(elmt, gopter.NoShrinker) + return genResult + } +} + +// GenFp generates an Fp element +func GenFp() gopter.Gen { + return func(genParams *gopter.GenParameters) *gopter.GenResult { + var elmt fp.Element + var b [fp.Bytes]byte + _, err := rand.Read(b[:]) + if err != nil { + panic(err) + } + elmt.SetBytes(b[:]) + genResult := gopter.NewGenResult(elmt, gopter.NoShrinker) + return genResult + } +} + +// GenE2 generates an fptower.E2 elmt +func GenE2() gopter.Gen { + return gopter.CombineGens( + GenFp(), + GenFp(), + ).Map(func(values []interface{}) fptower.E2 { + return fptower.E2{A0: values[0].(fp.Element), A1: values[1].(fp.Element)} + }) +} + +// GenE6 generates an fptower.E6 elmt +func GenE6() gopter.Gen { + return gopter.CombineGens( + GenE2(), + GenE2(), + GenE2(), + ).Map(func(values []interface{}) fptower.E6 { + return fptower.E6{B0: values[0].(fptower.E2), B1: values[1].(fptower.E2), B2: values[2].(fptower.E2)} + }) +} + +// GenE12 generates an fptower.E6 elmt +func GenE12() gopter.Gen { + return gopter.CombineGens( + GenE6(), + GenE6(), + ).Map(func(values []interface{}) fptower.E12 { + return fptower.E12{C0: values[0].(fptower.E6), C1: values[1].(fptower.E6)} + }) +} + +// GenBigInt generates a big.Int +func GenBigInt() gopter.Gen { + return 
func(genParams *gopter.GenParameters) *gopter.GenResult { + var s big.Int + var b [fp.Bytes]byte + _, err := rand.Read(b[:]) + if err != nil { + panic(err) + } + s.SetBytes(b[:]) + genResult := gopter.NewGenResult(s, gopter.NoShrinker) + return genResult + } +} diff --git a/ecc/bls12-39/multiexp.go b/ecc/bls12-39/multiexp.go new file mode 100644 index 0000000000..4f42867497 --- /dev/null +++ b/ecc/bls12-39/multiexp.go @@ -0,0 +1,2303 @@ +// Copyright 2020 ConsenSys Software Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Code generated by consensys/gnark-crypto DO NOT EDIT + +package bls1239 + +import ( + "errors" + "github.com/consensys/gnark-crypto/ecc" + "github.com/consensys/gnark-crypto/ecc/bls12-39/fr" + "github.com/consensys/gnark-crypto/internal/parallel" + "math" + "runtime" +) + +// selector stores the index, mask and shifts needed to select bits from a scalar +// it is used during the multiExp algorithm or the batch scalar multiplication +type selector struct { + index uint64 // index in the multi-word scalar to select bits from + mask uint64 // mask (c-bit wide) + shift uint64 // shift needed to get our bits on low positions + + multiWordSelect bool // set to true if we need to select bits from 2 words (case where c doesn't divide 64) + maskHigh uint64 // same than mask, for index+1 + shiftHigh uint64 // same than shift, for index+1 +} + +// partitionScalars compute, for each scalars over c-bit wide windows, nbChunk digits +// if the digit is larger than 2^{c-1}, then, we borrow 2^c from the next window and substract +// 2^{c} to the current digit, making it negative. 
+// negative digits can be processed in a later step as adding -G into the bucket instead of G +// (computing -G is cheap, and this saves us half of the buckets in the MultiExp or BatchScalarMul) +// scalarsMont indicates wheter the provided scalars are in montgomery form +// returns smallValues, which represent the number of scalars which meets the following condition +// 0 < scalar < 2^c (in other words, scalars where only the c-least significant bits are non zero) +func partitionScalars(scalars []fr.Element, c uint64, scalarsMont bool, nbTasks int) ([]fr.Element, int) { + toReturn := make([]fr.Element, len(scalars)) + + // number of c-bit radixes in a scalar + nbChunks := fr.Limbs * 64 / c + if (fr.Limbs*64)%c != 0 { + nbChunks++ + } + + mask := uint64((1 << c) - 1) // low c bits are 1 + msbWindow := uint64(1 << (c - 1)) // msb of the c-bit window + max := int(1 << (c - 1)) // max value we want for our digits + cDivides64 := (64 % c) == 0 // if c doesn't divide 64, we may need to select over multiple words + + // compute offset and word selector / shift to select the right bits of our windows + selectors := make([]selector, nbChunks) + for chunk := uint64(0); chunk < nbChunks; chunk++ { + jc := uint64(chunk * c) + d := selector{} + d.index = jc / 64 + d.shift = jc - (d.index * 64) + d.mask = mask << d.shift + d.multiWordSelect = !cDivides64 && d.shift > (64-c) && d.index < (fr.Limbs-1) + if d.multiWordSelect { + nbBitsHigh := d.shift - uint64(64-c) + d.maskHigh = (1 << nbBitsHigh) - 1 + d.shiftHigh = (c - nbBitsHigh) + } + selectors[chunk] = d + } + + // for each chunk, we could track the number of non-zeros points we will need to process + // this way, if a chunk has more work to do than others, we can spawn off more go routines + // (at the cost of more buckets allocated) + // a simplified approach is to track the small values where only the first word is set + // if this number represent a significant number of points, then we will split first chunk + // 
processing in the msm in 2, to ensure all go routines finish at ~same time + // /!\ nbTasks is enough as parallel.Execute is not going to spawn more than nbTasks go routine + // if it does, though, this will deadlocK. + chSmallValues := make(chan int, nbTasks) + + parallel.Execute(len(scalars), func(start, end int) { + smallValues := 0 + for i := start; i < end; i++ { + var carry int + + scalar := scalars[i] + if scalarsMont { + scalar.FromMont() + } + if scalar.FitsOnOneWord() { + // everything is 0, no need to process this scalar + if scalar[0] == 0 { + continue + } + // low c-bits are 1 in mask + if scalar[0]&mask == scalar[0] { + smallValues++ + } + } + + // for each chunk in the scalar, compute the current digit, and an eventual carry + for chunk := uint64(0); chunk < nbChunks; chunk++ { + s := selectors[chunk] + + // init with carry if any + digit := carry + carry = 0 + + // digit = value of the c-bit window + digit += int((scalar[s.index] & s.mask) >> s.shift) + + if s.multiWordSelect { + // we are selecting bits over 2 words + digit += int(scalar[s.index+1]&s.maskHigh) << s.shiftHigh + } + + // if digit is zero, no impact on result + if digit == 0 { + continue + } + + // if the digit is larger than 2^{c-1}, then, we borrow 2^c from the next window and substract + // 2^{c} to the current digit, making it negative. 
+ if digit >= max { + digit -= (1 << c) + carry = 1 + } + + var bits uint64 + if digit >= 0 { + bits = uint64(digit) + } else { + bits = uint64(-digit-1) | msbWindow + } + + toReturn[i][s.index] |= (bits << s.shift) + if s.multiWordSelect { + toReturn[i][s.index+1] |= (bits >> s.shiftHigh) + } + + } + } + + chSmallValues <- smallValues + + }, nbTasks) + + // aggregate small values + close(chSmallValues) + smallValues := 0 + for o := range chSmallValues { + smallValues += o + } + return toReturn, smallValues +} + +// MultiExp implements section 4 of https://eprint.iacr.org/2012/549.pdf +func (p *G1Affine) MultiExp(points []G1Affine, scalars []fr.Element, config ecc.MultiExpConfig) (*G1Affine, error) { + var _p G1Jac + if _, err := _p.MultiExp(points, scalars, config); err != nil { + return nil, err + } + p.FromJacobian(&_p) + return p, nil +} + +// MultiExp implements section 4 of https://eprint.iacr.org/2012/549.pdf +func (p *G1Jac) MultiExp(points []G1Affine, scalars []fr.Element, config ecc.MultiExpConfig) (*G1Jac, error) { + // note: + // each of the msmCX method is the same, except for the c constant it declares + // duplicating (through template generation) these methods allows to declare the buckets on the stack + // the choice of c needs to be improved: + // there is a theoritical value that gives optimal asymptotics + // but in practice, other factors come into play, including: + // * if c doesn't divide 64, the word size, then we're bound to select bits over 2 words of our scalars, instead of 1 + // * number of CPUs + // * cache friendliness (which depends on the host, G1 or G2... ) + // --> for example, on BN254, a G1 point fits into one cache line of 64bytes, but a G2 point don't. + + // for each msmCX + // step 1 + // we compute, for each scalars over c-bit wide windows, nbChunk digits + // if the digit is larger than 2^{c-1}, then, we borrow 2^c from the next window and substract + // 2^{c} to the current digit, making it negative. 
+ // negative digits will be processed in the next step as adding -G into the bucket instead of G + // (computing -G is cheap, and this saves us half of the buckets) + // step 2 + // buckets are declared on the stack + // notice that we have 2^{c-1} buckets instead of 2^{c} (see step1) + // we use jacobian extended formulas here as they are faster than mixed addition + // msmProcessChunk places points into buckets base on their selector and return the weighted bucket sum in given channel + // step 3 + // reduce the buckets weigthed sums into our result (msmReduceChunk) + + // ensure len(points) == len(scalars) + nbPoints := len(points) + if nbPoints != len(scalars) { + return nil, errors.New("len(points) != len(scalars)") + } + + // if nbTasks is not set, use all available CPUs + if config.NbTasks <= 0 { + config.NbTasks = runtime.NumCPU() + } + + // here, we compute the best C for nbPoints + // we split recursively until nbChunks(c) >= nbTasks, + bestC := func(nbPoints int) uint64 { + // implemented msmC methods (the c we use must be in this slice) + implementedCs := []uint64{4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 20, 21} + var C uint64 + // approximate cost (in group operations) + // cost = bits/c * (nbPoints + 2^{c}) + // this needs to be verified empirically. + // for example, on a MBP 2016, for G2 MultiExp > 8M points, hand picking c gives better results + min := math.MaxFloat64 + for _, c := range implementedCs { + cc := fr.Limbs * 64 * (nbPoints + (1 << (c))) + cost := float64(cc) / float64(c) + if cost < min { + min = cost + C = c + } + } + // empirical, needs to be tuned. 
+ // if C > 16 && nbPoints < 1 << 23 { + // C = 16 + // } + return C + } + + var C uint64 + nbSplits := 1 + nbChunks := 0 + for nbChunks < config.NbTasks { + C = bestC(nbPoints) + nbChunks = int(fr.Limbs * 64 / C) // number of c-bit radixes in a scalar + if (fr.Limbs*64)%C != 0 { + nbChunks++ + } + nbChunks *= nbSplits + if nbChunks < config.NbTasks { + nbSplits <<= 1 + nbPoints >>= 1 + } + } + + // partition the scalars + // note: we do that before the actual chunk processing, as for each c-bit window (starting from LSW) + // if it's larger than 2^{c-1}, we have a carry we need to propagate up to the higher window + var smallValues int + scalars, smallValues = partitionScalars(scalars, C, config.ScalarsMont, config.NbTasks) + + // if we have more than 10% of small values, we split the processing of the first chunk in 2 + // we may want to do that in msmInnerG1Jac , but that would incur a cost of looping through all scalars one more time + splitFirstChunk := (float64(smallValues) / float64(len(scalars))) >= 0.1 + + // we have nbSplits intermediate results that we must sum together. 
+ _p := make([]G1Jac, nbSplits-1) + chDone := make(chan int, nbSplits-1) + for i := 0; i < nbSplits-1; i++ { + start := i * nbPoints + end := start + nbPoints + go func(start, end, i int) { + msmInnerG1Jac(&_p[i], int(C), points[start:end], scalars[start:end], splitFirstChunk) + chDone <- i + }(start, end, i) + } + + msmInnerG1Jac(p, int(C), points[(nbSplits-1)*nbPoints:], scalars[(nbSplits-1)*nbPoints:], splitFirstChunk) + for i := 0; i < nbSplits-1; i++ { + done := <-chDone + p.AddAssign(&_p[done]) + } + close(chDone) + return p, nil +} + +func msmInnerG1Jac(p *G1Jac, c int, points []G1Affine, scalars []fr.Element, splitFirstChunk bool) { + + switch c { + + case 4: + p.msmC4(points, scalars, splitFirstChunk) + + case 5: + p.msmC5(points, scalars, splitFirstChunk) + + case 6: + p.msmC6(points, scalars, splitFirstChunk) + + case 7: + p.msmC7(points, scalars, splitFirstChunk) + + case 8: + p.msmC8(points, scalars, splitFirstChunk) + + case 9: + p.msmC9(points, scalars, splitFirstChunk) + + case 10: + p.msmC10(points, scalars, splitFirstChunk) + + case 11: + p.msmC11(points, scalars, splitFirstChunk) + + case 12: + p.msmC12(points, scalars, splitFirstChunk) + + case 13: + p.msmC13(points, scalars, splitFirstChunk) + + case 14: + p.msmC14(points, scalars, splitFirstChunk) + + case 15: + p.msmC15(points, scalars, splitFirstChunk) + + case 16: + p.msmC16(points, scalars, splitFirstChunk) + + case 20: + p.msmC20(points, scalars, splitFirstChunk) + + case 21: + p.msmC21(points, scalars, splitFirstChunk) + + case 22: + p.msmC22(points, scalars, splitFirstChunk) + + default: + panic("not implemented") + } +} + +// msmReduceChunkG1Affine reduces the weighted sum of the buckets into the result of the multiExp +func msmReduceChunkG1Affine(p *G1Jac, c int, chChunks []chan g1JacExtended) *G1Jac { + var _p g1JacExtended + totalj := <-chChunks[len(chChunks)-1] + _p.Set(&totalj) + for j := len(chChunks) - 2; j >= 0; j-- { + for l := 0; l < c; l++ { + _p.double(&_p) + } + totalj := 
<-chChunks[j] + _p.add(&totalj) + } + + return p.unsafeFromJacExtended(&_p) +} + +func msmProcessChunkG1Affine(chunk uint64, + chRes chan<- g1JacExtended, + buckets []g1JacExtended, + c uint64, + points []G1Affine, + scalars []fr.Element) { + + mask := uint64((1 << c) - 1) // low c bits are 1 + msbWindow := uint64(1 << (c - 1)) + + for i := 0; i < len(buckets); i++ { + buckets[i].setInfinity() + } + + jc := uint64(chunk * c) + s := selector{} + s.index = jc / 64 + s.shift = jc - (s.index * 64) + s.mask = mask << s.shift + s.multiWordSelect = (64%c) != 0 && s.shift > (64-c) && s.index < (fr.Limbs-1) + if s.multiWordSelect { + nbBitsHigh := s.shift - uint64(64-c) + s.maskHigh = (1 << nbBitsHigh) - 1 + s.shiftHigh = (c - nbBitsHigh) + } + + // for each scalars, get the digit corresponding to the chunk we're processing. + for i := 0; i < len(scalars); i++ { + bits := (scalars[i][s.index] & s.mask) >> s.shift + if s.multiWordSelect { + bits += (scalars[i][s.index+1] & s.maskHigh) << s.shiftHigh + } + + if bits == 0 { + continue + } + + // if msbWindow bit is set, we need to substract + if bits&msbWindow == 0 { + // add + buckets[bits-1].addMixed(&points[i]) + } else { + // sub + buckets[bits & ^msbWindow].subMixed(&points[i]) + } + } + + // reduce buckets into total + // total = bucket[0] + 2*bucket[1] + 3*bucket[2] ... 
+ n*bucket[n-1] + + var runningSum, total g1JacExtended + runningSum.setInfinity() + total.setInfinity() + for k := len(buckets) - 1; k >= 0; k-- { + if !buckets[k].ZZ.IsZero() { + runningSum.add(&buckets[k]) + } + total.add(&runningSum) + } + + chRes <- total + +} + +func (p *G1Jac) msmC4(points []G1Affine, scalars []fr.Element, splitFirstChunk bool) *G1Jac { + const ( + c = 4 // scalars partitioned into c-bit radixes + nbChunks = (fr.Limbs * 64 / c) // number of c-bit radixes in a scalar + ) + + // for each chunk, spawn one go routine that'll loop through all the scalars in the + // corresponding bit-window + // note that buckets is an array allocated on the stack (for most sizes of c) and this is + // critical for performance + + // each go routine sends its result in chChunks[i] channel + var chChunks [nbChunks]chan g1JacExtended + for i := 0; i < len(chChunks); i++ { + chChunks[i] = make(chan g1JacExtended, 1) + } + + processChunk := func(j int, points []G1Affine, scalars []fr.Element, chChunk chan g1JacExtended) { + var buckets [1 << (c - 1)]g1JacExtended + msmProcessChunkG1Affine(uint64(j), chChunk, buckets[:], c, points, scalars) + } + + for j := int(nbChunks - 1); j > 0; j-- { + go processChunk(j, points, scalars, chChunks[j]) + } + + if !splitFirstChunk { + go processChunk(0, points, scalars, chChunks[0]) + } else { + chSplit := make(chan g1JacExtended, 2) + split := len(points) / 2 + go processChunk(0, points[:split], scalars[:split], chSplit) + go processChunk(0, points[split:], scalars[split:], chSplit) + go func() { + s1 := <-chSplit + s2 := <-chSplit + close(chSplit) + s1.add(&s2) + chChunks[0] <- s1 + }() + } + + return msmReduceChunkG1Affine(p, c, chChunks[:]) +} + +func (p *G1Jac) msmC5(points []G1Affine, scalars []fr.Element, splitFirstChunk bool) *G1Jac { + const ( + c = 5 // scalars partitioned into c-bit radixes + nbChunks = (fr.Limbs * 64 / c) // number of c-bit radixes in a scalar + ) + + // for each chunk, spawn one go routine that'll loop 
through all the scalars in the + // corresponding bit-window + // note that buckets is an array allocated on the stack (for most sizes of c) and this is + // critical for performance + + // each go routine sends its result in chChunks[i] channel + var chChunks [nbChunks + 1]chan g1JacExtended + for i := 0; i < len(chChunks); i++ { + chChunks[i] = make(chan g1JacExtended, 1) + } + + // c doesn't divide 64, last window is smaller we can allocate less buckets + const lastC = (fr.Limbs * 64) - (c * (fr.Limbs * 64 / c)) + go func(j uint64, points []G1Affine, scalars []fr.Element) { + var buckets [1 << (lastC - 1)]g1JacExtended + msmProcessChunkG1Affine(j, chChunks[j], buckets[:], c, points, scalars) + }(uint64(nbChunks), points, scalars) + + processChunk := func(j int, points []G1Affine, scalars []fr.Element, chChunk chan g1JacExtended) { + var buckets [1 << (c - 1)]g1JacExtended + msmProcessChunkG1Affine(uint64(j), chChunk, buckets[:], c, points, scalars) + } + + for j := int(nbChunks - 1); j > 0; j-- { + go processChunk(j, points, scalars, chChunks[j]) + } + + if !splitFirstChunk { + go processChunk(0, points, scalars, chChunks[0]) + } else { + chSplit := make(chan g1JacExtended, 2) + split := len(points) / 2 + go processChunk(0, points[:split], scalars[:split], chSplit) + go processChunk(0, points[split:], scalars[split:], chSplit) + go func() { + s1 := <-chSplit + s2 := <-chSplit + close(chSplit) + s1.add(&s2) + chChunks[0] <- s1 + }() + } + + return msmReduceChunkG1Affine(p, c, chChunks[:]) +} + +func (p *G1Jac) msmC6(points []G1Affine, scalars []fr.Element, splitFirstChunk bool) *G1Jac { + const ( + c = 6 // scalars partitioned into c-bit radixes + nbChunks = (fr.Limbs * 64 / c) // number of c-bit radixes in a scalar + ) + + // for each chunk, spawn one go routine that'll loop through all the scalars in the + // corresponding bit-window + // note that buckets is an array allocated on the stack (for most sizes of c) and this is + // critical for performance + + // 
each go routine sends its result in chChunks[i] channel + var chChunks [nbChunks + 1]chan g1JacExtended + for i := 0; i < len(chChunks); i++ { + chChunks[i] = make(chan g1JacExtended, 1) + } + + // c doesn't divide 64, last window is smaller we can allocate less buckets + const lastC = (fr.Limbs * 64) - (c * (fr.Limbs * 64 / c)) + go func(j uint64, points []G1Affine, scalars []fr.Element) { + var buckets [1 << (lastC - 1)]g1JacExtended + msmProcessChunkG1Affine(j, chChunks[j], buckets[:], c, points, scalars) + }(uint64(nbChunks), points, scalars) + + processChunk := func(j int, points []G1Affine, scalars []fr.Element, chChunk chan g1JacExtended) { + var buckets [1 << (c - 1)]g1JacExtended + msmProcessChunkG1Affine(uint64(j), chChunk, buckets[:], c, points, scalars) + } + + for j := int(nbChunks - 1); j > 0; j-- { + go processChunk(j, points, scalars, chChunks[j]) + } + + if !splitFirstChunk { + go processChunk(0, points, scalars, chChunks[0]) + } else { + chSplit := make(chan g1JacExtended, 2) + split := len(points) / 2 + go processChunk(0, points[:split], scalars[:split], chSplit) + go processChunk(0, points[split:], scalars[split:], chSplit) + go func() { + s1 := <-chSplit + s2 := <-chSplit + close(chSplit) + s1.add(&s2) + chChunks[0] <- s1 + }() + } + + return msmReduceChunkG1Affine(p, c, chChunks[:]) +} + +func (p *G1Jac) msmC7(points []G1Affine, scalars []fr.Element, splitFirstChunk bool) *G1Jac { + const ( + c = 7 // scalars partitioned into c-bit radixes + nbChunks = (fr.Limbs * 64 / c) // number of c-bit radixes in a scalar + ) + + // for each chunk, spawn one go routine that'll loop through all the scalars in the + // corresponding bit-window + // note that buckets is an array allocated on the stack (for most sizes of c) and this is + // critical for performance + + // each go routine sends its result in chChunks[i] channel + var chChunks [nbChunks + 1]chan g1JacExtended + for i := 0; i < len(chChunks); i++ { + chChunks[i] = make(chan g1JacExtended, 1) + } 
+ + // c doesn't divide 64, last window is smaller we can allocate less buckets + const lastC = (fr.Limbs * 64) - (c * (fr.Limbs * 64 / c)) + go func(j uint64, points []G1Affine, scalars []fr.Element) { + var buckets [1 << (lastC - 1)]g1JacExtended + msmProcessChunkG1Affine(j, chChunks[j], buckets[:], c, points, scalars) + }(uint64(nbChunks), points, scalars) + + processChunk := func(j int, points []G1Affine, scalars []fr.Element, chChunk chan g1JacExtended) { + var buckets [1 << (c - 1)]g1JacExtended + msmProcessChunkG1Affine(uint64(j), chChunk, buckets[:], c, points, scalars) + } + + for j := int(nbChunks - 1); j > 0; j-- { + go processChunk(j, points, scalars, chChunks[j]) + } + + if !splitFirstChunk { + go processChunk(0, points, scalars, chChunks[0]) + } else { + chSplit := make(chan g1JacExtended, 2) + split := len(points) / 2 + go processChunk(0, points[:split], scalars[:split], chSplit) + go processChunk(0, points[split:], scalars[split:], chSplit) + go func() { + s1 := <-chSplit + s2 := <-chSplit + close(chSplit) + s1.add(&s2) + chChunks[0] <- s1 + }() + } + + return msmReduceChunkG1Affine(p, c, chChunks[:]) +} + +func (p *G1Jac) msmC8(points []G1Affine, scalars []fr.Element, splitFirstChunk bool) *G1Jac { + const ( + c = 8 // scalars partitioned into c-bit radixes + nbChunks = (fr.Limbs * 64 / c) // number of c-bit radixes in a scalar + ) + + // for each chunk, spawn one go routine that'll loop through all the scalars in the + // corresponding bit-window + // note that buckets is an array allocated on the stack (for most sizes of c) and this is + // critical for performance + + // each go routine sends its result in chChunks[i] channel + var chChunks [nbChunks]chan g1JacExtended + for i := 0; i < len(chChunks); i++ { + chChunks[i] = make(chan g1JacExtended, 1) + } + + processChunk := func(j int, points []G1Affine, scalars []fr.Element, chChunk chan g1JacExtended) { + var buckets [1 << (c - 1)]g1JacExtended + msmProcessChunkG1Affine(uint64(j), chChunk, 
buckets[:], c, points, scalars) + } + + for j := int(nbChunks - 1); j > 0; j-- { + go processChunk(j, points, scalars, chChunks[j]) + } + + if !splitFirstChunk { + go processChunk(0, points, scalars, chChunks[0]) + } else { + chSplit := make(chan g1JacExtended, 2) + split := len(points) / 2 + go processChunk(0, points[:split], scalars[:split], chSplit) + go processChunk(0, points[split:], scalars[split:], chSplit) + go func() { + s1 := <-chSplit + s2 := <-chSplit + close(chSplit) + s1.add(&s2) + chChunks[0] <- s1 + }() + } + + return msmReduceChunkG1Affine(p, c, chChunks[:]) +} + +func (p *G1Jac) msmC9(points []G1Affine, scalars []fr.Element, splitFirstChunk bool) *G1Jac { + const ( + c = 9 // scalars partitioned into c-bit radixes + nbChunks = (fr.Limbs * 64 / c) // number of c-bit radixes in a scalar + ) + + // for each chunk, spawn one go routine that'll loop through all the scalars in the + // corresponding bit-window + // note that buckets is an array allocated on the stack (for most sizes of c) and this is + // critical for performance + + // each go routine sends its result in chChunks[i] channel + var chChunks [nbChunks + 1]chan g1JacExtended + for i := 0; i < len(chChunks); i++ { + chChunks[i] = make(chan g1JacExtended, 1) + } + + // c doesn't divide 64, last window is smaller we can allocate less buckets + const lastC = (fr.Limbs * 64) - (c * (fr.Limbs * 64 / c)) + go func(j uint64, points []G1Affine, scalars []fr.Element) { + var buckets [1 << (lastC - 1)]g1JacExtended + msmProcessChunkG1Affine(j, chChunks[j], buckets[:], c, points, scalars) + }(uint64(nbChunks), points, scalars) + + processChunk := func(j int, points []G1Affine, scalars []fr.Element, chChunk chan g1JacExtended) { + var buckets [1 << (c - 1)]g1JacExtended + msmProcessChunkG1Affine(uint64(j), chChunk, buckets[:], c, points, scalars) + } + + for j := int(nbChunks - 1); j > 0; j-- { + go processChunk(j, points, scalars, chChunks[j]) + } + + if !splitFirstChunk { + go processChunk(0, points, 
scalars, chChunks[0]) + } else { + chSplit := make(chan g1JacExtended, 2) + split := len(points) / 2 + go processChunk(0, points[:split], scalars[:split], chSplit) + go processChunk(0, points[split:], scalars[split:], chSplit) + go func() { + s1 := <-chSplit + s2 := <-chSplit + close(chSplit) + s1.add(&s2) + chChunks[0] <- s1 + }() + } + + return msmReduceChunkG1Affine(p, c, chChunks[:]) +} + +func (p *G1Jac) msmC10(points []G1Affine, scalars []fr.Element, splitFirstChunk bool) *G1Jac { + const ( + c = 10 // scalars partitioned into c-bit radixes + nbChunks = (fr.Limbs * 64 / c) // number of c-bit radixes in a scalar + ) + + // for each chunk, spawn one go routine that'll loop through all the scalars in the + // corresponding bit-window + // note that buckets is an array allocated on the stack (for most sizes of c) and this is + // critical for performance + + // each go routine sends its result in chChunks[i] channel + var chChunks [nbChunks + 1]chan g1JacExtended + for i := 0; i < len(chChunks); i++ { + chChunks[i] = make(chan g1JacExtended, 1) + } + + // c doesn't divide 64, last window is smaller we can allocate less buckets + const lastC = (fr.Limbs * 64) - (c * (fr.Limbs * 64 / c)) + go func(j uint64, points []G1Affine, scalars []fr.Element) { + var buckets [1 << (lastC - 1)]g1JacExtended + msmProcessChunkG1Affine(j, chChunks[j], buckets[:], c, points, scalars) + }(uint64(nbChunks), points, scalars) + + processChunk := func(j int, points []G1Affine, scalars []fr.Element, chChunk chan g1JacExtended) { + var buckets [1 << (c - 1)]g1JacExtended + msmProcessChunkG1Affine(uint64(j), chChunk, buckets[:], c, points, scalars) + } + + for j := int(nbChunks - 1); j > 0; j-- { + go processChunk(j, points, scalars, chChunks[j]) + } + + if !splitFirstChunk { + go processChunk(0, points, scalars, chChunks[0]) + } else { + chSplit := make(chan g1JacExtended, 2) + split := len(points) / 2 + go processChunk(0, points[:split], scalars[:split], chSplit) + go processChunk(0, 
points[split:], scalars[split:], chSplit) + go func() { + s1 := <-chSplit + s2 := <-chSplit + close(chSplit) + s1.add(&s2) + chChunks[0] <- s1 + }() + } + + return msmReduceChunkG1Affine(p, c, chChunks[:]) +} + +func (p *G1Jac) msmC11(points []G1Affine, scalars []fr.Element, splitFirstChunk bool) *G1Jac { + const ( + c = 11 // scalars partitioned into c-bit radixes + nbChunks = (fr.Limbs * 64 / c) // number of c-bit radixes in a scalar + ) + + // for each chunk, spawn one go routine that'll loop through all the scalars in the + // corresponding bit-window + // note that buckets is an array allocated on the stack (for most sizes of c) and this is + // critical for performance + + // each go routine sends its result in chChunks[i] channel + var chChunks [nbChunks + 1]chan g1JacExtended + for i := 0; i < len(chChunks); i++ { + chChunks[i] = make(chan g1JacExtended, 1) + } + + // c doesn't divide 64, last window is smaller we can allocate less buckets + const lastC = (fr.Limbs * 64) - (c * (fr.Limbs * 64 / c)) + go func(j uint64, points []G1Affine, scalars []fr.Element) { + var buckets [1 << (lastC - 1)]g1JacExtended + msmProcessChunkG1Affine(j, chChunks[j], buckets[:], c, points, scalars) + }(uint64(nbChunks), points, scalars) + + processChunk := func(j int, points []G1Affine, scalars []fr.Element, chChunk chan g1JacExtended) { + var buckets [1 << (c - 1)]g1JacExtended + msmProcessChunkG1Affine(uint64(j), chChunk, buckets[:], c, points, scalars) + } + + for j := int(nbChunks - 1); j > 0; j-- { + go processChunk(j, points, scalars, chChunks[j]) + } + + if !splitFirstChunk { + go processChunk(0, points, scalars, chChunks[0]) + } else { + chSplit := make(chan g1JacExtended, 2) + split := len(points) / 2 + go processChunk(0, points[:split], scalars[:split], chSplit) + go processChunk(0, points[split:], scalars[split:], chSplit) + go func() { + s1 := <-chSplit + s2 := <-chSplit + close(chSplit) + s1.add(&s2) + chChunks[0] <- s1 + }() + } + + return 
msmReduceChunkG1Affine(p, c, chChunks[:]) +} + +func (p *G1Jac) msmC12(points []G1Affine, scalars []fr.Element, splitFirstChunk bool) *G1Jac { + const ( + c = 12 // scalars partitioned into c-bit radixes + nbChunks = (fr.Limbs * 64 / c) // number of c-bit radixes in a scalar + ) + + // for each chunk, spawn one go routine that'll loop through all the scalars in the + // corresponding bit-window + // note that buckets is an array allocated on the stack (for most sizes of c) and this is + // critical for performance + + // each go routine sends its result in chChunks[i] channel + var chChunks [nbChunks + 1]chan g1JacExtended + for i := 0; i < len(chChunks); i++ { + chChunks[i] = make(chan g1JacExtended, 1) + } + + // c doesn't divide 64, last window is smaller we can allocate less buckets + const lastC = (fr.Limbs * 64) - (c * (fr.Limbs * 64 / c)) + go func(j uint64, points []G1Affine, scalars []fr.Element) { + var buckets [1 << (lastC - 1)]g1JacExtended + msmProcessChunkG1Affine(j, chChunks[j], buckets[:], c, points, scalars) + }(uint64(nbChunks), points, scalars) + + processChunk := func(j int, points []G1Affine, scalars []fr.Element, chChunk chan g1JacExtended) { + var buckets [1 << (c - 1)]g1JacExtended + msmProcessChunkG1Affine(uint64(j), chChunk, buckets[:], c, points, scalars) + } + + for j := int(nbChunks - 1); j > 0; j-- { + go processChunk(j, points, scalars, chChunks[j]) + } + + if !splitFirstChunk { + go processChunk(0, points, scalars, chChunks[0]) + } else { + chSplit := make(chan g1JacExtended, 2) + split := len(points) / 2 + go processChunk(0, points[:split], scalars[:split], chSplit) + go processChunk(0, points[split:], scalars[split:], chSplit) + go func() { + s1 := <-chSplit + s2 := <-chSplit + close(chSplit) + s1.add(&s2) + chChunks[0] <- s1 + }() + } + + return msmReduceChunkG1Affine(p, c, chChunks[:]) +} + +func (p *G1Jac) msmC13(points []G1Affine, scalars []fr.Element, splitFirstChunk bool) *G1Jac { + const ( + c = 13 // scalars partitioned 
into c-bit radixes + nbChunks = (fr.Limbs * 64 / c) // number of c-bit radixes in a scalar + ) + + // for each chunk, spawn one go routine that'll loop through all the scalars in the + // corresponding bit-window + // note that buckets is an array allocated on the stack (for most sizes of c) and this is + // critical for performance + + // each go routine sends its result in chChunks[i] channel + var chChunks [nbChunks + 1]chan g1JacExtended + for i := 0; i < len(chChunks); i++ { + chChunks[i] = make(chan g1JacExtended, 1) + } + + // c doesn't divide 64, last window is smaller we can allocate less buckets + const lastC = (fr.Limbs * 64) - (c * (fr.Limbs * 64 / c)) + go func(j uint64, points []G1Affine, scalars []fr.Element) { + var buckets [1 << (lastC - 1)]g1JacExtended + msmProcessChunkG1Affine(j, chChunks[j], buckets[:], c, points, scalars) + }(uint64(nbChunks), points, scalars) + + processChunk := func(j int, points []G1Affine, scalars []fr.Element, chChunk chan g1JacExtended) { + var buckets [1 << (c - 1)]g1JacExtended + msmProcessChunkG1Affine(uint64(j), chChunk, buckets[:], c, points, scalars) + } + + for j := int(nbChunks - 1); j > 0; j-- { + go processChunk(j, points, scalars, chChunks[j]) + } + + if !splitFirstChunk { + go processChunk(0, points, scalars, chChunks[0]) + } else { + chSplit := make(chan g1JacExtended, 2) + split := len(points) / 2 + go processChunk(0, points[:split], scalars[:split], chSplit) + go processChunk(0, points[split:], scalars[split:], chSplit) + go func() { + s1 := <-chSplit + s2 := <-chSplit + close(chSplit) + s1.add(&s2) + chChunks[0] <- s1 + }() + } + + return msmReduceChunkG1Affine(p, c, chChunks[:]) +} + +func (p *G1Jac) msmC14(points []G1Affine, scalars []fr.Element, splitFirstChunk bool) *G1Jac { + const ( + c = 14 // scalars partitioned into c-bit radixes + nbChunks = (fr.Limbs * 64 / c) // number of c-bit radixes in a scalar + ) + + // for each chunk, spawn one go routine that'll loop through all the scalars in the + // 
corresponding bit-window + // note that buckets is an array allocated on the stack (for most sizes of c) and this is + // critical for performance + + // each go routine sends its result in chChunks[i] channel + var chChunks [nbChunks + 1]chan g1JacExtended + for i := 0; i < len(chChunks); i++ { + chChunks[i] = make(chan g1JacExtended, 1) + } + + // c doesn't divide 64, last window is smaller we can allocate less buckets + const lastC = (fr.Limbs * 64) - (c * (fr.Limbs * 64 / c)) + go func(j uint64, points []G1Affine, scalars []fr.Element) { + var buckets [1 << (lastC - 1)]g1JacExtended + msmProcessChunkG1Affine(j, chChunks[j], buckets[:], c, points, scalars) + }(uint64(nbChunks), points, scalars) + + processChunk := func(j int, points []G1Affine, scalars []fr.Element, chChunk chan g1JacExtended) { + var buckets [1 << (c - 1)]g1JacExtended + msmProcessChunkG1Affine(uint64(j), chChunk, buckets[:], c, points, scalars) + } + + for j := int(nbChunks - 1); j > 0; j-- { + go processChunk(j, points, scalars, chChunks[j]) + } + + if !splitFirstChunk { + go processChunk(0, points, scalars, chChunks[0]) + } else { + chSplit := make(chan g1JacExtended, 2) + split := len(points) / 2 + go processChunk(0, points[:split], scalars[:split], chSplit) + go processChunk(0, points[split:], scalars[split:], chSplit) + go func() { + s1 := <-chSplit + s2 := <-chSplit + close(chSplit) + s1.add(&s2) + chChunks[0] <- s1 + }() + } + + return msmReduceChunkG1Affine(p, c, chChunks[:]) +} + +func (p *G1Jac) msmC15(points []G1Affine, scalars []fr.Element, splitFirstChunk bool) *G1Jac { + const ( + c = 15 // scalars partitioned into c-bit radixes + nbChunks = (fr.Limbs * 64 / c) // number of c-bit radixes in a scalar + ) + + // for each chunk, spawn one go routine that'll loop through all the scalars in the + // corresponding bit-window + // note that buckets is an array allocated on the stack (for most sizes of c) and this is + // critical for performance + + // each go routine sends its result 
in chChunks[i] channel + var chChunks [nbChunks + 1]chan g1JacExtended + for i := 0; i < len(chChunks); i++ { + chChunks[i] = make(chan g1JacExtended, 1) + } + + // c doesn't divide 64, last window is smaller we can allocate less buckets + const lastC = (fr.Limbs * 64) - (c * (fr.Limbs * 64 / c)) + go func(j uint64, points []G1Affine, scalars []fr.Element) { + var buckets [1 << (lastC - 1)]g1JacExtended + msmProcessChunkG1Affine(j, chChunks[j], buckets[:], c, points, scalars) + }(uint64(nbChunks), points, scalars) + + processChunk := func(j int, points []G1Affine, scalars []fr.Element, chChunk chan g1JacExtended) { + var buckets [1 << (c - 1)]g1JacExtended + msmProcessChunkG1Affine(uint64(j), chChunk, buckets[:], c, points, scalars) + } + + for j := int(nbChunks - 1); j > 0; j-- { + go processChunk(j, points, scalars, chChunks[j]) + } + + if !splitFirstChunk { + go processChunk(0, points, scalars, chChunks[0]) + } else { + chSplit := make(chan g1JacExtended, 2) + split := len(points) / 2 + go processChunk(0, points[:split], scalars[:split], chSplit) + go processChunk(0, points[split:], scalars[split:], chSplit) + go func() { + s1 := <-chSplit + s2 := <-chSplit + close(chSplit) + s1.add(&s2) + chChunks[0] <- s1 + }() + } + + return msmReduceChunkG1Affine(p, c, chChunks[:]) +} + +func (p *G1Jac) msmC16(points []G1Affine, scalars []fr.Element, splitFirstChunk bool) *G1Jac { + const ( + c = 16 // scalars partitioned into c-bit radixes + nbChunks = (fr.Limbs * 64 / c) // number of c-bit radixes in a scalar + ) + + // for each chunk, spawn one go routine that'll loop through all the scalars in the + // corresponding bit-window + // note that buckets is an array allocated on the stack (for most sizes of c) and this is + // critical for performance + + // each go routine sends its result in chChunks[i] channel + var chChunks [nbChunks]chan g1JacExtended + for i := 0; i < len(chChunks); i++ { + chChunks[i] = make(chan g1JacExtended, 1) + } + + processChunk := func(j int, 
points []G1Affine, scalars []fr.Element, chChunk chan g1JacExtended) { + var buckets [1 << (c - 1)]g1JacExtended + msmProcessChunkG1Affine(uint64(j), chChunk, buckets[:], c, points, scalars) + } + + for j := int(nbChunks - 1); j > 0; j-- { + go processChunk(j, points, scalars, chChunks[j]) + } + + if !splitFirstChunk { + go processChunk(0, points, scalars, chChunks[0]) + } else { + chSplit := make(chan g1JacExtended, 2) + split := len(points) / 2 + go processChunk(0, points[:split], scalars[:split], chSplit) + go processChunk(0, points[split:], scalars[split:], chSplit) + go func() { + s1 := <-chSplit + s2 := <-chSplit + close(chSplit) + s1.add(&s2) + chChunks[0] <- s1 + }() + } + + return msmReduceChunkG1Affine(p, c, chChunks[:]) +} + +func (p *G1Jac) msmC20(points []G1Affine, scalars []fr.Element, splitFirstChunk bool) *G1Jac { + const ( + c = 20 // scalars partitioned into c-bit radixes + nbChunks = (fr.Limbs * 64 / c) // number of c-bit radixes in a scalar + ) + + // for each chunk, spawn one go routine that'll loop through all the scalars in the + // corresponding bit-window + // note that buckets is an array allocated on the stack (for most sizes of c) and this is + // critical for performance + + // each go routine sends its result in chChunks[i] channel + var chChunks [nbChunks + 1]chan g1JacExtended + for i := 0; i < len(chChunks); i++ { + chChunks[i] = make(chan g1JacExtended, 1) + } + + // c doesn't divide 64, last window is smaller we can allocate less buckets + const lastC = (fr.Limbs * 64) - (c * (fr.Limbs * 64 / c)) + go func(j uint64, points []G1Affine, scalars []fr.Element) { + var buckets [1 << (lastC - 1)]g1JacExtended + msmProcessChunkG1Affine(j, chChunks[j], buckets[:], c, points, scalars) + }(uint64(nbChunks), points, scalars) + + processChunk := func(j int, points []G1Affine, scalars []fr.Element, chChunk chan g1JacExtended) { + var buckets [1 << (c - 1)]g1JacExtended + msmProcessChunkG1Affine(uint64(j), chChunk, buckets[:], c, points, 
scalars) + } + + for j := int(nbChunks - 1); j > 0; j-- { + go processChunk(j, points, scalars, chChunks[j]) + } + + if !splitFirstChunk { + go processChunk(0, points, scalars, chChunks[0]) + } else { + chSplit := make(chan g1JacExtended, 2) + split := len(points) / 2 + go processChunk(0, points[:split], scalars[:split], chSplit) + go processChunk(0, points[split:], scalars[split:], chSplit) + go func() { + s1 := <-chSplit + s2 := <-chSplit + close(chSplit) + s1.add(&s2) + chChunks[0] <- s1 + }() + } + + return msmReduceChunkG1Affine(p, c, chChunks[:]) +} + +func (p *G1Jac) msmC21(points []G1Affine, scalars []fr.Element, splitFirstChunk bool) *G1Jac { + const ( + c = 21 // scalars partitioned into c-bit radixes + nbChunks = (fr.Limbs * 64 / c) // number of c-bit radixes in a scalar + ) + + // for each chunk, spawn one go routine that'll loop through all the scalars in the + // corresponding bit-window + // note that buckets is an array allocated on the stack (for most sizes of c) and this is + // critical for performance + + // each go routine sends its result in chChunks[i] channel + var chChunks [nbChunks + 1]chan g1JacExtended + for i := 0; i < len(chChunks); i++ { + chChunks[i] = make(chan g1JacExtended, 1) + } + + // c doesn't divide 64, last window is smaller we can allocate less buckets + const lastC = (fr.Limbs * 64) - (c * (fr.Limbs * 64 / c)) + go func(j uint64, points []G1Affine, scalars []fr.Element) { + var buckets [1 << (lastC - 1)]g1JacExtended + msmProcessChunkG1Affine(j, chChunks[j], buckets[:], c, points, scalars) + }(uint64(nbChunks), points, scalars) + + processChunk := func(j int, points []G1Affine, scalars []fr.Element, chChunk chan g1JacExtended) { + var buckets [1 << (c - 1)]g1JacExtended + msmProcessChunkG1Affine(uint64(j), chChunk, buckets[:], c, points, scalars) + } + + for j := int(nbChunks - 1); j > 0; j-- { + go processChunk(j, points, scalars, chChunks[j]) + } + + if !splitFirstChunk { + go processChunk(0, points, scalars, 
chChunks[0]) + } else { + chSplit := make(chan g1JacExtended, 2) + split := len(points) / 2 + go processChunk(0, points[:split], scalars[:split], chSplit) + go processChunk(0, points[split:], scalars[split:], chSplit) + go func() { + s1 := <-chSplit + s2 := <-chSplit + close(chSplit) + s1.add(&s2) + chChunks[0] <- s1 + }() + } + + return msmReduceChunkG1Affine(p, c, chChunks[:]) +} + +func (p *G1Jac) msmC22(points []G1Affine, scalars []fr.Element, splitFirstChunk bool) *G1Jac { + const ( + c = 22 // scalars partitioned into c-bit radixes + nbChunks = (fr.Limbs * 64 / c) // number of c-bit radixes in a scalar + ) + + // for each chunk, spawn one go routine that'll loop through all the scalars in the + // corresponding bit-window + // note that buckets is an array allocated on the stack (for most sizes of c) and this is + // critical for performance + + // each go routine sends its result in chChunks[i] channel + var chChunks [nbChunks + 1]chan g1JacExtended + for i := 0; i < len(chChunks); i++ { + chChunks[i] = make(chan g1JacExtended, 1) + } + + // c doesn't divide 64, last window is smaller we can allocate less buckets + const lastC = (fr.Limbs * 64) - (c * (fr.Limbs * 64 / c)) + go func(j uint64, points []G1Affine, scalars []fr.Element) { + var buckets [1 << (lastC - 1)]g1JacExtended + msmProcessChunkG1Affine(j, chChunks[j], buckets[:], c, points, scalars) + }(uint64(nbChunks), points, scalars) + + processChunk := func(j int, points []G1Affine, scalars []fr.Element, chChunk chan g1JacExtended) { + var buckets [1 << (c - 1)]g1JacExtended + msmProcessChunkG1Affine(uint64(j), chChunk, buckets[:], c, points, scalars) + } + + for j := int(nbChunks - 1); j > 0; j-- { + go processChunk(j, points, scalars, chChunks[j]) + } + + if !splitFirstChunk { + go processChunk(0, points, scalars, chChunks[0]) + } else { + chSplit := make(chan g1JacExtended, 2) + split := len(points) / 2 + go processChunk(0, points[:split], scalars[:split], chSplit) + go processChunk(0, 
points[split:], scalars[split:], chSplit) + go func() { + s1 := <-chSplit + s2 := <-chSplit + close(chSplit) + s1.add(&s2) + chChunks[0] <- s1 + }() + } + + return msmReduceChunkG1Affine(p, c, chChunks[:]) +} + +// MultiExp implements section 4 of https://eprint.iacr.org/2012/549.pdf +func (p *G2Affine) MultiExp(points []G2Affine, scalars []fr.Element, config ecc.MultiExpConfig) (*G2Affine, error) { + var _p G2Jac + if _, err := _p.MultiExp(points, scalars, config); err != nil { + return nil, err + } + p.FromJacobian(&_p) + return p, nil +} + +// MultiExp implements section 4 of https://eprint.iacr.org/2012/549.pdf +func (p *G2Jac) MultiExp(points []G2Affine, scalars []fr.Element, config ecc.MultiExpConfig) (*G2Jac, error) { + // note: + // each of the msmCX method is the same, except for the c constant it declares + // duplicating (through template generation) these methods allows to declare the buckets on the stack + // the choice of c needs to be improved: + // there is a theoritical value that gives optimal asymptotics + // but in practice, other factors come into play, including: + // * if c doesn't divide 64, the word size, then we're bound to select bits over 2 words of our scalars, instead of 1 + // * number of CPUs + // * cache friendliness (which depends on the host, G1 or G2... ) + // --> for example, on BN254, a G1 point fits into one cache line of 64bytes, but a G2 point don't. + + // for each msmCX + // step 1 + // we compute, for each scalars over c-bit wide windows, nbChunk digits + // if the digit is larger than 2^{c-1}, then, we borrow 2^c from the next window and substract + // 2^{c} to the current digit, making it negative. 
+ // negative digits will be processed in the next step as adding -G into the bucket instead of G + // (computing -G is cheap, and this saves us half of the buckets) + // step 2 + // buckets are declared on the stack + // notice that we have 2^{c-1} buckets instead of 2^{c} (see step1) + // we use jacobian extended formulas here as they are faster than mixed addition + // msmProcessChunk places points into buckets base on their selector and return the weighted bucket sum in given channel + // step 3 + // reduce the buckets weigthed sums into our result (msmReduceChunk) + + // ensure len(points) == len(scalars) + nbPoints := len(points) + if nbPoints != len(scalars) { + return nil, errors.New("len(points) != len(scalars)") + } + + // if nbTasks is not set, use all available CPUs + if config.NbTasks <= 0 { + config.NbTasks = runtime.NumCPU() + } + + // here, we compute the best C for nbPoints + // we split recursively until nbChunks(c) >= nbTasks, + bestC := func(nbPoints int) uint64 { + // implemented msmC methods (the c we use must be in this slice) + implementedCs := []uint64{4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 20, 21, 22} + var C uint64 + // approximate cost (in group operations) + // cost = bits/c * (nbPoints + 2^{c}) + // this needs to be verified empirically. + // for example, on a MBP 2016, for G2 MultiExp > 8M points, hand picking c gives better results + min := math.MaxFloat64 + for _, c := range implementedCs { + cc := fr.Limbs * 64 * (nbPoints + (1 << (c))) + cost := float64(cc) / float64(c) + if cost < min { + min = cost + C = c + } + } + // empirical, needs to be tuned. 
+ // if C > 16 && nbPoints < 1 << 23 { + // C = 16 + // } + return C + } + + var C uint64 + nbSplits := 1 + nbChunks := 0 + for nbChunks < config.NbTasks { + C = bestC(nbPoints) + nbChunks = int(fr.Limbs * 64 / C) // number of c-bit radixes in a scalar + if (fr.Limbs*64)%C != 0 { + nbChunks++ + } + nbChunks *= nbSplits + if nbChunks < config.NbTasks { + nbSplits <<= 1 + nbPoints >>= 1 + } + } + + // partition the scalars + // note: we do that before the actual chunk processing, as for each c-bit window (starting from LSW) + // if it's larger than 2^{c-1}, we have a carry we need to propagate up to the higher window + var smallValues int + scalars, smallValues = partitionScalars(scalars, C, config.ScalarsMont, config.NbTasks) + + // if we have more than 10% of small values, we split the processing of the first chunk in 2 + // we may want to do that in msmInnerG2Jac , but that would incur a cost of looping through all scalars one more time + splitFirstChunk := (float64(smallValues) / float64(len(scalars))) >= 0.1 + + // we have nbSplits intermediate results that we must sum together. 
+ _p := make([]G2Jac, nbSplits-1) + chDone := make(chan int, nbSplits-1) + for i := 0; i < nbSplits-1; i++ { + start := i * nbPoints + end := start + nbPoints + go func(start, end, i int) { + msmInnerG2Jac(&_p[i], int(C), points[start:end], scalars[start:end], splitFirstChunk) + chDone <- i + }(start, end, i) + } + + msmInnerG2Jac(p, int(C), points[(nbSplits-1)*nbPoints:], scalars[(nbSplits-1)*nbPoints:], splitFirstChunk) + for i := 0; i < nbSplits-1; i++ { + done := <-chDone + p.AddAssign(&_p[done]) + } + close(chDone) + return p, nil +} + +func msmInnerG2Jac(p *G2Jac, c int, points []G2Affine, scalars []fr.Element, splitFirstChunk bool) { + + switch c { + + case 4: + p.msmC4(points, scalars, splitFirstChunk) + + case 5: + p.msmC5(points, scalars, splitFirstChunk) + + case 6: + p.msmC6(points, scalars, splitFirstChunk) + + case 7: + p.msmC7(points, scalars, splitFirstChunk) + + case 8: + p.msmC8(points, scalars, splitFirstChunk) + + case 9: + p.msmC9(points, scalars, splitFirstChunk) + + case 10: + p.msmC10(points, scalars, splitFirstChunk) + + case 11: + p.msmC11(points, scalars, splitFirstChunk) + + case 12: + p.msmC12(points, scalars, splitFirstChunk) + + case 13: + p.msmC13(points, scalars, splitFirstChunk) + + case 14: + p.msmC14(points, scalars, splitFirstChunk) + + case 15: + p.msmC15(points, scalars, splitFirstChunk) + + case 16: + p.msmC16(points, scalars, splitFirstChunk) + + case 20: + p.msmC20(points, scalars, splitFirstChunk) + + case 21: + p.msmC21(points, scalars, splitFirstChunk) + + case 22: + p.msmC22(points, scalars, splitFirstChunk) + + default: + panic("not implemented") + } +} + +// msmReduceChunkG2Affine reduces the weighted sum of the buckets into the result of the multiExp +func msmReduceChunkG2Affine(p *G2Jac, c int, chChunks []chan g2JacExtended) *G2Jac { + var _p g2JacExtended + totalj := <-chChunks[len(chChunks)-1] + _p.Set(&totalj) + for j := len(chChunks) - 2; j >= 0; j-- { + for l := 0; l < c; l++ { + _p.double(&_p) + } + totalj := 
<-chChunks[j] + _p.add(&totalj) + } + + return p.unsafeFromJacExtended(&_p) +} + +func msmProcessChunkG2Affine(chunk uint64, + chRes chan<- g2JacExtended, + buckets []g2JacExtended, + c uint64, + points []G2Affine, + scalars []fr.Element) { + + mask := uint64((1 << c) - 1) // low c bits are 1 + msbWindow := uint64(1 << (c - 1)) + + for i := 0; i < len(buckets); i++ { + buckets[i].setInfinity() + } + + jc := uint64(chunk * c) + s := selector{} + s.index = jc / 64 + s.shift = jc - (s.index * 64) + s.mask = mask << s.shift + s.multiWordSelect = (64%c) != 0 && s.shift > (64-c) && s.index < (fr.Limbs-1) + if s.multiWordSelect { + nbBitsHigh := s.shift - uint64(64-c) + s.maskHigh = (1 << nbBitsHigh) - 1 + s.shiftHigh = (c - nbBitsHigh) + } + + // for each scalars, get the digit corresponding to the chunk we're processing. + for i := 0; i < len(scalars); i++ { + bits := (scalars[i][s.index] & s.mask) >> s.shift + if s.multiWordSelect { + bits += (scalars[i][s.index+1] & s.maskHigh) << s.shiftHigh + } + + if bits == 0 { + continue + } + + // if msbWindow bit is set, we need to substract + if bits&msbWindow == 0 { + // add + buckets[bits-1].addMixed(&points[i]) + } else { + // sub + buckets[bits & ^msbWindow].subMixed(&points[i]) + } + } + + // reduce buckets into total + // total = bucket[0] + 2*bucket[1] + 3*bucket[2] ... 
+ n*bucket[n-1] + + var runningSum, total g2JacExtended + runningSum.setInfinity() + total.setInfinity() + for k := len(buckets) - 1; k >= 0; k-- { + if !buckets[k].ZZ.IsZero() { + runningSum.add(&buckets[k]) + } + total.add(&runningSum) + } + + chRes <- total + +} + +func (p *G2Jac) msmC4(points []G2Affine, scalars []fr.Element, splitFirstChunk bool) *G2Jac { + const ( + c = 4 // scalars partitioned into c-bit radixes + nbChunks = (fr.Limbs * 64 / c) // number of c-bit radixes in a scalar + ) + + // for each chunk, spawn one go routine that'll loop through all the scalars in the + // corresponding bit-window + // note that buckets is an array allocated on the stack (for most sizes of c) and this is + // critical for performance + + // each go routine sends its result in chChunks[i] channel + var chChunks [nbChunks]chan g2JacExtended + for i := 0; i < len(chChunks); i++ { + chChunks[i] = make(chan g2JacExtended, 1) + } + + processChunk := func(j int, points []G2Affine, scalars []fr.Element, chChunk chan g2JacExtended) { + var buckets [1 << (c - 1)]g2JacExtended + msmProcessChunkG2Affine(uint64(j), chChunk, buckets[:], c, points, scalars) + } + + for j := int(nbChunks - 1); j > 0; j-- { + go processChunk(j, points, scalars, chChunks[j]) + } + + if !splitFirstChunk { + go processChunk(0, points, scalars, chChunks[0]) + } else { + chSplit := make(chan g2JacExtended, 2) + split := len(points) / 2 + go processChunk(0, points[:split], scalars[:split], chSplit) + go processChunk(0, points[split:], scalars[split:], chSplit) + go func() { + s1 := <-chSplit + s2 := <-chSplit + close(chSplit) + s1.add(&s2) + chChunks[0] <- s1 + }() + } + + return msmReduceChunkG2Affine(p, c, chChunks[:]) +} + +func (p *G2Jac) msmC5(points []G2Affine, scalars []fr.Element, splitFirstChunk bool) *G2Jac { + const ( + c = 5 // scalars partitioned into c-bit radixes + nbChunks = (fr.Limbs * 64 / c) // number of c-bit radixes in a scalar + ) + + // for each chunk, spawn one go routine that'll loop 
through all the scalars in the + // corresponding bit-window + // note that buckets is an array allocated on the stack (for most sizes of c) and this is + // critical for performance + + // each go routine sends its result in chChunks[i] channel + var chChunks [nbChunks + 1]chan g2JacExtended + for i := 0; i < len(chChunks); i++ { + chChunks[i] = make(chan g2JacExtended, 1) + } + + // c doesn't divide 64, last window is smaller we can allocate less buckets + const lastC = (fr.Limbs * 64) - (c * (fr.Limbs * 64 / c)) + go func(j uint64, points []G2Affine, scalars []fr.Element) { + var buckets [1 << (lastC - 1)]g2JacExtended + msmProcessChunkG2Affine(j, chChunks[j], buckets[:], c, points, scalars) + }(uint64(nbChunks), points, scalars) + + processChunk := func(j int, points []G2Affine, scalars []fr.Element, chChunk chan g2JacExtended) { + var buckets [1 << (c - 1)]g2JacExtended + msmProcessChunkG2Affine(uint64(j), chChunk, buckets[:], c, points, scalars) + } + + for j := int(nbChunks - 1); j > 0; j-- { + go processChunk(j, points, scalars, chChunks[j]) + } + + if !splitFirstChunk { + go processChunk(0, points, scalars, chChunks[0]) + } else { + chSplit := make(chan g2JacExtended, 2) + split := len(points) / 2 + go processChunk(0, points[:split], scalars[:split], chSplit) + go processChunk(0, points[split:], scalars[split:], chSplit) + go func() { + s1 := <-chSplit + s2 := <-chSplit + close(chSplit) + s1.add(&s2) + chChunks[0] <- s1 + }() + } + + return msmReduceChunkG2Affine(p, c, chChunks[:]) +} + +func (p *G2Jac) msmC6(points []G2Affine, scalars []fr.Element, splitFirstChunk bool) *G2Jac { + const ( + c = 6 // scalars partitioned into c-bit radixes + nbChunks = (fr.Limbs * 64 / c) // number of c-bit radixes in a scalar + ) + + // for each chunk, spawn one go routine that'll loop through all the scalars in the + // corresponding bit-window + // note that buckets is an array allocated on the stack (for most sizes of c) and this is + // critical for performance + + // 
each go routine sends its result in chChunks[i] channel + var chChunks [nbChunks + 1]chan g2JacExtended + for i := 0; i < len(chChunks); i++ { + chChunks[i] = make(chan g2JacExtended, 1) + } + + // c doesn't divide 64, last window is smaller we can allocate less buckets + const lastC = (fr.Limbs * 64) - (c * (fr.Limbs * 64 / c)) + go func(j uint64, points []G2Affine, scalars []fr.Element) { + var buckets [1 << (lastC - 1)]g2JacExtended + msmProcessChunkG2Affine(j, chChunks[j], buckets[:], c, points, scalars) + }(uint64(nbChunks), points, scalars) + + processChunk := func(j int, points []G2Affine, scalars []fr.Element, chChunk chan g2JacExtended) { + var buckets [1 << (c - 1)]g2JacExtended + msmProcessChunkG2Affine(uint64(j), chChunk, buckets[:], c, points, scalars) + } + + for j := int(nbChunks - 1); j > 0; j-- { + go processChunk(j, points, scalars, chChunks[j]) + } + + if !splitFirstChunk { + go processChunk(0, points, scalars, chChunks[0]) + } else { + chSplit := make(chan g2JacExtended, 2) + split := len(points) / 2 + go processChunk(0, points[:split], scalars[:split], chSplit) + go processChunk(0, points[split:], scalars[split:], chSplit) + go func() { + s1 := <-chSplit + s2 := <-chSplit + close(chSplit) + s1.add(&s2) + chChunks[0] <- s1 + }() + } + + return msmReduceChunkG2Affine(p, c, chChunks[:]) +} + +func (p *G2Jac) msmC7(points []G2Affine, scalars []fr.Element, splitFirstChunk bool) *G2Jac { + const ( + c = 7 // scalars partitioned into c-bit radixes + nbChunks = (fr.Limbs * 64 / c) // number of c-bit radixes in a scalar + ) + + // for each chunk, spawn one go routine that'll loop through all the scalars in the + // corresponding bit-window + // note that buckets is an array allocated on the stack (for most sizes of c) and this is + // critical for performance + + // each go routine sends its result in chChunks[i] channel + var chChunks [nbChunks + 1]chan g2JacExtended + for i := 0; i < len(chChunks); i++ { + chChunks[i] = make(chan g2JacExtended, 1) + } 
+ + // c doesn't divide 64, last window is smaller we can allocate less buckets + const lastC = (fr.Limbs * 64) - (c * (fr.Limbs * 64 / c)) + go func(j uint64, points []G2Affine, scalars []fr.Element) { + var buckets [1 << (lastC - 1)]g2JacExtended + msmProcessChunkG2Affine(j, chChunks[j], buckets[:], c, points, scalars) + }(uint64(nbChunks), points, scalars) + + processChunk := func(j int, points []G2Affine, scalars []fr.Element, chChunk chan g2JacExtended) { + var buckets [1 << (c - 1)]g2JacExtended + msmProcessChunkG2Affine(uint64(j), chChunk, buckets[:], c, points, scalars) + } + + for j := int(nbChunks - 1); j > 0; j-- { + go processChunk(j, points, scalars, chChunks[j]) + } + + if !splitFirstChunk { + go processChunk(0, points, scalars, chChunks[0]) + } else { + chSplit := make(chan g2JacExtended, 2) + split := len(points) / 2 + go processChunk(0, points[:split], scalars[:split], chSplit) + go processChunk(0, points[split:], scalars[split:], chSplit) + go func() { + s1 := <-chSplit + s2 := <-chSplit + close(chSplit) + s1.add(&s2) + chChunks[0] <- s1 + }() + } + + return msmReduceChunkG2Affine(p, c, chChunks[:]) +} + +func (p *G2Jac) msmC8(points []G2Affine, scalars []fr.Element, splitFirstChunk bool) *G2Jac { + const ( + c = 8 // scalars partitioned into c-bit radixes + nbChunks = (fr.Limbs * 64 / c) // number of c-bit radixes in a scalar + ) + + // for each chunk, spawn one go routine that'll loop through all the scalars in the + // corresponding bit-window + // note that buckets is an array allocated on the stack (for most sizes of c) and this is + // critical for performance + + // each go routine sends its result in chChunks[i] channel + var chChunks [nbChunks]chan g2JacExtended + for i := 0; i < len(chChunks); i++ { + chChunks[i] = make(chan g2JacExtended, 1) + } + + processChunk := func(j int, points []G2Affine, scalars []fr.Element, chChunk chan g2JacExtended) { + var buckets [1 << (c - 1)]g2JacExtended + msmProcessChunkG2Affine(uint64(j), chChunk, 
buckets[:], c, points, scalars) + } + + for j := int(nbChunks - 1); j > 0; j-- { + go processChunk(j, points, scalars, chChunks[j]) + } + + if !splitFirstChunk { + go processChunk(0, points, scalars, chChunks[0]) + } else { + chSplit := make(chan g2JacExtended, 2) + split := len(points) / 2 + go processChunk(0, points[:split], scalars[:split], chSplit) + go processChunk(0, points[split:], scalars[split:], chSplit) + go func() { + s1 := <-chSplit + s2 := <-chSplit + close(chSplit) + s1.add(&s2) + chChunks[0] <- s1 + }() + } + + return msmReduceChunkG2Affine(p, c, chChunks[:]) +} + +func (p *G2Jac) msmC9(points []G2Affine, scalars []fr.Element, splitFirstChunk bool) *G2Jac { + const ( + c = 9 // scalars partitioned into c-bit radixes + nbChunks = (fr.Limbs * 64 / c) // number of c-bit radixes in a scalar + ) + + // for each chunk, spawn one go routine that'll loop through all the scalars in the + // corresponding bit-window + // note that buckets is an array allocated on the stack (for most sizes of c) and this is + // critical for performance + + // each go routine sends its result in chChunks[i] channel + var chChunks [nbChunks + 1]chan g2JacExtended + for i := 0; i < len(chChunks); i++ { + chChunks[i] = make(chan g2JacExtended, 1) + } + + // c doesn't divide 64, last window is smaller we can allocate less buckets + const lastC = (fr.Limbs * 64) - (c * (fr.Limbs * 64 / c)) + go func(j uint64, points []G2Affine, scalars []fr.Element) { + var buckets [1 << (lastC - 1)]g2JacExtended + msmProcessChunkG2Affine(j, chChunks[j], buckets[:], c, points, scalars) + }(uint64(nbChunks), points, scalars) + + processChunk := func(j int, points []G2Affine, scalars []fr.Element, chChunk chan g2JacExtended) { + var buckets [1 << (c - 1)]g2JacExtended + msmProcessChunkG2Affine(uint64(j), chChunk, buckets[:], c, points, scalars) + } + + for j := int(nbChunks - 1); j > 0; j-- { + go processChunk(j, points, scalars, chChunks[j]) + } + + if !splitFirstChunk { + go processChunk(0, points, 
scalars, chChunks[0]) + } else { + chSplit := make(chan g2JacExtended, 2) + split := len(points) / 2 + go processChunk(0, points[:split], scalars[:split], chSplit) + go processChunk(0, points[split:], scalars[split:], chSplit) + go func() { + s1 := <-chSplit + s2 := <-chSplit + close(chSplit) + s1.add(&s2) + chChunks[0] <- s1 + }() + } + + return msmReduceChunkG2Affine(p, c, chChunks[:]) +} + +func (p *G2Jac) msmC10(points []G2Affine, scalars []fr.Element, splitFirstChunk bool) *G2Jac { + const ( + c = 10 // scalars partitioned into c-bit radixes + nbChunks = (fr.Limbs * 64 / c) // number of c-bit radixes in a scalar + ) + + // for each chunk, spawn one go routine that'll loop through all the scalars in the + // corresponding bit-window + // note that buckets is an array allocated on the stack (for most sizes of c) and this is + // critical for performance + + // each go routine sends its result in chChunks[i] channel + var chChunks [nbChunks + 1]chan g2JacExtended + for i := 0; i < len(chChunks); i++ { + chChunks[i] = make(chan g2JacExtended, 1) + } + + // c doesn't divide 64, last window is smaller we can allocate less buckets + const lastC = (fr.Limbs * 64) - (c * (fr.Limbs * 64 / c)) + go func(j uint64, points []G2Affine, scalars []fr.Element) { + var buckets [1 << (lastC - 1)]g2JacExtended + msmProcessChunkG2Affine(j, chChunks[j], buckets[:], c, points, scalars) + }(uint64(nbChunks), points, scalars) + + processChunk := func(j int, points []G2Affine, scalars []fr.Element, chChunk chan g2JacExtended) { + var buckets [1 << (c - 1)]g2JacExtended + msmProcessChunkG2Affine(uint64(j), chChunk, buckets[:], c, points, scalars) + } + + for j := int(nbChunks - 1); j > 0; j-- { + go processChunk(j, points, scalars, chChunks[j]) + } + + if !splitFirstChunk { + go processChunk(0, points, scalars, chChunks[0]) + } else { + chSplit := make(chan g2JacExtended, 2) + split := len(points) / 2 + go processChunk(0, points[:split], scalars[:split], chSplit) + go processChunk(0, 
points[split:], scalars[split:], chSplit) + go func() { + s1 := <-chSplit + s2 := <-chSplit + close(chSplit) + s1.add(&s2) + chChunks[0] <- s1 + }() + } + + return msmReduceChunkG2Affine(p, c, chChunks[:]) +} + +func (p *G2Jac) msmC11(points []G2Affine, scalars []fr.Element, splitFirstChunk bool) *G2Jac { + const ( + c = 11 // scalars partitioned into c-bit radixes + nbChunks = (fr.Limbs * 64 / c) // number of c-bit radixes in a scalar + ) + + // for each chunk, spawn one go routine that'll loop through all the scalars in the + // corresponding bit-window + // note that buckets is an array allocated on the stack (for most sizes of c) and this is + // critical for performance + + // each go routine sends its result in chChunks[i] channel + var chChunks [nbChunks + 1]chan g2JacExtended + for i := 0; i < len(chChunks); i++ { + chChunks[i] = make(chan g2JacExtended, 1) + } + + // c doesn't divide 64, last window is smaller we can allocate less buckets + const lastC = (fr.Limbs * 64) - (c * (fr.Limbs * 64 / c)) + go func(j uint64, points []G2Affine, scalars []fr.Element) { + var buckets [1 << (lastC - 1)]g2JacExtended + msmProcessChunkG2Affine(j, chChunks[j], buckets[:], c, points, scalars) + }(uint64(nbChunks), points, scalars) + + processChunk := func(j int, points []G2Affine, scalars []fr.Element, chChunk chan g2JacExtended) { + var buckets [1 << (c - 1)]g2JacExtended + msmProcessChunkG2Affine(uint64(j), chChunk, buckets[:], c, points, scalars) + } + + for j := int(nbChunks - 1); j > 0; j-- { + go processChunk(j, points, scalars, chChunks[j]) + } + + if !splitFirstChunk { + go processChunk(0, points, scalars, chChunks[0]) + } else { + chSplit := make(chan g2JacExtended, 2) + split := len(points) / 2 + go processChunk(0, points[:split], scalars[:split], chSplit) + go processChunk(0, points[split:], scalars[split:], chSplit) + go func() { + s1 := <-chSplit + s2 := <-chSplit + close(chSplit) + s1.add(&s2) + chChunks[0] <- s1 + }() + } + + return 
msmReduceChunkG2Affine(p, c, chChunks[:]) +} + +func (p *G2Jac) msmC12(points []G2Affine, scalars []fr.Element, splitFirstChunk bool) *G2Jac { + const ( + c = 12 // scalars partitioned into c-bit radixes + nbChunks = (fr.Limbs * 64 / c) // number of c-bit radixes in a scalar + ) + + // for each chunk, spawn one go routine that'll loop through all the scalars in the + // corresponding bit-window + // note that buckets is an array allocated on the stack (for most sizes of c) and this is + // critical for performance + + // each go routine sends its result in chChunks[i] channel + var chChunks [nbChunks + 1]chan g2JacExtended + for i := 0; i < len(chChunks); i++ { + chChunks[i] = make(chan g2JacExtended, 1) + } + + // c doesn't divide 64, last window is smaller we can allocate less buckets + const lastC = (fr.Limbs * 64) - (c * (fr.Limbs * 64 / c)) + go func(j uint64, points []G2Affine, scalars []fr.Element) { + var buckets [1 << (lastC - 1)]g2JacExtended + msmProcessChunkG2Affine(j, chChunks[j], buckets[:], c, points, scalars) + }(uint64(nbChunks), points, scalars) + + processChunk := func(j int, points []G2Affine, scalars []fr.Element, chChunk chan g2JacExtended) { + var buckets [1 << (c - 1)]g2JacExtended + msmProcessChunkG2Affine(uint64(j), chChunk, buckets[:], c, points, scalars) + } + + for j := int(nbChunks - 1); j > 0; j-- { + go processChunk(j, points, scalars, chChunks[j]) + } + + if !splitFirstChunk { + go processChunk(0, points, scalars, chChunks[0]) + } else { + chSplit := make(chan g2JacExtended, 2) + split := len(points) / 2 + go processChunk(0, points[:split], scalars[:split], chSplit) + go processChunk(0, points[split:], scalars[split:], chSplit) + go func() { + s1 := <-chSplit + s2 := <-chSplit + close(chSplit) + s1.add(&s2) + chChunks[0] <- s1 + }() + } + + return msmReduceChunkG2Affine(p, c, chChunks[:]) +} + +func (p *G2Jac) msmC13(points []G2Affine, scalars []fr.Element, splitFirstChunk bool) *G2Jac { + const ( + c = 13 // scalars partitioned 
into c-bit radixes + nbChunks = (fr.Limbs * 64 / c) // number of c-bit radixes in a scalar + ) + + // for each chunk, spawn one go routine that'll loop through all the scalars in the + // corresponding bit-window + // note that buckets is an array allocated on the stack (for most sizes of c) and this is + // critical for performance + + // each go routine sends its result in chChunks[i] channel + var chChunks [nbChunks + 1]chan g2JacExtended + for i := 0; i < len(chChunks); i++ { + chChunks[i] = make(chan g2JacExtended, 1) + } + + // c doesn't divide 64, last window is smaller we can allocate less buckets + const lastC = (fr.Limbs * 64) - (c * (fr.Limbs * 64 / c)) + go func(j uint64, points []G2Affine, scalars []fr.Element) { + var buckets [1 << (lastC - 1)]g2JacExtended + msmProcessChunkG2Affine(j, chChunks[j], buckets[:], c, points, scalars) + }(uint64(nbChunks), points, scalars) + + processChunk := func(j int, points []G2Affine, scalars []fr.Element, chChunk chan g2JacExtended) { + var buckets [1 << (c - 1)]g2JacExtended + msmProcessChunkG2Affine(uint64(j), chChunk, buckets[:], c, points, scalars) + } + + for j := int(nbChunks - 1); j > 0; j-- { + go processChunk(j, points, scalars, chChunks[j]) + } + + if !splitFirstChunk { + go processChunk(0, points, scalars, chChunks[0]) + } else { + chSplit := make(chan g2JacExtended, 2) + split := len(points) / 2 + go processChunk(0, points[:split], scalars[:split], chSplit) + go processChunk(0, points[split:], scalars[split:], chSplit) + go func() { + s1 := <-chSplit + s2 := <-chSplit + close(chSplit) + s1.add(&s2) + chChunks[0] <- s1 + }() + } + + return msmReduceChunkG2Affine(p, c, chChunks[:]) +} + +func (p *G2Jac) msmC14(points []G2Affine, scalars []fr.Element, splitFirstChunk bool) *G2Jac { + const ( + c = 14 // scalars partitioned into c-bit radixes + nbChunks = (fr.Limbs * 64 / c) // number of c-bit radixes in a scalar + ) + + // for each chunk, spawn one go routine that'll loop through all the scalars in the + // 
corresponding bit-window + // note that buckets is an array allocated on the stack (for most sizes of c) and this is + // critical for performance + + // each go routine sends its result in chChunks[i] channel + var chChunks [nbChunks + 1]chan g2JacExtended + for i := 0; i < len(chChunks); i++ { + chChunks[i] = make(chan g2JacExtended, 1) + } + + // c doesn't divide 64, last window is smaller we can allocate less buckets + const lastC = (fr.Limbs * 64) - (c * (fr.Limbs * 64 / c)) + go func(j uint64, points []G2Affine, scalars []fr.Element) { + var buckets [1 << (lastC - 1)]g2JacExtended + msmProcessChunkG2Affine(j, chChunks[j], buckets[:], c, points, scalars) + }(uint64(nbChunks), points, scalars) + + processChunk := func(j int, points []G2Affine, scalars []fr.Element, chChunk chan g2JacExtended) { + var buckets [1 << (c - 1)]g2JacExtended + msmProcessChunkG2Affine(uint64(j), chChunk, buckets[:], c, points, scalars) + } + + for j := int(nbChunks - 1); j > 0; j-- { + go processChunk(j, points, scalars, chChunks[j]) + } + + if !splitFirstChunk { + go processChunk(0, points, scalars, chChunks[0]) + } else { + chSplit := make(chan g2JacExtended, 2) + split := len(points) / 2 + go processChunk(0, points[:split], scalars[:split], chSplit) + go processChunk(0, points[split:], scalars[split:], chSplit) + go func() { + s1 := <-chSplit + s2 := <-chSplit + close(chSplit) + s1.add(&s2) + chChunks[0] <- s1 + }() + } + + return msmReduceChunkG2Affine(p, c, chChunks[:]) +} + +func (p *G2Jac) msmC15(points []G2Affine, scalars []fr.Element, splitFirstChunk bool) *G2Jac { + const ( + c = 15 // scalars partitioned into c-bit radixes + nbChunks = (fr.Limbs * 64 / c) // number of c-bit radixes in a scalar + ) + + // for each chunk, spawn one go routine that'll loop through all the scalars in the + // corresponding bit-window + // note that buckets is an array allocated on the stack (for most sizes of c) and this is + // critical for performance + + // each go routine sends its result 
in chChunks[i] channel + var chChunks [nbChunks + 1]chan g2JacExtended + for i := 0; i < len(chChunks); i++ { + chChunks[i] = make(chan g2JacExtended, 1) + } + + // c doesn't divide 64, last window is smaller we can allocate less buckets + const lastC = (fr.Limbs * 64) - (c * (fr.Limbs * 64 / c)) + go func(j uint64, points []G2Affine, scalars []fr.Element) { + var buckets [1 << (lastC - 1)]g2JacExtended + msmProcessChunkG2Affine(j, chChunks[j], buckets[:], c, points, scalars) + }(uint64(nbChunks), points, scalars) + + processChunk := func(j int, points []G2Affine, scalars []fr.Element, chChunk chan g2JacExtended) { + var buckets [1 << (c - 1)]g2JacExtended + msmProcessChunkG2Affine(uint64(j), chChunk, buckets[:], c, points, scalars) + } + + for j := int(nbChunks - 1); j > 0; j-- { + go processChunk(j, points, scalars, chChunks[j]) + } + + if !splitFirstChunk { + go processChunk(0, points, scalars, chChunks[0]) + } else { + chSplit := make(chan g2JacExtended, 2) + split := len(points) / 2 + go processChunk(0, points[:split], scalars[:split], chSplit) + go processChunk(0, points[split:], scalars[split:], chSplit) + go func() { + s1 := <-chSplit + s2 := <-chSplit + close(chSplit) + s1.add(&s2) + chChunks[0] <- s1 + }() + } + + return msmReduceChunkG2Affine(p, c, chChunks[:]) +} + +func (p *G2Jac) msmC16(points []G2Affine, scalars []fr.Element, splitFirstChunk bool) *G2Jac { + const ( + c = 16 // scalars partitioned into c-bit radixes + nbChunks = (fr.Limbs * 64 / c) // number of c-bit radixes in a scalar + ) + + // for each chunk, spawn one go routine that'll loop through all the scalars in the + // corresponding bit-window + // note that buckets is an array allocated on the stack (for most sizes of c) and this is + // critical for performance + + // each go routine sends its result in chChunks[i] channel + var chChunks [nbChunks]chan g2JacExtended + for i := 0; i < len(chChunks); i++ { + chChunks[i] = make(chan g2JacExtended, 1) + } + + processChunk := func(j int, 
points []G2Affine, scalars []fr.Element, chChunk chan g2JacExtended) { + var buckets [1 << (c - 1)]g2JacExtended + msmProcessChunkG2Affine(uint64(j), chChunk, buckets[:], c, points, scalars) + } + + for j := int(nbChunks - 1); j > 0; j-- { + go processChunk(j, points, scalars, chChunks[j]) + } + + if !splitFirstChunk { + go processChunk(0, points, scalars, chChunks[0]) + } else { + chSplit := make(chan g2JacExtended, 2) + split := len(points) / 2 + go processChunk(0, points[:split], scalars[:split], chSplit) + go processChunk(0, points[split:], scalars[split:], chSplit) + go func() { + s1 := <-chSplit + s2 := <-chSplit + close(chSplit) + s1.add(&s2) + chChunks[0] <- s1 + }() + } + + return msmReduceChunkG2Affine(p, c, chChunks[:]) +} + +func (p *G2Jac) msmC20(points []G2Affine, scalars []fr.Element, splitFirstChunk bool) *G2Jac { + const ( + c = 20 // scalars partitioned into c-bit radixes + nbChunks = (fr.Limbs * 64 / c) // number of c-bit radixes in a scalar + ) + + // for each chunk, spawn one go routine that'll loop through all the scalars in the + // corresponding bit-window + // note that buckets is an array allocated on the stack (for most sizes of c) and this is + // critical for performance + + // each go routine sends its result in chChunks[i] channel + var chChunks [nbChunks + 1]chan g2JacExtended + for i := 0; i < len(chChunks); i++ { + chChunks[i] = make(chan g2JacExtended, 1) + } + + // c doesn't divide 64, last window is smaller we can allocate less buckets + const lastC = (fr.Limbs * 64) - (c * (fr.Limbs * 64 / c)) + go func(j uint64, points []G2Affine, scalars []fr.Element) { + var buckets [1 << (lastC - 1)]g2JacExtended + msmProcessChunkG2Affine(j, chChunks[j], buckets[:], c, points, scalars) + }(uint64(nbChunks), points, scalars) + + processChunk := func(j int, points []G2Affine, scalars []fr.Element, chChunk chan g2JacExtended) { + var buckets [1 << (c - 1)]g2JacExtended + msmProcessChunkG2Affine(uint64(j), chChunk, buckets[:], c, points, 
scalars) + } + + for j := int(nbChunks - 1); j > 0; j-- { + go processChunk(j, points, scalars, chChunks[j]) + } + + if !splitFirstChunk { + go processChunk(0, points, scalars, chChunks[0]) + } else { + chSplit := make(chan g2JacExtended, 2) + split := len(points) / 2 + go processChunk(0, points[:split], scalars[:split], chSplit) + go processChunk(0, points[split:], scalars[split:], chSplit) + go func() { + s1 := <-chSplit + s2 := <-chSplit + close(chSplit) + s1.add(&s2) + chChunks[0] <- s1 + }() + } + + return msmReduceChunkG2Affine(p, c, chChunks[:]) +} + +func (p *G2Jac) msmC21(points []G2Affine, scalars []fr.Element, splitFirstChunk bool) *G2Jac { + const ( + c = 21 // scalars partitioned into c-bit radixes + nbChunks = (fr.Limbs * 64 / c) // number of c-bit radixes in a scalar + ) + + // for each chunk, spawn one go routine that'll loop through all the scalars in the + // corresponding bit-window + // note that buckets is an array allocated on the stack (for most sizes of c) and this is + // critical for performance + + // each go routine sends its result in chChunks[i] channel + var chChunks [nbChunks + 1]chan g2JacExtended + for i := 0; i < len(chChunks); i++ { + chChunks[i] = make(chan g2JacExtended, 1) + } + + // c doesn't divide 64, last window is smaller we can allocate less buckets + const lastC = (fr.Limbs * 64) - (c * (fr.Limbs * 64 / c)) + go func(j uint64, points []G2Affine, scalars []fr.Element) { + var buckets [1 << (lastC - 1)]g2JacExtended + msmProcessChunkG2Affine(j, chChunks[j], buckets[:], c, points, scalars) + }(uint64(nbChunks), points, scalars) + + processChunk := func(j int, points []G2Affine, scalars []fr.Element, chChunk chan g2JacExtended) { + var buckets [1 << (c - 1)]g2JacExtended + msmProcessChunkG2Affine(uint64(j), chChunk, buckets[:], c, points, scalars) + } + + for j := int(nbChunks - 1); j > 0; j-- { + go processChunk(j, points, scalars, chChunks[j]) + } + + if !splitFirstChunk { + go processChunk(0, points, scalars, 
chChunks[0]) + } else { + chSplit := make(chan g2JacExtended, 2) + split := len(points) / 2 + go processChunk(0, points[:split], scalars[:split], chSplit) + go processChunk(0, points[split:], scalars[split:], chSplit) + go func() { + s1 := <-chSplit + s2 := <-chSplit + close(chSplit) + s1.add(&s2) + chChunks[0] <- s1 + }() + } + + return msmReduceChunkG2Affine(p, c, chChunks[:]) +} + +func (p *G2Jac) msmC22(points []G2Affine, scalars []fr.Element, splitFirstChunk bool) *G2Jac { + const ( + c = 22 // scalars partitioned into c-bit radixes + nbChunks = (fr.Limbs * 64 / c) // number of c-bit radixes in a scalar + ) + + // for each chunk, spawn one go routine that'll loop through all the scalars in the + // corresponding bit-window + // note that buckets is an array allocated on the stack (for most sizes of c) and this is + // critical for performance + + // each go routine sends its result in chChunks[i] channel + var chChunks [nbChunks + 1]chan g2JacExtended + for i := 0; i < len(chChunks); i++ { + chChunks[i] = make(chan g2JacExtended, 1) + } + + // c doesn't divide 64, last window is smaller we can allocate less buckets + const lastC = (fr.Limbs * 64) - (c * (fr.Limbs * 64 / c)) + go func(j uint64, points []G2Affine, scalars []fr.Element) { + var buckets [1 << (lastC - 1)]g2JacExtended + msmProcessChunkG2Affine(j, chChunks[j], buckets[:], c, points, scalars) + }(uint64(nbChunks), points, scalars) + + processChunk := func(j int, points []G2Affine, scalars []fr.Element, chChunk chan g2JacExtended) { + var buckets [1 << (c - 1)]g2JacExtended + msmProcessChunkG2Affine(uint64(j), chChunk, buckets[:], c, points, scalars) + } + + for j := int(nbChunks - 1); j > 0; j-- { + go processChunk(j, points, scalars, chChunks[j]) + } + + if !splitFirstChunk { + go processChunk(0, points, scalars, chChunks[0]) + } else { + chSplit := make(chan g2JacExtended, 2) + split := len(points) / 2 + go processChunk(0, points[:split], scalars[:split], chSplit) + go processChunk(0, 
points[split:], scalars[split:], chSplit) + go func() { + s1 := <-chSplit + s2 := <-chSplit + close(chSplit) + s1.add(&s2) + chChunks[0] <- s1 + }() + } + + return msmReduceChunkG2Affine(p, c, chChunks[:]) +} diff --git a/ecc/bls12-39/multiexp_test.go b/ecc/bls12-39/multiexp_test.go new file mode 100644 index 0000000000..a9ae37ee43 --- /dev/null +++ b/ecc/bls12-39/multiexp_test.go @@ -0,0 +1,481 @@ +// Copyright 2020 ConsenSys Software Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Code generated by consensys/gnark-crypto DO NOT EDIT + +package bls1239 + +import ( + "fmt" + "math/big" + "math/bits" + "runtime" + "sync" + "testing" + + "github.com/consensys/gnark-crypto/ecc" + "github.com/consensys/gnark-crypto/ecc/bls12-39/fr" + "github.com/leanovate/gopter" + "github.com/leanovate/gopter/prop" +) + +func TestMultiExpG1(t *testing.T) { + + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = 2 + } else { + parameters.MinSuccessfulTests = nbFuzzShort + } + + properties := gopter.NewProperties(parameters) + + genScalar := GenFr() + + // size of the multiExps + const nbSamples = 73 + + // multi exp points + var samplePoints [nbSamples]G1Affine + var g G1Jac + g.Set(&g1Gen) + for i := 1; i <= nbSamples; i++ { + samplePoints[i-1].FromJacobian(&g) + g.AddAssign(&g1Gen) + } + + // final scalar to use in double and add method (without mixer factor) + // n(n+1)(2n+1)/6 (sum of the squares from 1 to n) + var scalar big.Int + scalar.SetInt64(nbSamples) + scalar.Mul(&scalar, new(big.Int).SetInt64(nbSamples+1)) + scalar.Mul(&scalar, new(big.Int).SetInt64(2*nbSamples+1)) + scalar.Div(&scalar, new(big.Int).SetInt64(6)) + + // ensure a multiexp that's splitted has the same result as a non-splitted one.. + properties.Property("[G1] Multi exponentation (c=16) should be consistant with splitted multiexp", prop.ForAll( + func(mixer fr.Element) bool { + var samplePointsLarge [nbSamples * 13]G1Affine + for i := 0; i < 13; i++ { + copy(samplePointsLarge[i*nbSamples:], samplePoints[:]) + } + + var r16, splitted1, splitted2 G1Jac + + // mixer ensures that all the words of a fpElement are set + var sampleScalars [nbSamples * 13]fr.Element + + for i := 1; i <= nbSamples; i++ { + sampleScalars[i-1].SetUint64(uint64(i)). + Mul(&sampleScalars[i-1], &mixer). 
+ FromMont() + } + + scalars16, _ := partitionScalars(sampleScalars[:], 16, false, runtime.NumCPU()) + r16.msmC16(samplePoints[:], scalars16, true) + + splitted1.MultiExp(samplePointsLarge[:], sampleScalars[:], ecc.MultiExpConfig{NbTasks: 128}) + splitted2.MultiExp(samplePointsLarge[:], sampleScalars[:], ecc.MultiExpConfig{NbTasks: 51}) + return r16.Equal(&splitted1) && r16.Equal(&splitted2) + }, + genScalar, + )) + + properties.Property("[G1] Multi exponentation (c=5, c=16) should be consistant with sum of square", prop.ForAll( + func(mixer fr.Element) bool { + + var expected G1Jac + + // compute expected result with double and add + var finalScalar, mixerBigInt big.Int + finalScalar.Mul(&scalar, mixer.ToBigIntRegular(&mixerBigInt)) + expected.ScalarMultiplication(&g1Gen, &finalScalar) + + // mixer ensures that all the words of a fpElement are set + var sampleScalars [nbSamples]fr.Element + + for i := 1; i <= nbSamples; i++ { + sampleScalars[i-1].SetUint64(uint64(i)). + Mul(&sampleScalars[i-1], &mixer). + FromMont() + } + + scalars5, _ := partitionScalars(sampleScalars[:], 5, false, runtime.NumCPU()) + scalars16, _ := partitionScalars(sampleScalars[:], 16, false, runtime.NumCPU()) + + var r5, r16 G1Jac + r5.msmC5(samplePoints[:], scalars5, false) + r16.msmC16(samplePoints[:], scalars16, true) + return (r5.Equal(&expected) && r16.Equal(&expected)) + }, + genScalar, + )) + + // note : this test is here as we expect to have a different multiExp than the above bucket method + // for small number of points + properties.Property("[G1] Multi exponentation (<50points) should be consistant with sum of square", prop.ForAll( + func(mixer fr.Element) bool { + + var g G1Jac + g.Set(&g1Gen) + + // mixer ensures that all the words of a fpElement are set + samplePoints := make([]G1Affine, 30) + sampleScalars := make([]fr.Element, 30) + + for i := 1; i <= 30; i++ { + sampleScalars[i-1].SetUint64(uint64(i)). + Mul(&sampleScalars[i-1], &mixer). 
+ FromMont() + samplePoints[i-1].FromJacobian(&g) + g.AddAssign(&g1Gen) + } + + var op1MultiExp G1Affine + op1MultiExp.MultiExp(samplePoints, sampleScalars, ecc.MultiExpConfig{}) + + var finalBigScalar fr.Element + var finalBigScalarBi big.Int + var op1ScalarMul G1Affine + finalBigScalar.SetString("9455").Mul(&finalBigScalar, &mixer) + finalBigScalar.ToBigIntRegular(&finalBigScalarBi) + op1ScalarMul.ScalarMultiplication(&g1GenAff, &finalBigScalarBi) + + return op1ScalarMul.Equal(&op1MultiExp) + }, + genScalar, + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) +} + +func BenchmarkMultiExpG1(b *testing.B) { + // ensure every words of the scalars are filled + var mixer fr.Element + mixer.SetString("7716837800905789770901243404444209691916730933998574719964609384059111546487") + + const pow = (bits.UintSize / 2) - (bits.UintSize / 8) // 24 on 64 bits arch, 12 on 32 bits + const nbSamples = 1 << pow + + var samplePoints [nbSamples]G1Affine + var sampleScalars [nbSamples]fr.Element + + for i := 1; i <= nbSamples; i++ { + sampleScalars[i-1].SetUint64(uint64(i)). + Mul(&sampleScalars[i-1], &mixer). + FromMont() + samplePoints[i-1] = g1GenAff + } + + var testPoint G1Affine + + for i := 5; i <= pow; i++ { + using := 1 << i + + b.Run(fmt.Sprintf("%d points", using), func(b *testing.B) { + b.ResetTimer() + for j := 0; j < b.N; j++ { + testPoint.MultiExp(samplePoints[:using], sampleScalars[:using], ecc.MultiExpConfig{}) + } + }) + } +} + +func BenchmarkMultiExpG1Reference(b *testing.B) { + // ensure every words of the scalars are filled + var mixer fr.Element + mixer.SetString("7716837800905789770901243404444209691916730933998574719964609384059111546487") + + const nbSamples = 1 << 20 + + var samplePoints [nbSamples]G1Affine + var sampleScalars [nbSamples]fr.Element + + for i := 1; i <= nbSamples; i++ { + sampleScalars[i-1].SetUint64(uint64(i)). + Mul(&sampleScalars[i-1], &mixer). 
+ FromMont() + samplePoints[i-1] = g1GenAff + } + + var testPoint G1Affine + + b.ResetTimer() + for j := 0; j < b.N; j++ { + testPoint.MultiExp(samplePoints[:], sampleScalars[:], ecc.MultiExpConfig{}) + } +} + +func BenchmarkManyMultiExpG1Reference(b *testing.B) { + // ensure every words of the scalars are filled + var mixer fr.Element + mixer.SetString("7716837800905789770901243404444209691916730933998574719964609384059111546487") + + const nbSamples = 1 << 20 + + var samplePoints [nbSamples]G1Affine + var sampleScalars [nbSamples]fr.Element + + for i := 1; i <= nbSamples; i++ { + sampleScalars[i-1].SetUint64(uint64(i)). + Mul(&sampleScalars[i-1], &mixer). + FromMont() + samplePoints[i-1] = g1GenAff + } + + var t1, t2, t3 G1Affine + b.ResetTimer() + for j := 0; j < b.N; j++ { + var wg sync.WaitGroup + wg.Add(3) + go func() { + t1.MultiExp(samplePoints[:], sampleScalars[:], ecc.MultiExpConfig{}) + wg.Done() + }() + go func() { + t2.MultiExp(samplePoints[:], sampleScalars[:], ecc.MultiExpConfig{}) + wg.Done() + }() + go func() { + t3.MultiExp(samplePoints[:], sampleScalars[:], ecc.MultiExpConfig{}) + wg.Done() + }() + wg.Wait() + } +} + +func TestMultiExpG2(t *testing.T) { + + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = 2 + } else { + parameters.MinSuccessfulTests = nbFuzzShort + } + + properties := gopter.NewProperties(parameters) + + genScalar := GenFr() + + // size of the multiExps + const nbSamples = 73 + + // multi exp points + var samplePoints [nbSamples]G2Affine + var g G2Jac + g.Set(&g2Gen) + for i := 1; i <= nbSamples; i++ { + samplePoints[i-1].FromJacobian(&g) + g.AddAssign(&g2Gen) + } + + // final scalar to use in double and add method (without mixer factor) + // n(n+1)(2n+1)/6 (sum of the squares from 1 to n) + var scalar big.Int + scalar.SetInt64(nbSamples) + scalar.Mul(&scalar, new(big.Int).SetInt64(nbSamples+1)) + scalar.Mul(&scalar, new(big.Int).SetInt64(2*nbSamples+1)) + scalar.Div(&scalar, 
new(big.Int).SetInt64(6)) + + // ensure a multiexp that's splitted has the same result as a non-splitted one.. + properties.Property("[G2] Multi exponentation (c=16) should be consistant with splitted multiexp", prop.ForAll( + func(mixer fr.Element) bool { + var samplePointsLarge [nbSamples * 13]G2Affine + for i := 0; i < 13; i++ { + copy(samplePointsLarge[i*nbSamples:], samplePoints[:]) + } + + var r16, splitted1, splitted2 G2Jac + + // mixer ensures that all the words of a fpElement are set + var sampleScalars [nbSamples * 13]fr.Element + + for i := 1; i <= nbSamples; i++ { + sampleScalars[i-1].SetUint64(uint64(i)). + Mul(&sampleScalars[i-1], &mixer). + FromMont() + } + + scalars16, _ := partitionScalars(sampleScalars[:], 16, false, runtime.NumCPU()) + r16.msmC16(samplePoints[:], scalars16, true) + + splitted1.MultiExp(samplePointsLarge[:], sampleScalars[:], ecc.MultiExpConfig{NbTasks: 128}) + splitted2.MultiExp(samplePointsLarge[:], sampleScalars[:], ecc.MultiExpConfig{NbTasks: 51}) + return r16.Equal(&splitted1) && r16.Equal(&splitted2) + }, + genScalar, + )) + + properties.Property("[G2] Multi exponentation (c=5, c=16) should be consistant with sum of square", prop.ForAll( + func(mixer fr.Element) bool { + + var expected G2Jac + + // compute expected result with double and add + var finalScalar, mixerBigInt big.Int + finalScalar.Mul(&scalar, mixer.ToBigIntRegular(&mixerBigInt)) + expected.ScalarMultiplication(&g2Gen, &finalScalar) + + // mixer ensures that all the words of a fpElement are set + var sampleScalars [nbSamples]fr.Element + + for i := 1; i <= nbSamples; i++ { + sampleScalars[i-1].SetUint64(uint64(i)). + Mul(&sampleScalars[i-1], &mixer). 
+ FromMont() + } + + scalars5, _ := partitionScalars(sampleScalars[:], 5, false, runtime.NumCPU()) + scalars16, _ := partitionScalars(sampleScalars[:], 16, false, runtime.NumCPU()) + + var r5, r16 G2Jac + r5.msmC5(samplePoints[:], scalars5, false) + r16.msmC16(samplePoints[:], scalars16, true) + return (r5.Equal(&expected) && r16.Equal(&expected)) + }, + genScalar, + )) + + // note : this test is here as we expect to have a different multiExp than the above bucket method + // for small number of points + properties.Property("[G2] Multi exponentation (<50points) should be consistant with sum of square", prop.ForAll( + func(mixer fr.Element) bool { + + var g G2Jac + g.Set(&g2Gen) + + // mixer ensures that all the words of a fpElement are set + samplePoints := make([]G2Affine, 30) + sampleScalars := make([]fr.Element, 30) + + for i := 1; i <= 30; i++ { + sampleScalars[i-1].SetUint64(uint64(i)). + Mul(&sampleScalars[i-1], &mixer). + FromMont() + samplePoints[i-1].FromJacobian(&g) + g.AddAssign(&g2Gen) + } + + var op1MultiExp G2Affine + op1MultiExp.MultiExp(samplePoints, sampleScalars, ecc.MultiExpConfig{}) + + var finalBigScalar fr.Element + var finalBigScalarBi big.Int + var op1ScalarMul G2Affine + finalBigScalar.SetString("9455").Mul(&finalBigScalar, &mixer) + finalBigScalar.ToBigIntRegular(&finalBigScalarBi) + op1ScalarMul.ScalarMultiplication(&g2GenAff, &finalBigScalarBi) + + return op1ScalarMul.Equal(&op1MultiExp) + }, + genScalar, + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) +} + +func BenchmarkMultiExpG2(b *testing.B) { + // ensure every words of the scalars are filled + var mixer fr.Element + mixer.SetString("7716837800905789770901243404444209691916730933998574719964609384059111546487") + + const pow = (bits.UintSize / 2) - (bits.UintSize / 8) // 24 on 64 bits arch, 12 on 32 bits + const nbSamples = 1 << pow + + var samplePoints [nbSamples]G2Affine + var sampleScalars [nbSamples]fr.Element + + for i := 1; i <= nbSamples; i++ { + 
sampleScalars[i-1].SetUint64(uint64(i)). + Mul(&sampleScalars[i-1], &mixer). + FromMont() + samplePoints[i-1] = g2GenAff + } + + var testPoint G2Affine + + for i := 5; i <= pow; i++ { + using := 1 << i + + b.Run(fmt.Sprintf("%d points", using), func(b *testing.B) { + b.ResetTimer() + for j := 0; j < b.N; j++ { + testPoint.MultiExp(samplePoints[:using], sampleScalars[:using], ecc.MultiExpConfig{}) + } + }) + } +} + +func BenchmarkMultiExpG2Reference(b *testing.B) { + // ensure every words of the scalars are filled + var mixer fr.Element + mixer.SetString("7716837800905789770901243404444209691916730933998574719964609384059111546487") + + const nbSamples = 1 << 20 + + var samplePoints [nbSamples]G2Affine + var sampleScalars [nbSamples]fr.Element + + for i := 1; i <= nbSamples; i++ { + sampleScalars[i-1].SetUint64(uint64(i)). + Mul(&sampleScalars[i-1], &mixer). + FromMont() + samplePoints[i-1] = g2GenAff + } + + var testPoint G2Affine + + b.ResetTimer() + for j := 0; j < b.N; j++ { + testPoint.MultiExp(samplePoints[:], sampleScalars[:], ecc.MultiExpConfig{}) + } +} + +func BenchmarkManyMultiExpG2Reference(b *testing.B) { + // ensure every words of the scalars are filled + var mixer fr.Element + mixer.SetString("7716837800905789770901243404444209691916730933998574719964609384059111546487") + + const nbSamples = 1 << 20 + + var samplePoints [nbSamples]G2Affine + var sampleScalars [nbSamples]fr.Element + + for i := 1; i <= nbSamples; i++ { + sampleScalars[i-1].SetUint64(uint64(i)). + Mul(&sampleScalars[i-1], &mixer). 
+ FromMont() + samplePoints[i-1] = g2GenAff + } + + var t1, t2, t3 G2Affine + b.ResetTimer() + for j := 0; j < b.N; j++ { + var wg sync.WaitGroup + wg.Add(3) + go func() { + t1.MultiExp(samplePoints[:], sampleScalars[:], ecc.MultiExpConfig{}) + wg.Done() + }() + go func() { + t2.MultiExp(samplePoints[:], sampleScalars[:], ecc.MultiExpConfig{}) + wg.Done() + }() + go func() { + t3.MultiExp(samplePoints[:], sampleScalars[:], ecc.MultiExpConfig{}) + wg.Done() + }() + wg.Wait() + } +} diff --git a/ecc/bls12-39/pairing.go b/ecc/bls12-39/pairing.go new file mode 100644 index 0000000000..1bf0c275e1 --- /dev/null +++ b/ecc/bls12-39/pairing.go @@ -0,0 +1,248 @@ +// Copyright 2020 ConsenSys AG +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package bls1239 + +import ( + "errors" + + "github.com/consensys/gnark-crypto/ecc/bls12-39/internal/fptower" +) + +// GT target group of the pairing +type GT = fptower.E12 + +type lineEvaluation struct { + r0 fptower.E2 + r1 fptower.E2 + r2 fptower.E2 +} + +// Pair calculates the reduced pairing for a set of points +func Pair(P []G1Affine, Q []G2Affine) (GT, error) { + f, err := MillerLoop(P, Q) + if err != nil { + return GT{}, err + } + return FinalExponentiation(&f), nil +} + +// PairingCheck calculates the reduced pairing for a set of points and returns True if the result is One +func PairingCheck(P []G1Affine, Q []G2Affine) (bool, error) { + f, err := Pair(P, Q) + if err != nil { + return false, err + } + var one GT + one.SetOne() + return f.Equal(&one), nil +} + +// FinalExponentiation computes the final expo x**(p**6-1)(p**2+1)(p**4 - p**2 +1)/r +func FinalExponentiation(z *GT, _z ...*GT) GT { + + var result GT + result.Set(z) + + for _, e := range _z { + result.Mul(&result, e) + } + + // https://eprint.iacr.org/2016/130.pdf + var t [3]GT + + // easy part + t[0].Conjugate(&result) + result.Inverse(&result) + t[0].Mul(&t[0], &result) + result.FrobeniusSquare(&t[0]). 
+ Mul(&result, &t[0]) + + // hard part (up to permutation) + // Daiki Hayashida and Kenichiro Hayasaka + // and Tadanori Teruya + // https://eprint.iacr.org/2020/875.pdf + t[0].CyclotomicSquare(&result) + t[1].Expt(&result) + t[2].InverseUnitary(&result) + t[1].Mul(&t[1], &t[2]) + t[2].Expt(&t[1]) + t[1].InverseUnitary(&t[1]) + t[1].Mul(&t[1], &t[2]) + t[2].Expt(&t[1]) + t[1].Frobenius(&t[1]) + t[1].Mul(&t[1], &t[2]) + result.Mul(&result, &t[0]) + t[0].Expt(&t[1]) + t[2].Expt(&t[0]) + t[0].FrobeniusSquare(&t[1]) + t[1].InverseUnitary(&t[1]) + t[1].Mul(&t[1], &t[2]) + t[1].Mul(&t[1], &t[0]) + result.Mul(&result, &t[1]) + + return result +} + +// MillerLoop Miller loop +func MillerLoop(P []G1Affine, Q []G2Affine) (GT, error) { + // check input size match + n := len(P) + if n == 0 || n != len(Q) { + return GT{}, errors.New("invalid inputs sizes") + } + + // filter infinity points + p := make([]G1Affine, 0, n) + q := make([]G2Affine, 0, n) + + for k := 0; k < n; k++ { + if P[k].IsInfinity() || Q[k].IsInfinity() { + continue + } + p = append(p, P[k]) + q = append(q, Q[k]) + } + + n = len(p) + + // projective points for Q + qProj := make([]g2Proj, n) + for k := 0; k < n; k++ { + qProj[k].FromAffine(&q[k]) + } + + var result GT + result.SetOne() + + var l lineEvaluation + + // i == 5 + for k := 0; k < n; k++ { + qProj[k].DoubleStep(&l) + // line eval + l.r0.MulByElement(&l.r0, &p[k].Y) + l.r1.MulByElement(&l.r1, &p[k].X) + result.MulBy034(&l.r0, &l.r1, &l.r2) + qProj[k].AddMixedStep(&l, &q[k]) + // line eval + l.r0.MulByElement(&l.r0, &p[k].Y) + l.r1.MulByElement(&l.r1, &p[k].X) + result.MulBy034(&l.r0, &l.r1, &l.r2) + } + + for i := 4; i >= 0; i-- { + result.Square(&result) + + for k := 0; k < n; k++ { + qProj[k].DoubleStep(&l) + // line eval + l.r0.MulByElement(&l.r0, &p[k].Y) + l.r1.MulByElement(&l.r1, &p[k].X) + result.MulBy034(&l.r0, &l.r1, &l.r2) + } + + if loopCounter[i] == 0 { + continue + } + + for k := 0; k < n; k++ { + qProj[k].AddMixedStep(&l, &q[k]) + // line 
eval + l.r0.MulByElement(&l.r0, &p[k].Y) + l.r1.MulByElement(&l.r1, &p[k].X) + result.MulBy034(&l.r0, &l.r1, &l.r2) + } + } + + return result, nil +} + +// DoubleStep doubles a point in Homogenous projective coordinates, and evaluates the line in Miller loop +// https://eprint.iacr.org/2013/722.pdf (Section 4.3) +func (p *g2Proj) DoubleStep(evaluations *lineEvaluation) { + + // get some Element from our pool + var t1, A, B, C, D, E, EE, F, G, H, I, J, K fptower.E2 + A.Mul(&p.x, &p.y) + A.Halve() + B.Square(&p.y) + C.Square(&p.z) + D.Double(&C). + Add(&D, &C) + E.MulBybTwistCurveCoeff(&D) + F.Double(&E). + Add(&F, &E) + G.Add(&B, &F) + G.Halve() + H.Add(&p.y, &p.z). + Square(&H) + t1.Add(&B, &C) + H.Sub(&H, &t1) + I.Sub(&E, &B) + J.Square(&p.x) + EE.Square(&E) + K.Double(&EE). + Add(&K, &EE) + + // X, Y, Z + p.x.Sub(&B, &F). + Mul(&p.x, &A) + p.y.Square(&G). + Sub(&p.y, &K) + p.z.Mul(&B, &H) + + // Line evaluation + evaluations.r0.Neg(&H) + evaluations.r1.Double(&J). + Add(&evaluations.r1, &J) + evaluations.r2.Set(&I) +} + +// AddMixedStep point addition in Mixed Homogenous projective and Affine coordinates +// https://eprint.iacr.org/2013/722.pdf (Section 4.3) +func (p *g2Proj) AddMixedStep(evaluations *lineEvaluation, a *G2Affine) { + + // get some Element from our pool + var Y2Z1, X2Z1, O, L, C, D, E, F, G, H, t0, t1, t2, J fptower.E2 + Y2Z1.Mul(&a.Y, &p.z) + O.Sub(&p.y, &Y2Z1) + X2Z1.Mul(&a.X, &p.z) + L.Sub(&p.x, &X2Z1) + C.Square(&O) + D.Square(&L) + E.Mul(&L, &D) + F.Mul(&p.z, &C) + G.Mul(&p.x, &D) + t0.Double(&G) + H.Add(&E, &F). + Sub(&H, &t0) + t1.Mul(&p.y, &E) + + // X, Y, Z + p.x.Mul(&L, &H) + p.y.Sub(&G, &H). + Mul(&p.y, &O). + Sub(&p.y, &t1) + p.z.Mul(&E, &p.z) + + t2.Mul(&L, &a.Y) + J.Mul(&a.X, &O). 
+ Sub(&J, &t2) + + // Line evaluation + evaluations.r0.Set(&L) + evaluations.r1.Neg(&O) + evaluations.r2.Set(&J) +} diff --git a/ecc/bls12-39/pairing_test.go b/ecc/bls12-39/pairing_test.go new file mode 100644 index 0000000000..bad8dc1312 --- /dev/null +++ b/ecc/bls12-39/pairing_test.go @@ -0,0 +1,328 @@ +// Copyright 2020 ConsenSys Software Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by consensys/gnark-crypto DO NOT EDIT + +package bls1239 + +import ( + "fmt" + "math/big" + "testing" + + "github.com/consensys/gnark-crypto/ecc/bls12-39/fr" + "github.com/leanovate/gopter" + "github.com/leanovate/gopter/prop" +) + +// ------------------------------------------------------------ +// tests + +func TestPairing(t *testing.T) { + + t.Parallel() + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := GenE12() + genR1 := GenFr() + genR2 := GenFr() + + properties.Property("[BLS12-39] Having the receiver as operand (final expo) should output the same result", prop.ForAll( + func(a GT) bool { + b := a + b = FinalExponentiation(&a) + a = FinalExponentiation(&a) + return a.Equal(&b) + }, + genA, + )) + + properties.Property("[BLS12-39] Exponentiating FinalExpo(a) to r should output 1", prop.ForAll( + func(a GT) bool { + b := FinalExponentiation(&a) + return 
!a.IsInSubGroup() && b.IsInSubGroup() + }, + genA, + )) + + properties.Property("[BLS12-39] Expt(Expt) and Exp(t^2) should output the same result in the cyclotomic subgroup", prop.ForAll( + func(a GT) bool { + var b, c, d GT + b.Conjugate(&a) + a.Inverse(&a) + b.Mul(&b, &a) + + a.FrobeniusSquare(&b). + Mul(&a, &b) + + c.Expt(&a).Expt(&c) + d.Exp(&a, xGen).Exp(&d, xGen) + return c.Equal(&d) + }, + genA, + )) + + properties.Property("[BLS12-39] bilinearity", prop.ForAll( + func(a, b fr.Element) bool { + + var res, resa, resb, resab, zero GT + + var ag1 G1Affine + var bg2 G2Affine + + var abigint, bbigint, ab big.Int + + a.ToBigIntRegular(&abigint) + b.ToBigIntRegular(&bbigint) + ab.Mul(&abigint, &bbigint) + + ag1.ScalarMultiplication(&g1GenAff, &abigint) + bg2.ScalarMultiplication(&g2GenAff, &bbigint) + + res, _ = Pair([]G1Affine{g1GenAff}, []G2Affine{g2GenAff}) + resa, _ = Pair([]G1Affine{ag1}, []G2Affine{g2GenAff}) + resb, _ = Pair([]G1Affine{g1GenAff}, []G2Affine{bg2}) + + resab.Exp(&res, ab) + resa.Exp(&resa, bbigint) + resb.Exp(&resb, abigint) + + return resab.Equal(&resa) && resab.Equal(&resb) && !res.Equal(&zero) + + }, + genR1, + genR2, + )) + + properties.Property("[BLS12-39] PairingCheck", prop.ForAll( + func(a, b fr.Element) bool { + + var g1GenAffNeg G1Affine + g1GenAffNeg.Neg(&g1GenAff) + tabP := []G1Affine{g1GenAff, g1GenAffNeg} + tabQ := []G2Affine{g2GenAff, g2GenAff} + + res, _ := PairingCheck(tabP, tabQ) + + return res + }, + genR1, + genR2, + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) +} + +func TestMillerLoop(t *testing.T) { + + t.Parallel() + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genR1 := GenFr() + genR2 := GenFr() + + properties.Property("[BLS12-39] MillerLoop of pairs should be equal to the product of MillerLoops", prop.ForAll( + func(a, b 
fr.Element) bool { + + var simpleProd, factorizedProd GT + + var ag1 G1Affine + var bg2 G2Affine + + var abigint, bbigint big.Int + + a.ToBigIntRegular(&abigint) + b.ToBigIntRegular(&bbigint) + + ag1.ScalarMultiplication(&g1GenAff, &abigint) + bg2.ScalarMultiplication(&g2GenAff, &bbigint) + + P0 := []G1Affine{g1GenAff} + P1 := []G1Affine{ag1} + Q0 := []G2Affine{g2GenAff} + Q1 := []G2Affine{bg2} + + // FE( ML(a,b) * ML(c,d) * ML(e,f) * ML(g,h) ) + M1, _ := MillerLoop(P0, Q0) + M2, _ := MillerLoop(P1, Q0) + M3, _ := MillerLoop(P0, Q1) + M4, _ := MillerLoop(P1, Q1) + simpleProd.Mul(&M1, &M2).Mul(&simpleProd, &M3).Mul(&simpleProd, &M4) + simpleProd = FinalExponentiation(&simpleProd) + + tabP := []G1Affine{g1GenAff, ag1, g1GenAff, ag1} + tabQ := []G2Affine{g2GenAff, g2GenAff, bg2, bg2} + + // FE( ML([a,c,e,g] ; [b,d,f,h]) ) -> saves 3 squares in Fqk + factorizedProd, _ = Pair(tabP, tabQ) + + return simpleProd.Equal(&factorizedProd) + }, + genR1, + genR2, + )) + + properties.Property("[BLS12-39] MillerLoop should skip pairs with a point at infinity", prop.ForAll( + func(a, b fr.Element) bool { + + var one GT + + var ag1, g1Inf G1Affine + var bg2, g2Inf G2Affine + + var abigint, bbigint big.Int + + one.SetOne() + + a.ToBigIntRegular(&abigint) + b.ToBigIntRegular(&bbigint) + + ag1.ScalarMultiplication(&g1GenAff, &abigint) + bg2.ScalarMultiplication(&g2GenAff, &bbigint) + + g1Inf.FromJacobian(&g1Infinity) + g2Inf.FromJacobian(&g2Infinity) + + // e([0,c] ; [b,d]) + tabP := []G1Affine{g1Inf, ag1} + tabQ := []G2Affine{g2GenAff, bg2} + res1, _ := Pair(tabP, tabQ) + + // e([a,c] ; [0,d]) + tabP = []G1Affine{g1GenAff, ag1} + tabQ = []G2Affine{g2Inf, bg2} + res2, _ := Pair(tabP, tabQ) + + // e([0,c] ; [d,0]) + tabP = []G1Affine{g1Inf, ag1} + tabQ = []G2Affine{bg2, g2Inf} + res3, _ := Pair(tabP, tabQ) + + return res1.Equal(&res2) && !res2.Equal(&res3) && res3.Equal(&one) + }, + genR1, + genR2, + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) +} + +// 
------------------------------------------------------------ +// benches + +func BenchmarkPairing(b *testing.B) { + + var g1GenAff G1Affine + var g2GenAff G2Affine + + g1GenAff.FromJacobian(&g1Gen) + g2GenAff.FromJacobian(&g2Gen) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + Pair([]G1Affine{g1GenAff}, []G2Affine{g2GenAff}) + } +} + +func BenchmarkMillerLoop(b *testing.B) { + + var g1GenAff G1Affine + var g2GenAff G2Affine + + g1GenAff.FromJacobian(&g1Gen) + g2GenAff.FromJacobian(&g2Gen) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + MillerLoop([]G1Affine{g1GenAff}, []G2Affine{g2GenAff}) + } +} + +func BenchmarkFinalExponentiation(b *testing.B) { + + var a GT + a.SetRandom() + + b.ResetTimer() + for i := 0; i < b.N; i++ { + FinalExponentiation(&a) + } + +} + +func BenchmarkMultiMiller(b *testing.B) { + + var g1GenAff G1Affine + var g2GenAff G2Affine + + g1GenAff.FromJacobian(&g1Gen) + g2GenAff.FromJacobian(&g2Gen) + + n := 10 + P := make([]G1Affine, n) + Q := make([]G2Affine, n) + + for i := 2; i <= n; i++ { + for j := 0; j < i; j++ { + P[j].Set(&g1GenAff) + Q[j].Set(&g2GenAff) + } + b.Run(fmt.Sprintf("%d pairs", i), func(b *testing.B) { + b.ResetTimer() + for i := 0; i < b.N; i++ { + MillerLoop(P, Q) + } + }) + } +} + +func BenchmarkMultiPair(b *testing.B) { + + var g1GenAff G1Affine + var g2GenAff G2Affine + + g1GenAff.FromJacobian(&g1Gen) + g2GenAff.FromJacobian(&g2Gen) + + n := 10 + P := make([]G1Affine, n) + Q := make([]G2Affine, n) + + for i := 2; i <= n; i++ { + for j := 0; j < i; j++ { + P[j].Set(&g1GenAff) + Q[j].Set(&g2GenAff) + } + b.Run(fmt.Sprintf("%d pairs", i), func(b *testing.B) { + b.ResetTimer() + for i := 0; i < b.N; i++ { + Pair(P, Q) + } + }) + } +} diff --git a/ecc/bls24-315/fp/element.go b/ecc/bls24-315/fp/element.go index 1804bd16c0..2d713d4d72 100644 --- a/ecc/bls24-315/fp/element.go +++ b/ecc/bls24-315/fp/element.go @@ -77,9 +77,6 @@ var qElement = Element{ qElementWord4, } -// Used for Montgomery reduction. 
(qInvNeg) q + r'.r = 1, i.e., qInvNeg = - q⁻¹ mod r -const qInvNegLsw uint64 = 8083954730842193919 - // rSquare var rSquare = Element{ 7746605402484284438, @@ -96,7 +93,8 @@ var bigIntPool = sync.Pool{ } func init() { - _modulus.SetString("39705142709513438335025689890408969744933502416914749335064285505637884093126342347073617133569", 10) + // base10: 39705142709513438335025689890408969744933502416914749335064285505637884093126342347073617133569 + _modulus.SetString("4c23a02b586d650d3f7498be97c5eafdec1d01aa27a1ae0421ee5da52bde5026fe802ff40300001", 16) } // NewElement returns a new Element from a uint64 value @@ -340,7 +338,7 @@ func (z *Element) SetRandom() (*Element, error) { z[4] = binary.BigEndian.Uint64(bytes[32:40]) z[4] %= 342900304943437392 - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[4] < 342900304943437392 || (z[4] == 342900304943437392 && (z[3] < 15273757526516850351 || (z[3] == 15273757526516850351 && (z[2] < 16051339359738796768 || (z[2] == 16051339359738796768 && (z[1] < 4764498181658371330 || (z[1] == 4764498181658371330 && (z[0] < 8063698428123676673))))))))) { var b uint64 @@ -363,10 +361,10 @@ func One() Element { // Halve sets z to z / 2 (mod p) func (z *Element) Halve() { - if z[0]&1 == 1 { + var carry uint64 + if z[0]&1 == 1 { // z = z + q - var carry uint64 z[0], carry = bits.Add64(z[0], 8063698428123676673, 0) z[1], carry = bits.Add64(z[1], 4764498181658371330, carry) z[2], carry = bits.Add64(z[2], 16051339359738796768, carry) @@ -374,9 +372,7 @@ func (z *Element) Halve() { z[4], _ = bits.Add64(z[4], 342900304943437392, carry) } - // z = z >> 1 - z[0] = z[0]>>1 | z[1]<<63 z[1] = z[1]>>1 | z[2]<<63 z[2] = z[2]>>1 | z[3]<<63 @@ -385,8 +381,6 @@ func (z *Element) Halve() { } -// API with assembly impl - // Mul z = x * y mod q // see https://hackmd.io/@gnark/modular_multiplication func (z *Element) Mul(x, y *Element) *Element { @@ -526,7 +520,7 @@ func _mulGeneric(z, x, y *Element) { z[4], z[3] = 
madd3(m, 342900304943437392, c[0], c[2], c[1]) } - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[4] < 342900304943437392 || (z[4] == 342900304943437392 && (z[3] < 15273757526516850351 || (z[3] == 15273757526516850351 && (z[2] < 16051339359738796768 || (z[2] == 16051339359738796768 && (z[1] < 4764498181658371330 || (z[1] == 4764498181658371330 && (z[0] < 8063698428123676673))))))))) { var b uint64 @@ -536,72 +530,7 @@ func _mulGeneric(z, x, y *Element) { z[3], b = bits.Sub64(z[3], 15273757526516850351, b) z[4], _ = bits.Sub64(z[4], 342900304943437392, b) } -} - -func _mulWGeneric(z, x *Element, y uint64) { - var t [5]uint64 - { - // round 0 - c1, c0 := bits.Mul64(y, x[0]) - m := c0 * 8083954730842193919 - c2 := madd0(m, 8063698428123676673, c0) - c1, c0 = madd1(y, x[1], c1) - c2, t[0] = madd2(m, 4764498181658371330, c2, c0) - c1, c0 = madd1(y, x[2], c1) - c2, t[1] = madd2(m, 16051339359738796768, c2, c0) - c1, c0 = madd1(y, x[3], c1) - c2, t[2] = madd2(m, 15273757526516850351, c2, c0) - c1, c0 = madd1(y, x[4], c1) - t[4], t[3] = madd3(m, 342900304943437392, c0, c2, c1) - } - { - // round 1 - m := t[0] * 8083954730842193919 - c2 := madd0(m, 8063698428123676673, t[0]) - c2, t[0] = madd2(m, 4764498181658371330, c2, t[1]) - c2, t[1] = madd2(m, 16051339359738796768, c2, t[2]) - c2, t[2] = madd2(m, 15273757526516850351, c2, t[3]) - t[4], t[3] = madd2(m, 342900304943437392, t[4], c2) - } - { - // round 2 - m := t[0] * 8083954730842193919 - c2 := madd0(m, 8063698428123676673, t[0]) - c2, t[0] = madd2(m, 4764498181658371330, c2, t[1]) - c2, t[1] = madd2(m, 16051339359738796768, c2, t[2]) - c2, t[2] = madd2(m, 15273757526516850351, c2, t[3]) - t[4], t[3] = madd2(m, 342900304943437392, t[4], c2) - } - { - // round 3 - m := t[0] * 8083954730842193919 - c2 := madd0(m, 8063698428123676673, t[0]) - c2, t[0] = madd2(m, 4764498181658371330, c2, t[1]) - c2, t[1] = madd2(m, 16051339359738796768, c2, t[2]) - c2, t[2] = madd2(m, 
15273757526516850351, c2, t[3]) - t[4], t[3] = madd2(m, 342900304943437392, t[4], c2) - } - { - // round 4 - m := t[0] * 8083954730842193919 - c2 := madd0(m, 8063698428123676673, t[0]) - c2, z[0] = madd2(m, 4764498181658371330, c2, t[1]) - c2, z[1] = madd2(m, 16051339359738796768, c2, t[2]) - c2, z[2] = madd2(m, 15273757526516850351, c2, t[3]) - z[4], z[3] = madd2(m, 342900304943437392, t[4], c2) - } - - // if z > q → z -= q - // note: this is NOT constant time - if !(z[4] < 342900304943437392 || (z[4] == 342900304943437392 && (z[3] < 15273757526516850351 || (z[3] == 15273757526516850351 && (z[2] < 16051339359738796768 || (z[2] == 16051339359738796768 && (z[1] < 4764498181658371330 || (z[1] == 4764498181658371330 && (z[0] < 8063698428123676673))))))))) { - var b uint64 - z[0], b = bits.Sub64(z[0], 8063698428123676673, 0) - z[1], b = bits.Sub64(z[1], 4764498181658371330, b) - z[2], b = bits.Sub64(z[2], 16051339359738796768, b) - z[3], b = bits.Sub64(z[3], 15273757526516850351, b) - z[4], _ = bits.Sub64(z[4], 342900304943437392, b) - } } func _fromMontGeneric(z *Element) { @@ -658,7 +587,7 @@ func _fromMontGeneric(z *Element) { z[4] = C } - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[4] < 342900304943437392 || (z[4] == 342900304943437392 && (z[3] < 15273757526516850351 || (z[3] == 15273757526516850351 && (z[2] < 16051339359738796768 || (z[2] == 16051339359738796768 && (z[1] < 4764498181658371330 || (z[1] == 4764498181658371330 && (z[0] < 8063698428123676673))))))))) { var b uint64 @@ -679,7 +608,7 @@ func _addGeneric(z, x, y *Element) { z[3], carry = bits.Add64(x[3], y[3], carry) z[4], _ = bits.Add64(x[4], y[4], carry) - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[4] < 342900304943437392 || (z[4] == 342900304943437392 && (z[3] < 15273757526516850351 || (z[3] == 15273757526516850351 && (z[2] < 16051339359738796768 || (z[2] == 16051339359738796768 && (z[1] < 4764498181658371330 || 
(z[1] == 4764498181658371330 && (z[0] < 8063698428123676673))))))))) { var b uint64 @@ -700,7 +629,7 @@ func _doubleGeneric(z, x *Element) { z[3], carry = bits.Add64(x[3], x[3], carry) z[4], _ = bits.Add64(x[4], x[4], carry) - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[4] < 342900304943437392 || (z[4] == 342900304943437392 && (z[3] < 15273757526516850351 || (z[3] == 15273757526516850351 && (z[2] < 16051339359738796768 || (z[2] == 16051339359738796768 && (z[1] < 4764498181658371330 || (z[1] == 4764498181658371330 && (z[0] < 8063698428123676673))))))))) { var b uint64 @@ -744,7 +673,7 @@ func _negGeneric(z, x *Element) { func _reduceGeneric(z *Element) { - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[4] < 342900304943437392 || (z[4] == 342900304943437392 && (z[3] < 15273757526516850351 || (z[3] == 15273757526516850351 && (z[2] < 16051339359738796768 || (z[2] == 16051339359738796768 && (z[1] < 4764498181658371330 || (z[1] == 4764498181658371330 && (z[0] < 8063698428123676673))))))))) { var b uint64 @@ -839,18 +768,30 @@ func (z *Element) BitLen() int { return bits.Len64(z[0]) } -// Exp z = x^exponent mod q -func (z *Element) Exp(x Element, exponent *big.Int) *Element { - var bZero big.Int - if exponent.Cmp(&bZero) == 0 { +// Exp z = xᵏ mod q +func (z *Element) Exp(x Element, k *big.Int) *Element { + if k.IsUint64() && k.Uint64() == 0 { return z.SetOne() } + e := k + if k.Sign() == -1 { + // negative k, we invert + // if k < 0: xᵏ mod q == (x⁻¹)ᵏ mod q + x.Inverse(&x) + + // we negate k in a temp big.Int since + // Int.Bit(_) of k and -k is different + e = bigIntPool.Get().(*big.Int) + defer bigIntPool.Put(e) + e.Neg(k) + } + z.Set(&x) - for i := exponent.BitLen() - 2; i >= 0; i-- { + for i := e.BitLen() - 2; i >= 0; i-- { z.Square(z) - if exponent.Bit(i) == 1 { + if e.Bit(i) == 1 { z.Mul(z, &x) } } @@ -1372,14 +1313,10 @@ func (z *Element) Inverse(x *Element) *Element { return z } 
-var qMinusTwo *big.Int //test routines can set this to an incorrect value to fail whenever inverseExp was triggered - -// inverseExp is a fallback in case the inversion algorithm failed +// inverseExp computes z = x⁻¹ mod q = x**(q-2) mod q func (z *Element) inverseExp(x *Element) *Element { - if qMinusTwo == nil { - qMinusTwo = Modulus() - qMinusTwo.Sub(qMinusTwo, big.NewInt(2)) - } + qMinusTwo := Modulus() + qMinusTwo.Sub(qMinusTwo, big.NewInt(2)) return z.Exp(*x, qMinusTwo) } @@ -1421,6 +1358,8 @@ func (z *Element) linearComb(x *Element, xC int64, y *Element, yC int64) { // montReduceSigned z = (xHi * r + x) * r⁻¹ using the SOS algorithm // Requires |xHi| < 2⁶³. Most significant bit of xHi is the sign bit. func (z *Element) montReduceSigned(x *Element, xHi uint64) { + // Used for Montgomery reduction. (qInvNeg) q + r'.r = 1, i.e., qInvNeg = - q⁻¹ mod r + const qInvNegLsw uint64 = 8083954730842193919 const signBitRemover = ^signBitSelector neg := xHi&signBitSelector != 0 @@ -1496,7 +1435,7 @@ func (z *Element) montReduceSigned(x *Element, xHi uint64) { z[4], z[3] = madd2(m, qElementWord4, t[i+4], C) } - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[4] < 342900304943437392 || (z[4] == 342900304943437392 && (z[3] < 15273757526516850351 || (z[3] == 15273757526516850351 && (z[2] < 16051339359738796768 || (z[2] == 16051339359738796768 && (z[1] < 4764498181658371330 || (z[1] == 4764498181658371330 && (z[0] < 8063698428123676673))))))))) { var b uint64 @@ -1550,7 +1489,7 @@ func (z *Element) montReduceSignedSimpleButSlow(x *Element, xHi uint64) { z[3], c = bits.Add64(z[3], 0, c) z[4], _ = bits.Add64(z[4], 0, c) - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[4] < 342900304943437392 || (z[4] == 342900304943437392 && (z[3] < 15273757526516850351 || (z[3] == 15273757526516850351 && (z[2] < 16051339359738796768 || (z[2] == 16051339359738796768 && (z[1] < 4764498181658371330 || (z[1] == 
4764498181658371330 && (z[0] < 8063698428123676673))))))))) { var b uint64 @@ -1598,6 +1537,72 @@ func (z *Element) mulWSigned(x *Element, y int64) { } } +func _mulWGeneric(z, x *Element, y uint64) { + + var t [5]uint64 + { + // round 0 + c1, c0 := bits.Mul64(y, x[0]) + m := c0 * 8083954730842193919 + c2 := madd0(m, 8063698428123676673, c0) + c1, c0 = madd1(y, x[1], c1) + c2, t[0] = madd2(m, 4764498181658371330, c2, c0) + c1, c0 = madd1(y, x[2], c1) + c2, t[1] = madd2(m, 16051339359738796768, c2, c0) + c1, c0 = madd1(y, x[3], c1) + c2, t[2] = madd2(m, 15273757526516850351, c2, c0) + c1, c0 = madd1(y, x[4], c1) + t[4], t[3] = madd3(m, 342900304943437392, c0, c2, c1) + } + { + // round 1 + m := t[0] * 8083954730842193919 + c2 := madd0(m, 8063698428123676673, t[0]) + c2, t[0] = madd2(m, 4764498181658371330, c2, t[1]) + c2, t[1] = madd2(m, 16051339359738796768, c2, t[2]) + c2, t[2] = madd2(m, 15273757526516850351, c2, t[3]) + t[4], t[3] = madd2(m, 342900304943437392, t[4], c2) + } + { + // round 2 + m := t[0] * 8083954730842193919 + c2 := madd0(m, 8063698428123676673, t[0]) + c2, t[0] = madd2(m, 4764498181658371330, c2, t[1]) + c2, t[1] = madd2(m, 16051339359738796768, c2, t[2]) + c2, t[2] = madd2(m, 15273757526516850351, c2, t[3]) + t[4], t[3] = madd2(m, 342900304943437392, t[4], c2) + } + { + // round 3 + m := t[0] * 8083954730842193919 + c2 := madd0(m, 8063698428123676673, t[0]) + c2, t[0] = madd2(m, 4764498181658371330, c2, t[1]) + c2, t[1] = madd2(m, 16051339359738796768, c2, t[2]) + c2, t[2] = madd2(m, 15273757526516850351, c2, t[3]) + t[4], t[3] = madd2(m, 342900304943437392, t[4], c2) + } + { + // round 4 + m := t[0] * 8083954730842193919 + c2 := madd0(m, 8063698428123676673, t[0]) + c2, z[0] = madd2(m, 4764498181658371330, c2, t[1]) + c2, z[1] = madd2(m, 16051339359738796768, c2, t[2]) + c2, z[2] = madd2(m, 15273757526516850351, c2, t[3]) + z[4], z[3] = madd2(m, 342900304943437392, t[4], c2) + } + + // if z >= q → z -= q + // note: this is NOT constant time + 
if !(z[4] < 342900304943437392 || (z[4] == 342900304943437392 && (z[3] < 15273757526516850351 || (z[3] == 15273757526516850351 && (z[2] < 16051339359738796768 || (z[2] == 16051339359738796768 && (z[1] < 4764498181658371330 || (z[1] == 4764498181658371330 && (z[0] < 8063698428123676673))))))))) { + var b uint64 + z[0], b = bits.Sub64(z[0], 8063698428123676673, 0) + z[1], b = bits.Sub64(z[1], 4764498181658371330, b) + z[2], b = bits.Sub64(z[2], 16051339359738796768, b) + z[3], b = bits.Sub64(z[3], 15273757526516850351, b) + z[4], _ = bits.Sub64(z[4], 342900304943437392, b) + } +} + func (z *Element) neg(x *Element, xHi uint64) uint64 { var b uint64 diff --git a/ecc/bls24-315/fp/element_ops_noasm.go b/ecc/bls24-315/fp/element_ops_noasm.go index fec6289183..7d02396a3f 100644 --- a/ecc/bls24-315/fp/element_ops_noasm.go +++ b/ecc/bls24-315/fp/element_ops_noasm.go @@ -46,7 +46,6 @@ func MulBy13(x *Element) { func Butterfly(a, b *Element) { _butterflyGeneric(a, b) } - func mul(z, x, y *Element) { _mulGeneric(z, x, y) } diff --git a/ecc/bls24-315/fp/element_test.go b/ecc/bls24-315/fp/element_test.go index 6726f8b697..f0ebbfc67f 100644 --- a/ecc/bls24-315/fp/element_test.go +++ b/ecc/bls24-315/fp/element_test.go @@ -22,10 +22,12 @@ import ( "fmt" "math/big" "math/bits" + + "github.com/consensys/gnark-crypto/field" mrand "math/rand" + "testing" - "github.com/consensys/gnark-crypto/field" "github.com/leanovate/gopter" ggen "github.com/leanovate/gopter/gen" "github.com/leanovate/gopter/prop" @@ -322,7 +324,6 @@ func init() { a[4]-- staticTestValues = append(staticTestValues, a) } - { a := qElement a[4]-- @@ -336,6 +337,12 @@ func init() { staticTestValues = append(staticTestValues, a) } + { + a := qElement + a[4] = 0 + staticTestValues = append(staticTestValues, a) + } + } func TestElementReduce(t *testing.T) { @@ -473,7 +480,6 @@ func TestElementInverseExp(t *testing.T) { properties = gopter.NewProperties(parameters) properties.Property("inv(0) == 0", prop.ForAll(invMatchExp, 
ggen.OneConstOf(testPairElement{}))) properties.TestingRun(t, gopter.ConsoleReporter(false)) - // if we have ADX instruction enabled, test both path in assembly if supportAdx { t.Log("disabling ADX") @@ -1420,8 +1426,8 @@ func TestElementSquare(t *testing.T) { specialValueTest() // if we have ADX instruction enabled, test both path in assembly if supportAdx { - supportAdx = false t.Log("disabling ADX") + supportAdx = false properties.TestingRun(t, gopter.ConsoleReporter(false)) specialValueTest() supportAdx = true @@ -1499,8 +1505,8 @@ func TestElementInverse(t *testing.T) { specialValueTest() // if we have ADX instruction enabled, test both path in assembly if supportAdx { - supportAdx = false t.Log("disabling ADX") + supportAdx = false properties.TestingRun(t, gopter.ConsoleReporter(false)) specialValueTest() supportAdx = true @@ -1578,8 +1584,8 @@ func TestElementSqrt(t *testing.T) { specialValueTest() // if we have ADX instruction enabled, test both path in assembly if supportAdx { - supportAdx = false t.Log("disabling ADX") + supportAdx = false properties.TestingRun(t, gopter.ConsoleReporter(false)) specialValueTest() supportAdx = true @@ -1674,8 +1680,8 @@ func TestElementDouble(t *testing.T) { specialValueTest() // if we have ADX instruction enabled, test both path in assembly if supportAdx { - supportAdx = false t.Log("disabling ADX") + supportAdx = false properties.TestingRun(t, gopter.ConsoleReporter(false)) specialValueTest() supportAdx = true @@ -1770,8 +1776,8 @@ func TestElementNeg(t *testing.T) { specialValueTest() // if we have ADX instruction enabled, test both path in assembly if supportAdx { - supportAdx = false t.Log("disabling ADX") + supportAdx = false properties.TestingRun(t, gopter.ConsoleReporter(false)) specialValueTest() supportAdx = true @@ -2106,6 +2112,134 @@ func TestElementSetInterface(t *testing.T) { properties.TestingRun(t, gopter.ConsoleReporter(false)) } +func TestElementNegativeExp(t *testing.T) { + t.Parallel() + + parameters 
:= gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := gen() + + properties.Property("x⁻ᵏ == 1/xᵏ", prop.ForAll( + func(a, b testPairElement) bool { + + var nb, d, e big.Int + nb.Neg(&b.bigint) + + var c Element + c.Exp(a.element, &nb) + + d.Exp(&a.bigint, &nb, Modulus()) + + return c.FromMont().ToBigInt(&e).Cmp(&d) == 0 + }, + genA, genA, + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) +} + +func TestElementBatchInvert(t *testing.T) { + assert := require.New(t) + + t.Parallel() + + // ensure batchInvert([x]) == invert(x) + for i := int64(-1); i <= 2; i++ { + var e, eInv Element + e.SetInt64(i) + eInv.Inverse(&e) + + a := []Element{e} + aInv := BatchInvert(a) + + assert.True(aInv[0].Equal(&eInv), "batchInvert != invert") + + } + + // test x * x⁻¹ == 1 + tData := [][]int64{ + {-1, 1, 2, 3}, + {0, -1, 1, 2, 3, 0}, + {0, -1, 1, 0, 2, 3, 0}, + {-1, 1, 0, 2, 3}, + {0, 0, 1}, + {1, 0, 0}, + {0, 0, 0}, + } + + for _, t := range tData { + a := make([]Element, len(t)) + for i := 0; i < len(a); i++ { + a[i].SetInt64(t[i]) + } + + aInv := BatchInvert(a) + + assert.True(len(aInv) == len(a)) + + for i := 0; i < len(a); i++ { + if a[i].IsZero() { + assert.True(aInv[i].IsZero(), "0⁻¹ != 0") + } else { + assert.True(a[i].Mul(&a[i], &aInv[i]).IsOne(), "x * x⁻¹ != 1") + } + } + } + + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := gen() + + properties.Property("batchInvert --> x * x⁻¹ == 1", prop.ForAll( + func(tp testPairElement, r uint8) bool { + + a := make([]Element, r) + if r != 0 { + a[0] = tp.element + + } + one := One() + for i := 1; i < len(a); i++ { + a[i].Add(&a[i-1], &one) + } + + aInv := BatchInvert(a) + + 
assert.True(len(aInv) == len(a)) + + for i := 0; i < len(a); i++ { + if a[i].IsZero() { + if !aInv[i].IsZero() { + return false + } + } else { + if !a[i].Mul(&a[i], &aInv[i]).IsOne() { + return false + } + } + } + return true + }, + genA, ggen.UInt8(), + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) +} + func TestElementFromMont(t *testing.T) { t.Parallel() @@ -2300,8 +2434,6 @@ func genFull() gopter.Gen { } } -// Some utils - func (z *Element) matchVeryBigInt(aHi uint64, aInt *big.Int) error { var modulus big.Int var aIntMod big.Int diff --git a/ecc/bls24-315/fr/element.go b/ecc/bls24-315/fr/element.go index 6907494b12..220874226a 100644 --- a/ecc/bls24-315/fr/element.go +++ b/ecc/bls24-315/fr/element.go @@ -75,9 +75,6 @@ var qElement = Element{ qElementWord3, } -// Used for Montgomery reduction. (qInvNeg) q + r'.r = 1, i.e., qInvNeg = - q⁻¹ mod r -const qInvNegLsw uint64 = 2184305180030271487 - // rSquare var rSquare = Element{ 6242551132904523857, @@ -93,7 +90,8 @@ var bigIntPool = sync.Pool{ } func init() { - _modulus.SetString("11502027791375260645628074404575422495959608200132055716665986169834464870401", 10) + // base10: 11502027791375260645628074404575422495959608200132055716665986169834464870401 + _modulus.SetString("196deac24a9da12b25fc7ec9cf927a98c8c480ece644e36419d0c5fd00c00001", 16) } // NewElement returns a new Element from a uint64 value @@ -327,7 +325,7 @@ func (z *Element) SetRandom() (*Element, error) { z[3] = binary.BigEndian.Uint64(bytes[24:32]) z[3] %= 1832378743606059307 - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[3] < 1832378743606059307 || (z[3] == 1832378743606059307 && (z[2] < 2737202078770428568 || (z[2] == 2737202078770428568 && (z[1] < 14466829657984787300 || (z[1] == 14466829657984787300 && (z[0] < 1860204336533995521))))))) { var b uint64 @@ -349,19 +347,17 @@ func One() Element { // Halve sets z to z / 2 (mod p) func (z *Element) Halve() { - if z[0]&1 == 1 { + var carry 
uint64 + if z[0]&1 == 1 { // z = z + q - var carry uint64 z[0], carry = bits.Add64(z[0], 1860204336533995521, 0) z[1], carry = bits.Add64(z[1], 14466829657984787300, carry) z[2], carry = bits.Add64(z[2], 2737202078770428568, carry) z[3], _ = bits.Add64(z[3], 1832378743606059307, carry) } - // z = z >> 1 - z[0] = z[0]>>1 | z[1]<<63 z[1] = z[1]>>1 | z[2]<<63 z[2] = z[2]>>1 | z[3]<<63 @@ -369,8 +365,6 @@ func (z *Element) Halve() { } -// API with assembly impl - // Mul z = x * y mod q // see https://hackmd.io/@gnark/modular_multiplication func (z *Element) Mul(x, y *Element) *Element { @@ -486,7 +480,7 @@ func _mulGeneric(z, x, y *Element) { z[3], z[2] = madd3(m, 1832378743606059307, c[0], c[2], c[1]) } - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[3] < 1832378743606059307 || (z[3] == 1832378743606059307 && (z[2] < 2737202078770428568 || (z[2] == 2737202078770428568 && (z[1] < 14466829657984787300 || (z[1] == 14466829657984787300 && (z[0] < 1860204336533995521))))))) { var b uint64 @@ -495,57 +489,7 @@ func _mulGeneric(z, x, y *Element) { z[2], b = bits.Sub64(z[2], 2737202078770428568, b) z[3], _ = bits.Sub64(z[3], 1832378743606059307, b) } -} - -func _mulWGeneric(z, x *Element, y uint64) { - - var t [4]uint64 - { - // round 0 - c1, c0 := bits.Mul64(y, x[0]) - m := c0 * 2184305180030271487 - c2 := madd0(m, 1860204336533995521, c0) - c1, c0 = madd1(y, x[1], c1) - c2, t[0] = madd2(m, 14466829657984787300, c2, c0) - c1, c0 = madd1(y, x[2], c1) - c2, t[1] = madd2(m, 2737202078770428568, c2, c0) - c1, c0 = madd1(y, x[3], c1) - t[3], t[2] = madd3(m, 1832378743606059307, c0, c2, c1) - } - { - // round 1 - m := t[0] * 2184305180030271487 - c2 := madd0(m, 1860204336533995521, t[0]) - c2, t[0] = madd2(m, 14466829657984787300, c2, t[1]) - c2, t[1] = madd2(m, 2737202078770428568, c2, t[2]) - t[3], t[2] = madd2(m, 1832378743606059307, t[3], c2) - } - { - // round 2 - m := t[0] * 2184305180030271487 - c2 := madd0(m, 1860204336533995521, 
t[0]) - c2, t[0] = madd2(m, 14466829657984787300, c2, t[1]) - c2, t[1] = madd2(m, 2737202078770428568, c2, t[2]) - t[3], t[2] = madd2(m, 1832378743606059307, t[3], c2) - } - { - // round 3 - m := t[0] * 2184305180030271487 - c2 := madd0(m, 1860204336533995521, t[0]) - c2, z[0] = madd2(m, 14466829657984787300, c2, t[1]) - c2, z[1] = madd2(m, 2737202078770428568, c2, t[2]) - z[3], z[2] = madd2(m, 1832378743606059307, t[3], c2) - } - // if z > q → z -= q - // note: this is NOT constant time - if !(z[3] < 1832378743606059307 || (z[3] == 1832378743606059307 && (z[2] < 2737202078770428568 || (z[2] == 2737202078770428568 && (z[1] < 14466829657984787300 || (z[1] == 14466829657984787300 && (z[0] < 1860204336533995521))))))) { - var b uint64 - z[0], b = bits.Sub64(z[0], 1860204336533995521, 0) - z[1], b = bits.Sub64(z[1], 14466829657984787300, b) - z[2], b = bits.Sub64(z[2], 2737202078770428568, b) - z[3], _ = bits.Sub64(z[3], 1832378743606059307, b) - } } func _fromMontGeneric(z *Element) { @@ -588,7 +532,7 @@ func _fromMontGeneric(z *Element) { z[3] = C } - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[3] < 1832378743606059307 || (z[3] == 1832378743606059307 && (z[2] < 2737202078770428568 || (z[2] == 2737202078770428568 && (z[1] < 14466829657984787300 || (z[1] == 14466829657984787300 && (z[0] < 1860204336533995521))))))) { var b uint64 @@ -607,7 +551,7 @@ func _addGeneric(z, x, y *Element) { z[2], carry = bits.Add64(x[2], y[2], carry) z[3], _ = bits.Add64(x[3], y[3], carry) - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[3] < 1832378743606059307 || (z[3] == 1832378743606059307 && (z[2] < 2737202078770428568 || (z[2] == 2737202078770428568 && (z[1] < 14466829657984787300 || (z[1] == 14466829657984787300 && (z[0] < 1860204336533995521))))))) { var b uint64 @@ -626,7 +570,7 @@ func _doubleGeneric(z, x *Element) { z[2], carry = bits.Add64(x[2], x[2], carry) z[3], _ = bits.Add64(x[3], x[3], carry) 
- // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[3] < 1832378743606059307 || (z[3] == 1832378743606059307 && (z[2] < 2737202078770428568 || (z[2] == 2737202078770428568 && (z[1] < 14466829657984787300 || (z[1] == 14466829657984787300 && (z[0] < 1860204336533995521))))))) { var b uint64 @@ -666,7 +610,7 @@ func _negGeneric(z, x *Element) { func _reduceGeneric(z *Element) { - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[3] < 1832378743606059307 || (z[3] == 1832378743606059307 && (z[2] < 2737202078770428568 || (z[2] == 2737202078770428568 && (z[1] < 14466829657984787300 || (z[1] == 14466829657984787300 && (z[0] < 1860204336533995521))))))) { var b uint64 @@ -757,18 +701,30 @@ func (z *Element) BitLen() int { return bits.Len64(z[0]) } -// Exp z = x^exponent mod q -func (z *Element) Exp(x Element, exponent *big.Int) *Element { - var bZero big.Int - if exponent.Cmp(&bZero) == 0 { +// Exp z = xᵏ mod q +func (z *Element) Exp(x Element, k *big.Int) *Element { + if k.IsUint64() && k.Uint64() == 0 { return z.SetOne() } + e := k + if k.Sign() == -1 { + // negative k, we invert + // if k < 0: xᵏ mod q == (x⁻¹)ᵏ mod q + x.Inverse(&x) + + // we negate k in a temp big.Int since + // Int.Bit(_) of k and -k is different + e = bigIntPool.Get().(*big.Int) + defer bigIntPool.Put(e) + e.Neg(k) + } + z.Set(&x) - for i := exponent.BitLen() - 2; i >= 0; i-- { + for i := e.BitLen() - 2; i >= 0; i-- { z.Square(z) - if exponent.Bit(i) == 1 { + if e.Bit(i) == 1 { z.Mul(z, &x) } } @@ -1282,14 +1238,10 @@ func (z *Element) Inverse(x *Element) *Element { return z } -var qMinusTwo *big.Int //test routines can set this to an incorrect value to fail whenever inverseExp was triggered - -// inverseExp is a fallback in case the inversion algorithm failed +// inverseExp computes z = x⁻¹ mod q = x**(q-2) mod q func (z *Element) inverseExp(x *Element) *Element { - if qMinusTwo == nil { - qMinusTwo = Modulus() - 
qMinusTwo.Sub(qMinusTwo, big.NewInt(2)) - } + qMinusTwo := Modulus() + qMinusTwo.Sub(qMinusTwo, big.NewInt(2)) return z.Exp(*x, qMinusTwo) } @@ -1331,6 +1283,8 @@ func (z *Element) linearComb(x *Element, xC int64, y *Element, yC int64) { // montReduceSigned z = (xHi * r + x) * r⁻¹ using the SOS algorithm // Requires |xHi| < 2⁶³. Most significant bit of xHi is the sign bit. func (z *Element) montReduceSigned(x *Element, xHi uint64) { + // Used for Montgomery reduction. (qInvNeg) q + r'.r = 1, i.e., qInvNeg = - q⁻¹ mod r + const qInvNegLsw uint64 = 2184305180030271487 const signBitRemover = ^signBitSelector neg := xHi&signBitSelector != 0 @@ -1390,7 +1344,7 @@ func (z *Element) montReduceSigned(x *Element, xHi uint64) { z[3], z[2] = madd2(m, qElementWord3, t[i+3], C) } - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[3] < 1832378743606059307 || (z[3] == 1832378743606059307 && (z[2] < 2737202078770428568 || (z[2] == 2737202078770428568 && (z[1] < 14466829657984787300 || (z[1] == 14466829657984787300 && (z[0] < 1860204336533995521))))))) { var b uint64 @@ -1440,7 +1394,7 @@ func (z *Element) montReduceSignedSimpleButSlow(x *Element, xHi uint64) { z[2], c = bits.Add64(z[2], 0, c) z[3], _ = bits.Add64(z[3], 0, c) - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[3] < 1832378743606059307 || (z[3] == 1832378743606059307 && (z[2] < 2737202078770428568 || (z[2] == 2737202078770428568 && (z[1] < 14466829657984787300 || (z[1] == 14466829657984787300 && (z[0] < 1860204336533995521))))))) { var b uint64 @@ -1485,6 +1439,57 @@ func (z *Element) mulWSigned(x *Element, y int64) { } } +func _mulWGeneric(z, x *Element, y uint64) { + + var t [4]uint64 + { + // round 0 + c1, c0 := bits.Mul64(y, x[0]) + m := c0 * 2184305180030271487 + c2 := madd0(m, 1860204336533995521, c0) + c1, c0 = madd1(y, x[1], c1) + c2, t[0] = madd2(m, 14466829657984787300, c2, c0) + c1, c0 = madd1(y, x[2], c1) + c2, t[1] = madd2(m, 
2737202078770428568, c2, c0) + c1, c0 = madd1(y, x[3], c1) + t[3], t[2] = madd3(m, 1832378743606059307, c0, c2, c1) + } + { + // round 1 + m := t[0] * 2184305180030271487 + c2 := madd0(m, 1860204336533995521, t[0]) + c2, t[0] = madd2(m, 14466829657984787300, c2, t[1]) + c2, t[1] = madd2(m, 2737202078770428568, c2, t[2]) + t[3], t[2] = madd2(m, 1832378743606059307, t[3], c2) + } + { + // round 2 + m := t[0] * 2184305180030271487 + c2 := madd0(m, 1860204336533995521, t[0]) + c2, t[0] = madd2(m, 14466829657984787300, c2, t[1]) + c2, t[1] = madd2(m, 2737202078770428568, c2, t[2]) + t[3], t[2] = madd2(m, 1832378743606059307, t[3], c2) + } + { + // round 3 + m := t[0] * 2184305180030271487 + c2 := madd0(m, 1860204336533995521, t[0]) + c2, z[0] = madd2(m, 14466829657984787300, c2, t[1]) + c2, z[1] = madd2(m, 2737202078770428568, c2, t[2]) + z[3], z[2] = madd2(m, 1832378743606059307, t[3], c2) + } + + // if z >= q → z -= q + // note: this is NOT constant time + if !(z[3] < 1832378743606059307 || (z[3] == 1832378743606059307 && (z[2] < 2737202078770428568 || (z[2] == 2737202078770428568 && (z[1] < 14466829657984787300 || (z[1] == 14466829657984787300 && (z[0] < 1860204336533995521))))))) { + var b uint64 + z[0], b = bits.Sub64(z[0], 1860204336533995521, 0) + z[1], b = bits.Sub64(z[1], 14466829657984787300, b) + z[2], b = bits.Sub64(z[2], 2737202078770428568, b) + z[3], _ = bits.Sub64(z[3], 1832378743606059307, b) + } +} + func (z *Element) neg(x *Element, xHi uint64) uint64 { var b uint64 diff --git a/ecc/bls24-315/fr/element_ops_noasm.go b/ecc/bls24-315/fr/element_ops_noasm.go index ec1fac18d6..7891ac9cd3 100644 --- a/ecc/bls24-315/fr/element_ops_noasm.go +++ b/ecc/bls24-315/fr/element_ops_noasm.go @@ -46,7 +46,6 @@ func MulBy13(x *Element) { func Butterfly(a, b *Element) { _butterflyGeneric(a, b) } - func mul(z, x, y *Element) { _mulGeneric(z, x, y) } diff --git a/ecc/bls24-315/fr/element_test.go b/ecc/bls24-315/fr/element_test.go index f6e4fbfedf..a858259e3b 100644 --- 
a/ecc/bls24-315/fr/element_test.go +++ b/ecc/bls24-315/fr/element_test.go @@ -22,10 +22,12 @@ import ( "fmt" "math/big" "math/bits" + + "github.com/consensys/gnark-crypto/field" mrand "math/rand" + "testing" - "github.com/consensys/gnark-crypto/field" "github.com/leanovate/gopter" ggen "github.com/leanovate/gopter/gen" "github.com/leanovate/gopter/prop" @@ -320,7 +322,6 @@ func init() { a[3]-- staticTestValues = append(staticTestValues, a) } - { a := qElement a[3]-- @@ -334,6 +335,12 @@ func init() { staticTestValues = append(staticTestValues, a) } + { + a := qElement + a[3] = 0 + staticTestValues = append(staticTestValues, a) + } + } func TestElementReduce(t *testing.T) { @@ -471,7 +478,6 @@ func TestElementInverseExp(t *testing.T) { properties = gopter.NewProperties(parameters) properties.Property("inv(0) == 0", prop.ForAll(invMatchExp, ggen.OneConstOf(testPairElement{}))) properties.TestingRun(t, gopter.ConsoleReporter(false)) - // if we have ADX instruction enabled, test both path in assembly if supportAdx { t.Log("disabling ADX") @@ -1418,8 +1424,8 @@ func TestElementSquare(t *testing.T) { specialValueTest() // if we have ADX instruction enabled, test both path in assembly if supportAdx { - supportAdx = false t.Log("disabling ADX") + supportAdx = false properties.TestingRun(t, gopter.ConsoleReporter(false)) specialValueTest() supportAdx = true @@ -1497,8 +1503,8 @@ func TestElementInverse(t *testing.T) { specialValueTest() // if we have ADX instruction enabled, test both path in assembly if supportAdx { - supportAdx = false t.Log("disabling ADX") + supportAdx = false properties.TestingRun(t, gopter.ConsoleReporter(false)) specialValueTest() supportAdx = true @@ -1576,8 +1582,8 @@ func TestElementSqrt(t *testing.T) { specialValueTest() // if we have ADX instruction enabled, test both path in assembly if supportAdx { - supportAdx = false t.Log("disabling ADX") + supportAdx = false properties.TestingRun(t, gopter.ConsoleReporter(false)) specialValueTest() 
supportAdx = true @@ -1672,8 +1678,8 @@ func TestElementDouble(t *testing.T) { specialValueTest() // if we have ADX instruction enabled, test both path in assembly if supportAdx { - supportAdx = false t.Log("disabling ADX") + supportAdx = false properties.TestingRun(t, gopter.ConsoleReporter(false)) specialValueTest() supportAdx = true @@ -1768,8 +1774,8 @@ func TestElementNeg(t *testing.T) { specialValueTest() // if we have ADX instruction enabled, test both path in assembly if supportAdx { - supportAdx = false t.Log("disabling ADX") + supportAdx = false properties.TestingRun(t, gopter.ConsoleReporter(false)) specialValueTest() supportAdx = true @@ -2104,6 +2110,134 @@ func TestElementSetInterface(t *testing.T) { properties.TestingRun(t, gopter.ConsoleReporter(false)) } +func TestElementNegativeExp(t *testing.T) { + t.Parallel() + + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := gen() + + properties.Property("x⁻ᵏ == 1/xᵏ", prop.ForAll( + func(a, b testPairElement) bool { + + var nb, d, e big.Int + nb.Neg(&b.bigint) + + var c Element + c.Exp(a.element, &nb) + + d.Exp(&a.bigint, &nb, Modulus()) + + return c.FromMont().ToBigInt(&e).Cmp(&d) == 0 + }, + genA, genA, + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) +} + +func TestElementBatchInvert(t *testing.T) { + assert := require.New(t) + + t.Parallel() + + // ensure batchInvert([x]) == invert(x) + for i := int64(-1); i <= 2; i++ { + var e, eInv Element + e.SetInt64(i) + eInv.Inverse(&e) + + a := []Element{e} + aInv := BatchInvert(a) + + assert.True(aInv[0].Equal(&eInv), "batchInvert != invert") + + } + + // test x * x⁻¹ == 1 + tData := [][]int64{ + {-1, 1, 2, 3}, + {0, -1, 1, 2, 3, 0}, + {0, -1, 1, 0, 2, 3, 0}, + {-1, 1, 0, 2, 3}, + {0, 0, 1}, + {1, 0, 0}, + {0, 0, 0}, + } + + for _, t := range tData { + a := 
make([]Element, len(t)) + for i := 0; i < len(a); i++ { + a[i].SetInt64(t[i]) + } + + aInv := BatchInvert(a) + + assert.True(len(aInv) == len(a)) + + for i := 0; i < len(a); i++ { + if a[i].IsZero() { + assert.True(aInv[i].IsZero(), "0⁻¹ != 0") + } else { + assert.True(a[i].Mul(&a[i], &aInv[i]).IsOne(), "x * x⁻¹ != 1") + } + } + } + + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := gen() + + properties.Property("batchInvert --> x * x⁻¹ == 1", prop.ForAll( + func(tp testPairElement, r uint8) bool { + + a := make([]Element, r) + if r != 0 { + a[0] = tp.element + + } + one := One() + for i := 1; i < len(a); i++ { + a[i].Add(&a[i-1], &one) + } + + aInv := BatchInvert(a) + + assert.True(len(aInv) == len(a)) + + for i := 0; i < len(a); i++ { + if a[i].IsZero() { + if !aInv[i].IsZero() { + return false + } + } else { + if !a[i].Mul(&a[i], &aInv[i]).IsOne() { + return false + } + } + } + return true + }, + genA, ggen.UInt8(), + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) +} + func TestElementFromMont(t *testing.T) { t.Parallel() @@ -2286,8 +2420,6 @@ func genFull() gopter.Gen { } } -// Some utils - func (z *Element) matchVeryBigInt(aHi uint64, aInt *big.Int) error { var modulus big.Int var aIntMod big.Int diff --git a/ecc/bls24-315/fr/mimc/mimc.go b/ecc/bls24-315/fr/mimc/mimc.go index 63e8f5e1f8..a8fa2126f4 100644 --- a/ecc/bls24-315/fr/mimc/mimc.go +++ b/ecc/bls24-315/fr/mimc/mimc.go @@ -64,7 +64,7 @@ func NewMiMC() hash.Hash { // Reset resets the Hash to its initial state. func (d *digest) Reset() { d.data = nil - d.h = fr.Element{0, 0, 0, 0} + d.h.SetZero() } // Sum appends the current hash to b and returns the resulting slice. 
diff --git a/ecc/bn254/fp/element.go b/ecc/bn254/fp/element.go index 81544f66e3..93737f2785 100644 --- a/ecc/bn254/fp/element.go +++ b/ecc/bn254/fp/element.go @@ -75,9 +75,6 @@ var qElement = Element{ qElementWord3, } -// Used for Montgomery reduction. (qInvNeg) q + r'.r = 1, i.e., qInvNeg = - q⁻¹ mod r -const qInvNegLsw uint64 = 9786893198990664585 - // rSquare var rSquare = Element{ 17522657719365597833, @@ -93,7 +90,8 @@ var bigIntPool = sync.Pool{ } func init() { - _modulus.SetString("21888242871839275222246405745257275088696311157297823662689037894645226208583", 10) + // base10: 21888242871839275222246405745257275088696311157297823662689037894645226208583 + _modulus.SetString("30644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd47", 16) } // NewElement returns a new Element from a uint64 value @@ -327,7 +325,7 @@ func (z *Element) SetRandom() (*Element, error) { z[3] = binary.BigEndian.Uint64(bytes[24:32]) z[3] %= 3486998266802970665 - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[3] < 3486998266802970665 || (z[3] == 3486998266802970665 && (z[2] < 13281191951274694749 || (z[2] == 13281191951274694749 && (z[1] < 10917124144477883021 || (z[1] == 10917124144477883021 && (z[0] < 4332616871279656263))))))) { var b uint64 @@ -349,19 +347,17 @@ func One() Element { // Halve sets z to z / 2 (mod p) func (z *Element) Halve() { - if z[0]&1 == 1 { + var carry uint64 + if z[0]&1 == 1 { // z = z + q - var carry uint64 z[0], carry = bits.Add64(z[0], 4332616871279656263, 0) z[1], carry = bits.Add64(z[1], 10917124144477883021, carry) z[2], carry = bits.Add64(z[2], 13281191951274694749, carry) z[3], _ = bits.Add64(z[3], 3486998266802970665, carry) } - // z = z >> 1 - z[0] = z[0]>>1 | z[1]<<63 z[1] = z[1]>>1 | z[2]<<63 z[2] = z[2]>>1 | z[3]<<63 @@ -369,8 +365,6 @@ func (z *Element) Halve() { } -// API with assembly impl - // Mul z = x * y mod q // see https://hackmd.io/@gnark/modular_multiplication func (z *Element) Mul(x, y 
*Element) *Element { @@ -486,7 +480,7 @@ func _mulGeneric(z, x, y *Element) { z[3], z[2] = madd3(m, 3486998266802970665, c[0], c[2], c[1]) } - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[3] < 3486998266802970665 || (z[3] == 3486998266802970665 && (z[2] < 13281191951274694749 || (z[2] == 13281191951274694749 && (z[1] < 10917124144477883021 || (z[1] == 10917124144477883021 && (z[0] < 4332616871279656263))))))) { var b uint64 @@ -495,57 +489,7 @@ func _mulGeneric(z, x, y *Element) { z[2], b = bits.Sub64(z[2], 13281191951274694749, b) z[3], _ = bits.Sub64(z[3], 3486998266802970665, b) } -} - -func _mulWGeneric(z, x *Element, y uint64) { - - var t [4]uint64 - { - // round 0 - c1, c0 := bits.Mul64(y, x[0]) - m := c0 * 9786893198990664585 - c2 := madd0(m, 4332616871279656263, c0) - c1, c0 = madd1(y, x[1], c1) - c2, t[0] = madd2(m, 10917124144477883021, c2, c0) - c1, c0 = madd1(y, x[2], c1) - c2, t[1] = madd2(m, 13281191951274694749, c2, c0) - c1, c0 = madd1(y, x[3], c1) - t[3], t[2] = madd3(m, 3486998266802970665, c0, c2, c1) - } - { - // round 1 - m := t[0] * 9786893198990664585 - c2 := madd0(m, 4332616871279656263, t[0]) - c2, t[0] = madd2(m, 10917124144477883021, c2, t[1]) - c2, t[1] = madd2(m, 13281191951274694749, c2, t[2]) - t[3], t[2] = madd2(m, 3486998266802970665, t[3], c2) - } - { - // round 2 - m := t[0] * 9786893198990664585 - c2 := madd0(m, 4332616871279656263, t[0]) - c2, t[0] = madd2(m, 10917124144477883021, c2, t[1]) - c2, t[1] = madd2(m, 13281191951274694749, c2, t[2]) - t[3], t[2] = madd2(m, 3486998266802970665, t[3], c2) - } - { - // round 3 - m := t[0] * 9786893198990664585 - c2 := madd0(m, 4332616871279656263, t[0]) - c2, z[0] = madd2(m, 10917124144477883021, c2, t[1]) - c2, z[1] = madd2(m, 13281191951274694749, c2, t[2]) - z[3], z[2] = madd2(m, 3486998266802970665, t[3], c2) - } - // if z > q → z -= q - // note: this is NOT constant time - if !(z[3] < 3486998266802970665 || (z[3] == 3486998266802970665 && 
(z[2] < 13281191951274694749 || (z[2] == 13281191951274694749 && (z[1] < 10917124144477883021 || (z[1] == 10917124144477883021 && (z[0] < 4332616871279656263))))))) { - var b uint64 - z[0], b = bits.Sub64(z[0], 4332616871279656263, 0) - z[1], b = bits.Sub64(z[1], 10917124144477883021, b) - z[2], b = bits.Sub64(z[2], 13281191951274694749, b) - z[3], _ = bits.Sub64(z[3], 3486998266802970665, b) - } } func _fromMontGeneric(z *Element) { @@ -588,7 +532,7 @@ func _fromMontGeneric(z *Element) { z[3] = C } - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[3] < 3486998266802970665 || (z[3] == 3486998266802970665 && (z[2] < 13281191951274694749 || (z[2] == 13281191951274694749 && (z[1] < 10917124144477883021 || (z[1] == 10917124144477883021 && (z[0] < 4332616871279656263))))))) { var b uint64 @@ -607,7 +551,7 @@ func _addGeneric(z, x, y *Element) { z[2], carry = bits.Add64(x[2], y[2], carry) z[3], _ = bits.Add64(x[3], y[3], carry) - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[3] < 3486998266802970665 || (z[3] == 3486998266802970665 && (z[2] < 13281191951274694749 || (z[2] == 13281191951274694749 && (z[1] < 10917124144477883021 || (z[1] == 10917124144477883021 && (z[0] < 4332616871279656263))))))) { var b uint64 @@ -626,7 +570,7 @@ func _doubleGeneric(z, x *Element) { z[2], carry = bits.Add64(x[2], x[2], carry) z[3], _ = bits.Add64(x[3], x[3], carry) - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[3] < 3486998266802970665 || (z[3] == 3486998266802970665 && (z[2] < 13281191951274694749 || (z[2] == 13281191951274694749 && (z[1] < 10917124144477883021 || (z[1] == 10917124144477883021 && (z[0] < 4332616871279656263))))))) { var b uint64 @@ -666,7 +610,7 @@ func _negGeneric(z, x *Element) { func _reduceGeneric(z *Element) { - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[3] < 3486998266802970665 || (z[3] == 
3486998266802970665 && (z[2] < 13281191951274694749 || (z[2] == 13281191951274694749 && (z[1] < 10917124144477883021 || (z[1] == 10917124144477883021 && (z[0] < 4332616871279656263))))))) { var b uint64 @@ -757,18 +701,30 @@ func (z *Element) BitLen() int { return bits.Len64(z[0]) } -// Exp z = x^exponent mod q -func (z *Element) Exp(x Element, exponent *big.Int) *Element { - var bZero big.Int - if exponent.Cmp(&bZero) == 0 { +// Exp z = xᵏ mod q +func (z *Element) Exp(x Element, k *big.Int) *Element { + if k.IsUint64() && k.Uint64() == 0 { return z.SetOne() } + e := k + if k.Sign() == -1 { + // negative k, we invert + // if k < 0: xᵏ mod q == (x⁻¹)ᵏ mod q + x.Inverse(&x) + + // we negate k in a temp big.Int since + // Int.Bit(_) of k and -k is different + e = bigIntPool.Get().(*big.Int) + defer bigIntPool.Put(e) + e.Neg(k) + } + z.Set(&x) - for i := exponent.BitLen() - 2; i >= 0; i-- { + for i := e.BitLen() - 2; i >= 0; i-- { z.Square(z) - if exponent.Bit(i) == 1 { + if e.Bit(i) == 1 { z.Mul(z, &x) } } @@ -1230,14 +1186,10 @@ func (z *Element) Inverse(x *Element) *Element { return z } -var qMinusTwo *big.Int //test routines can set this to an incorrect value to fail whenever inverseExp was triggered - -// inverseExp is a fallback in case the inversion algorithm failed +// inverseExp computes z = x⁻¹ mod q = x**(q-2) mod q func (z *Element) inverseExp(x *Element) *Element { - if qMinusTwo == nil { - qMinusTwo = Modulus() - qMinusTwo.Sub(qMinusTwo, big.NewInt(2)) - } + qMinusTwo := Modulus() + qMinusTwo.Sub(qMinusTwo, big.NewInt(2)) return z.Exp(*x, qMinusTwo) } @@ -1279,6 +1231,8 @@ func (z *Element) linearComb(x *Element, xC int64, y *Element, yC int64) { // montReduceSigned z = (xHi * r + x) * r⁻¹ using the SOS algorithm // Requires |xHi| < 2⁶³. Most significant bit of xHi is the sign bit. func (z *Element) montReduceSigned(x *Element, xHi uint64) { + // Used for Montgomery reduction. 
(qInvNeg) q + r'.r = 1, i.e., qInvNeg = - q⁻¹ mod r + const qInvNegLsw uint64 = 9786893198990664585 const signBitRemover = ^signBitSelector neg := xHi&signBitSelector != 0 @@ -1338,7 +1292,7 @@ func (z *Element) montReduceSigned(x *Element, xHi uint64) { z[3], z[2] = madd2(m, qElementWord3, t[i+3], C) } - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[3] < 3486998266802970665 || (z[3] == 3486998266802970665 && (z[2] < 13281191951274694749 || (z[2] == 13281191951274694749 && (z[1] < 10917124144477883021 || (z[1] == 10917124144477883021 && (z[0] < 4332616871279656263))))))) { var b uint64 @@ -1388,7 +1342,7 @@ func (z *Element) montReduceSignedSimpleButSlow(x *Element, xHi uint64) { z[2], c = bits.Add64(z[2], 0, c) z[3], _ = bits.Add64(z[3], 0, c) - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[3] < 3486998266802970665 || (z[3] == 3486998266802970665 && (z[2] < 13281191951274694749 || (z[2] == 13281191951274694749 && (z[1] < 10917124144477883021 || (z[1] == 10917124144477883021 && (z[0] < 4332616871279656263))))))) { var b uint64 @@ -1433,6 +1387,57 @@ func (z *Element) mulWSigned(x *Element, y int64) { } } +func _mulWGeneric(z, x *Element, y uint64) { + + var t [4]uint64 + { + // round 0 + c1, c0 := bits.Mul64(y, x[0]) + m := c0 * 9786893198990664585 + c2 := madd0(m, 4332616871279656263, c0) + c1, c0 = madd1(y, x[1], c1) + c2, t[0] = madd2(m, 10917124144477883021, c2, c0) + c1, c0 = madd1(y, x[2], c1) + c2, t[1] = madd2(m, 13281191951274694749, c2, c0) + c1, c0 = madd1(y, x[3], c1) + t[3], t[2] = madd3(m, 3486998266802970665, c0, c2, c1) + } + { + // round 1 + m := t[0] * 9786893198990664585 + c2 := madd0(m, 4332616871279656263, t[0]) + c2, t[0] = madd2(m, 10917124144477883021, c2, t[1]) + c2, t[1] = madd2(m, 13281191951274694749, c2, t[2]) + t[3], t[2] = madd2(m, 3486998266802970665, t[3], c2) + } + { + // round 2 + m := t[0] * 9786893198990664585 + c2 := madd0(m, 4332616871279656263, 
t[0]) + c2, t[0] = madd2(m, 10917124144477883021, c2, t[1]) + c2, t[1] = madd2(m, 13281191951274694749, c2, t[2]) + t[3], t[2] = madd2(m, 3486998266802970665, t[3], c2) + } + { + // round 3 + m := t[0] * 9786893198990664585 + c2 := madd0(m, 4332616871279656263, t[0]) + c2, z[0] = madd2(m, 10917124144477883021, c2, t[1]) + c2, z[1] = madd2(m, 13281191951274694749, c2, t[2]) + z[3], z[2] = madd2(m, 3486998266802970665, t[3], c2) + } + + // if z >= q → z -= q + // note: this is NOT constant time + if !(z[3] < 3486998266802970665 || (z[3] == 3486998266802970665 && (z[2] < 13281191951274694749 || (z[2] == 13281191951274694749 && (z[1] < 10917124144477883021 || (z[1] == 10917124144477883021 && (z[0] < 4332616871279656263))))))) { + var b uint64 + z[0], b = bits.Sub64(z[0], 4332616871279656263, 0) + z[1], b = bits.Sub64(z[1], 10917124144477883021, b) + z[2], b = bits.Sub64(z[2], 13281191951274694749, b) + z[3], _ = bits.Sub64(z[3], 3486998266802970665, b) + } +} + func (z *Element) neg(x *Element, xHi uint64) uint64 { var b uint64 diff --git a/ecc/bn254/fp/element_ops_noasm.go b/ecc/bn254/fp/element_ops_noasm.go index fec6289183..7d02396a3f 100644 --- a/ecc/bn254/fp/element_ops_noasm.go +++ b/ecc/bn254/fp/element_ops_noasm.go @@ -46,7 +46,6 @@ func MulBy13(x *Element) { func Butterfly(a, b *Element) { _butterflyGeneric(a, b) } - func mul(z, x, y *Element) { _mulGeneric(z, x, y) } diff --git a/ecc/bn254/fp/element_test.go b/ecc/bn254/fp/element_test.go index aef9afcc10..5a429fe538 100644 --- a/ecc/bn254/fp/element_test.go +++ b/ecc/bn254/fp/element_test.go @@ -22,10 +22,12 @@ import ( "fmt" "math/big" "math/bits" + + "github.com/consensys/gnark-crypto/field" mrand "math/rand" + "testing" - "github.com/consensys/gnark-crypto/field" "github.com/leanovate/gopter" ggen "github.com/leanovate/gopter/gen" "github.com/leanovate/gopter/prop" @@ -320,7 +322,6 @@ func init() { a[3]-- staticTestValues = append(staticTestValues, a) } - { a := qElement a[3]-- @@ -334,6 +335,12 @@ func 
init() { staticTestValues = append(staticTestValues, a) } + { + a := qElement + a[3] = 0 + staticTestValues = append(staticTestValues, a) + } + } func TestElementReduce(t *testing.T) { @@ -471,7 +478,6 @@ func TestElementInverseExp(t *testing.T) { properties = gopter.NewProperties(parameters) properties.Property("inv(0) == 0", prop.ForAll(invMatchExp, ggen.OneConstOf(testPairElement{}))) properties.TestingRun(t, gopter.ConsoleReporter(false)) - // if we have ADX instruction enabled, test both path in assembly if supportAdx { t.Log("disabling ADX") @@ -1418,8 +1424,8 @@ func TestElementSquare(t *testing.T) { specialValueTest() // if we have ADX instruction enabled, test both path in assembly if supportAdx { - supportAdx = false t.Log("disabling ADX") + supportAdx = false properties.TestingRun(t, gopter.ConsoleReporter(false)) specialValueTest() supportAdx = true @@ -1497,8 +1503,8 @@ func TestElementInverse(t *testing.T) { specialValueTest() // if we have ADX instruction enabled, test both path in assembly if supportAdx { - supportAdx = false t.Log("disabling ADX") + supportAdx = false properties.TestingRun(t, gopter.ConsoleReporter(false)) specialValueTest() supportAdx = true @@ -1576,8 +1582,8 @@ func TestElementSqrt(t *testing.T) { specialValueTest() // if we have ADX instruction enabled, test both path in assembly if supportAdx { - supportAdx = false t.Log("disabling ADX") + supportAdx = false properties.TestingRun(t, gopter.ConsoleReporter(false)) specialValueTest() supportAdx = true @@ -1672,8 +1678,8 @@ func TestElementDouble(t *testing.T) { specialValueTest() // if we have ADX instruction enabled, test both path in assembly if supportAdx { - supportAdx = false t.Log("disabling ADX") + supportAdx = false properties.TestingRun(t, gopter.ConsoleReporter(false)) specialValueTest() supportAdx = true @@ -1768,8 +1774,8 @@ func TestElementNeg(t *testing.T) { specialValueTest() // if we have ADX instruction enabled, test both path in assembly if supportAdx { - 
supportAdx = false t.Log("disabling ADX") + supportAdx = false properties.TestingRun(t, gopter.ConsoleReporter(false)) specialValueTest() supportAdx = true @@ -2104,6 +2110,134 @@ func TestElementSetInterface(t *testing.T) { properties.TestingRun(t, gopter.ConsoleReporter(false)) } +func TestElementNegativeExp(t *testing.T) { + t.Parallel() + + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := gen() + + properties.Property("x⁻ᵏ == 1/xᵏ", prop.ForAll( + func(a, b testPairElement) bool { + + var nb, d, e big.Int + nb.Neg(&b.bigint) + + var c Element + c.Exp(a.element, &nb) + + d.Exp(&a.bigint, &nb, Modulus()) + + return c.FromMont().ToBigInt(&e).Cmp(&d) == 0 + }, + genA, genA, + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) +} + +func TestElementBatchInvert(t *testing.T) { + assert := require.New(t) + + t.Parallel() + + // ensure batchInvert([x]) == invert(x) + for i := int64(-1); i <= 2; i++ { + var e, eInv Element + e.SetInt64(i) + eInv.Inverse(&e) + + a := []Element{e} + aInv := BatchInvert(a) + + assert.True(aInv[0].Equal(&eInv), "batchInvert != invert") + + } + + // test x * x⁻¹ == 1 + tData := [][]int64{ + {-1, 1, 2, 3}, + {0, -1, 1, 2, 3, 0}, + {0, -1, 1, 0, 2, 3, 0}, + {-1, 1, 0, 2, 3}, + {0, 0, 1}, + {1, 0, 0}, + {0, 0, 0}, + } + + for _, t := range tData { + a := make([]Element, len(t)) + for i := 0; i < len(a); i++ { + a[i].SetInt64(t[i]) + } + + aInv := BatchInvert(a) + + assert.True(len(aInv) == len(a)) + + for i := 0; i < len(a); i++ { + if a[i].IsZero() { + assert.True(aInv[i].IsZero(), "0⁻¹ != 0") + } else { + assert.True(a[i].Mul(&a[i], &aInv[i]).IsOne(), "x * x⁻¹ != 1") + } + } + } + + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } 
+ + properties := gopter.NewProperties(parameters) + + genA := gen() + + properties.Property("batchInvert --> x * x⁻¹ == 1", prop.ForAll( + func(tp testPairElement, r uint8) bool { + + a := make([]Element, r) + if r != 0 { + a[0] = tp.element + + } + one := One() + for i := 1; i < len(a); i++ { + a[i].Add(&a[i-1], &one) + } + + aInv := BatchInvert(a) + + assert.True(len(aInv) == len(a)) + + for i := 0; i < len(a); i++ { + if a[i].IsZero() { + if !aInv[i].IsZero() { + return false + } + } else { + if !a[i].Mul(&a[i], &aInv[i]).IsOne() { + return false + } + } + } + return true + }, + genA, ggen.UInt8(), + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) +} + func TestElementFromMont(t *testing.T) { t.Parallel() @@ -2286,8 +2420,6 @@ func genFull() gopter.Gen { } } -// Some utils - func (z *Element) matchVeryBigInt(aHi uint64, aInt *big.Int) error { var modulus big.Int var aIntMod big.Int diff --git a/ecc/bn254/fr/element.go b/ecc/bn254/fr/element.go index 63b3810cbf..d239996cc5 100644 --- a/ecc/bn254/fr/element.go +++ b/ecc/bn254/fr/element.go @@ -75,9 +75,6 @@ var qElement = Element{ qElementWord3, } -// Used for Montgomery reduction. 
(qInvNeg) q + r'.r = 1, i.e., qInvNeg = - q⁻¹ mod r -const qInvNegLsw uint64 = 14042775128853446655 - // rSquare var rSquare = Element{ 1997599621687373223, @@ -93,7 +90,8 @@ var bigIntPool = sync.Pool{ } func init() { - _modulus.SetString("21888242871839275222246405745257275088548364400416034343698204186575808495617", 10) + // base10: 21888242871839275222246405745257275088548364400416034343698204186575808495617 + _modulus.SetString("30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000001", 16) } // NewElement returns a new Element from a uint64 value @@ -327,7 +325,7 @@ func (z *Element) SetRandom() (*Element, error) { z[3] = binary.BigEndian.Uint64(bytes[24:32]) z[3] %= 3486998266802970665 - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[3] < 3486998266802970665 || (z[3] == 3486998266802970665 && (z[2] < 13281191951274694749 || (z[2] == 13281191951274694749 && (z[1] < 2896914383306846353 || (z[1] == 2896914383306846353 && (z[0] < 4891460686036598785))))))) { var b uint64 @@ -349,19 +347,17 @@ func One() Element { // Halve sets z to z / 2 (mod p) func (z *Element) Halve() { - if z[0]&1 == 1 { + var carry uint64 + if z[0]&1 == 1 { // z = z + q - var carry uint64 z[0], carry = bits.Add64(z[0], 4891460686036598785, 0) z[1], carry = bits.Add64(z[1], 2896914383306846353, carry) z[2], carry = bits.Add64(z[2], 13281191951274694749, carry) z[3], _ = bits.Add64(z[3], 3486998266802970665, carry) } - // z = z >> 1 - z[0] = z[0]>>1 | z[1]<<63 z[1] = z[1]>>1 | z[2]<<63 z[2] = z[2]>>1 | z[3]<<63 @@ -369,8 +365,6 @@ func (z *Element) Halve() { } -// API with assembly impl - // Mul z = x * y mod q // see https://hackmd.io/@gnark/modular_multiplication func (z *Element) Mul(x, y *Element) *Element { @@ -486,7 +480,7 @@ func _mulGeneric(z, x, y *Element) { z[3], z[2] = madd3(m, 3486998266802970665, c[0], c[2], c[1]) } - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[3] < 3486998266802970665 || 
(z[3] == 3486998266802970665 && (z[2] < 13281191951274694749 || (z[2] == 13281191951274694749 && (z[1] < 2896914383306846353 || (z[1] == 2896914383306846353 && (z[0] < 4891460686036598785))))))) { var b uint64 @@ -495,57 +489,7 @@ func _mulGeneric(z, x, y *Element) { z[2], b = bits.Sub64(z[2], 13281191951274694749, b) z[3], _ = bits.Sub64(z[3], 3486998266802970665, b) } -} - -func _mulWGeneric(z, x *Element, y uint64) { - - var t [4]uint64 - { - // round 0 - c1, c0 := bits.Mul64(y, x[0]) - m := c0 * 14042775128853446655 - c2 := madd0(m, 4891460686036598785, c0) - c1, c0 = madd1(y, x[1], c1) - c2, t[0] = madd2(m, 2896914383306846353, c2, c0) - c1, c0 = madd1(y, x[2], c1) - c2, t[1] = madd2(m, 13281191951274694749, c2, c0) - c1, c0 = madd1(y, x[3], c1) - t[3], t[2] = madd3(m, 3486998266802970665, c0, c2, c1) - } - { - // round 1 - m := t[0] * 14042775128853446655 - c2 := madd0(m, 4891460686036598785, t[0]) - c2, t[0] = madd2(m, 2896914383306846353, c2, t[1]) - c2, t[1] = madd2(m, 13281191951274694749, c2, t[2]) - t[3], t[2] = madd2(m, 3486998266802970665, t[3], c2) - } - { - // round 2 - m := t[0] * 14042775128853446655 - c2 := madd0(m, 4891460686036598785, t[0]) - c2, t[0] = madd2(m, 2896914383306846353, c2, t[1]) - c2, t[1] = madd2(m, 13281191951274694749, c2, t[2]) - t[3], t[2] = madd2(m, 3486998266802970665, t[3], c2) - } - { - // round 3 - m := t[0] * 14042775128853446655 - c2 := madd0(m, 4891460686036598785, t[0]) - c2, z[0] = madd2(m, 2896914383306846353, c2, t[1]) - c2, z[1] = madd2(m, 13281191951274694749, c2, t[2]) - z[3], z[2] = madd2(m, 3486998266802970665, t[3], c2) - } - // if z > q → z -= q - // note: this is NOT constant time - if !(z[3] < 3486998266802970665 || (z[3] == 3486998266802970665 && (z[2] < 13281191951274694749 || (z[2] == 13281191951274694749 && (z[1] < 2896914383306846353 || (z[1] == 2896914383306846353 && (z[0] < 4891460686036598785))))))) { - var b uint64 - z[0], b = bits.Sub64(z[0], 4891460686036598785, 0) - z[1], b = bits.Sub64(z[1], 
2896914383306846353, b) - z[2], b = bits.Sub64(z[2], 13281191951274694749, b) - z[3], _ = bits.Sub64(z[3], 3486998266802970665, b) - } } func _fromMontGeneric(z *Element) { @@ -588,7 +532,7 @@ func _fromMontGeneric(z *Element) { z[3] = C } - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[3] < 3486998266802970665 || (z[3] == 3486998266802970665 && (z[2] < 13281191951274694749 || (z[2] == 13281191951274694749 && (z[1] < 2896914383306846353 || (z[1] == 2896914383306846353 && (z[0] < 4891460686036598785))))))) { var b uint64 @@ -607,7 +551,7 @@ func _addGeneric(z, x, y *Element) { z[2], carry = bits.Add64(x[2], y[2], carry) z[3], _ = bits.Add64(x[3], y[3], carry) - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[3] < 3486998266802970665 || (z[3] == 3486998266802970665 && (z[2] < 13281191951274694749 || (z[2] == 13281191951274694749 && (z[1] < 2896914383306846353 || (z[1] == 2896914383306846353 && (z[0] < 4891460686036598785))))))) { var b uint64 @@ -626,7 +570,7 @@ func _doubleGeneric(z, x *Element) { z[2], carry = bits.Add64(x[2], x[2], carry) z[3], _ = bits.Add64(x[3], x[3], carry) - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[3] < 3486998266802970665 || (z[3] == 3486998266802970665 && (z[2] < 13281191951274694749 || (z[2] == 13281191951274694749 && (z[1] < 2896914383306846353 || (z[1] == 2896914383306846353 && (z[0] < 4891460686036598785))))))) { var b uint64 @@ -666,7 +610,7 @@ func _negGeneric(z, x *Element) { func _reduceGeneric(z *Element) { - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[3] < 3486998266802970665 || (z[3] == 3486998266802970665 && (z[2] < 13281191951274694749 || (z[2] == 13281191951274694749 && (z[1] < 2896914383306846353 || (z[1] == 2896914383306846353 && (z[0] < 4891460686036598785))))))) { var b uint64 @@ -757,18 +701,30 @@ func (z *Element) BitLen() int { return bits.Len64(z[0]) } 
-// Exp z = x^exponent mod q -func (z *Element) Exp(x Element, exponent *big.Int) *Element { - var bZero big.Int - if exponent.Cmp(&bZero) == 0 { +// Exp z = xᵏ mod q +func (z *Element) Exp(x Element, k *big.Int) *Element { + if k.IsUint64() && k.Uint64() == 0 { return z.SetOne() } + e := k + if k.Sign() == -1 { + // negative k, we invert + // if k < 0: xᵏ mod q == (x⁻¹)ᵏ mod q + x.Inverse(&x) + + // we negate k in a temp big.Int since + // Int.Bit(_) of k and -k is different + e = bigIntPool.Get().(*big.Int) + defer bigIntPool.Put(e) + e.Neg(k) + } + z.Set(&x) - for i := exponent.BitLen() - 2; i >= 0; i-- { + for i := e.BitLen() - 2; i >= 0; i-- { z.Square(z) - if exponent.Bit(i) == 1 { + if e.Bit(i) == 1 { z.Mul(z, &x) } } @@ -1282,14 +1238,10 @@ func (z *Element) Inverse(x *Element) *Element { return z } -var qMinusTwo *big.Int //test routines can set this to an incorrect value to fail whenever inverseExp was triggered - -// inverseExp is a fallback in case the inversion algorithm failed +// inverseExp computes z = x⁻¹ mod q = x**(q-2) mod q func (z *Element) inverseExp(x *Element) *Element { - if qMinusTwo == nil { - qMinusTwo = Modulus() - qMinusTwo.Sub(qMinusTwo, big.NewInt(2)) - } + qMinusTwo := Modulus() + qMinusTwo.Sub(qMinusTwo, big.NewInt(2)) return z.Exp(*x, qMinusTwo) } @@ -1331,6 +1283,8 @@ func (z *Element) linearComb(x *Element, xC int64, y *Element, yC int64) { // montReduceSigned z = (xHi * r + x) * r⁻¹ using the SOS algorithm // Requires |xHi| < 2⁶³. Most significant bit of xHi is the sign bit. func (z *Element) montReduceSigned(x *Element, xHi uint64) { + // Used for Montgomery reduction. 
(qInvNeg) q + r'.r = 1, i.e., qInvNeg = - q⁻¹ mod r + const qInvNegLsw uint64 = 14042775128853446655 const signBitRemover = ^signBitSelector neg := xHi&signBitSelector != 0 @@ -1390,7 +1344,7 @@ func (z *Element) montReduceSigned(x *Element, xHi uint64) { z[3], z[2] = madd2(m, qElementWord3, t[i+3], C) } - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[3] < 3486998266802970665 || (z[3] == 3486998266802970665 && (z[2] < 13281191951274694749 || (z[2] == 13281191951274694749 && (z[1] < 2896914383306846353 || (z[1] == 2896914383306846353 && (z[0] < 4891460686036598785))))))) { var b uint64 @@ -1440,7 +1394,7 @@ func (z *Element) montReduceSignedSimpleButSlow(x *Element, xHi uint64) { z[2], c = bits.Add64(z[2], 0, c) z[3], _ = bits.Add64(z[3], 0, c) - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[3] < 3486998266802970665 || (z[3] == 3486998266802970665 && (z[2] < 13281191951274694749 || (z[2] == 13281191951274694749 && (z[1] < 2896914383306846353 || (z[1] == 2896914383306846353 && (z[0] < 4891460686036598785))))))) { var b uint64 @@ -1485,6 +1439,57 @@ func (z *Element) mulWSigned(x *Element, y int64) { } } +func _mulWGeneric(z, x *Element, y uint64) { + + var t [4]uint64 + { + // round 0 + c1, c0 := bits.Mul64(y, x[0]) + m := c0 * 14042775128853446655 + c2 := madd0(m, 4891460686036598785, c0) + c1, c0 = madd1(y, x[1], c1) + c2, t[0] = madd2(m, 2896914383306846353, c2, c0) + c1, c0 = madd1(y, x[2], c1) + c2, t[1] = madd2(m, 13281191951274694749, c2, c0) + c1, c0 = madd1(y, x[3], c1) + t[3], t[2] = madd3(m, 3486998266802970665, c0, c2, c1) + } + { + // round 1 + m := t[0] * 14042775128853446655 + c2 := madd0(m, 4891460686036598785, t[0]) + c2, t[0] = madd2(m, 2896914383306846353, c2, t[1]) + c2, t[1] = madd2(m, 13281191951274694749, c2, t[2]) + t[3], t[2] = madd2(m, 3486998266802970665, t[3], c2) + } + { + // round 2 + m := t[0] * 14042775128853446655 + c2 := madd0(m, 4891460686036598785, 
t[0]) + c2, t[0] = madd2(m, 2896914383306846353, c2, t[1]) + c2, t[1] = madd2(m, 13281191951274694749, c2, t[2]) + t[3], t[2] = madd2(m, 3486998266802970665, t[3], c2) + } + { + // round 3 + m := t[0] * 14042775128853446655 + c2 := madd0(m, 4891460686036598785, t[0]) + c2, z[0] = madd2(m, 2896914383306846353, c2, t[1]) + c2, z[1] = madd2(m, 13281191951274694749, c2, t[2]) + z[3], z[2] = madd2(m, 3486998266802970665, t[3], c2) + } + + // if z >= q → z -= q + // note: this is NOT constant time + if !(z[3] < 3486998266802970665 || (z[3] == 3486998266802970665 && (z[2] < 13281191951274694749 || (z[2] == 13281191951274694749 && (z[1] < 2896914383306846353 || (z[1] == 2896914383306846353 && (z[0] < 4891460686036598785))))))) { + var b uint64 + z[0], b = bits.Sub64(z[0], 4891460686036598785, 0) + z[1], b = bits.Sub64(z[1], 2896914383306846353, b) + z[2], b = bits.Sub64(z[2], 13281191951274694749, b) + z[3], _ = bits.Sub64(z[3], 3486998266802970665, b) + } +} + func (z *Element) neg(x *Element, xHi uint64) uint64 { var b uint64 diff --git a/ecc/bn254/fr/element_ops_noasm.go b/ecc/bn254/fr/element_ops_noasm.go index ec1fac18d6..7891ac9cd3 100644 --- a/ecc/bn254/fr/element_ops_noasm.go +++ b/ecc/bn254/fr/element_ops_noasm.go @@ -46,7 +46,6 @@ func MulBy13(x *Element) { func Butterfly(a, b *Element) { _butterflyGeneric(a, b) } - func mul(z, x, y *Element) { _mulGeneric(z, x, y) } diff --git a/ecc/bn254/fr/element_test.go b/ecc/bn254/fr/element_test.go index e648e922b3..aa4e475d03 100644 --- a/ecc/bn254/fr/element_test.go +++ b/ecc/bn254/fr/element_test.go @@ -22,10 +22,12 @@ import ( "fmt" "math/big" "math/bits" + + "github.com/consensys/gnark-crypto/field" mrand "math/rand" + "testing" - "github.com/consensys/gnark-crypto/field" "github.com/leanovate/gopter" ggen "github.com/leanovate/gopter/gen" "github.com/leanovate/gopter/prop" @@ -320,7 +322,6 @@ func init() { a[3]-- staticTestValues = append(staticTestValues, a) } - { a := qElement a[3]-- @@ -334,6 +335,12 @@ func 
init() { staticTestValues = append(staticTestValues, a) } + { + a := qElement + a[3] = 0 + staticTestValues = append(staticTestValues, a) + } + } func TestElementReduce(t *testing.T) { @@ -471,7 +478,6 @@ func TestElementInverseExp(t *testing.T) { properties = gopter.NewProperties(parameters) properties.Property("inv(0) == 0", prop.ForAll(invMatchExp, ggen.OneConstOf(testPairElement{}))) properties.TestingRun(t, gopter.ConsoleReporter(false)) - // if we have ADX instruction enabled, test both path in assembly if supportAdx { t.Log("disabling ADX") @@ -1418,8 +1424,8 @@ func TestElementSquare(t *testing.T) { specialValueTest() // if we have ADX instruction enabled, test both path in assembly if supportAdx { - supportAdx = false t.Log("disabling ADX") + supportAdx = false properties.TestingRun(t, gopter.ConsoleReporter(false)) specialValueTest() supportAdx = true @@ -1497,8 +1503,8 @@ func TestElementInverse(t *testing.T) { specialValueTest() // if we have ADX instruction enabled, test both path in assembly if supportAdx { - supportAdx = false t.Log("disabling ADX") + supportAdx = false properties.TestingRun(t, gopter.ConsoleReporter(false)) specialValueTest() supportAdx = true @@ -1576,8 +1582,8 @@ func TestElementSqrt(t *testing.T) { specialValueTest() // if we have ADX instruction enabled, test both path in assembly if supportAdx { - supportAdx = false t.Log("disabling ADX") + supportAdx = false properties.TestingRun(t, gopter.ConsoleReporter(false)) specialValueTest() supportAdx = true @@ -1672,8 +1678,8 @@ func TestElementDouble(t *testing.T) { specialValueTest() // if we have ADX instruction enabled, test both path in assembly if supportAdx { - supportAdx = false t.Log("disabling ADX") + supportAdx = false properties.TestingRun(t, gopter.ConsoleReporter(false)) specialValueTest() supportAdx = true @@ -1768,8 +1774,8 @@ func TestElementNeg(t *testing.T) { specialValueTest() // if we have ADX instruction enabled, test both path in assembly if supportAdx { - 
supportAdx = false t.Log("disabling ADX") + supportAdx = false properties.TestingRun(t, gopter.ConsoleReporter(false)) specialValueTest() supportAdx = true @@ -2104,6 +2110,134 @@ func TestElementSetInterface(t *testing.T) { properties.TestingRun(t, gopter.ConsoleReporter(false)) } +func TestElementNegativeExp(t *testing.T) { + t.Parallel() + + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := gen() + + properties.Property("x⁻ᵏ == 1/xᵏ", prop.ForAll( + func(a, b testPairElement) bool { + + var nb, d, e big.Int + nb.Neg(&b.bigint) + + var c Element + c.Exp(a.element, &nb) + + d.Exp(&a.bigint, &nb, Modulus()) + + return c.FromMont().ToBigInt(&e).Cmp(&d) == 0 + }, + genA, genA, + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) +} + +func TestElementBatchInvert(t *testing.T) { + assert := require.New(t) + + t.Parallel() + + // ensure batchInvert([x]) == invert(x) + for i := int64(-1); i <= 2; i++ { + var e, eInv Element + e.SetInt64(i) + eInv.Inverse(&e) + + a := []Element{e} + aInv := BatchInvert(a) + + assert.True(aInv[0].Equal(&eInv), "batchInvert != invert") + + } + + // test x * x⁻¹ == 1 + tData := [][]int64{ + {-1, 1, 2, 3}, + {0, -1, 1, 2, 3, 0}, + {0, -1, 1, 0, 2, 3, 0}, + {-1, 1, 0, 2, 3}, + {0, 0, 1}, + {1, 0, 0}, + {0, 0, 0}, + } + + for _, t := range tData { + a := make([]Element, len(t)) + for i := 0; i < len(a); i++ { + a[i].SetInt64(t[i]) + } + + aInv := BatchInvert(a) + + assert.True(len(aInv) == len(a)) + + for i := 0; i < len(a); i++ { + if a[i].IsZero() { + assert.True(aInv[i].IsZero(), "0⁻¹ != 0") + } else { + assert.True(a[i].Mul(&a[i], &aInv[i]).IsOne(), "x * x⁻¹ != 1") + } + } + } + + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } 
+ + properties := gopter.NewProperties(parameters) + + genA := gen() + + properties.Property("batchInvert --> x * x⁻¹ == 1", prop.ForAll( + func(tp testPairElement, r uint8) bool { + + a := make([]Element, r) + if r != 0 { + a[0] = tp.element + + } + one := One() + for i := 1; i < len(a); i++ { + a[i].Add(&a[i-1], &one) + } + + aInv := BatchInvert(a) + + assert.True(len(aInv) == len(a)) + + for i := 0; i < len(a); i++ { + if a[i].IsZero() { + if !aInv[i].IsZero() { + return false + } + } else { + if !a[i].Mul(&a[i], &aInv[i]).IsOne() { + return false + } + } + } + return true + }, + genA, ggen.UInt8(), + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) +} + func TestElementFromMont(t *testing.T) { t.Parallel() @@ -2286,8 +2420,6 @@ func genFull() gopter.Gen { } } -// Some utils - func (z *Element) matchVeryBigInt(aHi uint64, aInt *big.Int) error { var modulus big.Int var aIntMod big.Int diff --git a/ecc/bn254/fr/mimc/mimc.go b/ecc/bn254/fr/mimc/mimc.go index 43cc82ec43..7d40164b6b 100644 --- a/ecc/bn254/fr/mimc/mimc.go +++ b/ecc/bn254/fr/mimc/mimc.go @@ -64,7 +64,7 @@ func NewMiMC() hash.Hash { // Reset resets the Hash to its initial state. func (d *digest) Reset() { d.data = nil - d.h = fr.Element{0, 0, 0, 0} + d.h.SetZero() } // Sum appends the current hash to b and returns the resulting slice. diff --git a/ecc/bw6-633/fp/element.go b/ecc/bw6-633/fp/element.go index 4bfe262ab7..e6f8ecd572 100644 --- a/ecc/bw6-633/fp/element.go +++ b/ecc/bw6-633/fp/element.go @@ -87,9 +87,6 @@ var qElement = Element{ qElementWord9, } -// Used for Montgomery reduction. 
(qInvNeg) q + r'.r = 1, i.e., qInvNeg = - q⁻¹ mod r -const qInvNegLsw uint64 = 13046692460116554043 - // rSquare var rSquare = Element{ 7358459907925294924, @@ -111,7 +108,8 @@ var bigIntPool = sync.Pool{ } func init() { - _modulus.SetString("20494478644167774678813387386538961497669590920908778075528754551012016751717791778743535050360001387419576570244406805463255765034468441182772056330021723098661967429339971741066259394985997", 10) + // base10: 20494478644167774678813387386538961497669590920908778075528754551012016751717791778743535050360001387419576570244406805463255765034468441182772056330021723098661967429339971741066259394985997 + _modulus.SetString("126633cc0f35f63fc1a174f01d72ab5a8fcd8c75d79d2c74e59769ad9bbda2f8152a6c0fadea490b8da9f5e83f57c497e0e8850edbda407d7b5ce7ab839c2253d369bd31147f73cd74916ea4570000d", 16) } // NewElement returns a new Element from a uint64 value @@ -405,7 +403,7 @@ func (z *Element) SetRandom() (*Element, error) { z[9] = binary.BigEndian.Uint64(bytes[72:80]) z[9] %= 82862755739295587 - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[9] < 82862755739295587 || (z[9] == 82862755739295587 && (z[8] < 18165857675053050549 || (z[8] == 18165857675053050549 && (z[7] < 12176845843281334983 || (z[7] == 12176845843281334983 && (z[6] < 5645674015335635503 || (z[6] == 5645674015335635503 && (z[5] < 9318693926755804304 || (z[5] == 9318693926755804304 && (z[4] < 13320134076191308873 || (z[4] == 13320134076191308873 && (z[3] < 9083347379620258823 || (z[3] == 9083347379620258823 && (z[2] < 15543556715411259941 || (z[2] == 15543556715411259941 && (z[1] < 4410884215886313276 || (z[1] == 4410884215886313276 && (z[0] < 15512955586897510413))))))))))))))))))) { var b uint64 @@ -433,10 +431,10 @@ func One() Element { // Halve sets z to z / 2 (mod p) func (z *Element) Halve() { - if z[0]&1 == 1 { + var carry uint64 + if z[0]&1 == 1 { // z = z + q - var carry uint64 z[0], carry = bits.Add64(z[0], 15512955586897510413, 
0) z[1], carry = bits.Add64(z[1], 4410884215886313276, carry) z[2], carry = bits.Add64(z[2], 15543556715411259941, carry) @@ -449,9 +447,7 @@ func (z *Element) Halve() { z[9], _ = bits.Add64(z[9], 82862755739295587, carry) } - // z = z >> 1 - z[0] = z[0]>>1 | z[1]<<63 z[1] = z[1]>>1 | z[2]<<63 z[2] = z[2]>>1 | z[3]<<63 @@ -465,8 +461,6 @@ func (z *Element) Halve() { } -// API with assembly impl - // Mul z = x * y mod q // see https://hackmd.io/@gnark/modular_multiplication func (z *Element) Mul(x, y *Element) *Element { @@ -786,7 +780,7 @@ func _mulGeneric(z, x, y *Element) { z[9], z[8] = madd3(m, 82862755739295587, c[0], c[2], c[1]) } - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[9] < 82862755739295587 || (z[9] == 82862755739295587 && (z[8] < 18165857675053050549 || (z[8] == 18165857675053050549 && (z[7] < 12176845843281334983 || (z[7] == 12176845843281334983 && (z[6] < 5645674015335635503 || (z[6] == 5645674015335635503 && (z[5] < 9318693926755804304 || (z[5] == 9318693926755804304 && (z[4] < 13320134076191308873 || (z[4] == 13320134076191308873 && (z[3] < 9083347379620258823 || (z[3] == 9083347379620258823 && (z[2] < 15543556715411259941 || (z[2] == 15543556715411259941 && (z[1] < 4410884215886313276 || (z[1] == 4410884215886313276 && (z[0] < 15512955586897510413))))))))))))))))))) { var b uint64 @@ -801,177 +795,7 @@ func _mulGeneric(z, x, y *Element) { z[8], b = bits.Sub64(z[8], 18165857675053050549, b) z[9], _ = bits.Sub64(z[9], 82862755739295587, b) } -} - -func _mulWGeneric(z, x *Element, y uint64) { - - var t [10]uint64 - { - // round 0 - c1, c0 := bits.Mul64(y, x[0]) - m := c0 * 13046692460116554043 - c2 := madd0(m, 15512955586897510413, c0) - c1, c0 = madd1(y, x[1], c1) - c2, t[0] = madd2(m, 4410884215886313276, c2, c0) - c1, c0 = madd1(y, x[2], c1) - c2, t[1] = madd2(m, 15543556715411259941, c2, c0) - c1, c0 = madd1(y, x[3], c1) - c2, t[2] = madd2(m, 9083347379620258823, c2, c0) - c1, c0 = madd1(y, x[4], c1) - 
c2, t[3] = madd2(m, 13320134076191308873, c2, c0) - c1, c0 = madd1(y, x[5], c1) - c2, t[4] = madd2(m, 9318693926755804304, c2, c0) - c1, c0 = madd1(y, x[6], c1) - c2, t[5] = madd2(m, 5645674015335635503, c2, c0) - c1, c0 = madd1(y, x[7], c1) - c2, t[6] = madd2(m, 12176845843281334983, c2, c0) - c1, c0 = madd1(y, x[8], c1) - c2, t[7] = madd2(m, 18165857675053050549, c2, c0) - c1, c0 = madd1(y, x[9], c1) - t[9], t[8] = madd3(m, 82862755739295587, c0, c2, c1) - } - { - // round 1 - m := t[0] * 13046692460116554043 - c2 := madd0(m, 15512955586897510413, t[0]) - c2, t[0] = madd2(m, 4410884215886313276, c2, t[1]) - c2, t[1] = madd2(m, 15543556715411259941, c2, t[2]) - c2, t[2] = madd2(m, 9083347379620258823, c2, t[3]) - c2, t[3] = madd2(m, 13320134076191308873, c2, t[4]) - c2, t[4] = madd2(m, 9318693926755804304, c2, t[5]) - c2, t[5] = madd2(m, 5645674015335635503, c2, t[6]) - c2, t[6] = madd2(m, 12176845843281334983, c2, t[7]) - c2, t[7] = madd2(m, 18165857675053050549, c2, t[8]) - t[9], t[8] = madd2(m, 82862755739295587, t[9], c2) - } - { - // round 2 - m := t[0] * 13046692460116554043 - c2 := madd0(m, 15512955586897510413, t[0]) - c2, t[0] = madd2(m, 4410884215886313276, c2, t[1]) - c2, t[1] = madd2(m, 15543556715411259941, c2, t[2]) - c2, t[2] = madd2(m, 9083347379620258823, c2, t[3]) - c2, t[3] = madd2(m, 13320134076191308873, c2, t[4]) - c2, t[4] = madd2(m, 9318693926755804304, c2, t[5]) - c2, t[5] = madd2(m, 5645674015335635503, c2, t[6]) - c2, t[6] = madd2(m, 12176845843281334983, c2, t[7]) - c2, t[7] = madd2(m, 18165857675053050549, c2, t[8]) - t[9], t[8] = madd2(m, 82862755739295587, t[9], c2) - } - { - // round 3 - m := t[0] * 13046692460116554043 - c2 := madd0(m, 15512955586897510413, t[0]) - c2, t[0] = madd2(m, 4410884215886313276, c2, t[1]) - c2, t[1] = madd2(m, 15543556715411259941, c2, t[2]) - c2, t[2] = madd2(m, 9083347379620258823, c2, t[3]) - c2, t[3] = madd2(m, 13320134076191308873, c2, t[4]) - c2, t[4] = madd2(m, 9318693926755804304, c2, t[5]) - c2, 
t[5] = madd2(m, 5645674015335635503, c2, t[6]) - c2, t[6] = madd2(m, 12176845843281334983, c2, t[7]) - c2, t[7] = madd2(m, 18165857675053050549, c2, t[8]) - t[9], t[8] = madd2(m, 82862755739295587, t[9], c2) - } - { - // round 4 - m := t[0] * 13046692460116554043 - c2 := madd0(m, 15512955586897510413, t[0]) - c2, t[0] = madd2(m, 4410884215886313276, c2, t[1]) - c2, t[1] = madd2(m, 15543556715411259941, c2, t[2]) - c2, t[2] = madd2(m, 9083347379620258823, c2, t[3]) - c2, t[3] = madd2(m, 13320134076191308873, c2, t[4]) - c2, t[4] = madd2(m, 9318693926755804304, c2, t[5]) - c2, t[5] = madd2(m, 5645674015335635503, c2, t[6]) - c2, t[6] = madd2(m, 12176845843281334983, c2, t[7]) - c2, t[7] = madd2(m, 18165857675053050549, c2, t[8]) - t[9], t[8] = madd2(m, 82862755739295587, t[9], c2) - } - { - // round 5 - m := t[0] * 13046692460116554043 - c2 := madd0(m, 15512955586897510413, t[0]) - c2, t[0] = madd2(m, 4410884215886313276, c2, t[1]) - c2, t[1] = madd2(m, 15543556715411259941, c2, t[2]) - c2, t[2] = madd2(m, 9083347379620258823, c2, t[3]) - c2, t[3] = madd2(m, 13320134076191308873, c2, t[4]) - c2, t[4] = madd2(m, 9318693926755804304, c2, t[5]) - c2, t[5] = madd2(m, 5645674015335635503, c2, t[6]) - c2, t[6] = madd2(m, 12176845843281334983, c2, t[7]) - c2, t[7] = madd2(m, 18165857675053050549, c2, t[8]) - t[9], t[8] = madd2(m, 82862755739295587, t[9], c2) - } - { - // round 6 - m := t[0] * 13046692460116554043 - c2 := madd0(m, 15512955586897510413, t[0]) - c2, t[0] = madd2(m, 4410884215886313276, c2, t[1]) - c2, t[1] = madd2(m, 15543556715411259941, c2, t[2]) - c2, t[2] = madd2(m, 9083347379620258823, c2, t[3]) - c2, t[3] = madd2(m, 13320134076191308873, c2, t[4]) - c2, t[4] = madd2(m, 9318693926755804304, c2, t[5]) - c2, t[5] = madd2(m, 5645674015335635503, c2, t[6]) - c2, t[6] = madd2(m, 12176845843281334983, c2, t[7]) - c2, t[7] = madd2(m, 18165857675053050549, c2, t[8]) - t[9], t[8] = madd2(m, 82862755739295587, t[9], c2) - } - { - // round 7 - m := t[0] * 
13046692460116554043 - c2 := madd0(m, 15512955586897510413, t[0]) - c2, t[0] = madd2(m, 4410884215886313276, c2, t[1]) - c2, t[1] = madd2(m, 15543556715411259941, c2, t[2]) - c2, t[2] = madd2(m, 9083347379620258823, c2, t[3]) - c2, t[3] = madd2(m, 13320134076191308873, c2, t[4]) - c2, t[4] = madd2(m, 9318693926755804304, c2, t[5]) - c2, t[5] = madd2(m, 5645674015335635503, c2, t[6]) - c2, t[6] = madd2(m, 12176845843281334983, c2, t[7]) - c2, t[7] = madd2(m, 18165857675053050549, c2, t[8]) - t[9], t[8] = madd2(m, 82862755739295587, t[9], c2) - } - { - // round 8 - m := t[0] * 13046692460116554043 - c2 := madd0(m, 15512955586897510413, t[0]) - c2, t[0] = madd2(m, 4410884215886313276, c2, t[1]) - c2, t[1] = madd2(m, 15543556715411259941, c2, t[2]) - c2, t[2] = madd2(m, 9083347379620258823, c2, t[3]) - c2, t[3] = madd2(m, 13320134076191308873, c2, t[4]) - c2, t[4] = madd2(m, 9318693926755804304, c2, t[5]) - c2, t[5] = madd2(m, 5645674015335635503, c2, t[6]) - c2, t[6] = madd2(m, 12176845843281334983, c2, t[7]) - c2, t[7] = madd2(m, 18165857675053050549, c2, t[8]) - t[9], t[8] = madd2(m, 82862755739295587, t[9], c2) - } - { - // round 9 - m := t[0] * 13046692460116554043 - c2 := madd0(m, 15512955586897510413, t[0]) - c2, z[0] = madd2(m, 4410884215886313276, c2, t[1]) - c2, z[1] = madd2(m, 15543556715411259941, c2, t[2]) - c2, z[2] = madd2(m, 9083347379620258823, c2, t[3]) - c2, z[3] = madd2(m, 13320134076191308873, c2, t[4]) - c2, z[4] = madd2(m, 9318693926755804304, c2, t[5]) - c2, z[5] = madd2(m, 5645674015335635503, c2, t[6]) - c2, z[6] = madd2(m, 12176845843281334983, c2, t[7]) - c2, z[7] = madd2(m, 18165857675053050549, c2, t[8]) - z[9], z[8] = madd2(m, 82862755739295587, t[9], c2) - } - // if z > q → z -= q - // note: this is NOT constant time - if !(z[9] < 82862755739295587 || (z[9] == 82862755739295587 && (z[8] < 18165857675053050549 || (z[8] == 18165857675053050549 && (z[7] < 12176845843281334983 || (z[7] == 12176845843281334983 && (z[6] < 5645674015335635503 
|| (z[6] == 5645674015335635503 && (z[5] < 9318693926755804304 || (z[5] == 9318693926755804304 && (z[4] < 13320134076191308873 || (z[4] == 13320134076191308873 && (z[3] < 9083347379620258823 || (z[3] == 9083347379620258823 && (z[2] < 15543556715411259941 || (z[2] == 15543556715411259941 && (z[1] < 4410884215886313276 || (z[1] == 4410884215886313276 && (z[0] < 15512955586897510413))))))))))))))))))) { - var b uint64 - z[0], b = bits.Sub64(z[0], 15512955586897510413, 0) - z[1], b = bits.Sub64(z[1], 4410884215886313276, b) - z[2], b = bits.Sub64(z[2], 15543556715411259941, b) - z[3], b = bits.Sub64(z[3], 9083347379620258823, b) - z[4], b = bits.Sub64(z[4], 13320134076191308873, b) - z[5], b = bits.Sub64(z[5], 9318693926755804304, b) - z[6], b = bits.Sub64(z[6], 5645674015335635503, b) - z[7], b = bits.Sub64(z[7], 12176845843281334983, b) - z[8], b = bits.Sub64(z[8], 18165857675053050549, b) - z[9], _ = bits.Sub64(z[9], 82862755739295587, b) - } } func _fromMontGeneric(z *Element) { @@ -1128,7 +952,7 @@ func _fromMontGeneric(z *Element) { z[9] = C } - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[9] < 82862755739295587 || (z[9] == 82862755739295587 && (z[8] < 18165857675053050549 || (z[8] == 18165857675053050549 && (z[7] < 12176845843281334983 || (z[7] == 12176845843281334983 && (z[6] < 5645674015335635503 || (z[6] == 5645674015335635503 && (z[5] < 9318693926755804304 || (z[5] == 9318693926755804304 && (z[4] < 13320134076191308873 || (z[4] == 13320134076191308873 && (z[3] < 9083347379620258823 || (z[3] == 9083347379620258823 && (z[2] < 15543556715411259941 || (z[2] == 15543556715411259941 && (z[1] < 4410884215886313276 || (z[1] == 4410884215886313276 && (z[0] < 15512955586897510413))))))))))))))))))) { var b uint64 @@ -1159,7 +983,7 @@ func _addGeneric(z, x, y *Element) { z[8], carry = bits.Add64(x[8], y[8], carry) z[9], _ = bits.Add64(x[9], y[9], carry) - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT 
constant time if !(z[9] < 82862755739295587 || (z[9] == 82862755739295587 && (z[8] < 18165857675053050549 || (z[8] == 18165857675053050549 && (z[7] < 12176845843281334983 || (z[7] == 12176845843281334983 && (z[6] < 5645674015335635503 || (z[6] == 5645674015335635503 && (z[5] < 9318693926755804304 || (z[5] == 9318693926755804304 && (z[4] < 13320134076191308873 || (z[4] == 13320134076191308873 && (z[3] < 9083347379620258823 || (z[3] == 9083347379620258823 && (z[2] < 15543556715411259941 || (z[2] == 15543556715411259941 && (z[1] < 4410884215886313276 || (z[1] == 4410884215886313276 && (z[0] < 15512955586897510413))))))))))))))))))) { var b uint64 @@ -1190,7 +1014,7 @@ func _doubleGeneric(z, x *Element) { z[8], carry = bits.Add64(x[8], x[8], carry) z[9], _ = bits.Add64(x[9], x[9], carry) - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[9] < 82862755739295587 || (z[9] == 82862755739295587 && (z[8] < 18165857675053050549 || (z[8] == 18165857675053050549 && (z[7] < 12176845843281334983 || (z[7] == 12176845843281334983 && (z[6] < 5645674015335635503 || (z[6] == 5645674015335635503 && (z[5] < 9318693926755804304 || (z[5] == 9318693926755804304 && (z[4] < 13320134076191308873 || (z[4] == 13320134076191308873 && (z[3] < 9083347379620258823 || (z[3] == 9083347379620258823 && (z[2] < 15543556715411259941 || (z[2] == 15543556715411259941 && (z[1] < 4410884215886313276 || (z[1] == 4410884215886313276 && (z[0] < 15512955586897510413))))))))))))))))))) { var b uint64 @@ -1254,7 +1078,7 @@ func _negGeneric(z, x *Element) { func _reduceGeneric(z *Element) { - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[9] < 82862755739295587 || (z[9] == 82862755739295587 && (z[8] < 18165857675053050549 || (z[8] == 18165857675053050549 && (z[7] < 12176845843281334983 || (z[7] == 12176845843281334983 && (z[6] < 5645674015335635503 || (z[6] == 5645674015335635503 && (z[5] < 9318693926755804304 || (z[5] == 
9318693926755804304 && (z[4] < 13320134076191308873 || (z[4] == 13320134076191308873 && (z[3] < 9083347379620258823 || (z[3] == 9083347379620258823 && (z[2] < 15543556715411259941 || (z[2] == 15543556715411259941 && (z[1] < 4410884215886313276 || (z[1] == 4410884215886313276 && (z[0] < 15512955586897510413))))))))))))))))))) { var b uint64 @@ -1369,18 +1193,30 @@ func (z *Element) BitLen() int { return bits.Len64(z[0]) } -// Exp z = x^exponent mod q -func (z *Element) Exp(x Element, exponent *big.Int) *Element { - var bZero big.Int - if exponent.Cmp(&bZero) == 0 { +// Exp z = xᵏ mod q +func (z *Element) Exp(x Element, k *big.Int) *Element { + if k.IsUint64() && k.Uint64() == 0 { return z.SetOne() } + e := k + if k.Sign() == -1 { + // negative k, we invert + // if k < 0: xᵏ mod q == (x⁻¹)ᵏ mod q + x.Inverse(&x) + + // we negate k in a temp big.Int since + // Int.Bit(_) of k and -k is different + e = bigIntPool.Get().(*big.Int) + defer bigIntPool.Put(e) + e.Neg(k) + } + z.Set(&x) - for i := exponent.BitLen() - 2; i >= 0; i-- { + for i := e.BitLen() - 2; i >= 0; i-- { z.Square(z) - if exponent.Bit(i) == 1 { + if e.Bit(i) == 1 { z.Mul(z, &x) } } @@ -1893,14 +1729,10 @@ func (z *Element) Inverse(x *Element) *Element { return z } -var qMinusTwo *big.Int //test routines can set this to an incorrect value to fail whenever inverseExp was triggered - -// inverseExp is a fallback in case the inversion algorithm failed +// inverseExp computes z = x⁻¹ mod q = x**(q-2) mod q func (z *Element) inverseExp(x *Element) *Element { - if qMinusTwo == nil { - qMinusTwo = Modulus() - qMinusTwo.Sub(qMinusTwo, big.NewInt(2)) - } + qMinusTwo := Modulus() + qMinusTwo.Sub(qMinusTwo, big.NewInt(2)) return z.Exp(*x, qMinusTwo) } @@ -1942,6 +1774,8 @@ func (z *Element) linearComb(x *Element, xC int64, y *Element, yC int64) { // montReduceSigned z = (xHi * r + x) * r⁻¹ using the SOS algorithm // Requires |xHi| < 2⁶³. Most significant bit of xHi is the sign bit. 
func (z *Element) montReduceSigned(x *Element, xHi uint64) { + // Used for Montgomery reduction. (qInvNeg) q + r'.r = 1, i.e., qInvNeg = - q⁻¹ mod r + const qInvNegLsw uint64 = 13046692460116554043 const signBitRemover = ^signBitSelector neg := xHi&signBitSelector != 0 @@ -2127,7 +1961,7 @@ func (z *Element) montReduceSigned(x *Element, xHi uint64) { z[9], z[8] = madd2(m, qElementWord9, t[i+9], C) } - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[9] < 82862755739295587 || (z[9] == 82862755739295587 && (z[8] < 18165857675053050549 || (z[8] == 18165857675053050549 && (z[7] < 12176845843281334983 || (z[7] == 12176845843281334983 && (z[6] < 5645674015335635503 || (z[6] == 5645674015335635503 && (z[5] < 9318693926755804304 || (z[5] == 9318693926755804304 && (z[4] < 13320134076191308873 || (z[4] == 13320134076191308873 && (z[3] < 9083347379620258823 || (z[3] == 9083347379620258823 && (z[2] < 15543556715411259941 || (z[2] == 15543556715411259941 && (z[1] < 4410884215886313276 || (z[1] == 4410884215886313276 && (z[0] < 15512955586897510413))))))))))))))))))) { var b uint64 @@ -2201,7 +2035,7 @@ func (z *Element) montReduceSignedSimpleButSlow(x *Element, xHi uint64) { z[8], c = bits.Add64(z[8], 0, c) z[9], _ = bits.Add64(z[9], 0, c) - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[9] < 82862755739295587 || (z[9] == 82862755739295587 && (z[8] < 18165857675053050549 || (z[8] == 18165857675053050549 && (z[7] < 12176845843281334983 || (z[7] == 12176845843281334983 && (z[6] < 5645674015335635503 || (z[6] == 5645674015335635503 && (z[5] < 9318693926755804304 || (z[5] == 9318693926755804304 && (z[4] < 13320134076191308873 || (z[4] == 13320134076191308873 && (z[3] < 9083347379620258823 || (z[3] == 9083347379620258823 && (z[2] < 15543556715411259941 || (z[2] == 15543556715411259941 && (z[1] < 4410884215886313276 || (z[1] == 4410884215886313276 && (z[0] < 15512955586897510413))))))))))))))))))) { var b 
uint64 @@ -2264,6 +2098,177 @@ func (z *Element) mulWSigned(x *Element, y int64) { } } +func _mulWGeneric(z, x *Element, y uint64) { + + var t [10]uint64 + { + // round 0 + c1, c0 := bits.Mul64(y, x[0]) + m := c0 * 13046692460116554043 + c2 := madd0(m, 15512955586897510413, c0) + c1, c0 = madd1(y, x[1], c1) + c2, t[0] = madd2(m, 4410884215886313276, c2, c0) + c1, c0 = madd1(y, x[2], c1) + c2, t[1] = madd2(m, 15543556715411259941, c2, c0) + c1, c0 = madd1(y, x[3], c1) + c2, t[2] = madd2(m, 9083347379620258823, c2, c0) + c1, c0 = madd1(y, x[4], c1) + c2, t[3] = madd2(m, 13320134076191308873, c2, c0) + c1, c0 = madd1(y, x[5], c1) + c2, t[4] = madd2(m, 9318693926755804304, c2, c0) + c1, c0 = madd1(y, x[6], c1) + c2, t[5] = madd2(m, 5645674015335635503, c2, c0) + c1, c0 = madd1(y, x[7], c1) + c2, t[6] = madd2(m, 12176845843281334983, c2, c0) + c1, c0 = madd1(y, x[8], c1) + c2, t[7] = madd2(m, 18165857675053050549, c2, c0) + c1, c0 = madd1(y, x[9], c1) + t[9], t[8] = madd3(m, 82862755739295587, c0, c2, c1) + } + { + // round 1 + m := t[0] * 13046692460116554043 + c2 := madd0(m, 15512955586897510413, t[0]) + c2, t[0] = madd2(m, 4410884215886313276, c2, t[1]) + c2, t[1] = madd2(m, 15543556715411259941, c2, t[2]) + c2, t[2] = madd2(m, 9083347379620258823, c2, t[3]) + c2, t[3] = madd2(m, 13320134076191308873, c2, t[4]) + c2, t[4] = madd2(m, 9318693926755804304, c2, t[5]) + c2, t[5] = madd2(m, 5645674015335635503, c2, t[6]) + c2, t[6] = madd2(m, 12176845843281334983, c2, t[7]) + c2, t[7] = madd2(m, 18165857675053050549, c2, t[8]) + t[9], t[8] = madd2(m, 82862755739295587, t[9], c2) + } + { + // round 2 + m := t[0] * 13046692460116554043 + c2 := madd0(m, 15512955586897510413, t[0]) + c2, t[0] = madd2(m, 4410884215886313276, c2, t[1]) + c2, t[1] = madd2(m, 15543556715411259941, c2, t[2]) + c2, t[2] = madd2(m, 9083347379620258823, c2, t[3]) + c2, t[3] = madd2(m, 13320134076191308873, c2, t[4]) + c2, t[4] = madd2(m, 9318693926755804304, c2, t[5]) + c2, t[5] = madd2(m, 
5645674015335635503, c2, t[6]) + c2, t[6] = madd2(m, 12176845843281334983, c2, t[7]) + c2, t[7] = madd2(m, 18165857675053050549, c2, t[8]) + t[9], t[8] = madd2(m, 82862755739295587, t[9], c2) + } + { + // round 3 + m := t[0] * 13046692460116554043 + c2 := madd0(m, 15512955586897510413, t[0]) + c2, t[0] = madd2(m, 4410884215886313276, c2, t[1]) + c2, t[1] = madd2(m, 15543556715411259941, c2, t[2]) + c2, t[2] = madd2(m, 9083347379620258823, c2, t[3]) + c2, t[3] = madd2(m, 13320134076191308873, c2, t[4]) + c2, t[4] = madd2(m, 9318693926755804304, c2, t[5]) + c2, t[5] = madd2(m, 5645674015335635503, c2, t[6]) + c2, t[6] = madd2(m, 12176845843281334983, c2, t[7]) + c2, t[7] = madd2(m, 18165857675053050549, c2, t[8]) + t[9], t[8] = madd2(m, 82862755739295587, t[9], c2) + } + { + // round 4 + m := t[0] * 13046692460116554043 + c2 := madd0(m, 15512955586897510413, t[0]) + c2, t[0] = madd2(m, 4410884215886313276, c2, t[1]) + c2, t[1] = madd2(m, 15543556715411259941, c2, t[2]) + c2, t[2] = madd2(m, 9083347379620258823, c2, t[3]) + c2, t[3] = madd2(m, 13320134076191308873, c2, t[4]) + c2, t[4] = madd2(m, 9318693926755804304, c2, t[5]) + c2, t[5] = madd2(m, 5645674015335635503, c2, t[6]) + c2, t[6] = madd2(m, 12176845843281334983, c2, t[7]) + c2, t[7] = madd2(m, 18165857675053050549, c2, t[8]) + t[9], t[8] = madd2(m, 82862755739295587, t[9], c2) + } + { + // round 5 + m := t[0] * 13046692460116554043 + c2 := madd0(m, 15512955586897510413, t[0]) + c2, t[0] = madd2(m, 4410884215886313276, c2, t[1]) + c2, t[1] = madd2(m, 15543556715411259941, c2, t[2]) + c2, t[2] = madd2(m, 9083347379620258823, c2, t[3]) + c2, t[3] = madd2(m, 13320134076191308873, c2, t[4]) + c2, t[4] = madd2(m, 9318693926755804304, c2, t[5]) + c2, t[5] = madd2(m, 5645674015335635503, c2, t[6]) + c2, t[6] = madd2(m, 12176845843281334983, c2, t[7]) + c2, t[7] = madd2(m, 18165857675053050549, c2, t[8]) + t[9], t[8] = madd2(m, 82862755739295587, t[9], c2) + } + { + // round 6 + m := t[0] * 13046692460116554043 + c2 
:= madd0(m, 15512955586897510413, t[0]) + c2, t[0] = madd2(m, 4410884215886313276, c2, t[1]) + c2, t[1] = madd2(m, 15543556715411259941, c2, t[2]) + c2, t[2] = madd2(m, 9083347379620258823, c2, t[3]) + c2, t[3] = madd2(m, 13320134076191308873, c2, t[4]) + c2, t[4] = madd2(m, 9318693926755804304, c2, t[5]) + c2, t[5] = madd2(m, 5645674015335635503, c2, t[6]) + c2, t[6] = madd2(m, 12176845843281334983, c2, t[7]) + c2, t[7] = madd2(m, 18165857675053050549, c2, t[8]) + t[9], t[8] = madd2(m, 82862755739295587, t[9], c2) + } + { + // round 7 + m := t[0] * 13046692460116554043 + c2 := madd0(m, 15512955586897510413, t[0]) + c2, t[0] = madd2(m, 4410884215886313276, c2, t[1]) + c2, t[1] = madd2(m, 15543556715411259941, c2, t[2]) + c2, t[2] = madd2(m, 9083347379620258823, c2, t[3]) + c2, t[3] = madd2(m, 13320134076191308873, c2, t[4]) + c2, t[4] = madd2(m, 9318693926755804304, c2, t[5]) + c2, t[5] = madd2(m, 5645674015335635503, c2, t[6]) + c2, t[6] = madd2(m, 12176845843281334983, c2, t[7]) + c2, t[7] = madd2(m, 18165857675053050549, c2, t[8]) + t[9], t[8] = madd2(m, 82862755739295587, t[9], c2) + } + { + // round 8 + m := t[0] * 13046692460116554043 + c2 := madd0(m, 15512955586897510413, t[0]) + c2, t[0] = madd2(m, 4410884215886313276, c2, t[1]) + c2, t[1] = madd2(m, 15543556715411259941, c2, t[2]) + c2, t[2] = madd2(m, 9083347379620258823, c2, t[3]) + c2, t[3] = madd2(m, 13320134076191308873, c2, t[4]) + c2, t[4] = madd2(m, 9318693926755804304, c2, t[5]) + c2, t[5] = madd2(m, 5645674015335635503, c2, t[6]) + c2, t[6] = madd2(m, 12176845843281334983, c2, t[7]) + c2, t[7] = madd2(m, 18165857675053050549, c2, t[8]) + t[9], t[8] = madd2(m, 82862755739295587, t[9], c2) + } + { + // round 9 + m := t[0] * 13046692460116554043 + c2 := madd0(m, 15512955586897510413, t[0]) + c2, z[0] = madd2(m, 4410884215886313276, c2, t[1]) + c2, z[1] = madd2(m, 15543556715411259941, c2, t[2]) + c2, z[2] = madd2(m, 9083347379620258823, c2, t[3]) + c2, z[3] = madd2(m, 13320134076191308873, c2, t[4]) 
+ c2, z[4] = madd2(m, 9318693926755804304, c2, t[5]) + c2, z[5] = madd2(m, 5645674015335635503, c2, t[6]) + c2, z[6] = madd2(m, 12176845843281334983, c2, t[7]) + c2, z[7] = madd2(m, 18165857675053050549, c2, t[8]) + z[9], z[8] = madd2(m, 82862755739295587, t[9], c2) + } + + // if z >= q → z -= q + // note: this is NOT constant time + if !(z[9] < 82862755739295587 || (z[9] == 82862755739295587 && (z[8] < 18165857675053050549 || (z[8] == 18165857675053050549 && (z[7] < 12176845843281334983 || (z[7] == 12176845843281334983 && (z[6] < 5645674015335635503 || (z[6] == 5645674015335635503 && (z[5] < 9318693926755804304 || (z[5] == 9318693926755804304 && (z[4] < 13320134076191308873 || (z[4] == 13320134076191308873 && (z[3] < 9083347379620258823 || (z[3] == 9083347379620258823 && (z[2] < 15543556715411259941 || (z[2] == 15543556715411259941 && (z[1] < 4410884215886313276 || (z[1] == 4410884215886313276 && (z[0] < 15512955586897510413))))))))))))))))))) { + var b uint64 + z[0], b = bits.Sub64(z[0], 15512955586897510413, 0) + z[1], b = bits.Sub64(z[1], 4410884215886313276, b) + z[2], b = bits.Sub64(z[2], 15543556715411259941, b) + z[3], b = bits.Sub64(z[3], 9083347379620258823, b) + z[4], b = bits.Sub64(z[4], 13320134076191308873, b) + z[5], b = bits.Sub64(z[5], 9318693926755804304, b) + z[6], b = bits.Sub64(z[6], 5645674015335635503, b) + z[7], b = bits.Sub64(z[7], 12176845843281334983, b) + z[8], b = bits.Sub64(z[8], 18165857675053050549, b) + z[9], _ = bits.Sub64(z[9], 82862755739295587, b) + } +} + func (z *Element) neg(x *Element, xHi uint64) uint64 { var b uint64 diff --git a/ecc/bw6-633/fp/element_ops_noasm.go b/ecc/bw6-633/fp/element_ops_noasm.go index fec6289183..7d02396a3f 100644 --- a/ecc/bw6-633/fp/element_ops_noasm.go +++ b/ecc/bw6-633/fp/element_ops_noasm.go @@ -46,7 +46,6 @@ func MulBy13(x *Element) { func Butterfly(a, b *Element) { _butterflyGeneric(a, b) } - func mul(z, x, y *Element) { _mulGeneric(z, x, y) } diff --git a/ecc/bw6-633/fp/element_test.go 
b/ecc/bw6-633/fp/element_test.go index b5c8bffca9..1dc51767f8 100644 --- a/ecc/bw6-633/fp/element_test.go +++ b/ecc/bw6-633/fp/element_test.go @@ -22,10 +22,12 @@ import ( "fmt" "math/big" "math/bits" + + "github.com/consensys/gnark-crypto/field" mrand "math/rand" + "testing" - "github.com/consensys/gnark-crypto/field" "github.com/leanovate/gopter" ggen "github.com/leanovate/gopter/gen" "github.com/leanovate/gopter/prop" @@ -332,7 +334,6 @@ func init() { a[9]-- staticTestValues = append(staticTestValues, a) } - { a := qElement a[9]-- @@ -346,6 +347,12 @@ func init() { staticTestValues = append(staticTestValues, a) } + { + a := qElement + a[9] = 0 + staticTestValues = append(staticTestValues, a) + } + } func TestElementReduce(t *testing.T) { @@ -483,7 +490,6 @@ func TestElementInverseExp(t *testing.T) { properties = gopter.NewProperties(parameters) properties.Property("inv(0) == 0", prop.ForAll(invMatchExp, ggen.OneConstOf(testPairElement{}))) properties.TestingRun(t, gopter.ConsoleReporter(false)) - // if we have ADX instruction enabled, test both path in assembly if supportAdx { t.Log("disabling ADX") @@ -1430,8 +1436,8 @@ func TestElementSquare(t *testing.T) { specialValueTest() // if we have ADX instruction enabled, test both path in assembly if supportAdx { - supportAdx = false t.Log("disabling ADX") + supportAdx = false properties.TestingRun(t, gopter.ConsoleReporter(false)) specialValueTest() supportAdx = true @@ -1509,8 +1515,8 @@ func TestElementInverse(t *testing.T) { specialValueTest() // if we have ADX instruction enabled, test both path in assembly if supportAdx { - supportAdx = false t.Log("disabling ADX") + supportAdx = false properties.TestingRun(t, gopter.ConsoleReporter(false)) specialValueTest() supportAdx = true @@ -1588,8 +1594,8 @@ func TestElementSqrt(t *testing.T) { specialValueTest() // if we have ADX instruction enabled, test both path in assembly if supportAdx { - supportAdx = false t.Log("disabling ADX") + supportAdx = false 
properties.TestingRun(t, gopter.ConsoleReporter(false)) specialValueTest() supportAdx = true @@ -1684,8 +1690,8 @@ func TestElementDouble(t *testing.T) { specialValueTest() // if we have ADX instruction enabled, test both path in assembly if supportAdx { - supportAdx = false t.Log("disabling ADX") + supportAdx = false properties.TestingRun(t, gopter.ConsoleReporter(false)) specialValueTest() supportAdx = true @@ -1780,8 +1786,8 @@ func TestElementNeg(t *testing.T) { specialValueTest() // if we have ADX instruction enabled, test both path in assembly if supportAdx { - supportAdx = false t.Log("disabling ADX") + supportAdx = false properties.TestingRun(t, gopter.ConsoleReporter(false)) specialValueTest() supportAdx = true @@ -2116,6 +2122,134 @@ func TestElementSetInterface(t *testing.T) { properties.TestingRun(t, gopter.ConsoleReporter(false)) } +func TestElementNegativeExp(t *testing.T) { + t.Parallel() + + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := gen() + + properties.Property("x⁻ᵏ == 1/xᵏ", prop.ForAll( + func(a, b testPairElement) bool { + + var nb, d, e big.Int + nb.Neg(&b.bigint) + + var c Element + c.Exp(a.element, &nb) + + d.Exp(&a.bigint, &nb, Modulus()) + + return c.FromMont().ToBigInt(&e).Cmp(&d) == 0 + }, + genA, genA, + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) +} + +func TestElementBatchInvert(t *testing.T) { + assert := require.New(t) + + t.Parallel() + + // ensure batchInvert([x]) == invert(x) + for i := int64(-1); i <= 2; i++ { + var e, eInv Element + e.SetInt64(i) + eInv.Inverse(&e) + + a := []Element{e} + aInv := BatchInvert(a) + + assert.True(aInv[0].Equal(&eInv), "batchInvert != invert") + + } + + // test x * x⁻¹ == 1 + tData := [][]int64{ + {-1, 1, 2, 3}, + {0, -1, 1, 2, 3, 0}, + {0, -1, 1, 0, 2, 3, 0}, + {-1, 1, 0, 2, 3}, + {0, 0, 1}, 
+ {1, 0, 0}, + {0, 0, 0}, + } + + for _, t := range tData { + a := make([]Element, len(t)) + for i := 0; i < len(a); i++ { + a[i].SetInt64(t[i]) + } + + aInv := BatchInvert(a) + + assert.True(len(aInv) == len(a)) + + for i := 0; i < len(a); i++ { + if a[i].IsZero() { + assert.True(aInv[i].IsZero(), "0⁻¹ != 0") + } else { + assert.True(a[i].Mul(&a[i], &aInv[i]).IsOne(), "x * x⁻¹ != 1") + } + } + } + + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := gen() + + properties.Property("batchInvert --> x * x⁻¹ == 1", prop.ForAll( + func(tp testPairElement, r uint8) bool { + + a := make([]Element, r) + if r != 0 { + a[0] = tp.element + + } + one := One() + for i := 1; i < len(a); i++ { + a[i].Add(&a[i-1], &one) + } + + aInv := BatchInvert(a) + + assert.True(len(aInv) == len(a)) + + for i := 0; i < len(a); i++ { + if a[i].IsZero() { + if !aInv[i].IsZero() { + return false + } + } else { + if !a[i].Mul(&a[i], &aInv[i]).IsOne() { + return false + } + } + } + return true + }, + genA, ggen.UInt8(), + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) +} + func TestElementFromMont(t *testing.T) { t.Parallel() @@ -2370,8 +2504,6 @@ func genFull() gopter.Gen { } } -// Some utils - func (z *Element) matchVeryBigInt(aHi uint64, aInt *big.Int) error { var modulus big.Int var aIntMod big.Int diff --git a/ecc/bw6-633/fr/element.go b/ecc/bw6-633/fr/element.go index eba97b965d..23c6d93cdd 100644 --- a/ecc/bw6-633/fr/element.go +++ b/ecc/bw6-633/fr/element.go @@ -77,9 +77,6 @@ var qElement = Element{ qElementWord4, } -// Used for Montgomery reduction. 
(qInvNeg) q + r'.r = 1, i.e., qInvNeg = - q⁻¹ mod r -const qInvNegLsw uint64 = 8083954730842193919 - // rSquare var rSquare = Element{ 7746605402484284438, @@ -96,7 +93,8 @@ var bigIntPool = sync.Pool{ } func init() { - _modulus.SetString("39705142709513438335025689890408969744933502416914749335064285505637884093126342347073617133569", 10) + // base10: 39705142709513438335025689890408969744933502416914749335064285505637884093126342347073617133569 + _modulus.SetString("4c23a02b586d650d3f7498be97c5eafdec1d01aa27a1ae0421ee5da52bde5026fe802ff40300001", 16) } // NewElement returns a new Element from a uint64 value @@ -340,7 +338,7 @@ func (z *Element) SetRandom() (*Element, error) { z[4] = binary.BigEndian.Uint64(bytes[32:40]) z[4] %= 342900304943437392 - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[4] < 342900304943437392 || (z[4] == 342900304943437392 && (z[3] < 15273757526516850351 || (z[3] == 15273757526516850351 && (z[2] < 16051339359738796768 || (z[2] == 16051339359738796768 && (z[1] < 4764498181658371330 || (z[1] == 4764498181658371330 && (z[0] < 8063698428123676673))))))))) { var b uint64 @@ -363,10 +361,10 @@ func One() Element { // Halve sets z to z / 2 (mod p) func (z *Element) Halve() { - if z[0]&1 == 1 { + var carry uint64 + if z[0]&1 == 1 { // z = z + q - var carry uint64 z[0], carry = bits.Add64(z[0], 8063698428123676673, 0) z[1], carry = bits.Add64(z[1], 4764498181658371330, carry) z[2], carry = bits.Add64(z[2], 16051339359738796768, carry) @@ -374,9 +372,7 @@ func (z *Element) Halve() { z[4], _ = bits.Add64(z[4], 342900304943437392, carry) } - // z = z >> 1 - z[0] = z[0]>>1 | z[1]<<63 z[1] = z[1]>>1 | z[2]<<63 z[2] = z[2]>>1 | z[3]<<63 @@ -385,8 +381,6 @@ func (z *Element) Halve() { } -// API with assembly impl - // Mul z = x * y mod q // see https://hackmd.io/@gnark/modular_multiplication func (z *Element) Mul(x, y *Element) *Element { @@ -526,7 +520,7 @@ func _mulGeneric(z, x, y *Element) { z[4], z[3] = 
madd3(m, 342900304943437392, c[0], c[2], c[1]) } - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[4] < 342900304943437392 || (z[4] == 342900304943437392 && (z[3] < 15273757526516850351 || (z[3] == 15273757526516850351 && (z[2] < 16051339359738796768 || (z[2] == 16051339359738796768 && (z[1] < 4764498181658371330 || (z[1] == 4764498181658371330 && (z[0] < 8063698428123676673))))))))) { var b uint64 @@ -536,72 +530,7 @@ func _mulGeneric(z, x, y *Element) { z[3], b = bits.Sub64(z[3], 15273757526516850351, b) z[4], _ = bits.Sub64(z[4], 342900304943437392, b) } -} - -func _mulWGeneric(z, x *Element, y uint64) { - var t [5]uint64 - { - // round 0 - c1, c0 := bits.Mul64(y, x[0]) - m := c0 * 8083954730842193919 - c2 := madd0(m, 8063698428123676673, c0) - c1, c0 = madd1(y, x[1], c1) - c2, t[0] = madd2(m, 4764498181658371330, c2, c0) - c1, c0 = madd1(y, x[2], c1) - c2, t[1] = madd2(m, 16051339359738796768, c2, c0) - c1, c0 = madd1(y, x[3], c1) - c2, t[2] = madd2(m, 15273757526516850351, c2, c0) - c1, c0 = madd1(y, x[4], c1) - t[4], t[3] = madd3(m, 342900304943437392, c0, c2, c1) - } - { - // round 1 - m := t[0] * 8083954730842193919 - c2 := madd0(m, 8063698428123676673, t[0]) - c2, t[0] = madd2(m, 4764498181658371330, c2, t[1]) - c2, t[1] = madd2(m, 16051339359738796768, c2, t[2]) - c2, t[2] = madd2(m, 15273757526516850351, c2, t[3]) - t[4], t[3] = madd2(m, 342900304943437392, t[4], c2) - } - { - // round 2 - m := t[0] * 8083954730842193919 - c2 := madd0(m, 8063698428123676673, t[0]) - c2, t[0] = madd2(m, 4764498181658371330, c2, t[1]) - c2, t[1] = madd2(m, 16051339359738796768, c2, t[2]) - c2, t[2] = madd2(m, 15273757526516850351, c2, t[3]) - t[4], t[3] = madd2(m, 342900304943437392, t[4], c2) - } - { - // round 3 - m := t[0] * 8083954730842193919 - c2 := madd0(m, 8063698428123676673, t[0]) - c2, t[0] = madd2(m, 4764498181658371330, c2, t[1]) - c2, t[1] = madd2(m, 16051339359738796768, c2, t[2]) - c2, t[2] = madd2(m, 
15273757526516850351, c2, t[3]) - t[4], t[3] = madd2(m, 342900304943437392, t[4], c2) - } - { - // round 4 - m := t[0] * 8083954730842193919 - c2 := madd0(m, 8063698428123676673, t[0]) - c2, z[0] = madd2(m, 4764498181658371330, c2, t[1]) - c2, z[1] = madd2(m, 16051339359738796768, c2, t[2]) - c2, z[2] = madd2(m, 15273757526516850351, c2, t[3]) - z[4], z[3] = madd2(m, 342900304943437392, t[4], c2) - } - - // if z > q → z -= q - // note: this is NOT constant time - if !(z[4] < 342900304943437392 || (z[4] == 342900304943437392 && (z[3] < 15273757526516850351 || (z[3] == 15273757526516850351 && (z[2] < 16051339359738796768 || (z[2] == 16051339359738796768 && (z[1] < 4764498181658371330 || (z[1] == 4764498181658371330 && (z[0] < 8063698428123676673))))))))) { - var b uint64 - z[0], b = bits.Sub64(z[0], 8063698428123676673, 0) - z[1], b = bits.Sub64(z[1], 4764498181658371330, b) - z[2], b = bits.Sub64(z[2], 16051339359738796768, b) - z[3], b = bits.Sub64(z[3], 15273757526516850351, b) - z[4], _ = bits.Sub64(z[4], 342900304943437392, b) - } } func _fromMontGeneric(z *Element) { @@ -658,7 +587,7 @@ func _fromMontGeneric(z *Element) { z[4] = C } - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[4] < 342900304943437392 || (z[4] == 342900304943437392 && (z[3] < 15273757526516850351 || (z[3] == 15273757526516850351 && (z[2] < 16051339359738796768 || (z[2] == 16051339359738796768 && (z[1] < 4764498181658371330 || (z[1] == 4764498181658371330 && (z[0] < 8063698428123676673))))))))) { var b uint64 @@ -679,7 +608,7 @@ func _addGeneric(z, x, y *Element) { z[3], carry = bits.Add64(x[3], y[3], carry) z[4], _ = bits.Add64(x[4], y[4], carry) - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[4] < 342900304943437392 || (z[4] == 342900304943437392 && (z[3] < 15273757526516850351 || (z[3] == 15273757526516850351 && (z[2] < 16051339359738796768 || (z[2] == 16051339359738796768 && (z[1] < 4764498181658371330 || 
(z[1] == 4764498181658371330 && (z[0] < 8063698428123676673))))))))) { var b uint64 @@ -700,7 +629,7 @@ func _doubleGeneric(z, x *Element) { z[3], carry = bits.Add64(x[3], x[3], carry) z[4], _ = bits.Add64(x[4], x[4], carry) - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[4] < 342900304943437392 || (z[4] == 342900304943437392 && (z[3] < 15273757526516850351 || (z[3] == 15273757526516850351 && (z[2] < 16051339359738796768 || (z[2] == 16051339359738796768 && (z[1] < 4764498181658371330 || (z[1] == 4764498181658371330 && (z[0] < 8063698428123676673))))))))) { var b uint64 @@ -744,7 +673,7 @@ func _negGeneric(z, x *Element) { func _reduceGeneric(z *Element) { - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[4] < 342900304943437392 || (z[4] == 342900304943437392 && (z[3] < 15273757526516850351 || (z[3] == 15273757526516850351 && (z[2] < 16051339359738796768 || (z[2] == 16051339359738796768 && (z[1] < 4764498181658371330 || (z[1] == 4764498181658371330 && (z[0] < 8063698428123676673))))))))) { var b uint64 @@ -839,18 +768,30 @@ func (z *Element) BitLen() int { return bits.Len64(z[0]) } -// Exp z = x^exponent mod q -func (z *Element) Exp(x Element, exponent *big.Int) *Element { - var bZero big.Int - if exponent.Cmp(&bZero) == 0 { +// Exp z = xᵏ mod q +func (z *Element) Exp(x Element, k *big.Int) *Element { + if k.IsUint64() && k.Uint64() == 0 { return z.SetOne() } + e := k + if k.Sign() == -1 { + // negative k, we invert + // if k < 0: xᵏ mod q == (x⁻¹)ᵏ mod q + x.Inverse(&x) + + // we negate k in a temp big.Int since + // Int.Bit(_) of k and -k is different + e = bigIntPool.Get().(*big.Int) + defer bigIntPool.Put(e) + e.Neg(k) + } + z.Set(&x) - for i := exponent.BitLen() - 2; i >= 0; i-- { + for i := e.BitLen() - 2; i >= 0; i-- { z.Square(z) - if exponent.Bit(i) == 1 { + if e.Bit(i) == 1 { z.Mul(z, &x) } } @@ -1372,14 +1313,10 @@ func (z *Element) Inverse(x *Element) *Element { return z } 
-var qMinusTwo *big.Int //test routines can set this to an incorrect value to fail whenever inverseExp was triggered - -// inverseExp is a fallback in case the inversion algorithm failed +// inverseExp computes z = x⁻¹ mod q = x**(q-2) mod q func (z *Element) inverseExp(x *Element) *Element { - if qMinusTwo == nil { - qMinusTwo = Modulus() - qMinusTwo.Sub(qMinusTwo, big.NewInt(2)) - } + qMinusTwo := Modulus() + qMinusTwo.Sub(qMinusTwo, big.NewInt(2)) return z.Exp(*x, qMinusTwo) } @@ -1421,6 +1358,8 @@ func (z *Element) linearComb(x *Element, xC int64, y *Element, yC int64) { // montReduceSigned z = (xHi * r + x) * r⁻¹ using the SOS algorithm // Requires |xHi| < 2⁶³. Most significant bit of xHi is the sign bit. func (z *Element) montReduceSigned(x *Element, xHi uint64) { + // Used for Montgomery reduction. (qInvNeg) q + r'.r = 1, i.e., qInvNeg = - q⁻¹ mod r + const qInvNegLsw uint64 = 8083954730842193919 const signBitRemover = ^signBitSelector neg := xHi&signBitSelector != 0 @@ -1496,7 +1435,7 @@ func (z *Element) montReduceSigned(x *Element, xHi uint64) { z[4], z[3] = madd2(m, qElementWord4, t[i+4], C) } - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[4] < 342900304943437392 || (z[4] == 342900304943437392 && (z[3] < 15273757526516850351 || (z[3] == 15273757526516850351 && (z[2] < 16051339359738796768 || (z[2] == 16051339359738796768 && (z[1] < 4764498181658371330 || (z[1] == 4764498181658371330 && (z[0] < 8063698428123676673))))))))) { var b uint64 @@ -1550,7 +1489,7 @@ func (z *Element) montReduceSignedSimpleButSlow(x *Element, xHi uint64) { z[3], c = bits.Add64(z[3], 0, c) z[4], _ = bits.Add64(z[4], 0, c) - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[4] < 342900304943437392 || (z[4] == 342900304943437392 && (z[3] < 15273757526516850351 || (z[3] == 15273757526516850351 && (z[2] < 16051339359738796768 || (z[2] == 16051339359738796768 && (z[1] < 4764498181658371330 || (z[1] == 
4764498181658371330 && (z[0] < 8063698428123676673))))))))) { var b uint64 @@ -1598,6 +1537,72 @@ func (z *Element) mulWSigned(x *Element, y int64) { } } +func _mulWGeneric(z, x *Element, y uint64) { + + var t [5]uint64 + { + // round 0 + c1, c0 := bits.Mul64(y, x[0]) + m := c0 * 8083954730842193919 + c2 := madd0(m, 8063698428123676673, c0) + c1, c0 = madd1(y, x[1], c1) + c2, t[0] = madd2(m, 4764498181658371330, c2, c0) + c1, c0 = madd1(y, x[2], c1) + c2, t[1] = madd2(m, 16051339359738796768, c2, c0) + c1, c0 = madd1(y, x[3], c1) + c2, t[2] = madd2(m, 15273757526516850351, c2, c0) + c1, c0 = madd1(y, x[4], c1) + t[4], t[3] = madd3(m, 342900304943437392, c0, c2, c1) + } + { + // round 1 + m := t[0] * 8083954730842193919 + c2 := madd0(m, 8063698428123676673, t[0]) + c2, t[0] = madd2(m, 4764498181658371330, c2, t[1]) + c2, t[1] = madd2(m, 16051339359738796768, c2, t[2]) + c2, t[2] = madd2(m, 15273757526516850351, c2, t[3]) + t[4], t[3] = madd2(m, 342900304943437392, t[4], c2) + } + { + // round 2 + m := t[0] * 8083954730842193919 + c2 := madd0(m, 8063698428123676673, t[0]) + c2, t[0] = madd2(m, 4764498181658371330, c2, t[1]) + c2, t[1] = madd2(m, 16051339359738796768, c2, t[2]) + c2, t[2] = madd2(m, 15273757526516850351, c2, t[3]) + t[4], t[3] = madd2(m, 342900304943437392, t[4], c2) + } + { + // round 3 + m := t[0] * 8083954730842193919 + c2 := madd0(m, 8063698428123676673, t[0]) + c2, t[0] = madd2(m, 4764498181658371330, c2, t[1]) + c2, t[1] = madd2(m, 16051339359738796768, c2, t[2]) + c2, t[2] = madd2(m, 15273757526516850351, c2, t[3]) + t[4], t[3] = madd2(m, 342900304943437392, t[4], c2) + } + { + // round 4 + m := t[0] * 8083954730842193919 + c2 := madd0(m, 8063698428123676673, t[0]) + c2, z[0] = madd2(m, 4764498181658371330, c2, t[1]) + c2, z[1] = madd2(m, 16051339359738796768, c2, t[2]) + c2, z[2] = madd2(m, 15273757526516850351, c2, t[3]) + z[4], z[3] = madd2(m, 342900304943437392, t[4], c2) + } + + // if z >= q → z -= q + // note: this is NOT constant time + 
if !(z[4] < 342900304943437392 || (z[4] == 342900304943437392 && (z[3] < 15273757526516850351 || (z[3] == 15273757526516850351 && (z[2] < 16051339359738796768 || (z[2] == 16051339359738796768 && (z[1] < 4764498181658371330 || (z[1] == 4764498181658371330 && (z[0] < 8063698428123676673))))))))) { + var b uint64 + z[0], b = bits.Sub64(z[0], 8063698428123676673, 0) + z[1], b = bits.Sub64(z[1], 4764498181658371330, b) + z[2], b = bits.Sub64(z[2], 16051339359738796768, b) + z[3], b = bits.Sub64(z[3], 15273757526516850351, b) + z[4], _ = bits.Sub64(z[4], 342900304943437392, b) + } +} + func (z *Element) neg(x *Element, xHi uint64) uint64 { var b uint64 diff --git a/ecc/bw6-633/fr/element_ops_noasm.go b/ecc/bw6-633/fr/element_ops_noasm.go index ec1fac18d6..7891ac9cd3 100644 --- a/ecc/bw6-633/fr/element_ops_noasm.go +++ b/ecc/bw6-633/fr/element_ops_noasm.go @@ -46,7 +46,6 @@ func MulBy13(x *Element) { func Butterfly(a, b *Element) { _butterflyGeneric(a, b) } - func mul(z, x, y *Element) { _mulGeneric(z, x, y) } diff --git a/ecc/bw6-633/fr/element_test.go b/ecc/bw6-633/fr/element_test.go index 81404f41d6..5fb0fa7b63 100644 --- a/ecc/bw6-633/fr/element_test.go +++ b/ecc/bw6-633/fr/element_test.go @@ -22,10 +22,12 @@ import ( "fmt" "math/big" "math/bits" + + "github.com/consensys/gnark-crypto/field" mrand "math/rand" + "testing" - "github.com/consensys/gnark-crypto/field" "github.com/leanovate/gopter" ggen "github.com/leanovate/gopter/gen" "github.com/leanovate/gopter/prop" @@ -322,7 +324,6 @@ func init() { a[4]-- staticTestValues = append(staticTestValues, a) } - { a := qElement a[4]-- @@ -336,6 +337,12 @@ func init() { staticTestValues = append(staticTestValues, a) } + { + a := qElement + a[4] = 0 + staticTestValues = append(staticTestValues, a) + } + } func TestElementReduce(t *testing.T) { @@ -473,7 +480,6 @@ func TestElementInverseExp(t *testing.T) { properties = gopter.NewProperties(parameters) properties.Property("inv(0) == 0", prop.ForAll(invMatchExp, 
ggen.OneConstOf(testPairElement{}))) properties.TestingRun(t, gopter.ConsoleReporter(false)) - // if we have ADX instruction enabled, test both path in assembly if supportAdx { t.Log("disabling ADX") @@ -1420,8 +1426,8 @@ func TestElementSquare(t *testing.T) { specialValueTest() // if we have ADX instruction enabled, test both path in assembly if supportAdx { - supportAdx = false t.Log("disabling ADX") + supportAdx = false properties.TestingRun(t, gopter.ConsoleReporter(false)) specialValueTest() supportAdx = true @@ -1499,8 +1505,8 @@ func TestElementInverse(t *testing.T) { specialValueTest() // if we have ADX instruction enabled, test both path in assembly if supportAdx { - supportAdx = false t.Log("disabling ADX") + supportAdx = false properties.TestingRun(t, gopter.ConsoleReporter(false)) specialValueTest() supportAdx = true @@ -1578,8 +1584,8 @@ func TestElementSqrt(t *testing.T) { specialValueTest() // if we have ADX instruction enabled, test both path in assembly if supportAdx { - supportAdx = false t.Log("disabling ADX") + supportAdx = false properties.TestingRun(t, gopter.ConsoleReporter(false)) specialValueTest() supportAdx = true @@ -1674,8 +1680,8 @@ func TestElementDouble(t *testing.T) { specialValueTest() // if we have ADX instruction enabled, test both path in assembly if supportAdx { - supportAdx = false t.Log("disabling ADX") + supportAdx = false properties.TestingRun(t, gopter.ConsoleReporter(false)) specialValueTest() supportAdx = true @@ -1770,8 +1776,8 @@ func TestElementNeg(t *testing.T) { specialValueTest() // if we have ADX instruction enabled, test both path in assembly if supportAdx { - supportAdx = false t.Log("disabling ADX") + supportAdx = false properties.TestingRun(t, gopter.ConsoleReporter(false)) specialValueTest() supportAdx = true @@ -2106,6 +2112,134 @@ func TestElementSetInterface(t *testing.T) { properties.TestingRun(t, gopter.ConsoleReporter(false)) } +func TestElementNegativeExp(t *testing.T) { + t.Parallel() + + parameters 
:= gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := gen() + + properties.Property("x⁻ᵏ == 1/xᵏ", prop.ForAll( + func(a, b testPairElement) bool { + + var nb, d, e big.Int + nb.Neg(&b.bigint) + + var c Element + c.Exp(a.element, &nb) + + d.Exp(&a.bigint, &nb, Modulus()) + + return c.FromMont().ToBigInt(&e).Cmp(&d) == 0 + }, + genA, genA, + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) +} + +func TestElementBatchInvert(t *testing.T) { + assert := require.New(t) + + t.Parallel() + + // ensure batchInvert([x]) == invert(x) + for i := int64(-1); i <= 2; i++ { + var e, eInv Element + e.SetInt64(i) + eInv.Inverse(&e) + + a := []Element{e} + aInv := BatchInvert(a) + + assert.True(aInv[0].Equal(&eInv), "batchInvert != invert") + + } + + // test x * x⁻¹ == 1 + tData := [][]int64{ + {-1, 1, 2, 3}, + {0, -1, 1, 2, 3, 0}, + {0, -1, 1, 0, 2, 3, 0}, + {-1, 1, 0, 2, 3}, + {0, 0, 1}, + {1, 0, 0}, + {0, 0, 0}, + } + + for _, t := range tData { + a := make([]Element, len(t)) + for i := 0; i < len(a); i++ { + a[i].SetInt64(t[i]) + } + + aInv := BatchInvert(a) + + assert.True(len(aInv) == len(a)) + + for i := 0; i < len(a); i++ { + if a[i].IsZero() { + assert.True(aInv[i].IsZero(), "0⁻¹ != 0") + } else { + assert.True(a[i].Mul(&a[i], &aInv[i]).IsOne(), "x * x⁻¹ != 1") + } + } + } + + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := gen() + + properties.Property("batchInvert --> x * x⁻¹ == 1", prop.ForAll( + func(tp testPairElement, r uint8) bool { + + a := make([]Element, r) + if r != 0 { + a[0] = tp.element + + } + one := One() + for i := 1; i < len(a); i++ { + a[i].Add(&a[i-1], &one) + } + + aInv := BatchInvert(a) + + 
assert.True(len(aInv) == len(a)) + + for i := 0; i < len(a); i++ { + if a[i].IsZero() { + if !aInv[i].IsZero() { + return false + } + } else { + if !a[i].Mul(&a[i], &aInv[i]).IsOne() { + return false + } + } + } + return true + }, + genA, ggen.UInt8(), + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) +} + func TestElementFromMont(t *testing.T) { t.Parallel() @@ -2300,8 +2434,6 @@ func genFull() gopter.Gen { } } -// Some utils - func (z *Element) matchVeryBigInt(aHi uint64, aInt *big.Int) error { var modulus big.Int var aIntMod big.Int diff --git a/ecc/bw6-633/fr/mimc/mimc.go b/ecc/bw6-633/fr/mimc/mimc.go index 687bd79e46..a71c0e8423 100644 --- a/ecc/bw6-633/fr/mimc/mimc.go +++ b/ecc/bw6-633/fr/mimc/mimc.go @@ -64,7 +64,7 @@ func NewMiMC() hash.Hash { // Reset resets the Hash to its initial state. func (d *digest) Reset() { d.data = nil - d.h = fr.Element{0, 0, 0, 0} + d.h.SetZero() } // Sum appends the current hash to b and returns the resulting slice. diff --git a/ecc/bw6-756/fp/element.go b/ecc/bw6-756/fp/element.go index ce4fc1f8b8..aeb6ea15a6 100644 --- a/ecc/bw6-756/fp/element.go +++ b/ecc/bw6-756/fp/element.go @@ -91,9 +91,6 @@ var qElement = Element{ qElementWord11, } -// Used for Montgomery reduction. 
(qInvNeg) q + r'.r = 1, i.e., qInvNeg = - q⁻¹ mod r -const qInvNegLsw uint64 = 18446744073709551615 - // rSquare var rSquare = Element{ 11214533042317621956, @@ -117,7 +114,8 @@ var bigIntPool = sync.Pool{ } func init() { - _modulus.SetString("366325390957376286590726555727219947825377821289246188278797409783441745356050456327989347160777465284190855125642086860525706497928518803244008749360363712553766506755227344593404398783886857865261088226271336335268413437902849", 10) + // base10: 366325390957376286590726555727219947825377821289246188278797409783441745356050456327989347160777465284190855125642086860525706497928518803244008749360363712553766506755227344593404398783886857865261088226271336335268413437902849 + _modulus.SetString("f76adbb5bb98ae2ac127e1e3568cf5c978cd2fac2ce89fbf23221455163a6ccc6ae73c42a46d9eb02c812ea04faaa0a7eb1cb3d06e646e292cd15edb646a54302aa3c258de7ded0b685e868524ec033c7e63f868400000000000000000001", 16) } // NewElement returns a new Element from a uint64 value @@ -431,7 +429,7 @@ func (z *Element) SetRandom() (*Element, error) { z[11] = binary.BigEndian.Uint64(bytes[88:96]) z[11] %= 4352613195430282 - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[11] < 4352613195430282 || (z[11] == 4352613195430282 && (z[10] < 16333450281447942351 || (z[10] == 16333450281447942351 && (z[9] < 6671956210750770825 || (z[9] == 6671956210750770825 && (z[8] < 18154628166362162086 || (z[8] == 18154628166362162086 && (z[7] < 14755673041361585881 || (z[7] == 14755673041361585881 && (z[6] < 16934317532427647658 || (z[6] == 16934317532427647658 && (z[5] < 756237273905161798 || (z[5] == 756237273905161798 && (z[4] < 16326337093237622437 || (z[4] == 16326337093237622437 && (z[3] < 4828608925799409630 || (z[3] == 4828608925799409630 && (z[2] < 15039355238879481536 || (z[2] == 15039355238879481536 && (z[1] < 3731203976813871104 || (z[1] == 3731203976813871104 && (z[0] < 1))))))))))))))))))))))) { var b uint64 @@ -461,10 +459,10 @@ 
func One() Element { // Halve sets z to z / 2 (mod p) func (z *Element) Halve() { - if z[0]&1 == 1 { + var carry uint64 + if z[0]&1 == 1 { // z = z + q - var carry uint64 z[0], carry = bits.Add64(z[0], 1, 0) z[1], carry = bits.Add64(z[1], 3731203976813871104, carry) z[2], carry = bits.Add64(z[2], 15039355238879481536, carry) @@ -479,9 +477,7 @@ func (z *Element) Halve() { z[11], _ = bits.Add64(z[11], 4352613195430282, carry) } - // z = z >> 1 - z[0] = z[0]>>1 | z[1]<<63 z[1] = z[1]>>1 | z[2]<<63 z[2] = z[2]>>1 | z[3]<<63 @@ -497,8 +493,6 @@ func (z *Element) Halve() { } -// API with assembly impl - // Mul z = x * y mod q // see https://hackmd.io/@gnark/modular_multiplication func (z *Element) Mul(x, y *Element) *Element { @@ -918,7 +912,7 @@ func _mulGeneric(z, x, y *Element) { z[11], z[10] = madd3(m, 4352613195430282, c[0], c[2], c[1]) } - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[11] < 4352613195430282 || (z[11] == 4352613195430282 && (z[10] < 16333450281447942351 || (z[10] == 16333450281447942351 && (z[9] < 6671956210750770825 || (z[9] == 6671956210750770825 && (z[8] < 18154628166362162086 || (z[8] == 18154628166362162086 && (z[7] < 14755673041361585881 || (z[7] == 14755673041361585881 && (z[6] < 16934317532427647658 || (z[6] == 16934317532427647658 && (z[5] < 756237273905161798 || (z[5] == 756237273905161798 && (z[4] < 16326337093237622437 || (z[4] == 16326337093237622437 && (z[3] < 4828608925799409630 || (z[3] == 4828608925799409630 && (z[2] < 15039355238879481536 || (z[2] == 15039355238879481536 && (z[1] < 3731203976813871104 || (z[1] == 3731203976813871104 && (z[0] < 1))))))))))))))))))))))) { var b uint64 @@ -935,233 +929,7 @@ func _mulGeneric(z, x, y *Element) { z[10], b = bits.Sub64(z[10], 16333450281447942351, b) z[11], _ = bits.Sub64(z[11], 4352613195430282, b) } -} - -func _mulWGeneric(z, x *Element, y uint64) { - - var t [12]uint64 - { - // round 0 - c1, c0 := bits.Mul64(y, x[0]) - m := c0 * 
18446744073709551615 - c2 := madd0(m, 1, c0) - c1, c0 = madd1(y, x[1], c1) - c2, t[0] = madd2(m, 3731203976813871104, c2, c0) - c1, c0 = madd1(y, x[2], c1) - c2, t[1] = madd2(m, 15039355238879481536, c2, c0) - c1, c0 = madd1(y, x[3], c1) - c2, t[2] = madd2(m, 4828608925799409630, c2, c0) - c1, c0 = madd1(y, x[4], c1) - c2, t[3] = madd2(m, 16326337093237622437, c2, c0) - c1, c0 = madd1(y, x[5], c1) - c2, t[4] = madd2(m, 756237273905161798, c2, c0) - c1, c0 = madd1(y, x[6], c1) - c2, t[5] = madd2(m, 16934317532427647658, c2, c0) - c1, c0 = madd1(y, x[7], c1) - c2, t[6] = madd2(m, 14755673041361585881, c2, c0) - c1, c0 = madd1(y, x[8], c1) - c2, t[7] = madd2(m, 18154628166362162086, c2, c0) - c1, c0 = madd1(y, x[9], c1) - c2, t[8] = madd2(m, 6671956210750770825, c2, c0) - c1, c0 = madd1(y, x[10], c1) - c2, t[9] = madd2(m, 16333450281447942351, c2, c0) - c1, c0 = madd1(y, x[11], c1) - t[11], t[10] = madd3(m, 4352613195430282, c0, c2, c1) - } - { - // round 1 - m := t[0] * 18446744073709551615 - c2 := madd0(m, 1, t[0]) - c2, t[0] = madd2(m, 3731203976813871104, c2, t[1]) - c2, t[1] = madd2(m, 15039355238879481536, c2, t[2]) - c2, t[2] = madd2(m, 4828608925799409630, c2, t[3]) - c2, t[3] = madd2(m, 16326337093237622437, c2, t[4]) - c2, t[4] = madd2(m, 756237273905161798, c2, t[5]) - c2, t[5] = madd2(m, 16934317532427647658, c2, t[6]) - c2, t[6] = madd2(m, 14755673041361585881, c2, t[7]) - c2, t[7] = madd2(m, 18154628166362162086, c2, t[8]) - c2, t[8] = madd2(m, 6671956210750770825, c2, t[9]) - c2, t[9] = madd2(m, 16333450281447942351, c2, t[10]) - t[11], t[10] = madd2(m, 4352613195430282, t[11], c2) - } - { - // round 2 - m := t[0] * 18446744073709551615 - c2 := madd0(m, 1, t[0]) - c2, t[0] = madd2(m, 3731203976813871104, c2, t[1]) - c2, t[1] = madd2(m, 15039355238879481536, c2, t[2]) - c2, t[2] = madd2(m, 4828608925799409630, c2, t[3]) - c2, t[3] = madd2(m, 16326337093237622437, c2, t[4]) - c2, t[4] = madd2(m, 756237273905161798, c2, t[5]) - c2, t[5] = madd2(m, 
16934317532427647658, c2, t[6]) - c2, t[6] = madd2(m, 14755673041361585881, c2, t[7]) - c2, t[7] = madd2(m, 18154628166362162086, c2, t[8]) - c2, t[8] = madd2(m, 6671956210750770825, c2, t[9]) - c2, t[9] = madd2(m, 16333450281447942351, c2, t[10]) - t[11], t[10] = madd2(m, 4352613195430282, t[11], c2) - } - { - // round 3 - m := t[0] * 18446744073709551615 - c2 := madd0(m, 1, t[0]) - c2, t[0] = madd2(m, 3731203976813871104, c2, t[1]) - c2, t[1] = madd2(m, 15039355238879481536, c2, t[2]) - c2, t[2] = madd2(m, 4828608925799409630, c2, t[3]) - c2, t[3] = madd2(m, 16326337093237622437, c2, t[4]) - c2, t[4] = madd2(m, 756237273905161798, c2, t[5]) - c2, t[5] = madd2(m, 16934317532427647658, c2, t[6]) - c2, t[6] = madd2(m, 14755673041361585881, c2, t[7]) - c2, t[7] = madd2(m, 18154628166362162086, c2, t[8]) - c2, t[8] = madd2(m, 6671956210750770825, c2, t[9]) - c2, t[9] = madd2(m, 16333450281447942351, c2, t[10]) - t[11], t[10] = madd2(m, 4352613195430282, t[11], c2) - } - { - // round 4 - m := t[0] * 18446744073709551615 - c2 := madd0(m, 1, t[0]) - c2, t[0] = madd2(m, 3731203976813871104, c2, t[1]) - c2, t[1] = madd2(m, 15039355238879481536, c2, t[2]) - c2, t[2] = madd2(m, 4828608925799409630, c2, t[3]) - c2, t[3] = madd2(m, 16326337093237622437, c2, t[4]) - c2, t[4] = madd2(m, 756237273905161798, c2, t[5]) - c2, t[5] = madd2(m, 16934317532427647658, c2, t[6]) - c2, t[6] = madd2(m, 14755673041361585881, c2, t[7]) - c2, t[7] = madd2(m, 18154628166362162086, c2, t[8]) - c2, t[8] = madd2(m, 6671956210750770825, c2, t[9]) - c2, t[9] = madd2(m, 16333450281447942351, c2, t[10]) - t[11], t[10] = madd2(m, 4352613195430282, t[11], c2) - } - { - // round 5 - m := t[0] * 18446744073709551615 - c2 := madd0(m, 1, t[0]) - c2, t[0] = madd2(m, 3731203976813871104, c2, t[1]) - c2, t[1] = madd2(m, 15039355238879481536, c2, t[2]) - c2, t[2] = madd2(m, 4828608925799409630, c2, t[3]) - c2, t[3] = madd2(m, 16326337093237622437, c2, t[4]) - c2, t[4] = madd2(m, 756237273905161798, c2, t[5]) - 
c2, t[5] = madd2(m, 16934317532427647658, c2, t[6]) - c2, t[6] = madd2(m, 14755673041361585881, c2, t[7]) - c2, t[7] = madd2(m, 18154628166362162086, c2, t[8]) - c2, t[8] = madd2(m, 6671956210750770825, c2, t[9]) - c2, t[9] = madd2(m, 16333450281447942351, c2, t[10]) - t[11], t[10] = madd2(m, 4352613195430282, t[11], c2) - } - { - // round 6 - m := t[0] * 18446744073709551615 - c2 := madd0(m, 1, t[0]) - c2, t[0] = madd2(m, 3731203976813871104, c2, t[1]) - c2, t[1] = madd2(m, 15039355238879481536, c2, t[2]) - c2, t[2] = madd2(m, 4828608925799409630, c2, t[3]) - c2, t[3] = madd2(m, 16326337093237622437, c2, t[4]) - c2, t[4] = madd2(m, 756237273905161798, c2, t[5]) - c2, t[5] = madd2(m, 16934317532427647658, c2, t[6]) - c2, t[6] = madd2(m, 14755673041361585881, c2, t[7]) - c2, t[7] = madd2(m, 18154628166362162086, c2, t[8]) - c2, t[8] = madd2(m, 6671956210750770825, c2, t[9]) - c2, t[9] = madd2(m, 16333450281447942351, c2, t[10]) - t[11], t[10] = madd2(m, 4352613195430282, t[11], c2) - } - { - // round 7 - m := t[0] * 18446744073709551615 - c2 := madd0(m, 1, t[0]) - c2, t[0] = madd2(m, 3731203976813871104, c2, t[1]) - c2, t[1] = madd2(m, 15039355238879481536, c2, t[2]) - c2, t[2] = madd2(m, 4828608925799409630, c2, t[3]) - c2, t[3] = madd2(m, 16326337093237622437, c2, t[4]) - c2, t[4] = madd2(m, 756237273905161798, c2, t[5]) - c2, t[5] = madd2(m, 16934317532427647658, c2, t[6]) - c2, t[6] = madd2(m, 14755673041361585881, c2, t[7]) - c2, t[7] = madd2(m, 18154628166362162086, c2, t[8]) - c2, t[8] = madd2(m, 6671956210750770825, c2, t[9]) - c2, t[9] = madd2(m, 16333450281447942351, c2, t[10]) - t[11], t[10] = madd2(m, 4352613195430282, t[11], c2) - } - { - // round 8 - m := t[0] * 18446744073709551615 - c2 := madd0(m, 1, t[0]) - c2, t[0] = madd2(m, 3731203976813871104, c2, t[1]) - c2, t[1] = madd2(m, 15039355238879481536, c2, t[2]) - c2, t[2] = madd2(m, 4828608925799409630, c2, t[3]) - c2, t[3] = madd2(m, 16326337093237622437, c2, t[4]) - c2, t[4] = madd2(m, 
756237273905161798, c2, t[5]) - c2, t[5] = madd2(m, 16934317532427647658, c2, t[6]) - c2, t[6] = madd2(m, 14755673041361585881, c2, t[7]) - c2, t[7] = madd2(m, 18154628166362162086, c2, t[8]) - c2, t[8] = madd2(m, 6671956210750770825, c2, t[9]) - c2, t[9] = madd2(m, 16333450281447942351, c2, t[10]) - t[11], t[10] = madd2(m, 4352613195430282, t[11], c2) - } - { - // round 9 - m := t[0] * 18446744073709551615 - c2 := madd0(m, 1, t[0]) - c2, t[0] = madd2(m, 3731203976813871104, c2, t[1]) - c2, t[1] = madd2(m, 15039355238879481536, c2, t[2]) - c2, t[2] = madd2(m, 4828608925799409630, c2, t[3]) - c2, t[3] = madd2(m, 16326337093237622437, c2, t[4]) - c2, t[4] = madd2(m, 756237273905161798, c2, t[5]) - c2, t[5] = madd2(m, 16934317532427647658, c2, t[6]) - c2, t[6] = madd2(m, 14755673041361585881, c2, t[7]) - c2, t[7] = madd2(m, 18154628166362162086, c2, t[8]) - c2, t[8] = madd2(m, 6671956210750770825, c2, t[9]) - c2, t[9] = madd2(m, 16333450281447942351, c2, t[10]) - t[11], t[10] = madd2(m, 4352613195430282, t[11], c2) - } - { - // round 10 - m := t[0] * 18446744073709551615 - c2 := madd0(m, 1, t[0]) - c2, t[0] = madd2(m, 3731203976813871104, c2, t[1]) - c2, t[1] = madd2(m, 15039355238879481536, c2, t[2]) - c2, t[2] = madd2(m, 4828608925799409630, c2, t[3]) - c2, t[3] = madd2(m, 16326337093237622437, c2, t[4]) - c2, t[4] = madd2(m, 756237273905161798, c2, t[5]) - c2, t[5] = madd2(m, 16934317532427647658, c2, t[6]) - c2, t[6] = madd2(m, 14755673041361585881, c2, t[7]) - c2, t[7] = madd2(m, 18154628166362162086, c2, t[8]) - c2, t[8] = madd2(m, 6671956210750770825, c2, t[9]) - c2, t[9] = madd2(m, 16333450281447942351, c2, t[10]) - t[11], t[10] = madd2(m, 4352613195430282, t[11], c2) - } - { - // round 11 - m := t[0] * 18446744073709551615 - c2 := madd0(m, 1, t[0]) - c2, z[0] = madd2(m, 3731203976813871104, c2, t[1]) - c2, z[1] = madd2(m, 15039355238879481536, c2, t[2]) - c2, z[2] = madd2(m, 4828608925799409630, c2, t[3]) - c2, z[3] = madd2(m, 16326337093237622437, c2, t[4]) 
- c2, z[4] = madd2(m, 756237273905161798, c2, t[5]) - c2, z[5] = madd2(m, 16934317532427647658, c2, t[6]) - c2, z[6] = madd2(m, 14755673041361585881, c2, t[7]) - c2, z[7] = madd2(m, 18154628166362162086, c2, t[8]) - c2, z[8] = madd2(m, 6671956210750770825, c2, t[9]) - c2, z[9] = madd2(m, 16333450281447942351, c2, t[10]) - z[11], z[10] = madd2(m, 4352613195430282, t[11], c2) - } - // if z > q → z -= q - // note: this is NOT constant time - if !(z[11] < 4352613195430282 || (z[11] == 4352613195430282 && (z[10] < 16333450281447942351 || (z[10] == 16333450281447942351 && (z[9] < 6671956210750770825 || (z[9] == 6671956210750770825 && (z[8] < 18154628166362162086 || (z[8] == 18154628166362162086 && (z[7] < 14755673041361585881 || (z[7] == 14755673041361585881 && (z[6] < 16934317532427647658 || (z[6] == 16934317532427647658 && (z[5] < 756237273905161798 || (z[5] == 756237273905161798 && (z[4] < 16326337093237622437 || (z[4] == 16326337093237622437 && (z[3] < 4828608925799409630 || (z[3] == 4828608925799409630 && (z[2] < 15039355238879481536 || (z[2] == 15039355238879481536 && (z[1] < 3731203976813871104 || (z[1] == 3731203976813871104 && (z[0] < 1))))))))))))))))))))))) { - var b uint64 - z[0], b = bits.Sub64(z[0], 1, 0) - z[1], b = bits.Sub64(z[1], 3731203976813871104, b) - z[2], b = bits.Sub64(z[2], 15039355238879481536, b) - z[3], b = bits.Sub64(z[3], 4828608925799409630, b) - z[4], b = bits.Sub64(z[4], 16326337093237622437, b) - z[5], b = bits.Sub64(z[5], 756237273905161798, b) - z[6], b = bits.Sub64(z[6], 16934317532427647658, b) - z[7], b = bits.Sub64(z[7], 14755673041361585881, b) - z[8], b = bits.Sub64(z[8], 18154628166362162086, b) - z[9], b = bits.Sub64(z[9], 6671956210750770825, b) - z[10], b = bits.Sub64(z[10], 16333450281447942351, b) - z[11], _ = bits.Sub64(z[11], 4352613195430282, b) - } } func _fromMontGeneric(z *Element) { @@ -1372,7 +1140,7 @@ func _fromMontGeneric(z *Element) { z[11] = C } - // if z > q → z -= q + // if z >= q → z -= q // note: this is 
NOT constant time if !(z[11] < 4352613195430282 || (z[11] == 4352613195430282 && (z[10] < 16333450281447942351 || (z[10] == 16333450281447942351 && (z[9] < 6671956210750770825 || (z[9] == 6671956210750770825 && (z[8] < 18154628166362162086 || (z[8] == 18154628166362162086 && (z[7] < 14755673041361585881 || (z[7] == 14755673041361585881 && (z[6] < 16934317532427647658 || (z[6] == 16934317532427647658 && (z[5] < 756237273905161798 || (z[5] == 756237273905161798 && (z[4] < 16326337093237622437 || (z[4] == 16326337093237622437 && (z[3] < 4828608925799409630 || (z[3] == 4828608925799409630 && (z[2] < 15039355238879481536 || (z[2] == 15039355238879481536 && (z[1] < 3731203976813871104 || (z[1] == 3731203976813871104 && (z[0] < 1))))))))))))))))))))))) { var b uint64 @@ -1407,7 +1175,7 @@ func _addGeneric(z, x, y *Element) { z[10], carry = bits.Add64(x[10], y[10], carry) z[11], _ = bits.Add64(x[11], y[11], carry) - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[11] < 4352613195430282 || (z[11] == 4352613195430282 && (z[10] < 16333450281447942351 || (z[10] == 16333450281447942351 && (z[9] < 6671956210750770825 || (z[9] == 6671956210750770825 && (z[8] < 18154628166362162086 || (z[8] == 18154628166362162086 && (z[7] < 14755673041361585881 || (z[7] == 14755673041361585881 && (z[6] < 16934317532427647658 || (z[6] == 16934317532427647658 && (z[5] < 756237273905161798 || (z[5] == 756237273905161798 && (z[4] < 16326337093237622437 || (z[4] == 16326337093237622437 && (z[3] < 4828608925799409630 || (z[3] == 4828608925799409630 && (z[2] < 15039355238879481536 || (z[2] == 15039355238879481536 && (z[1] < 3731203976813871104 || (z[1] == 3731203976813871104 && (z[0] < 1))))))))))))))))))))))) { var b uint64 @@ -1442,7 +1210,7 @@ func _doubleGeneric(z, x *Element) { z[10], carry = bits.Add64(x[10], x[10], carry) z[11], _ = bits.Add64(x[11], x[11], carry) - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[11] < 
4352613195430282 || (z[11] == 4352613195430282 && (z[10] < 16333450281447942351 || (z[10] == 16333450281447942351 && (z[9] < 6671956210750770825 || (z[9] == 6671956210750770825 && (z[8] < 18154628166362162086 || (z[8] == 18154628166362162086 && (z[7] < 14755673041361585881 || (z[7] == 14755673041361585881 && (z[6] < 16934317532427647658 || (z[6] == 16934317532427647658 && (z[5] < 756237273905161798 || (z[5] == 756237273905161798 && (z[4] < 16326337093237622437 || (z[4] == 16326337093237622437 && (z[3] < 4828608925799409630 || (z[3] == 4828608925799409630 && (z[2] < 15039355238879481536 || (z[2] == 15039355238879481536 && (z[1] < 3731203976813871104 || (z[1] == 3731203976813871104 && (z[0] < 1))))))))))))))))))))))) { var b uint64 @@ -1514,7 +1282,7 @@ func _negGeneric(z, x *Element) { func _reduceGeneric(z *Element) { - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[11] < 4352613195430282 || (z[11] == 4352613195430282 && (z[10] < 16333450281447942351 || (z[10] == 16333450281447942351 && (z[9] < 6671956210750770825 || (z[9] == 6671956210750770825 && (z[8] < 18154628166362162086 || (z[8] == 18154628166362162086 && (z[7] < 14755673041361585881 || (z[7] == 14755673041361585881 && (z[6] < 16934317532427647658 || (z[6] == 16934317532427647658 && (z[5] < 756237273905161798 || (z[5] == 756237273905161798 && (z[4] < 16326337093237622437 || (z[4] == 16326337093237622437 && (z[3] < 4828608925799409630 || (z[3] == 4828608925799409630 && (z[2] < 15039355238879481536 || (z[2] == 15039355238879481536 && (z[1] < 3731203976813871104 || (z[1] == 3731203976813871104 && (z[0] < 1))))))))))))))))))))))) { var b uint64 @@ -1637,18 +1405,30 @@ func (z *Element) BitLen() int { return bits.Len64(z[0]) } -// Exp z = x^exponent mod q -func (z *Element) Exp(x Element, exponent *big.Int) *Element { - var bZero big.Int - if exponent.Cmp(&bZero) == 0 { +// Exp z = xᵏ mod q +func (z *Element) Exp(x Element, k *big.Int) *Element { + if k.IsUint64() && 
k.Uint64() == 0 { return z.SetOne() } + e := k + if k.Sign() == -1 { + // negative k, we invert + // if k < 0: xᵏ mod q == (x⁻¹)ᵏ mod q + x.Inverse(&x) + + // we negate k in a temp big.Int since + // Int.Bit(_) of k and -k is different + e = bigIntPool.Get().(*big.Int) + defer bigIntPool.Put(e) + e.Neg(k) + } + z.Set(&x) - for i := exponent.BitLen() - 2; i >= 0; i-- { + for i := e.BitLen() - 2; i >= 0; i-- { z.Square(z) - if exponent.Bit(i) == 1 { + if e.Bit(i) == 1 { z.Mul(z, &x) } } @@ -2226,14 +2006,10 @@ func (z *Element) Inverse(x *Element) *Element { return z } -var qMinusTwo *big.Int //test routines can set this to an incorrect value to fail whenever inverseExp was triggered - -// inverseExp is a fallback in case the inversion algorithm failed +// inverseExp computes z = x⁻¹ mod q = x**(q-2) mod q func (z *Element) inverseExp(x *Element) *Element { - if qMinusTwo == nil { - qMinusTwo = Modulus() - qMinusTwo.Sub(qMinusTwo, big.NewInt(2)) - } + qMinusTwo := Modulus() + qMinusTwo.Sub(qMinusTwo, big.NewInt(2)) return z.Exp(*x, qMinusTwo) } @@ -2275,6 +2051,8 @@ func (z *Element) linearComb(x *Element, xC int64, y *Element, yC int64) { // montReduceSigned z = (xHi * r + x) * r⁻¹ using the SOS algorithm // Requires |xHi| < 2⁶³. Most significant bit of xHi is the sign bit. func (z *Element) montReduceSigned(x *Element, xHi uint64) { + // Used for Montgomery reduction. 
(qInvNeg) q + r'.r = 1, i.e., qInvNeg = - q⁻¹ mod r + const qInvNegLsw uint64 = 18446744073709551615 const signBitRemover = ^signBitSelector neg := xHi&signBitSelector != 0 @@ -2518,7 +2296,7 @@ func (z *Element) montReduceSigned(x *Element, xHi uint64) { z[11], z[10] = madd2(m, qElementWord11, t[i+11], C) } - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[11] < 4352613195430282 || (z[11] == 4352613195430282 && (z[10] < 16333450281447942351 || (z[10] == 16333450281447942351 && (z[9] < 6671956210750770825 || (z[9] == 6671956210750770825 && (z[8] < 18154628166362162086 || (z[8] == 18154628166362162086 && (z[7] < 14755673041361585881 || (z[7] == 14755673041361585881 && (z[6] < 16934317532427647658 || (z[6] == 16934317532427647658 && (z[5] < 756237273905161798 || (z[5] == 756237273905161798 && (z[4] < 16326337093237622437 || (z[4] == 16326337093237622437 && (z[3] < 4828608925799409630 || (z[3] == 4828608925799409630 && (z[2] < 15039355238879481536 || (z[2] == 15039355238879481536 && (z[1] < 3731203976813871104 || (z[1] == 3731203976813871104 && (z[0] < 1))))))))))))))))))))))) { var b uint64 @@ -2600,7 +2378,7 @@ func (z *Element) montReduceSignedSimpleButSlow(x *Element, xHi uint64) { z[10], c = bits.Add64(z[10], 0, c) z[11], _ = bits.Add64(z[11], 0, c) - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[11] < 4352613195430282 || (z[11] == 4352613195430282 && (z[10] < 16333450281447942351 || (z[10] == 16333450281447942351 && (z[9] < 6671956210750770825 || (z[9] == 6671956210750770825 && (z[8] < 18154628166362162086 || (z[8] == 18154628166362162086 && (z[7] < 14755673041361585881 || (z[7] == 14755673041361585881 && (z[6] < 16934317532427647658 || (z[6] == 16934317532427647658 && (z[5] < 756237273905161798 || (z[5] == 756237273905161798 && (z[4] < 16326337093237622437 || (z[4] == 16326337093237622437 && (z[3] < 4828608925799409630 || (z[3] == 4828608925799409630 && (z[2] < 15039355238879481536 
|| (z[2] == 15039355238879481536 && (z[1] < 3731203976813871104 || (z[1] == 3731203976813871104 && (z[0] < 1))))))))))))))))))))))) { var b uint64 @@ -2669,6 +2447,233 @@ func (z *Element) mulWSigned(x *Element, y int64) { } } +func _mulWGeneric(z, x *Element, y uint64) { + + var t [12]uint64 + { + // round 0 + c1, c0 := bits.Mul64(y, x[0]) + m := c0 * 18446744073709551615 + c2 := madd0(m, 1, c0) + c1, c0 = madd1(y, x[1], c1) + c2, t[0] = madd2(m, 3731203976813871104, c2, c0) + c1, c0 = madd1(y, x[2], c1) + c2, t[1] = madd2(m, 15039355238879481536, c2, c0) + c1, c0 = madd1(y, x[3], c1) + c2, t[2] = madd2(m, 4828608925799409630, c2, c0) + c1, c0 = madd1(y, x[4], c1) + c2, t[3] = madd2(m, 16326337093237622437, c2, c0) + c1, c0 = madd1(y, x[5], c1) + c2, t[4] = madd2(m, 756237273905161798, c2, c0) + c1, c0 = madd1(y, x[6], c1) + c2, t[5] = madd2(m, 16934317532427647658, c2, c0) + c1, c0 = madd1(y, x[7], c1) + c2, t[6] = madd2(m, 14755673041361585881, c2, c0) + c1, c0 = madd1(y, x[8], c1) + c2, t[7] = madd2(m, 18154628166362162086, c2, c0) + c1, c0 = madd1(y, x[9], c1) + c2, t[8] = madd2(m, 6671956210750770825, c2, c0) + c1, c0 = madd1(y, x[10], c1) + c2, t[9] = madd2(m, 16333450281447942351, c2, c0) + c1, c0 = madd1(y, x[11], c1) + t[11], t[10] = madd3(m, 4352613195430282, c0, c2, c1) + } + { + // round 1 + m := t[0] * 18446744073709551615 + c2 := madd0(m, 1, t[0]) + c2, t[0] = madd2(m, 3731203976813871104, c2, t[1]) + c2, t[1] = madd2(m, 15039355238879481536, c2, t[2]) + c2, t[2] = madd2(m, 4828608925799409630, c2, t[3]) + c2, t[3] = madd2(m, 16326337093237622437, c2, t[4]) + c2, t[4] = madd2(m, 756237273905161798, c2, t[5]) + c2, t[5] = madd2(m, 16934317532427647658, c2, t[6]) + c2, t[6] = madd2(m, 14755673041361585881, c2, t[7]) + c2, t[7] = madd2(m, 18154628166362162086, c2, t[8]) + c2, t[8] = madd2(m, 6671956210750770825, c2, t[9]) + c2, t[9] = madd2(m, 16333450281447942351, c2, t[10]) + t[11], t[10] = madd2(m, 4352613195430282, t[11], c2) + } + { + // round 2 + 
m := t[0] * 18446744073709551615 + c2 := madd0(m, 1, t[0]) + c2, t[0] = madd2(m, 3731203976813871104, c2, t[1]) + c2, t[1] = madd2(m, 15039355238879481536, c2, t[2]) + c2, t[2] = madd2(m, 4828608925799409630, c2, t[3]) + c2, t[3] = madd2(m, 16326337093237622437, c2, t[4]) + c2, t[4] = madd2(m, 756237273905161798, c2, t[5]) + c2, t[5] = madd2(m, 16934317532427647658, c2, t[6]) + c2, t[6] = madd2(m, 14755673041361585881, c2, t[7]) + c2, t[7] = madd2(m, 18154628166362162086, c2, t[8]) + c2, t[8] = madd2(m, 6671956210750770825, c2, t[9]) + c2, t[9] = madd2(m, 16333450281447942351, c2, t[10]) + t[11], t[10] = madd2(m, 4352613195430282, t[11], c2) + } + { + // round 3 + m := t[0] * 18446744073709551615 + c2 := madd0(m, 1, t[0]) + c2, t[0] = madd2(m, 3731203976813871104, c2, t[1]) + c2, t[1] = madd2(m, 15039355238879481536, c2, t[2]) + c2, t[2] = madd2(m, 4828608925799409630, c2, t[3]) + c2, t[3] = madd2(m, 16326337093237622437, c2, t[4]) + c2, t[4] = madd2(m, 756237273905161798, c2, t[5]) + c2, t[5] = madd2(m, 16934317532427647658, c2, t[6]) + c2, t[6] = madd2(m, 14755673041361585881, c2, t[7]) + c2, t[7] = madd2(m, 18154628166362162086, c2, t[8]) + c2, t[8] = madd2(m, 6671956210750770825, c2, t[9]) + c2, t[9] = madd2(m, 16333450281447942351, c2, t[10]) + t[11], t[10] = madd2(m, 4352613195430282, t[11], c2) + } + { + // round 4 + m := t[0] * 18446744073709551615 + c2 := madd0(m, 1, t[0]) + c2, t[0] = madd2(m, 3731203976813871104, c2, t[1]) + c2, t[1] = madd2(m, 15039355238879481536, c2, t[2]) + c2, t[2] = madd2(m, 4828608925799409630, c2, t[3]) + c2, t[3] = madd2(m, 16326337093237622437, c2, t[4]) + c2, t[4] = madd2(m, 756237273905161798, c2, t[5]) + c2, t[5] = madd2(m, 16934317532427647658, c2, t[6]) + c2, t[6] = madd2(m, 14755673041361585881, c2, t[7]) + c2, t[7] = madd2(m, 18154628166362162086, c2, t[8]) + c2, t[8] = madd2(m, 6671956210750770825, c2, t[9]) + c2, t[9] = madd2(m, 16333450281447942351, c2, t[10]) + t[11], t[10] = madd2(m, 4352613195430282, t[11], c2) + } 
+ { + // round 5 + m := t[0] * 18446744073709551615 + c2 := madd0(m, 1, t[0]) + c2, t[0] = madd2(m, 3731203976813871104, c2, t[1]) + c2, t[1] = madd2(m, 15039355238879481536, c2, t[2]) + c2, t[2] = madd2(m, 4828608925799409630, c2, t[3]) + c2, t[3] = madd2(m, 16326337093237622437, c2, t[4]) + c2, t[4] = madd2(m, 756237273905161798, c2, t[5]) + c2, t[5] = madd2(m, 16934317532427647658, c2, t[6]) + c2, t[6] = madd2(m, 14755673041361585881, c2, t[7]) + c2, t[7] = madd2(m, 18154628166362162086, c2, t[8]) + c2, t[8] = madd2(m, 6671956210750770825, c2, t[9]) + c2, t[9] = madd2(m, 16333450281447942351, c2, t[10]) + t[11], t[10] = madd2(m, 4352613195430282, t[11], c2) + } + { + // round 6 + m := t[0] * 18446744073709551615 + c2 := madd0(m, 1, t[0]) + c2, t[0] = madd2(m, 3731203976813871104, c2, t[1]) + c2, t[1] = madd2(m, 15039355238879481536, c2, t[2]) + c2, t[2] = madd2(m, 4828608925799409630, c2, t[3]) + c2, t[3] = madd2(m, 16326337093237622437, c2, t[4]) + c2, t[4] = madd2(m, 756237273905161798, c2, t[5]) + c2, t[5] = madd2(m, 16934317532427647658, c2, t[6]) + c2, t[6] = madd2(m, 14755673041361585881, c2, t[7]) + c2, t[7] = madd2(m, 18154628166362162086, c2, t[8]) + c2, t[8] = madd2(m, 6671956210750770825, c2, t[9]) + c2, t[9] = madd2(m, 16333450281447942351, c2, t[10]) + t[11], t[10] = madd2(m, 4352613195430282, t[11], c2) + } + { + // round 7 + m := t[0] * 18446744073709551615 + c2 := madd0(m, 1, t[0]) + c2, t[0] = madd2(m, 3731203976813871104, c2, t[1]) + c2, t[1] = madd2(m, 15039355238879481536, c2, t[2]) + c2, t[2] = madd2(m, 4828608925799409630, c2, t[3]) + c2, t[3] = madd2(m, 16326337093237622437, c2, t[4]) + c2, t[4] = madd2(m, 756237273905161798, c2, t[5]) + c2, t[5] = madd2(m, 16934317532427647658, c2, t[6]) + c2, t[6] = madd2(m, 14755673041361585881, c2, t[7]) + c2, t[7] = madd2(m, 18154628166362162086, c2, t[8]) + c2, t[8] = madd2(m, 6671956210750770825, c2, t[9]) + c2, t[9] = madd2(m, 16333450281447942351, c2, t[10]) + t[11], t[10] = madd2(m, 
4352613195430282, t[11], c2) + } + { + // round 8 + m := t[0] * 18446744073709551615 + c2 := madd0(m, 1, t[0]) + c2, t[0] = madd2(m, 3731203976813871104, c2, t[1]) + c2, t[1] = madd2(m, 15039355238879481536, c2, t[2]) + c2, t[2] = madd2(m, 4828608925799409630, c2, t[3]) + c2, t[3] = madd2(m, 16326337093237622437, c2, t[4]) + c2, t[4] = madd2(m, 756237273905161798, c2, t[5]) + c2, t[5] = madd2(m, 16934317532427647658, c2, t[6]) + c2, t[6] = madd2(m, 14755673041361585881, c2, t[7]) + c2, t[7] = madd2(m, 18154628166362162086, c2, t[8]) + c2, t[8] = madd2(m, 6671956210750770825, c2, t[9]) + c2, t[9] = madd2(m, 16333450281447942351, c2, t[10]) + t[11], t[10] = madd2(m, 4352613195430282, t[11], c2) + } + { + // round 9 + m := t[0] * 18446744073709551615 + c2 := madd0(m, 1, t[0]) + c2, t[0] = madd2(m, 3731203976813871104, c2, t[1]) + c2, t[1] = madd2(m, 15039355238879481536, c2, t[2]) + c2, t[2] = madd2(m, 4828608925799409630, c2, t[3]) + c2, t[3] = madd2(m, 16326337093237622437, c2, t[4]) + c2, t[4] = madd2(m, 756237273905161798, c2, t[5]) + c2, t[5] = madd2(m, 16934317532427647658, c2, t[6]) + c2, t[6] = madd2(m, 14755673041361585881, c2, t[7]) + c2, t[7] = madd2(m, 18154628166362162086, c2, t[8]) + c2, t[8] = madd2(m, 6671956210750770825, c2, t[9]) + c2, t[9] = madd2(m, 16333450281447942351, c2, t[10]) + t[11], t[10] = madd2(m, 4352613195430282, t[11], c2) + } + { + // round 10 + m := t[0] * 18446744073709551615 + c2 := madd0(m, 1, t[0]) + c2, t[0] = madd2(m, 3731203976813871104, c2, t[1]) + c2, t[1] = madd2(m, 15039355238879481536, c2, t[2]) + c2, t[2] = madd2(m, 4828608925799409630, c2, t[3]) + c2, t[3] = madd2(m, 16326337093237622437, c2, t[4]) + c2, t[4] = madd2(m, 756237273905161798, c2, t[5]) + c2, t[5] = madd2(m, 16934317532427647658, c2, t[6]) + c2, t[6] = madd2(m, 14755673041361585881, c2, t[7]) + c2, t[7] = madd2(m, 18154628166362162086, c2, t[8]) + c2, t[8] = madd2(m, 6671956210750770825, c2, t[9]) + c2, t[9] = madd2(m, 16333450281447942351, c2, t[10]) + 
t[11], t[10] = madd2(m, 4352613195430282, t[11], c2) + } + { + // round 11 + m := t[0] * 18446744073709551615 + c2 := madd0(m, 1, t[0]) + c2, z[0] = madd2(m, 3731203976813871104, c2, t[1]) + c2, z[1] = madd2(m, 15039355238879481536, c2, t[2]) + c2, z[2] = madd2(m, 4828608925799409630, c2, t[3]) + c2, z[3] = madd2(m, 16326337093237622437, c2, t[4]) + c2, z[4] = madd2(m, 756237273905161798, c2, t[5]) + c2, z[5] = madd2(m, 16934317532427647658, c2, t[6]) + c2, z[6] = madd2(m, 14755673041361585881, c2, t[7]) + c2, z[7] = madd2(m, 18154628166362162086, c2, t[8]) + c2, z[8] = madd2(m, 6671956210750770825, c2, t[9]) + c2, z[9] = madd2(m, 16333450281447942351, c2, t[10]) + z[11], z[10] = madd2(m, 4352613195430282, t[11], c2) + } + + // if z >= q → z -= q + // note: this is NOT constant time + if !(z[11] < 4352613195430282 || (z[11] == 4352613195430282 && (z[10] < 16333450281447942351 || (z[10] == 16333450281447942351 && (z[9] < 6671956210750770825 || (z[9] == 6671956210750770825 && (z[8] < 18154628166362162086 || (z[8] == 18154628166362162086 && (z[7] < 14755673041361585881 || (z[7] == 14755673041361585881 && (z[6] < 16934317532427647658 || (z[6] == 16934317532427647658 && (z[5] < 756237273905161798 || (z[5] == 756237273905161798 && (z[4] < 16326337093237622437 || (z[4] == 16326337093237622437 && (z[3] < 4828608925799409630 || (z[3] == 4828608925799409630 && (z[2] < 15039355238879481536 || (z[2] == 15039355238879481536 && (z[1] < 3731203976813871104 || (z[1] == 3731203976813871104 && (z[0] < 1))))))))))))))))))))))) { + var b uint64 + z[0], b = bits.Sub64(z[0], 1, 0) + z[1], b = bits.Sub64(z[1], 3731203976813871104, b) + z[2], b = bits.Sub64(z[2], 15039355238879481536, b) + z[3], b = bits.Sub64(z[3], 4828608925799409630, b) + z[4], b = bits.Sub64(z[4], 16326337093237622437, b) + z[5], b = bits.Sub64(z[5], 756237273905161798, b) + z[6], b = bits.Sub64(z[6], 16934317532427647658, b) + z[7], b = bits.Sub64(z[7], 14755673041361585881, b) + z[8], b = bits.Sub64(z[8], 
18154628166362162086, b) + z[9], b = bits.Sub64(z[9], 6671956210750770825, b) + z[10], b = bits.Sub64(z[10], 16333450281447942351, b) + z[11], _ = bits.Sub64(z[11], 4352613195430282, b) + } +} + func (z *Element) neg(x *Element, xHi uint64) uint64 { var b uint64 diff --git a/ecc/bw6-756/fp/element_ops_noasm.go b/ecc/bw6-756/fp/element_ops_noasm.go index fec6289183..7d02396a3f 100644 --- a/ecc/bw6-756/fp/element_ops_noasm.go +++ b/ecc/bw6-756/fp/element_ops_noasm.go @@ -46,7 +46,6 @@ func MulBy13(x *Element) { func Butterfly(a, b *Element) { _butterflyGeneric(a, b) } - func mul(z, x, y *Element) { _mulGeneric(z, x, y) } diff --git a/ecc/bw6-756/fp/element_test.go b/ecc/bw6-756/fp/element_test.go index 775993c4d8..2a8963aa08 100644 --- a/ecc/bw6-756/fp/element_test.go +++ b/ecc/bw6-756/fp/element_test.go @@ -22,10 +22,12 @@ import ( "fmt" "math/big" "math/bits" + + "github.com/consensys/gnark-crypto/field" mrand "math/rand" + "testing" - "github.com/consensys/gnark-crypto/field" "github.com/leanovate/gopter" ggen "github.com/leanovate/gopter/gen" "github.com/leanovate/gopter/prop" @@ -336,7 +338,6 @@ func init() { a[11]-- staticTestValues = append(staticTestValues, a) } - { a := qElement a[11]-- @@ -350,6 +351,12 @@ func init() { staticTestValues = append(staticTestValues, a) } + { + a := qElement + a[11] = 0 + staticTestValues = append(staticTestValues, a) + } + } func TestElementReduce(t *testing.T) { @@ -487,7 +494,6 @@ func TestElementInverseExp(t *testing.T) { properties = gopter.NewProperties(parameters) properties.Property("inv(0) == 0", prop.ForAll(invMatchExp, ggen.OneConstOf(testPairElement{}))) properties.TestingRun(t, gopter.ConsoleReporter(false)) - // if we have ADX instruction enabled, test both path in assembly if supportAdx { t.Log("disabling ADX") @@ -1434,8 +1440,8 @@ func TestElementSquare(t *testing.T) { specialValueTest() // if we have ADX instruction enabled, test both path in assembly if supportAdx { - supportAdx = false t.Log("disabling ADX") 
+ supportAdx = false properties.TestingRun(t, gopter.ConsoleReporter(false)) specialValueTest() supportAdx = true @@ -1513,8 +1519,8 @@ func TestElementInverse(t *testing.T) { specialValueTest() // if we have ADX instruction enabled, test both path in assembly if supportAdx { - supportAdx = false t.Log("disabling ADX") + supportAdx = false properties.TestingRun(t, gopter.ConsoleReporter(false)) specialValueTest() supportAdx = true @@ -1592,8 +1598,8 @@ func TestElementSqrt(t *testing.T) { specialValueTest() // if we have ADX instruction enabled, test both path in assembly if supportAdx { - supportAdx = false t.Log("disabling ADX") + supportAdx = false properties.TestingRun(t, gopter.ConsoleReporter(false)) specialValueTest() supportAdx = true @@ -1688,8 +1694,8 @@ func TestElementDouble(t *testing.T) { specialValueTest() // if we have ADX instruction enabled, test both path in assembly if supportAdx { - supportAdx = false t.Log("disabling ADX") + supportAdx = false properties.TestingRun(t, gopter.ConsoleReporter(false)) specialValueTest() supportAdx = true @@ -1784,8 +1790,8 @@ func TestElementNeg(t *testing.T) { specialValueTest() // if we have ADX instruction enabled, test both path in assembly if supportAdx { - supportAdx = false t.Log("disabling ADX") + supportAdx = false properties.TestingRun(t, gopter.ConsoleReporter(false)) specialValueTest() supportAdx = true @@ -2120,6 +2126,134 @@ func TestElementSetInterface(t *testing.T) { properties.TestingRun(t, gopter.ConsoleReporter(false)) } +func TestElementNegativeExp(t *testing.T) { + t.Parallel() + + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := gen() + + properties.Property("x⁻ᵏ == 1/xᵏ", prop.ForAll( + func(a, b testPairElement) bool { + + var nb, d, e big.Int + nb.Neg(&b.bigint) + + var c Element + c.Exp(a.element, &nb) + + 
d.Exp(&a.bigint, &nb, Modulus()) + + return c.FromMont().ToBigInt(&e).Cmp(&d) == 0 + }, + genA, genA, + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) +} + +func TestElementBatchInvert(t *testing.T) { + assert := require.New(t) + + t.Parallel() + + // ensure batchInvert([x]) == invert(x) + for i := int64(-1); i <= 2; i++ { + var e, eInv Element + e.SetInt64(i) + eInv.Inverse(&e) + + a := []Element{e} + aInv := BatchInvert(a) + + assert.True(aInv[0].Equal(&eInv), "batchInvert != invert") + + } + + // test x * x⁻¹ == 1 + tData := [][]int64{ + {-1, 1, 2, 3}, + {0, -1, 1, 2, 3, 0}, + {0, -1, 1, 0, 2, 3, 0}, + {-1, 1, 0, 2, 3}, + {0, 0, 1}, + {1, 0, 0}, + {0, 0, 0}, + } + + for _, t := range tData { + a := make([]Element, len(t)) + for i := 0; i < len(a); i++ { + a[i].SetInt64(t[i]) + } + + aInv := BatchInvert(a) + + assert.True(len(aInv) == len(a)) + + for i := 0; i < len(a); i++ { + if a[i].IsZero() { + assert.True(aInv[i].IsZero(), "0⁻¹ != 0") + } else { + assert.True(a[i].Mul(&a[i], &aInv[i]).IsOne(), "x * x⁻¹ != 1") + } + } + } + + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := gen() + + properties.Property("batchInvert --> x * x⁻¹ == 1", prop.ForAll( + func(tp testPairElement, r uint8) bool { + + a := make([]Element, r) + if r != 0 { + a[0] = tp.element + + } + one := One() + for i := 1; i < len(a); i++ { + a[i].Add(&a[i-1], &one) + } + + aInv := BatchInvert(a) + + assert.True(len(aInv) == len(a)) + + for i := 0; i < len(a); i++ { + if a[i].IsZero() { + if !aInv[i].IsZero() { + return false + } + } else { + if !a[i].Mul(&a[i], &aInv[i]).IsOne() { + return false + } + } + } + return true + }, + genA, ggen.UInt8(), + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) +} + func TestElementFromMont(t *testing.T) { t.Parallel() @@ -2398,8 +2532,6 @@ func 
genFull() gopter.Gen { } } -// Some utils - func (z *Element) matchVeryBigInt(aHi uint64, aInt *big.Int) error { var modulus big.Int var aIntMod big.Int diff --git a/ecc/bw6-756/fr/element.go b/ecc/bw6-756/fr/element.go index 0e8b3cd78b..97bd40aadc 100644 --- a/ecc/bw6-756/fr/element.go +++ b/ecc/bw6-756/fr/element.go @@ -79,9 +79,6 @@ var qElement = Element{ qElementWord5, } -// Used for Montgomery reduction. (qInvNeg) q + r'.r = 1, i.e., qInvNeg = - q⁻¹ mod r -const qInvNegLsw uint64 = 11045256207009841151 - // rSquare var rSquare = Element{ 13541478318970833666, @@ -99,7 +96,8 @@ var bigIntPool = sync.Pool{ } func init() { - _modulus.SetString("605248206075306171733248481581800960739847691770924913753520744034740935903401304776283802348837311170974282940417", 10) + // base10: 605248206075306171733248481581800960739847691770924913753520744034740935903401304776283802348837311170974282940417 + _modulus.SetString("3eeb0416684d19053cb5d240ed107a284059eb647102326980dc360d0a49d7fce97f76a822c00009948a20000000001", 16) } // NewElement returns a new Element from a uint64 value @@ -353,7 +351,7 @@ func (z *Element) SetRandom() (*Element, error) { z[5] = binary.BigEndian.Uint64(bytes[40:48]) z[5] %= 283357621510263184 - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[5] < 283357621510263184 || (z[5] == 283357621510263184 && (z[4] < 6038022134869067682 || (z[4] == 6038022134869067682 && (z[3] < 9513184293603517222 || (z[3] == 9513184293603517222 && (z[2] < 10956628289047010687 || (z[2] == 10956628289047010687 && (z[1] < 14886639130118979584 || (z[1] == 14886639130118979584 && (z[0] < 11045256207009841153))))))))))) { var b uint64 @@ -377,10 +375,10 @@ func One() Element { // Halve sets z to z / 2 (mod p) func (z *Element) Halve() { - if z[0]&1 == 1 { + var carry uint64 + if z[0]&1 == 1 { // z = z + q - var carry uint64 z[0], carry = bits.Add64(z[0], 11045256207009841153, 0) z[1], carry = bits.Add64(z[1], 14886639130118979584, carry) 
z[2], carry = bits.Add64(z[2], 10956628289047010687, carry) @@ -389,9 +387,7 @@ func (z *Element) Halve() { z[5], _ = bits.Add64(z[5], 283357621510263184, carry) } - // z = z >> 1 - z[0] = z[0]>>1 | z[1]<<63 z[1] = z[1]>>1 | z[2]<<63 z[2] = z[2]>>1 | z[3]<<63 @@ -401,8 +397,6 @@ func (z *Element) Halve() { } -// API with assembly impl - // Mul z = x * y mod q // see https://hackmd.io/@gnark/modular_multiplication func (z *Element) Mul(x, y *Element) *Element { @@ -570,7 +564,7 @@ func _mulGeneric(z, x, y *Element) { z[5], z[4] = madd3(m, 283357621510263184, c[0], c[2], c[1]) } - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[5] < 283357621510263184 || (z[5] == 283357621510263184 && (z[4] < 6038022134869067682 || (z[4] == 6038022134869067682 && (z[3] < 9513184293603517222 || (z[3] == 9513184293603517222 && (z[2] < 10956628289047010687 || (z[2] == 10956628289047010687 && (z[1] < 14886639130118979584 || (z[1] == 14886639130118979584 && (z[0] < 11045256207009841153))))))))))) { var b uint64 @@ -581,89 +575,7 @@ func _mulGeneric(z, x, y *Element) { z[4], b = bits.Sub64(z[4], 6038022134869067682, b) z[5], _ = bits.Sub64(z[5], 283357621510263184, b) } -} - -func _mulWGeneric(z, x *Element, y uint64) { - - var t [6]uint64 - { - // round 0 - c1, c0 := bits.Mul64(y, x[0]) - m := c0 * 11045256207009841151 - c2 := madd0(m, 11045256207009841153, c0) - c1, c0 = madd1(y, x[1], c1) - c2, t[0] = madd2(m, 14886639130118979584, c2, c0) - c1, c0 = madd1(y, x[2], c1) - c2, t[1] = madd2(m, 10956628289047010687, c2, c0) - c1, c0 = madd1(y, x[3], c1) - c2, t[2] = madd2(m, 9513184293603517222, c2, c0) - c1, c0 = madd1(y, x[4], c1) - c2, t[3] = madd2(m, 6038022134869067682, c2, c0) - c1, c0 = madd1(y, x[5], c1) - t[5], t[4] = madd3(m, 283357621510263184, c0, c2, c1) - } - { - // round 1 - m := t[0] * 11045256207009841151 - c2 := madd0(m, 11045256207009841153, t[0]) - c2, t[0] = madd2(m, 14886639130118979584, c2, t[1]) - c2, t[1] = madd2(m, 
10956628289047010687, c2, t[2]) - c2, t[2] = madd2(m, 9513184293603517222, c2, t[3]) - c2, t[3] = madd2(m, 6038022134869067682, c2, t[4]) - t[5], t[4] = madd2(m, 283357621510263184, t[5], c2) - } - { - // round 2 - m := t[0] * 11045256207009841151 - c2 := madd0(m, 11045256207009841153, t[0]) - c2, t[0] = madd2(m, 14886639130118979584, c2, t[1]) - c2, t[1] = madd2(m, 10956628289047010687, c2, t[2]) - c2, t[2] = madd2(m, 9513184293603517222, c2, t[3]) - c2, t[3] = madd2(m, 6038022134869067682, c2, t[4]) - t[5], t[4] = madd2(m, 283357621510263184, t[5], c2) - } - { - // round 3 - m := t[0] * 11045256207009841151 - c2 := madd0(m, 11045256207009841153, t[0]) - c2, t[0] = madd2(m, 14886639130118979584, c2, t[1]) - c2, t[1] = madd2(m, 10956628289047010687, c2, t[2]) - c2, t[2] = madd2(m, 9513184293603517222, c2, t[3]) - c2, t[3] = madd2(m, 6038022134869067682, c2, t[4]) - t[5], t[4] = madd2(m, 283357621510263184, t[5], c2) - } - { - // round 4 - m := t[0] * 11045256207009841151 - c2 := madd0(m, 11045256207009841153, t[0]) - c2, t[0] = madd2(m, 14886639130118979584, c2, t[1]) - c2, t[1] = madd2(m, 10956628289047010687, c2, t[2]) - c2, t[2] = madd2(m, 9513184293603517222, c2, t[3]) - c2, t[3] = madd2(m, 6038022134869067682, c2, t[4]) - t[5], t[4] = madd2(m, 283357621510263184, t[5], c2) - } - { - // round 5 - m := t[0] * 11045256207009841151 - c2 := madd0(m, 11045256207009841153, t[0]) - c2, z[0] = madd2(m, 14886639130118979584, c2, t[1]) - c2, z[1] = madd2(m, 10956628289047010687, c2, t[2]) - c2, z[2] = madd2(m, 9513184293603517222, c2, t[3]) - c2, z[3] = madd2(m, 6038022134869067682, c2, t[4]) - z[5], z[4] = madd2(m, 283357621510263184, t[5], c2) - } - // if z > q → z -= q - // note: this is NOT constant time - if !(z[5] < 283357621510263184 || (z[5] == 283357621510263184 && (z[4] < 6038022134869067682 || (z[4] == 6038022134869067682 && (z[3] < 9513184293603517222 || (z[3] == 9513184293603517222 && (z[2] < 10956628289047010687 || (z[2] == 10956628289047010687 && (z[1] < 
14886639130118979584 || (z[1] == 14886639130118979584 && (z[0] < 11045256207009841153))))))))))) { - var b uint64 - z[0], b = bits.Sub64(z[0], 11045256207009841153, 0) - z[1], b = bits.Sub64(z[1], 14886639130118979584, b) - z[2], b = bits.Sub64(z[2], 10956628289047010687, b) - z[3], b = bits.Sub64(z[3], 9513184293603517222, b) - z[4], b = bits.Sub64(z[4], 6038022134869067682, b) - z[5], _ = bits.Sub64(z[5], 283357621510263184, b) - } } func _fromMontGeneric(z *Element) { @@ -736,7 +648,7 @@ func _fromMontGeneric(z *Element) { z[5] = C } - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[5] < 283357621510263184 || (z[5] == 283357621510263184 && (z[4] < 6038022134869067682 || (z[4] == 6038022134869067682 && (z[3] < 9513184293603517222 || (z[3] == 9513184293603517222 && (z[2] < 10956628289047010687 || (z[2] == 10956628289047010687 && (z[1] < 14886639130118979584 || (z[1] == 14886639130118979584 && (z[0] < 11045256207009841153))))))))))) { var b uint64 @@ -759,7 +671,7 @@ func _addGeneric(z, x, y *Element) { z[4], carry = bits.Add64(x[4], y[4], carry) z[5], _ = bits.Add64(x[5], y[5], carry) - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[5] < 283357621510263184 || (z[5] == 283357621510263184 && (z[4] < 6038022134869067682 || (z[4] == 6038022134869067682 && (z[3] < 9513184293603517222 || (z[3] == 9513184293603517222 && (z[2] < 10956628289047010687 || (z[2] == 10956628289047010687 && (z[1] < 14886639130118979584 || (z[1] == 14886639130118979584 && (z[0] < 11045256207009841153))))))))))) { var b uint64 @@ -782,7 +694,7 @@ func _doubleGeneric(z, x *Element) { z[4], carry = bits.Add64(x[4], x[4], carry) z[5], _ = bits.Add64(x[5], x[5], carry) - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[5] < 283357621510263184 || (z[5] == 283357621510263184 && (z[4] < 6038022134869067682 || (z[4] == 6038022134869067682 && (z[3] < 9513184293603517222 || (z[3] == 
9513184293603517222 && (z[2] < 10956628289047010687 || (z[2] == 10956628289047010687 && (z[1] < 14886639130118979584 || (z[1] == 14886639130118979584 && (z[0] < 11045256207009841153))))))))))) { var b uint64 @@ -830,7 +742,7 @@ func _negGeneric(z, x *Element) { func _reduceGeneric(z *Element) { - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[5] < 283357621510263184 || (z[5] == 283357621510263184 && (z[4] < 6038022134869067682 || (z[4] == 6038022134869067682 && (z[3] < 9513184293603517222 || (z[3] == 9513184293603517222 && (z[2] < 10956628289047010687 || (z[2] == 10956628289047010687 && (z[1] < 14886639130118979584 || (z[1] == 14886639130118979584 && (z[0] < 11045256207009841153))))))))))) { var b uint64 @@ -929,18 +841,30 @@ func (z *Element) BitLen() int { return bits.Len64(z[0]) } -// Exp z = x^exponent mod q -func (z *Element) Exp(x Element, exponent *big.Int) *Element { - var bZero big.Int - if exponent.Cmp(&bZero) == 0 { +// Exp z = xᵏ mod q +func (z *Element) Exp(x Element, k *big.Int) *Element { + if k.IsUint64() && k.Uint64() == 0 { return z.SetOne() } + e := k + if k.Sign() == -1 { + // negative k, we invert + // if k < 0: xᵏ mod q == (x⁻¹)ᵏ mod q + x.Inverse(&x) + + // we negate k in a temp big.Int since + // Int.Bit(_) of k and -k is different + e = bigIntPool.Get().(*big.Int) + defer bigIntPool.Put(e) + e.Neg(k) + } + z.Set(&x) - for i := exponent.BitLen() - 2; i >= 0; i-- { + for i := e.BitLen() - 2; i >= 0; i-- { z.Square(z) - if exponent.Bit(i) == 1 { + if e.Bit(i) == 1 { z.Mul(z, &x) } } @@ -1470,14 +1394,10 @@ func (z *Element) Inverse(x *Element) *Element { return z } -var qMinusTwo *big.Int //test routines can set this to an incorrect value to fail whenever inverseExp was triggered - -// inverseExp is a fallback in case the inversion algorithm failed +// inverseExp computes z = x⁻¹ mod q = x**(q-2) mod q func (z *Element) inverseExp(x *Element) *Element { - if qMinusTwo == nil { - qMinusTwo = Modulus() - 
qMinusTwo.Sub(qMinusTwo, big.NewInt(2)) - } + qMinusTwo := Modulus() + qMinusTwo.Sub(qMinusTwo, big.NewInt(2)) return z.Exp(*x, qMinusTwo) } @@ -1519,6 +1439,8 @@ func (z *Element) linearComb(x *Element, xC int64, y *Element, yC int64) { // montReduceSigned z = (xHi * r + x) * r⁻¹ using the SOS algorithm // Requires |xHi| < 2⁶³. Most significant bit of xHi is the sign bit. func (z *Element) montReduceSigned(x *Element, xHi uint64) { + // Used for Montgomery reduction. (qInvNeg) q + r'.r = 1, i.e., qInvNeg = - q⁻¹ mod r + const qInvNegLsw uint64 = 11045256207009841151 const signBitRemover = ^signBitSelector neg := xHi&signBitSelector != 0 @@ -1612,7 +1534,7 @@ func (z *Element) montReduceSigned(x *Element, xHi uint64) { z[5], z[4] = madd2(m, qElementWord5, t[i+5], C) } - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[5] < 283357621510263184 || (z[5] == 283357621510263184 && (z[4] < 6038022134869067682 || (z[4] == 6038022134869067682 && (z[3] < 9513184293603517222 || (z[3] == 9513184293603517222 && (z[2] < 10956628289047010687 || (z[2] == 10956628289047010687 && (z[1] < 14886639130118979584 || (z[1] == 14886639130118979584 && (z[0] < 11045256207009841153))))))))))) { var b uint64 @@ -1670,7 +1592,7 @@ func (z *Element) montReduceSignedSimpleButSlow(x *Element, xHi uint64) { z[4], c = bits.Add64(z[4], 0, c) z[5], _ = bits.Add64(z[5], 0, c) - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[5] < 283357621510263184 || (z[5] == 283357621510263184 && (z[4] < 6038022134869067682 || (z[4] == 6038022134869067682 && (z[3] < 9513184293603517222 || (z[3] == 9513184293603517222 && (z[2] < 10956628289047010687 || (z[2] == 10956628289047010687 && (z[1] < 14886639130118979584 || (z[1] == 14886639130118979584 && (z[0] < 11045256207009841153))))))))))) { var b uint64 @@ -1721,6 +1643,89 @@ func (z *Element) mulWSigned(x *Element, y int64) { } } +func _mulWGeneric(z, x *Element, y uint64) { + + var t 
[6]uint64 + { + // round 0 + c1, c0 := bits.Mul64(y, x[0]) + m := c0 * 11045256207009841151 + c2 := madd0(m, 11045256207009841153, c0) + c1, c0 = madd1(y, x[1], c1) + c2, t[0] = madd2(m, 14886639130118979584, c2, c0) + c1, c0 = madd1(y, x[2], c1) + c2, t[1] = madd2(m, 10956628289047010687, c2, c0) + c1, c0 = madd1(y, x[3], c1) + c2, t[2] = madd2(m, 9513184293603517222, c2, c0) + c1, c0 = madd1(y, x[4], c1) + c2, t[3] = madd2(m, 6038022134869067682, c2, c0) + c1, c0 = madd1(y, x[5], c1) + t[5], t[4] = madd3(m, 283357621510263184, c0, c2, c1) + } + { + // round 1 + m := t[0] * 11045256207009841151 + c2 := madd0(m, 11045256207009841153, t[0]) + c2, t[0] = madd2(m, 14886639130118979584, c2, t[1]) + c2, t[1] = madd2(m, 10956628289047010687, c2, t[2]) + c2, t[2] = madd2(m, 9513184293603517222, c2, t[3]) + c2, t[3] = madd2(m, 6038022134869067682, c2, t[4]) + t[5], t[4] = madd2(m, 283357621510263184, t[5], c2) + } + { + // round 2 + m := t[0] * 11045256207009841151 + c2 := madd0(m, 11045256207009841153, t[0]) + c2, t[0] = madd2(m, 14886639130118979584, c2, t[1]) + c2, t[1] = madd2(m, 10956628289047010687, c2, t[2]) + c2, t[2] = madd2(m, 9513184293603517222, c2, t[3]) + c2, t[3] = madd2(m, 6038022134869067682, c2, t[4]) + t[5], t[4] = madd2(m, 283357621510263184, t[5], c2) + } + { + // round 3 + m := t[0] * 11045256207009841151 + c2 := madd0(m, 11045256207009841153, t[0]) + c2, t[0] = madd2(m, 14886639130118979584, c2, t[1]) + c2, t[1] = madd2(m, 10956628289047010687, c2, t[2]) + c2, t[2] = madd2(m, 9513184293603517222, c2, t[3]) + c2, t[3] = madd2(m, 6038022134869067682, c2, t[4]) + t[5], t[4] = madd2(m, 283357621510263184, t[5], c2) + } + { + // round 4 + m := t[0] * 11045256207009841151 + c2 := madd0(m, 11045256207009841153, t[0]) + c2, t[0] = madd2(m, 14886639130118979584, c2, t[1]) + c2, t[1] = madd2(m, 10956628289047010687, c2, t[2]) + c2, t[2] = madd2(m, 9513184293603517222, c2, t[3]) + c2, t[3] = madd2(m, 6038022134869067682, c2, t[4]) + t[5], t[4] = madd2(m, 
283357621510263184, t[5], c2) + } + { + // round 5 + m := t[0] * 11045256207009841151 + c2 := madd0(m, 11045256207009841153, t[0]) + c2, z[0] = madd2(m, 14886639130118979584, c2, t[1]) + c2, z[1] = madd2(m, 10956628289047010687, c2, t[2]) + c2, z[2] = madd2(m, 9513184293603517222, c2, t[3]) + c2, z[3] = madd2(m, 6038022134869067682, c2, t[4]) + z[5], z[4] = madd2(m, 283357621510263184, t[5], c2) + } + + // if z >= q → z -= q + // note: this is NOT constant time + if !(z[5] < 283357621510263184 || (z[5] == 283357621510263184 && (z[4] < 6038022134869067682 || (z[4] == 6038022134869067682 && (z[3] < 9513184293603517222 || (z[3] == 9513184293603517222 && (z[2] < 10956628289047010687 || (z[2] == 10956628289047010687 && (z[1] < 14886639130118979584 || (z[1] == 14886639130118979584 && (z[0] < 11045256207009841153))))))))))) { + var b uint64 + z[0], b = bits.Sub64(z[0], 11045256207009841153, 0) + z[1], b = bits.Sub64(z[1], 14886639130118979584, b) + z[2], b = bits.Sub64(z[2], 10956628289047010687, b) + z[3], b = bits.Sub64(z[3], 9513184293603517222, b) + z[4], b = bits.Sub64(z[4], 6038022134869067682, b) + z[5], _ = bits.Sub64(z[5], 283357621510263184, b) + } +} + func (z *Element) neg(x *Element, xHi uint64) uint64 { var b uint64 diff --git a/ecc/bw6-756/fr/element_ops_noasm.go b/ecc/bw6-756/fr/element_ops_noasm.go index ec1fac18d6..7891ac9cd3 100644 --- a/ecc/bw6-756/fr/element_ops_noasm.go +++ b/ecc/bw6-756/fr/element_ops_noasm.go @@ -46,7 +46,6 @@ func MulBy13(x *Element) { func Butterfly(a, b *Element) { _butterflyGeneric(a, b) } - func mul(z, x, y *Element) { _mulGeneric(z, x, y) } diff --git a/ecc/bw6-756/fr/element_test.go b/ecc/bw6-756/fr/element_test.go index c3fa33f4c1..3c2c23d546 100644 --- a/ecc/bw6-756/fr/element_test.go +++ b/ecc/bw6-756/fr/element_test.go @@ -22,10 +22,12 @@ import ( "fmt" "math/big" "math/bits" + + "github.com/consensys/gnark-crypto/field" mrand "math/rand" + "testing" - "github.com/consensys/gnark-crypto/field" 
"github.com/leanovate/gopter" ggen "github.com/leanovate/gopter/gen" "github.com/leanovate/gopter/prop" @@ -324,7 +326,6 @@ func init() { a[5]-- staticTestValues = append(staticTestValues, a) } - { a := qElement a[5]-- @@ -338,6 +339,12 @@ func init() { staticTestValues = append(staticTestValues, a) } + { + a := qElement + a[5] = 0 + staticTestValues = append(staticTestValues, a) + } + } func TestElementReduce(t *testing.T) { @@ -475,7 +482,6 @@ func TestElementInverseExp(t *testing.T) { properties = gopter.NewProperties(parameters) properties.Property("inv(0) == 0", prop.ForAll(invMatchExp, ggen.OneConstOf(testPairElement{}))) properties.TestingRun(t, gopter.ConsoleReporter(false)) - // if we have ADX instruction enabled, test both path in assembly if supportAdx { t.Log("disabling ADX") @@ -1422,8 +1428,8 @@ func TestElementSquare(t *testing.T) { specialValueTest() // if we have ADX instruction enabled, test both path in assembly if supportAdx { - supportAdx = false t.Log("disabling ADX") + supportAdx = false properties.TestingRun(t, gopter.ConsoleReporter(false)) specialValueTest() supportAdx = true @@ -1501,8 +1507,8 @@ func TestElementInverse(t *testing.T) { specialValueTest() // if we have ADX instruction enabled, test both path in assembly if supportAdx { - supportAdx = false t.Log("disabling ADX") + supportAdx = false properties.TestingRun(t, gopter.ConsoleReporter(false)) specialValueTest() supportAdx = true @@ -1580,8 +1586,8 @@ func TestElementSqrt(t *testing.T) { specialValueTest() // if we have ADX instruction enabled, test both path in assembly if supportAdx { - supportAdx = false t.Log("disabling ADX") + supportAdx = false properties.TestingRun(t, gopter.ConsoleReporter(false)) specialValueTest() supportAdx = true @@ -1676,8 +1682,8 @@ func TestElementDouble(t *testing.T) { specialValueTest() // if we have ADX instruction enabled, test both path in assembly if supportAdx { - supportAdx = false t.Log("disabling ADX") + supportAdx = false 
properties.TestingRun(t, gopter.ConsoleReporter(false)) specialValueTest() supportAdx = true @@ -1772,8 +1778,8 @@ func TestElementNeg(t *testing.T) { specialValueTest() // if we have ADX instruction enabled, test both path in assembly if supportAdx { - supportAdx = false t.Log("disabling ADX") + supportAdx = false properties.TestingRun(t, gopter.ConsoleReporter(false)) specialValueTest() supportAdx = true @@ -2108,6 +2114,134 @@ func TestElementSetInterface(t *testing.T) { properties.TestingRun(t, gopter.ConsoleReporter(false)) } +func TestElementNegativeExp(t *testing.T) { + t.Parallel() + + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := gen() + + properties.Property("x⁻ᵏ == 1/xᵏ", prop.ForAll( + func(a, b testPairElement) bool { + + var nb, d, e big.Int + nb.Neg(&b.bigint) + + var c Element + c.Exp(a.element, &nb) + + d.Exp(&a.bigint, &nb, Modulus()) + + return c.FromMont().ToBigInt(&e).Cmp(&d) == 0 + }, + genA, genA, + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) +} + +func TestElementBatchInvert(t *testing.T) { + assert := require.New(t) + + t.Parallel() + + // ensure batchInvert([x]) == invert(x) + for i := int64(-1); i <= 2; i++ { + var e, eInv Element + e.SetInt64(i) + eInv.Inverse(&e) + + a := []Element{e} + aInv := BatchInvert(a) + + assert.True(aInv[0].Equal(&eInv), "batchInvert != invert") + + } + + // test x * x⁻¹ == 1 + tData := [][]int64{ + {-1, 1, 2, 3}, + {0, -1, 1, 2, 3, 0}, + {0, -1, 1, 0, 2, 3, 0}, + {-1, 1, 0, 2, 3}, + {0, 0, 1}, + {1, 0, 0}, + {0, 0, 0}, + } + + for _, t := range tData { + a := make([]Element, len(t)) + for i := 0; i < len(a); i++ { + a[i].SetInt64(t[i]) + } + + aInv := BatchInvert(a) + + assert.True(len(aInv) == len(a)) + + for i := 0; i < len(a); i++ { + if a[i].IsZero() { + assert.True(aInv[i].IsZero(), "0⁻¹ != 0") + } else 
{ + assert.True(a[i].Mul(&a[i], &aInv[i]).IsOne(), "x * x⁻¹ != 1") + } + } + } + + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := gen() + + properties.Property("batchInvert --> x * x⁻¹ == 1", prop.ForAll( + func(tp testPairElement, r uint8) bool { + + a := make([]Element, r) + if r != 0 { + a[0] = tp.element + + } + one := One() + for i := 1; i < len(a); i++ { + a[i].Add(&a[i-1], &one) + } + + aInv := BatchInvert(a) + + assert.True(len(aInv) == len(a)) + + for i := 0; i < len(a); i++ { + if a[i].IsZero() { + if !aInv[i].IsZero() { + return false + } + } else { + if !a[i].Mul(&a[i], &aInv[i]).IsOne() { + return false + } + } + } + return true + }, + genA, ggen.UInt8(), + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) +} + func TestElementFromMont(t *testing.T) { t.Parallel() @@ -2314,8 +2448,6 @@ func genFull() gopter.Gen { } } -// Some utils - func (z *Element) matchVeryBigInt(aHi uint64, aInt *big.Int) error { var modulus big.Int var aIntMod big.Int diff --git a/ecc/bw6-756/fr/mimc/mimc.go b/ecc/bw6-756/fr/mimc/mimc.go index 26f6bb9e84..6b550b9271 100644 --- a/ecc/bw6-756/fr/mimc/mimc.go +++ b/ecc/bw6-756/fr/mimc/mimc.go @@ -64,7 +64,7 @@ func NewMiMC() hash.Hash { // Reset resets the Hash to its initial state. func (d *digest) Reset() { d.data = nil - d.h = fr.Element{0, 0, 0, 0} + d.h.SetZero() } // Sum appends the current hash to b and returns the resulting slice. diff --git a/ecc/bw6-761/fp/element.go b/ecc/bw6-761/fp/element.go index 1232e1315d..c6adeb08f2 100644 --- a/ecc/bw6-761/fp/element.go +++ b/ecc/bw6-761/fp/element.go @@ -91,9 +91,6 @@ var qElement = Element{ qElementWord11, } -// Used for Montgomery reduction. 
(qInvNeg) q + r'.r = 1, i.e., qInvNeg = - q⁻¹ mod r -const qInvNegLsw uint64 = 744663313386281181 - // rSquare var rSquare = Element{ 14305184132582319705, @@ -117,7 +114,8 @@ var bigIntPool = sync.Pool{ } func init() { - _modulus.SetString("6891450384315732539396789682275657542479668912536150109513790160209623422243491736087683183289411687640864567753786613451161759120554247759349511699125301598951605099378508850372543631423596795951899700429969112842764913119068299", 10) + // base10: 6891450384315732539396789682275657542479668912536150109513790160209623422243491736087683183289411687640864567753786613451161759120554247759349511699125301598951605099378508850372543631423596795951899700429969112842764913119068299 + _modulus.SetString("122e824fb83ce0ad187c94004faff3eb926186a81d14688528275ef8087be41707ba638e584e91903cebaff25b423048689c8ed12f9fd9071dcd3dc73ebff2e98a116c25667a8f8160cf8aeeaf0a437e6913e6870000082f49d00000000008b", 16) } // NewElement returns a new Element from a uint64 value @@ -431,7 +429,7 @@ func (z *Element) SetRandom() (*Element, error) { z[11] = binary.BigEndian.Uint64(bytes[88:96]) z[11] %= 81882988782276106 - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[11] < 81882988782276106 || (z[11] == 81882988782276106 && (z[10] < 15098257552581525310 || (z[10] == 15098257552581525310 && (z[9] < 13341377791855249032 || (z[9] == 13341377791855249032 && (z[8] < 5945444129596489281 || (z[8] == 5945444129596489281 && (z[7] < 8105254717682411801 || (z[7] == 8105254717682411801 && (z[6] < 274362232328168196 || (z[6] == 274362232328168196 && (z[5] < 9694500593442880912 || (z[5] == 9694500593442880912 && (z[4] < 8204665564953313070 || (z[4] == 8204665564953313070 && (z[3] < 10998096788944562424 || (z[3] == 10998096788944562424 && (z[2] < 1588918198704579639 || (z[2] == 1588918198704579639 && (z[1] < 16614129118623039618 || (z[1] == 16614129118623039618 && (z[0] < 17626244516597989515))))))))))))))))))))))) { var b uint64 @@ 
-461,10 +459,10 @@ func One() Element { // Halve sets z to z / 2 (mod p) func (z *Element) Halve() { - if z[0]&1 == 1 { + var carry uint64 + if z[0]&1 == 1 { // z = z + q - var carry uint64 z[0], carry = bits.Add64(z[0], 17626244516597989515, 0) z[1], carry = bits.Add64(z[1], 16614129118623039618, carry) z[2], carry = bits.Add64(z[2], 1588918198704579639, carry) @@ -479,9 +477,7 @@ func (z *Element) Halve() { z[11], _ = bits.Add64(z[11], 81882988782276106, carry) } - // z = z >> 1 - z[0] = z[0]>>1 | z[1]<<63 z[1] = z[1]>>1 | z[2]<<63 z[2] = z[2]>>1 | z[3]<<63 @@ -497,8 +493,6 @@ func (z *Element) Halve() { } -// API with assembly impl - // Mul z = x * y mod q // see https://hackmd.io/@gnark/modular_multiplication func (z *Element) Mul(x, y *Element) *Element { @@ -918,7 +912,7 @@ func _mulGeneric(z, x, y *Element) { z[11], z[10] = madd3(m, 81882988782276106, c[0], c[2], c[1]) } - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[11] < 81882988782276106 || (z[11] == 81882988782276106 && (z[10] < 15098257552581525310 || (z[10] == 15098257552581525310 && (z[9] < 13341377791855249032 || (z[9] == 13341377791855249032 && (z[8] < 5945444129596489281 || (z[8] == 5945444129596489281 && (z[7] < 8105254717682411801 || (z[7] == 8105254717682411801 && (z[6] < 274362232328168196 || (z[6] == 274362232328168196 && (z[5] < 9694500593442880912 || (z[5] == 9694500593442880912 && (z[4] < 8204665564953313070 || (z[4] == 8204665564953313070 && (z[3] < 10998096788944562424 || (z[3] == 10998096788944562424 && (z[2] < 1588918198704579639 || (z[2] == 1588918198704579639 && (z[1] < 16614129118623039618 || (z[1] == 16614129118623039618 && (z[0] < 17626244516597989515))))))))))))))))))))))) { var b uint64 @@ -935,233 +929,7 @@ func _mulGeneric(z, x, y *Element) { z[10], b = bits.Sub64(z[10], 15098257552581525310, b) z[11], _ = bits.Sub64(z[11], 81882988782276106, b) } -} - -func _mulWGeneric(z, x *Element, y uint64) { - - var t [12]uint64 - { - // round 0 - 
c1, c0 := bits.Mul64(y, x[0]) - m := c0 * 744663313386281181 - c2 := madd0(m, 17626244516597989515, c0) - c1, c0 = madd1(y, x[1], c1) - c2, t[0] = madd2(m, 16614129118623039618, c2, c0) - c1, c0 = madd1(y, x[2], c1) - c2, t[1] = madd2(m, 1588918198704579639, c2, c0) - c1, c0 = madd1(y, x[3], c1) - c2, t[2] = madd2(m, 10998096788944562424, c2, c0) - c1, c0 = madd1(y, x[4], c1) - c2, t[3] = madd2(m, 8204665564953313070, c2, c0) - c1, c0 = madd1(y, x[5], c1) - c2, t[4] = madd2(m, 9694500593442880912, c2, c0) - c1, c0 = madd1(y, x[6], c1) - c2, t[5] = madd2(m, 274362232328168196, c2, c0) - c1, c0 = madd1(y, x[7], c1) - c2, t[6] = madd2(m, 8105254717682411801, c2, c0) - c1, c0 = madd1(y, x[8], c1) - c2, t[7] = madd2(m, 5945444129596489281, c2, c0) - c1, c0 = madd1(y, x[9], c1) - c2, t[8] = madd2(m, 13341377791855249032, c2, c0) - c1, c0 = madd1(y, x[10], c1) - c2, t[9] = madd2(m, 15098257552581525310, c2, c0) - c1, c0 = madd1(y, x[11], c1) - t[11], t[10] = madd3(m, 81882988782276106, c0, c2, c1) - } - { - // round 1 - m := t[0] * 744663313386281181 - c2 := madd0(m, 17626244516597989515, t[0]) - c2, t[0] = madd2(m, 16614129118623039618, c2, t[1]) - c2, t[1] = madd2(m, 1588918198704579639, c2, t[2]) - c2, t[2] = madd2(m, 10998096788944562424, c2, t[3]) - c2, t[3] = madd2(m, 8204665564953313070, c2, t[4]) - c2, t[4] = madd2(m, 9694500593442880912, c2, t[5]) - c2, t[5] = madd2(m, 274362232328168196, c2, t[6]) - c2, t[6] = madd2(m, 8105254717682411801, c2, t[7]) - c2, t[7] = madd2(m, 5945444129596489281, c2, t[8]) - c2, t[8] = madd2(m, 13341377791855249032, c2, t[9]) - c2, t[9] = madd2(m, 15098257552581525310, c2, t[10]) - t[11], t[10] = madd2(m, 81882988782276106, t[11], c2) - } - { - // round 2 - m := t[0] * 744663313386281181 - c2 := madd0(m, 17626244516597989515, t[0]) - c2, t[0] = madd2(m, 16614129118623039618, c2, t[1]) - c2, t[1] = madd2(m, 1588918198704579639, c2, t[2]) - c2, t[2] = madd2(m, 10998096788944562424, c2, t[3]) - c2, t[3] = madd2(m, 8204665564953313070, 
c2, t[4]) - c2, t[4] = madd2(m, 9694500593442880912, c2, t[5]) - c2, t[5] = madd2(m, 274362232328168196, c2, t[6]) - c2, t[6] = madd2(m, 8105254717682411801, c2, t[7]) - c2, t[7] = madd2(m, 5945444129596489281, c2, t[8]) - c2, t[8] = madd2(m, 13341377791855249032, c2, t[9]) - c2, t[9] = madd2(m, 15098257552581525310, c2, t[10]) - t[11], t[10] = madd2(m, 81882988782276106, t[11], c2) - } - { - // round 3 - m := t[0] * 744663313386281181 - c2 := madd0(m, 17626244516597989515, t[0]) - c2, t[0] = madd2(m, 16614129118623039618, c2, t[1]) - c2, t[1] = madd2(m, 1588918198704579639, c2, t[2]) - c2, t[2] = madd2(m, 10998096788944562424, c2, t[3]) - c2, t[3] = madd2(m, 8204665564953313070, c2, t[4]) - c2, t[4] = madd2(m, 9694500593442880912, c2, t[5]) - c2, t[5] = madd2(m, 274362232328168196, c2, t[6]) - c2, t[6] = madd2(m, 8105254717682411801, c2, t[7]) - c2, t[7] = madd2(m, 5945444129596489281, c2, t[8]) - c2, t[8] = madd2(m, 13341377791855249032, c2, t[9]) - c2, t[9] = madd2(m, 15098257552581525310, c2, t[10]) - t[11], t[10] = madd2(m, 81882988782276106, t[11], c2) - } - { - // round 4 - m := t[0] * 744663313386281181 - c2 := madd0(m, 17626244516597989515, t[0]) - c2, t[0] = madd2(m, 16614129118623039618, c2, t[1]) - c2, t[1] = madd2(m, 1588918198704579639, c2, t[2]) - c2, t[2] = madd2(m, 10998096788944562424, c2, t[3]) - c2, t[3] = madd2(m, 8204665564953313070, c2, t[4]) - c2, t[4] = madd2(m, 9694500593442880912, c2, t[5]) - c2, t[5] = madd2(m, 274362232328168196, c2, t[6]) - c2, t[6] = madd2(m, 8105254717682411801, c2, t[7]) - c2, t[7] = madd2(m, 5945444129596489281, c2, t[8]) - c2, t[8] = madd2(m, 13341377791855249032, c2, t[9]) - c2, t[9] = madd2(m, 15098257552581525310, c2, t[10]) - t[11], t[10] = madd2(m, 81882988782276106, t[11], c2) - } - { - // round 5 - m := t[0] * 744663313386281181 - c2 := madd0(m, 17626244516597989515, t[0]) - c2, t[0] = madd2(m, 16614129118623039618, c2, t[1]) - c2, t[1] = madd2(m, 1588918198704579639, c2, t[2]) - c2, t[2] = madd2(m, 
10998096788944562424, c2, t[3]) - c2, t[3] = madd2(m, 8204665564953313070, c2, t[4]) - c2, t[4] = madd2(m, 9694500593442880912, c2, t[5]) - c2, t[5] = madd2(m, 274362232328168196, c2, t[6]) - c2, t[6] = madd2(m, 8105254717682411801, c2, t[7]) - c2, t[7] = madd2(m, 5945444129596489281, c2, t[8]) - c2, t[8] = madd2(m, 13341377791855249032, c2, t[9]) - c2, t[9] = madd2(m, 15098257552581525310, c2, t[10]) - t[11], t[10] = madd2(m, 81882988782276106, t[11], c2) - } - { - // round 6 - m := t[0] * 744663313386281181 - c2 := madd0(m, 17626244516597989515, t[0]) - c2, t[0] = madd2(m, 16614129118623039618, c2, t[1]) - c2, t[1] = madd2(m, 1588918198704579639, c2, t[2]) - c2, t[2] = madd2(m, 10998096788944562424, c2, t[3]) - c2, t[3] = madd2(m, 8204665564953313070, c2, t[4]) - c2, t[4] = madd2(m, 9694500593442880912, c2, t[5]) - c2, t[5] = madd2(m, 274362232328168196, c2, t[6]) - c2, t[6] = madd2(m, 8105254717682411801, c2, t[7]) - c2, t[7] = madd2(m, 5945444129596489281, c2, t[8]) - c2, t[8] = madd2(m, 13341377791855249032, c2, t[9]) - c2, t[9] = madd2(m, 15098257552581525310, c2, t[10]) - t[11], t[10] = madd2(m, 81882988782276106, t[11], c2) - } - { - // round 7 - m := t[0] * 744663313386281181 - c2 := madd0(m, 17626244516597989515, t[0]) - c2, t[0] = madd2(m, 16614129118623039618, c2, t[1]) - c2, t[1] = madd2(m, 1588918198704579639, c2, t[2]) - c2, t[2] = madd2(m, 10998096788944562424, c2, t[3]) - c2, t[3] = madd2(m, 8204665564953313070, c2, t[4]) - c2, t[4] = madd2(m, 9694500593442880912, c2, t[5]) - c2, t[5] = madd2(m, 274362232328168196, c2, t[6]) - c2, t[6] = madd2(m, 8105254717682411801, c2, t[7]) - c2, t[7] = madd2(m, 5945444129596489281, c2, t[8]) - c2, t[8] = madd2(m, 13341377791855249032, c2, t[9]) - c2, t[9] = madd2(m, 15098257552581525310, c2, t[10]) - t[11], t[10] = madd2(m, 81882988782276106, t[11], c2) - } - { - // round 8 - m := t[0] * 744663313386281181 - c2 := madd0(m, 17626244516597989515, t[0]) - c2, t[0] = madd2(m, 16614129118623039618, c2, t[1]) - c2, 
t[1] = madd2(m, 1588918198704579639, c2, t[2]) - c2, t[2] = madd2(m, 10998096788944562424, c2, t[3]) - c2, t[3] = madd2(m, 8204665564953313070, c2, t[4]) - c2, t[4] = madd2(m, 9694500593442880912, c2, t[5]) - c2, t[5] = madd2(m, 274362232328168196, c2, t[6]) - c2, t[6] = madd2(m, 8105254717682411801, c2, t[7]) - c2, t[7] = madd2(m, 5945444129596489281, c2, t[8]) - c2, t[8] = madd2(m, 13341377791855249032, c2, t[9]) - c2, t[9] = madd2(m, 15098257552581525310, c2, t[10]) - t[11], t[10] = madd2(m, 81882988782276106, t[11], c2) - } - { - // round 9 - m := t[0] * 744663313386281181 - c2 := madd0(m, 17626244516597989515, t[0]) - c2, t[0] = madd2(m, 16614129118623039618, c2, t[1]) - c2, t[1] = madd2(m, 1588918198704579639, c2, t[2]) - c2, t[2] = madd2(m, 10998096788944562424, c2, t[3]) - c2, t[3] = madd2(m, 8204665564953313070, c2, t[4]) - c2, t[4] = madd2(m, 9694500593442880912, c2, t[5]) - c2, t[5] = madd2(m, 274362232328168196, c2, t[6]) - c2, t[6] = madd2(m, 8105254717682411801, c2, t[7]) - c2, t[7] = madd2(m, 5945444129596489281, c2, t[8]) - c2, t[8] = madd2(m, 13341377791855249032, c2, t[9]) - c2, t[9] = madd2(m, 15098257552581525310, c2, t[10]) - t[11], t[10] = madd2(m, 81882988782276106, t[11], c2) - } - { - // round 10 - m := t[0] * 744663313386281181 - c2 := madd0(m, 17626244516597989515, t[0]) - c2, t[0] = madd2(m, 16614129118623039618, c2, t[1]) - c2, t[1] = madd2(m, 1588918198704579639, c2, t[2]) - c2, t[2] = madd2(m, 10998096788944562424, c2, t[3]) - c2, t[3] = madd2(m, 8204665564953313070, c2, t[4]) - c2, t[4] = madd2(m, 9694500593442880912, c2, t[5]) - c2, t[5] = madd2(m, 274362232328168196, c2, t[6]) - c2, t[6] = madd2(m, 8105254717682411801, c2, t[7]) - c2, t[7] = madd2(m, 5945444129596489281, c2, t[8]) - c2, t[8] = madd2(m, 13341377791855249032, c2, t[9]) - c2, t[9] = madd2(m, 15098257552581525310, c2, t[10]) - t[11], t[10] = madd2(m, 81882988782276106, t[11], c2) - } - { - // round 11 - m := t[0] * 744663313386281181 - c2 := madd0(m, 
17626244516597989515, t[0]) - c2, z[0] = madd2(m, 16614129118623039618, c2, t[1]) - c2, z[1] = madd2(m, 1588918198704579639, c2, t[2]) - c2, z[2] = madd2(m, 10998096788944562424, c2, t[3]) - c2, z[3] = madd2(m, 8204665564953313070, c2, t[4]) - c2, z[4] = madd2(m, 9694500593442880912, c2, t[5]) - c2, z[5] = madd2(m, 274362232328168196, c2, t[6]) - c2, z[6] = madd2(m, 8105254717682411801, c2, t[7]) - c2, z[7] = madd2(m, 5945444129596489281, c2, t[8]) - c2, z[8] = madd2(m, 13341377791855249032, c2, t[9]) - c2, z[9] = madd2(m, 15098257552581525310, c2, t[10]) - z[11], z[10] = madd2(m, 81882988782276106, t[11], c2) - } - // if z > q → z -= q - // note: this is NOT constant time - if !(z[11] < 81882988782276106 || (z[11] == 81882988782276106 && (z[10] < 15098257552581525310 || (z[10] == 15098257552581525310 && (z[9] < 13341377791855249032 || (z[9] == 13341377791855249032 && (z[8] < 5945444129596489281 || (z[8] == 5945444129596489281 && (z[7] < 8105254717682411801 || (z[7] == 8105254717682411801 && (z[6] < 274362232328168196 || (z[6] == 274362232328168196 && (z[5] < 9694500593442880912 || (z[5] == 9694500593442880912 && (z[4] < 8204665564953313070 || (z[4] == 8204665564953313070 && (z[3] < 10998096788944562424 || (z[3] == 10998096788944562424 && (z[2] < 1588918198704579639 || (z[2] == 1588918198704579639 && (z[1] < 16614129118623039618 || (z[1] == 16614129118623039618 && (z[0] < 17626244516597989515))))))))))))))))))))))) { - var b uint64 - z[0], b = bits.Sub64(z[0], 17626244516597989515, 0) - z[1], b = bits.Sub64(z[1], 16614129118623039618, b) - z[2], b = bits.Sub64(z[2], 1588918198704579639, b) - z[3], b = bits.Sub64(z[3], 10998096788944562424, b) - z[4], b = bits.Sub64(z[4], 8204665564953313070, b) - z[5], b = bits.Sub64(z[5], 9694500593442880912, b) - z[6], b = bits.Sub64(z[6], 274362232328168196, b) - z[7], b = bits.Sub64(z[7], 8105254717682411801, b) - z[8], b = bits.Sub64(z[8], 5945444129596489281, b) - z[9], b = bits.Sub64(z[9], 13341377791855249032, b) - z[10], b 
= bits.Sub64(z[10], 15098257552581525310, b) - z[11], _ = bits.Sub64(z[11], 81882988782276106, b) - } } func _fromMontGeneric(z *Element) { @@ -1372,7 +1140,7 @@ func _fromMontGeneric(z *Element) { z[11] = C } - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[11] < 81882988782276106 || (z[11] == 81882988782276106 && (z[10] < 15098257552581525310 || (z[10] == 15098257552581525310 && (z[9] < 13341377791855249032 || (z[9] == 13341377791855249032 && (z[8] < 5945444129596489281 || (z[8] == 5945444129596489281 && (z[7] < 8105254717682411801 || (z[7] == 8105254717682411801 && (z[6] < 274362232328168196 || (z[6] == 274362232328168196 && (z[5] < 9694500593442880912 || (z[5] == 9694500593442880912 && (z[4] < 8204665564953313070 || (z[4] == 8204665564953313070 && (z[3] < 10998096788944562424 || (z[3] == 10998096788944562424 && (z[2] < 1588918198704579639 || (z[2] == 1588918198704579639 && (z[1] < 16614129118623039618 || (z[1] == 16614129118623039618 && (z[0] < 17626244516597989515))))))))))))))))))))))) { var b uint64 @@ -1407,7 +1175,7 @@ func _addGeneric(z, x, y *Element) { z[10], carry = bits.Add64(x[10], y[10], carry) z[11], _ = bits.Add64(x[11], y[11], carry) - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[11] < 81882988782276106 || (z[11] == 81882988782276106 && (z[10] < 15098257552581525310 || (z[10] == 15098257552581525310 && (z[9] < 13341377791855249032 || (z[9] == 13341377791855249032 && (z[8] < 5945444129596489281 || (z[8] == 5945444129596489281 && (z[7] < 8105254717682411801 || (z[7] == 8105254717682411801 && (z[6] < 274362232328168196 || (z[6] == 274362232328168196 && (z[5] < 9694500593442880912 || (z[5] == 9694500593442880912 && (z[4] < 8204665564953313070 || (z[4] == 8204665564953313070 && (z[3] < 10998096788944562424 || (z[3] == 10998096788944562424 && (z[2] < 1588918198704579639 || (z[2] == 1588918198704579639 && (z[1] < 16614129118623039618 || (z[1] == 16614129118623039618 && 
(z[0] < 17626244516597989515))))))))))))))))))))))) { var b uint64 @@ -1442,7 +1210,7 @@ func _doubleGeneric(z, x *Element) { z[10], carry = bits.Add64(x[10], x[10], carry) z[11], _ = bits.Add64(x[11], x[11], carry) - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[11] < 81882988782276106 || (z[11] == 81882988782276106 && (z[10] < 15098257552581525310 || (z[10] == 15098257552581525310 && (z[9] < 13341377791855249032 || (z[9] == 13341377791855249032 && (z[8] < 5945444129596489281 || (z[8] == 5945444129596489281 && (z[7] < 8105254717682411801 || (z[7] == 8105254717682411801 && (z[6] < 274362232328168196 || (z[6] == 274362232328168196 && (z[5] < 9694500593442880912 || (z[5] == 9694500593442880912 && (z[4] < 8204665564953313070 || (z[4] == 8204665564953313070 && (z[3] < 10998096788944562424 || (z[3] == 10998096788944562424 && (z[2] < 1588918198704579639 || (z[2] == 1588918198704579639 && (z[1] < 16614129118623039618 || (z[1] == 16614129118623039618 && (z[0] < 17626244516597989515))))))))))))))))))))))) { var b uint64 @@ -1514,7 +1282,7 @@ func _negGeneric(z, x *Element) { func _reduceGeneric(z *Element) { - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[11] < 81882988782276106 || (z[11] == 81882988782276106 && (z[10] < 15098257552581525310 || (z[10] == 15098257552581525310 && (z[9] < 13341377791855249032 || (z[9] == 13341377791855249032 && (z[8] < 5945444129596489281 || (z[8] == 5945444129596489281 && (z[7] < 8105254717682411801 || (z[7] == 8105254717682411801 && (z[6] < 274362232328168196 || (z[6] == 274362232328168196 && (z[5] < 9694500593442880912 || (z[5] == 9694500593442880912 && (z[4] < 8204665564953313070 || (z[4] == 8204665564953313070 && (z[3] < 10998096788944562424 || (z[3] == 10998096788944562424 && (z[2] < 1588918198704579639 || (z[2] == 1588918198704579639 && (z[1] < 16614129118623039618 || (z[1] == 16614129118623039618 && (z[0] < 17626244516597989515))))))))))))))))))))))) { 
var b uint64 @@ -1637,18 +1405,30 @@ func (z *Element) BitLen() int { return bits.Len64(z[0]) } -// Exp z = x^exponent mod q -func (z *Element) Exp(x Element, exponent *big.Int) *Element { - var bZero big.Int - if exponent.Cmp(&bZero) == 0 { +// Exp z = xᵏ mod q +func (z *Element) Exp(x Element, k *big.Int) *Element { + if k.IsUint64() && k.Uint64() == 0 { return z.SetOne() } + e := k + if k.Sign() == -1 { + // negative k, we invert + // if k < 0: xᵏ mod q == (x⁻¹)ᵏ mod q + x.Inverse(&x) + + // we negate k in a temp big.Int since + // Int.Bit(_) of k and -k is different + e = bigIntPool.Get().(*big.Int) + defer bigIntPool.Put(e) + e.Neg(k) + } + z.Set(&x) - for i := exponent.BitLen() - 2; i >= 0; i-- { + for i := e.BitLen() - 2; i >= 0; i-- { z.Square(z) - if exponent.Bit(i) == 1 { + if e.Bit(i) == 1 { z.Mul(z, &x) } } @@ -2166,14 +1946,10 @@ func (z *Element) Inverse(x *Element) *Element { return z } -var qMinusTwo *big.Int //test routines can set this to an incorrect value to fail whenever inverseExp was triggered - -// inverseExp is a fallback in case the inversion algorithm failed +// inverseExp computes z = x⁻¹ mod q = x**(q-2) mod q func (z *Element) inverseExp(x *Element) *Element { - if qMinusTwo == nil { - qMinusTwo = Modulus() - qMinusTwo.Sub(qMinusTwo, big.NewInt(2)) - } + qMinusTwo := Modulus() + qMinusTwo.Sub(qMinusTwo, big.NewInt(2)) return z.Exp(*x, qMinusTwo) } @@ -2215,6 +1991,8 @@ func (z *Element) linearComb(x *Element, xC int64, y *Element, yC int64) { // montReduceSigned z = (xHi * r + x) * r⁻¹ using the SOS algorithm // Requires |xHi| < 2⁶³. Most significant bit of xHi is the sign bit. func (z *Element) montReduceSigned(x *Element, xHi uint64) { + // Used for Montgomery reduction. 
(qInvNeg) q + r'.r = 1, i.e., qInvNeg = - q⁻¹ mod r + const qInvNegLsw uint64 = 744663313386281181 const signBitRemover = ^signBitSelector neg := xHi&signBitSelector != 0 @@ -2458,7 +2236,7 @@ func (z *Element) montReduceSigned(x *Element, xHi uint64) { z[11], z[10] = madd2(m, qElementWord11, t[i+11], C) } - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[11] < 81882988782276106 || (z[11] == 81882988782276106 && (z[10] < 15098257552581525310 || (z[10] == 15098257552581525310 && (z[9] < 13341377791855249032 || (z[9] == 13341377791855249032 && (z[8] < 5945444129596489281 || (z[8] == 5945444129596489281 && (z[7] < 8105254717682411801 || (z[7] == 8105254717682411801 && (z[6] < 274362232328168196 || (z[6] == 274362232328168196 && (z[5] < 9694500593442880912 || (z[5] == 9694500593442880912 && (z[4] < 8204665564953313070 || (z[4] == 8204665564953313070 && (z[3] < 10998096788944562424 || (z[3] == 10998096788944562424 && (z[2] < 1588918198704579639 || (z[2] == 1588918198704579639 && (z[1] < 16614129118623039618 || (z[1] == 16614129118623039618 && (z[0] < 17626244516597989515))))))))))))))))))))))) { var b uint64 @@ -2540,7 +2318,7 @@ func (z *Element) montReduceSignedSimpleButSlow(x *Element, xHi uint64) { z[10], c = bits.Add64(z[10], 0, c) z[11], _ = bits.Add64(z[11], 0, c) - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[11] < 81882988782276106 || (z[11] == 81882988782276106 && (z[10] < 15098257552581525310 || (z[10] == 15098257552581525310 && (z[9] < 13341377791855249032 || (z[9] == 13341377791855249032 && (z[8] < 5945444129596489281 || (z[8] == 5945444129596489281 && (z[7] < 8105254717682411801 || (z[7] == 8105254717682411801 && (z[6] < 274362232328168196 || (z[6] == 274362232328168196 && (z[5] < 9694500593442880912 || (z[5] == 9694500593442880912 && (z[4] < 8204665564953313070 || (z[4] == 8204665564953313070 && (z[3] < 10998096788944562424 || (z[3] == 10998096788944562424 && (z[2] < 
1588918198704579639 || (z[2] == 1588918198704579639 && (z[1] < 16614129118623039618 || (z[1] == 16614129118623039618 && (z[0] < 17626244516597989515))))))))))))))))))))))) { var b uint64 @@ -2609,6 +2387,233 @@ func (z *Element) mulWSigned(x *Element, y int64) { } } +func _mulWGeneric(z, x *Element, y uint64) { + + var t [12]uint64 + { + // round 0 + c1, c0 := bits.Mul64(y, x[0]) + m := c0 * 744663313386281181 + c2 := madd0(m, 17626244516597989515, c0) + c1, c0 = madd1(y, x[1], c1) + c2, t[0] = madd2(m, 16614129118623039618, c2, c0) + c1, c0 = madd1(y, x[2], c1) + c2, t[1] = madd2(m, 1588918198704579639, c2, c0) + c1, c0 = madd1(y, x[3], c1) + c2, t[2] = madd2(m, 10998096788944562424, c2, c0) + c1, c0 = madd1(y, x[4], c1) + c2, t[3] = madd2(m, 8204665564953313070, c2, c0) + c1, c0 = madd1(y, x[5], c1) + c2, t[4] = madd2(m, 9694500593442880912, c2, c0) + c1, c0 = madd1(y, x[6], c1) + c2, t[5] = madd2(m, 274362232328168196, c2, c0) + c1, c0 = madd1(y, x[7], c1) + c2, t[6] = madd2(m, 8105254717682411801, c2, c0) + c1, c0 = madd1(y, x[8], c1) + c2, t[7] = madd2(m, 5945444129596489281, c2, c0) + c1, c0 = madd1(y, x[9], c1) + c2, t[8] = madd2(m, 13341377791855249032, c2, c0) + c1, c0 = madd1(y, x[10], c1) + c2, t[9] = madd2(m, 15098257552581525310, c2, c0) + c1, c0 = madd1(y, x[11], c1) + t[11], t[10] = madd3(m, 81882988782276106, c0, c2, c1) + } + { + // round 1 + m := t[0] * 744663313386281181 + c2 := madd0(m, 17626244516597989515, t[0]) + c2, t[0] = madd2(m, 16614129118623039618, c2, t[1]) + c2, t[1] = madd2(m, 1588918198704579639, c2, t[2]) + c2, t[2] = madd2(m, 10998096788944562424, c2, t[3]) + c2, t[3] = madd2(m, 8204665564953313070, c2, t[4]) + c2, t[4] = madd2(m, 9694500593442880912, c2, t[5]) + c2, t[5] = madd2(m, 274362232328168196, c2, t[6]) + c2, t[6] = madd2(m, 8105254717682411801, c2, t[7]) + c2, t[7] = madd2(m, 5945444129596489281, c2, t[8]) + c2, t[8] = madd2(m, 13341377791855249032, c2, t[9]) + c2, t[9] = madd2(m, 15098257552581525310, c2, t[10]) + 
t[11], t[10] = madd2(m, 81882988782276106, t[11], c2) + } + { + // round 2 + m := t[0] * 744663313386281181 + c2 := madd0(m, 17626244516597989515, t[0]) + c2, t[0] = madd2(m, 16614129118623039618, c2, t[1]) + c2, t[1] = madd2(m, 1588918198704579639, c2, t[2]) + c2, t[2] = madd2(m, 10998096788944562424, c2, t[3]) + c2, t[3] = madd2(m, 8204665564953313070, c2, t[4]) + c2, t[4] = madd2(m, 9694500593442880912, c2, t[5]) + c2, t[5] = madd2(m, 274362232328168196, c2, t[6]) + c2, t[6] = madd2(m, 8105254717682411801, c2, t[7]) + c2, t[7] = madd2(m, 5945444129596489281, c2, t[8]) + c2, t[8] = madd2(m, 13341377791855249032, c2, t[9]) + c2, t[9] = madd2(m, 15098257552581525310, c2, t[10]) + t[11], t[10] = madd2(m, 81882988782276106, t[11], c2) + } + { + // round 3 + m := t[0] * 744663313386281181 + c2 := madd0(m, 17626244516597989515, t[0]) + c2, t[0] = madd2(m, 16614129118623039618, c2, t[1]) + c2, t[1] = madd2(m, 1588918198704579639, c2, t[2]) + c2, t[2] = madd2(m, 10998096788944562424, c2, t[3]) + c2, t[3] = madd2(m, 8204665564953313070, c2, t[4]) + c2, t[4] = madd2(m, 9694500593442880912, c2, t[5]) + c2, t[5] = madd2(m, 274362232328168196, c2, t[6]) + c2, t[6] = madd2(m, 8105254717682411801, c2, t[7]) + c2, t[7] = madd2(m, 5945444129596489281, c2, t[8]) + c2, t[8] = madd2(m, 13341377791855249032, c2, t[9]) + c2, t[9] = madd2(m, 15098257552581525310, c2, t[10]) + t[11], t[10] = madd2(m, 81882988782276106, t[11], c2) + } + { + // round 4 + m := t[0] * 744663313386281181 + c2 := madd0(m, 17626244516597989515, t[0]) + c2, t[0] = madd2(m, 16614129118623039618, c2, t[1]) + c2, t[1] = madd2(m, 1588918198704579639, c2, t[2]) + c2, t[2] = madd2(m, 10998096788944562424, c2, t[3]) + c2, t[3] = madd2(m, 8204665564953313070, c2, t[4]) + c2, t[4] = madd2(m, 9694500593442880912, c2, t[5]) + c2, t[5] = madd2(m, 274362232328168196, c2, t[6]) + c2, t[6] = madd2(m, 8105254717682411801, c2, t[7]) + c2, t[7] = madd2(m, 5945444129596489281, c2, t[8]) + c2, t[8] = madd2(m, 13341377791855249032, 
c2, t[9]) + c2, t[9] = madd2(m, 15098257552581525310, c2, t[10]) + t[11], t[10] = madd2(m, 81882988782276106, t[11], c2) + } + { + // round 5 + m := t[0] * 744663313386281181 + c2 := madd0(m, 17626244516597989515, t[0]) + c2, t[0] = madd2(m, 16614129118623039618, c2, t[1]) + c2, t[1] = madd2(m, 1588918198704579639, c2, t[2]) + c2, t[2] = madd2(m, 10998096788944562424, c2, t[3]) + c2, t[3] = madd2(m, 8204665564953313070, c2, t[4]) + c2, t[4] = madd2(m, 9694500593442880912, c2, t[5]) + c2, t[5] = madd2(m, 274362232328168196, c2, t[6]) + c2, t[6] = madd2(m, 8105254717682411801, c2, t[7]) + c2, t[7] = madd2(m, 5945444129596489281, c2, t[8]) + c2, t[8] = madd2(m, 13341377791855249032, c2, t[9]) + c2, t[9] = madd2(m, 15098257552581525310, c2, t[10]) + t[11], t[10] = madd2(m, 81882988782276106, t[11], c2) + } + { + // round 6 + m := t[0] * 744663313386281181 + c2 := madd0(m, 17626244516597989515, t[0]) + c2, t[0] = madd2(m, 16614129118623039618, c2, t[1]) + c2, t[1] = madd2(m, 1588918198704579639, c2, t[2]) + c2, t[2] = madd2(m, 10998096788944562424, c2, t[3]) + c2, t[3] = madd2(m, 8204665564953313070, c2, t[4]) + c2, t[4] = madd2(m, 9694500593442880912, c2, t[5]) + c2, t[5] = madd2(m, 274362232328168196, c2, t[6]) + c2, t[6] = madd2(m, 8105254717682411801, c2, t[7]) + c2, t[7] = madd2(m, 5945444129596489281, c2, t[8]) + c2, t[8] = madd2(m, 13341377791855249032, c2, t[9]) + c2, t[9] = madd2(m, 15098257552581525310, c2, t[10]) + t[11], t[10] = madd2(m, 81882988782276106, t[11], c2) + } + { + // round 7 + m := t[0] * 744663313386281181 + c2 := madd0(m, 17626244516597989515, t[0]) + c2, t[0] = madd2(m, 16614129118623039618, c2, t[1]) + c2, t[1] = madd2(m, 1588918198704579639, c2, t[2]) + c2, t[2] = madd2(m, 10998096788944562424, c2, t[3]) + c2, t[3] = madd2(m, 8204665564953313070, c2, t[4]) + c2, t[4] = madd2(m, 9694500593442880912, c2, t[5]) + c2, t[5] = madd2(m, 274362232328168196, c2, t[6]) + c2, t[6] = madd2(m, 8105254717682411801, c2, t[7]) + c2, t[7] = madd2(m, 
5945444129596489281, c2, t[8]) + c2, t[8] = madd2(m, 13341377791855249032, c2, t[9]) + c2, t[9] = madd2(m, 15098257552581525310, c2, t[10]) + t[11], t[10] = madd2(m, 81882988782276106, t[11], c2) + } + { + // round 8 + m := t[0] * 744663313386281181 + c2 := madd0(m, 17626244516597989515, t[0]) + c2, t[0] = madd2(m, 16614129118623039618, c2, t[1]) + c2, t[1] = madd2(m, 1588918198704579639, c2, t[2]) + c2, t[2] = madd2(m, 10998096788944562424, c2, t[3]) + c2, t[3] = madd2(m, 8204665564953313070, c2, t[4]) + c2, t[4] = madd2(m, 9694500593442880912, c2, t[5]) + c2, t[5] = madd2(m, 274362232328168196, c2, t[6]) + c2, t[6] = madd2(m, 8105254717682411801, c2, t[7]) + c2, t[7] = madd2(m, 5945444129596489281, c2, t[8]) + c2, t[8] = madd2(m, 13341377791855249032, c2, t[9]) + c2, t[9] = madd2(m, 15098257552581525310, c2, t[10]) + t[11], t[10] = madd2(m, 81882988782276106, t[11], c2) + } + { + // round 9 + m := t[0] * 744663313386281181 + c2 := madd0(m, 17626244516597989515, t[0]) + c2, t[0] = madd2(m, 16614129118623039618, c2, t[1]) + c2, t[1] = madd2(m, 1588918198704579639, c2, t[2]) + c2, t[2] = madd2(m, 10998096788944562424, c2, t[3]) + c2, t[3] = madd2(m, 8204665564953313070, c2, t[4]) + c2, t[4] = madd2(m, 9694500593442880912, c2, t[5]) + c2, t[5] = madd2(m, 274362232328168196, c2, t[6]) + c2, t[6] = madd2(m, 8105254717682411801, c2, t[7]) + c2, t[7] = madd2(m, 5945444129596489281, c2, t[8]) + c2, t[8] = madd2(m, 13341377791855249032, c2, t[9]) + c2, t[9] = madd2(m, 15098257552581525310, c2, t[10]) + t[11], t[10] = madd2(m, 81882988782276106, t[11], c2) + } + { + // round 10 + m := t[0] * 744663313386281181 + c2 := madd0(m, 17626244516597989515, t[0]) + c2, t[0] = madd2(m, 16614129118623039618, c2, t[1]) + c2, t[1] = madd2(m, 1588918198704579639, c2, t[2]) + c2, t[2] = madd2(m, 10998096788944562424, c2, t[3]) + c2, t[3] = madd2(m, 8204665564953313070, c2, t[4]) + c2, t[4] = madd2(m, 9694500593442880912, c2, t[5]) + c2, t[5] = madd2(m, 274362232328168196, c2, t[6]) + c2, 
t[6] = madd2(m, 8105254717682411801, c2, t[7]) + c2, t[7] = madd2(m, 5945444129596489281, c2, t[8]) + c2, t[8] = madd2(m, 13341377791855249032, c2, t[9]) + c2, t[9] = madd2(m, 15098257552581525310, c2, t[10]) + t[11], t[10] = madd2(m, 81882988782276106, t[11], c2) + } + { + // round 11 + m := t[0] * 744663313386281181 + c2 := madd0(m, 17626244516597989515, t[0]) + c2, z[0] = madd2(m, 16614129118623039618, c2, t[1]) + c2, z[1] = madd2(m, 1588918198704579639, c2, t[2]) + c2, z[2] = madd2(m, 10998096788944562424, c2, t[3]) + c2, z[3] = madd2(m, 8204665564953313070, c2, t[4]) + c2, z[4] = madd2(m, 9694500593442880912, c2, t[5]) + c2, z[5] = madd2(m, 274362232328168196, c2, t[6]) + c2, z[6] = madd2(m, 8105254717682411801, c2, t[7]) + c2, z[7] = madd2(m, 5945444129596489281, c2, t[8]) + c2, z[8] = madd2(m, 13341377791855249032, c2, t[9]) + c2, z[9] = madd2(m, 15098257552581525310, c2, t[10]) + z[11], z[10] = madd2(m, 81882988782276106, t[11], c2) + } + + // if z >= q → z -= q + // note: this is NOT constant time + if !(z[11] < 81882988782276106 || (z[11] == 81882988782276106 && (z[10] < 15098257552581525310 || (z[10] == 15098257552581525310 && (z[9] < 13341377791855249032 || (z[9] == 13341377791855249032 && (z[8] < 5945444129596489281 || (z[8] == 5945444129596489281 && (z[7] < 8105254717682411801 || (z[7] == 8105254717682411801 && (z[6] < 274362232328168196 || (z[6] == 274362232328168196 && (z[5] < 9694500593442880912 || (z[5] == 9694500593442880912 && (z[4] < 8204665564953313070 || (z[4] == 8204665564953313070 && (z[3] < 10998096788944562424 || (z[3] == 10998096788944562424 && (z[2] < 1588918198704579639 || (z[2] == 1588918198704579639 && (z[1] < 16614129118623039618 || (z[1] == 16614129118623039618 && (z[0] < 17626244516597989515))))))))))))))))))))))) { + var b uint64 + z[0], b = bits.Sub64(z[0], 17626244516597989515, 0) + z[1], b = bits.Sub64(z[1], 16614129118623039618, b) + z[2], b = bits.Sub64(z[2], 1588918198704579639, b) + z[3], b = bits.Sub64(z[3], 
10998096788944562424, b) + z[4], b = bits.Sub64(z[4], 8204665564953313070, b) + z[5], b = bits.Sub64(z[5], 9694500593442880912, b) + z[6], b = bits.Sub64(z[6], 274362232328168196, b) + z[7], b = bits.Sub64(z[7], 8105254717682411801, b) + z[8], b = bits.Sub64(z[8], 5945444129596489281, b) + z[9], b = bits.Sub64(z[9], 13341377791855249032, b) + z[10], b = bits.Sub64(z[10], 15098257552581525310, b) + z[11], _ = bits.Sub64(z[11], 81882988782276106, b) + } +} + func (z *Element) neg(x *Element, xHi uint64) uint64 { var b uint64 diff --git a/ecc/bw6-761/fp/element_ops_noasm.go b/ecc/bw6-761/fp/element_ops_noasm.go index fec6289183..7d02396a3f 100644 --- a/ecc/bw6-761/fp/element_ops_noasm.go +++ b/ecc/bw6-761/fp/element_ops_noasm.go @@ -46,7 +46,6 @@ func MulBy13(x *Element) { func Butterfly(a, b *Element) { _butterflyGeneric(a, b) } - func mul(z, x, y *Element) { _mulGeneric(z, x, y) } diff --git a/ecc/bw6-761/fp/element_test.go b/ecc/bw6-761/fp/element_test.go index 548569a653..4314224e94 100644 --- a/ecc/bw6-761/fp/element_test.go +++ b/ecc/bw6-761/fp/element_test.go @@ -22,10 +22,12 @@ import ( "fmt" "math/big" "math/bits" + + "github.com/consensys/gnark-crypto/field" mrand "math/rand" + "testing" - "github.com/consensys/gnark-crypto/field" "github.com/leanovate/gopter" ggen "github.com/leanovate/gopter/gen" "github.com/leanovate/gopter/prop" @@ -336,7 +338,6 @@ func init() { a[11]-- staticTestValues = append(staticTestValues, a) } - { a := qElement a[11]-- @@ -350,6 +351,12 @@ func init() { staticTestValues = append(staticTestValues, a) } + { + a := qElement + a[11] = 0 + staticTestValues = append(staticTestValues, a) + } + } func TestElementReduce(t *testing.T) { @@ -487,7 +494,6 @@ func TestElementInverseExp(t *testing.T) { properties = gopter.NewProperties(parameters) properties.Property("inv(0) == 0", prop.ForAll(invMatchExp, ggen.OneConstOf(testPairElement{}))) properties.TestingRun(t, gopter.ConsoleReporter(false)) - // if we have ADX instruction enabled, test 
both path in assembly if supportAdx { t.Log("disabling ADX") @@ -1434,8 +1440,8 @@ func TestElementSquare(t *testing.T) { specialValueTest() // if we have ADX instruction enabled, test both path in assembly if supportAdx { - supportAdx = false t.Log("disabling ADX") + supportAdx = false properties.TestingRun(t, gopter.ConsoleReporter(false)) specialValueTest() supportAdx = true @@ -1513,8 +1519,8 @@ func TestElementInverse(t *testing.T) { specialValueTest() // if we have ADX instruction enabled, test both path in assembly if supportAdx { - supportAdx = false t.Log("disabling ADX") + supportAdx = false properties.TestingRun(t, gopter.ConsoleReporter(false)) specialValueTest() supportAdx = true @@ -1592,8 +1598,8 @@ func TestElementSqrt(t *testing.T) { specialValueTest() // if we have ADX instruction enabled, test both path in assembly if supportAdx { - supportAdx = false t.Log("disabling ADX") + supportAdx = false properties.TestingRun(t, gopter.ConsoleReporter(false)) specialValueTest() supportAdx = true @@ -1688,8 +1694,8 @@ func TestElementDouble(t *testing.T) { specialValueTest() // if we have ADX instruction enabled, test both path in assembly if supportAdx { - supportAdx = false t.Log("disabling ADX") + supportAdx = false properties.TestingRun(t, gopter.ConsoleReporter(false)) specialValueTest() supportAdx = true @@ -1784,8 +1790,8 @@ func TestElementNeg(t *testing.T) { specialValueTest() // if we have ADX instruction enabled, test both path in assembly if supportAdx { - supportAdx = false t.Log("disabling ADX") + supportAdx = false properties.TestingRun(t, gopter.ConsoleReporter(false)) specialValueTest() supportAdx = true @@ -2120,6 +2126,134 @@ func TestElementSetInterface(t *testing.T) { properties.TestingRun(t, gopter.ConsoleReporter(false)) } +func TestElementNegativeExp(t *testing.T) { + t.Parallel() + + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + 
parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := gen() + + properties.Property("x⁻ᵏ == 1/xᵏ", prop.ForAll( + func(a, b testPairElement) bool { + + var nb, d, e big.Int + nb.Neg(&b.bigint) + + var c Element + c.Exp(a.element, &nb) + + d.Exp(&a.bigint, &nb, Modulus()) + + return c.FromMont().ToBigInt(&e).Cmp(&d) == 0 + }, + genA, genA, + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) +} + +func TestElementBatchInvert(t *testing.T) { + assert := require.New(t) + + t.Parallel() + + // ensure batchInvert([x]) == invert(x) + for i := int64(-1); i <= 2; i++ { + var e, eInv Element + e.SetInt64(i) + eInv.Inverse(&e) + + a := []Element{e} + aInv := BatchInvert(a) + + assert.True(aInv[0].Equal(&eInv), "batchInvert != invert") + + } + + // test x * x⁻¹ == 1 + tData := [][]int64{ + {-1, 1, 2, 3}, + {0, -1, 1, 2, 3, 0}, + {0, -1, 1, 0, 2, 3, 0}, + {-1, 1, 0, 2, 3}, + {0, 0, 1}, + {1, 0, 0}, + {0, 0, 0}, + } + + for _, t := range tData { + a := make([]Element, len(t)) + for i := 0; i < len(a); i++ { + a[i].SetInt64(t[i]) + } + + aInv := BatchInvert(a) + + assert.True(len(aInv) == len(a)) + + for i := 0; i < len(a); i++ { + if a[i].IsZero() { + assert.True(aInv[i].IsZero(), "0⁻¹ != 0") + } else { + assert.True(a[i].Mul(&a[i], &aInv[i]).IsOne(), "x * x⁻¹ != 1") + } + } + } + + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := gen() + + properties.Property("batchInvert --> x * x⁻¹ == 1", prop.ForAll( + func(tp testPairElement, r uint8) bool { + + a := make([]Element, r) + if r != 0 { + a[0] = tp.element + + } + one := One() + for i := 1; i < len(a); i++ { + a[i].Add(&a[i-1], &one) + } + + aInv := BatchInvert(a) + + assert.True(len(aInv) == len(a)) + + for i := 0; i < len(a); i++ { + if a[i].IsZero() { + if !aInv[i].IsZero() { + 
return false + } + } else { + if !a[i].Mul(&a[i], &aInv[i]).IsOne() { + return false + } + } + } + return true + }, + genA, ggen.UInt8(), + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) +} + func TestElementFromMont(t *testing.T) { t.Parallel() @@ -2398,8 +2532,6 @@ func genFull() gopter.Gen { } } -// Some utils - func (z *Element) matchVeryBigInt(aHi uint64, aInt *big.Int) error { var modulus big.Int var aIntMod big.Int diff --git a/ecc/bw6-761/fr/element.go b/ecc/bw6-761/fr/element.go index 067ba73d91..9f18c0210a 100644 --- a/ecc/bw6-761/fr/element.go +++ b/ecc/bw6-761/fr/element.go @@ -79,9 +79,6 @@ var qElement = Element{ qElementWord5, } -// Used for Montgomery reduction. (qInvNeg) q + r'.r = 1, i.e., qInvNeg = - q⁻¹ mod r -const qInvNegLsw uint64 = 9586122913090633727 - // rSquare var rSquare = Element{ 13224372171368877346, @@ -99,7 +96,8 @@ var bigIntPool = sync.Pool{ } func init() { - _modulus.SetString("258664426012969094010652733694893533536393512754914660539884262666720468348340822774968888139573360124440321458177", 10) + // base10: 258664426012969094010652733694893533536393512754914660539884262666720468348340822774968888139573360124440321458177 + _modulus.SetString("1ae3a4617c510eac63b05c06ca1493b1a22d9f300f5138f1ef3622fba094800170b5d44300000008508c00000000001", 16) } // NewElement returns a new Element from a uint64 value @@ -353,7 +351,7 @@ func (z *Element) SetRandom() (*Element, error) { z[5] = binary.BigEndian.Uint64(bytes[40:48]) z[5] %= 121098312706494698 - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[5] < 121098312706494698 || (z[5] == 121098312706494698 && (z[4] < 14284016967150029115 || (z[4] == 14284016967150029115 && (z[3] < 1883307231910630287 || (z[3] == 1883307231910630287 && (z[2] < 2230234197602682880 || (z[2] == 2230234197602682880 && (z[1] < 1660523435060625408 || (z[1] == 1660523435060625408 && (z[0] < 9586122913090633729))))))))))) { var b uint64 @@ -377,10 +375,10 @@ func 
One() Element { // Halve sets z to z / 2 (mod p) func (z *Element) Halve() { - if z[0]&1 == 1 { + var carry uint64 + if z[0]&1 == 1 { // z = z + q - var carry uint64 z[0], carry = bits.Add64(z[0], 9586122913090633729, 0) z[1], carry = bits.Add64(z[1], 1660523435060625408, carry) z[2], carry = bits.Add64(z[2], 2230234197602682880, carry) @@ -389,9 +387,7 @@ func (z *Element) Halve() { z[5], _ = bits.Add64(z[5], 121098312706494698, carry) } - // z = z >> 1 - z[0] = z[0]>>1 | z[1]<<63 z[1] = z[1]>>1 | z[2]<<63 z[2] = z[2]>>1 | z[3]<<63 @@ -401,8 +397,6 @@ func (z *Element) Halve() { } -// API with assembly impl - // Mul z = x * y mod q // see https://hackmd.io/@gnark/modular_multiplication func (z *Element) Mul(x, y *Element) *Element { @@ -570,7 +564,7 @@ func _mulGeneric(z, x, y *Element) { z[5], z[4] = madd3(m, 121098312706494698, c[0], c[2], c[1]) } - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[5] < 121098312706494698 || (z[5] == 121098312706494698 && (z[4] < 14284016967150029115 || (z[4] == 14284016967150029115 && (z[3] < 1883307231910630287 || (z[3] == 1883307231910630287 && (z[2] < 2230234197602682880 || (z[2] == 2230234197602682880 && (z[1] < 1660523435060625408 || (z[1] == 1660523435060625408 && (z[0] < 9586122913090633729))))))))))) { var b uint64 @@ -581,89 +575,7 @@ func _mulGeneric(z, x, y *Element) { z[4], b = bits.Sub64(z[4], 14284016967150029115, b) z[5], _ = bits.Sub64(z[5], 121098312706494698, b) } -} - -func _mulWGeneric(z, x *Element, y uint64) { - - var t [6]uint64 - { - // round 0 - c1, c0 := bits.Mul64(y, x[0]) - m := c0 * 9586122913090633727 - c2 := madd0(m, 9586122913090633729, c0) - c1, c0 = madd1(y, x[1], c1) - c2, t[0] = madd2(m, 1660523435060625408, c2, c0) - c1, c0 = madd1(y, x[2], c1) - c2, t[1] = madd2(m, 2230234197602682880, c2, c0) - c1, c0 = madd1(y, x[3], c1) - c2, t[2] = madd2(m, 1883307231910630287, c2, c0) - c1, c0 = madd1(y, x[4], c1) - c2, t[3] = madd2(m, 14284016967150029115, c2, c0) 
- c1, c0 = madd1(y, x[5], c1) - t[5], t[4] = madd3(m, 121098312706494698, c0, c2, c1) - } - { - // round 1 - m := t[0] * 9586122913090633727 - c2 := madd0(m, 9586122913090633729, t[0]) - c2, t[0] = madd2(m, 1660523435060625408, c2, t[1]) - c2, t[1] = madd2(m, 2230234197602682880, c2, t[2]) - c2, t[2] = madd2(m, 1883307231910630287, c2, t[3]) - c2, t[3] = madd2(m, 14284016967150029115, c2, t[4]) - t[5], t[4] = madd2(m, 121098312706494698, t[5], c2) - } - { - // round 2 - m := t[0] * 9586122913090633727 - c2 := madd0(m, 9586122913090633729, t[0]) - c2, t[0] = madd2(m, 1660523435060625408, c2, t[1]) - c2, t[1] = madd2(m, 2230234197602682880, c2, t[2]) - c2, t[2] = madd2(m, 1883307231910630287, c2, t[3]) - c2, t[3] = madd2(m, 14284016967150029115, c2, t[4]) - t[5], t[4] = madd2(m, 121098312706494698, t[5], c2) - } - { - // round 3 - m := t[0] * 9586122913090633727 - c2 := madd0(m, 9586122913090633729, t[0]) - c2, t[0] = madd2(m, 1660523435060625408, c2, t[1]) - c2, t[1] = madd2(m, 2230234197602682880, c2, t[2]) - c2, t[2] = madd2(m, 1883307231910630287, c2, t[3]) - c2, t[3] = madd2(m, 14284016967150029115, c2, t[4]) - t[5], t[4] = madd2(m, 121098312706494698, t[5], c2) - } - { - // round 4 - m := t[0] * 9586122913090633727 - c2 := madd0(m, 9586122913090633729, t[0]) - c2, t[0] = madd2(m, 1660523435060625408, c2, t[1]) - c2, t[1] = madd2(m, 2230234197602682880, c2, t[2]) - c2, t[2] = madd2(m, 1883307231910630287, c2, t[3]) - c2, t[3] = madd2(m, 14284016967150029115, c2, t[4]) - t[5], t[4] = madd2(m, 121098312706494698, t[5], c2) - } - { - // round 5 - m := t[0] * 9586122913090633727 - c2 := madd0(m, 9586122913090633729, t[0]) - c2, z[0] = madd2(m, 1660523435060625408, c2, t[1]) - c2, z[1] = madd2(m, 2230234197602682880, c2, t[2]) - c2, z[2] = madd2(m, 1883307231910630287, c2, t[3]) - c2, z[3] = madd2(m, 14284016967150029115, c2, t[4]) - z[5], z[4] = madd2(m, 121098312706494698, t[5], c2) - } - // if z > q → z -= q - // note: this is NOT constant time - if !(z[5] < 
121098312706494698 || (z[5] == 121098312706494698 && (z[4] < 14284016967150029115 || (z[4] == 14284016967150029115 && (z[3] < 1883307231910630287 || (z[3] == 1883307231910630287 && (z[2] < 2230234197602682880 || (z[2] == 2230234197602682880 && (z[1] < 1660523435060625408 || (z[1] == 1660523435060625408 && (z[0] < 9586122913090633729))))))))))) { - var b uint64 - z[0], b = bits.Sub64(z[0], 9586122913090633729, 0) - z[1], b = bits.Sub64(z[1], 1660523435060625408, b) - z[2], b = bits.Sub64(z[2], 2230234197602682880, b) - z[3], b = bits.Sub64(z[3], 1883307231910630287, b) - z[4], b = bits.Sub64(z[4], 14284016967150029115, b) - z[5], _ = bits.Sub64(z[5], 121098312706494698, b) - } } func _fromMontGeneric(z *Element) { @@ -736,7 +648,7 @@ func _fromMontGeneric(z *Element) { z[5] = C } - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[5] < 121098312706494698 || (z[5] == 121098312706494698 && (z[4] < 14284016967150029115 || (z[4] == 14284016967150029115 && (z[3] < 1883307231910630287 || (z[3] == 1883307231910630287 && (z[2] < 2230234197602682880 || (z[2] == 2230234197602682880 && (z[1] < 1660523435060625408 || (z[1] == 1660523435060625408 && (z[0] < 9586122913090633729))))))))))) { var b uint64 @@ -759,7 +671,7 @@ func _addGeneric(z, x, y *Element) { z[4], carry = bits.Add64(x[4], y[4], carry) z[5], _ = bits.Add64(x[5], y[5], carry) - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[5] < 121098312706494698 || (z[5] == 121098312706494698 && (z[4] < 14284016967150029115 || (z[4] == 14284016967150029115 && (z[3] < 1883307231910630287 || (z[3] == 1883307231910630287 && (z[2] < 2230234197602682880 || (z[2] == 2230234197602682880 && (z[1] < 1660523435060625408 || (z[1] == 1660523435060625408 && (z[0] < 9586122913090633729))))))))))) { var b uint64 @@ -782,7 +694,7 @@ func _doubleGeneric(z, x *Element) { z[4], carry = bits.Add64(x[4], x[4], carry) z[5], _ = bits.Add64(x[5], x[5], carry) - // if z > q → z 
-= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[5] < 121098312706494698 || (z[5] == 121098312706494698 && (z[4] < 14284016967150029115 || (z[4] == 14284016967150029115 && (z[3] < 1883307231910630287 || (z[3] == 1883307231910630287 && (z[2] < 2230234197602682880 || (z[2] == 2230234197602682880 && (z[1] < 1660523435060625408 || (z[1] == 1660523435060625408 && (z[0] < 9586122913090633729))))))))))) { var b uint64 @@ -830,7 +742,7 @@ func _negGeneric(z, x *Element) { func _reduceGeneric(z *Element) { - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[5] < 121098312706494698 || (z[5] == 121098312706494698 && (z[4] < 14284016967150029115 || (z[4] == 14284016967150029115 && (z[3] < 1883307231910630287 || (z[3] == 1883307231910630287 && (z[2] < 2230234197602682880 || (z[2] == 2230234197602682880 && (z[1] < 1660523435060625408 || (z[1] == 1660523435060625408 && (z[0] < 9586122913090633729))))))))))) { var b uint64 @@ -929,18 +841,30 @@ func (z *Element) BitLen() int { return bits.Len64(z[0]) } -// Exp z = x^exponent mod q -func (z *Element) Exp(x Element, exponent *big.Int) *Element { - var bZero big.Int - if exponent.Cmp(&bZero) == 0 { +// Exp z = xᵏ mod q +func (z *Element) Exp(x Element, k *big.Int) *Element { + if k.IsUint64() && k.Uint64() == 0 { return z.SetOne() } + e := k + if k.Sign() == -1 { + // negative k, we invert + // if k < 0: xᵏ mod q == (x⁻¹)ᵏ mod q + x.Inverse(&x) + + // we negate k in a temp big.Int since + // Int.Bit(_) of k and -k is different + e = bigIntPool.Get().(*big.Int) + defer bigIntPool.Put(e) + e.Neg(k) + } + z.Set(&x) - for i := exponent.BitLen() - 2; i >= 0; i-- { + for i := e.BitLen() - 2; i >= 0; i-- { z.Square(z) - if exponent.Bit(i) == 1 { + if e.Bit(i) == 1 { z.Mul(z, &x) } } @@ -1470,14 +1394,10 @@ func (z *Element) Inverse(x *Element) *Element { return z } -var qMinusTwo *big.Int //test routines can set this to an incorrect value to fail whenever inverseExp was triggered - 
-// inverseExp is a fallback in case the inversion algorithm failed +// inverseExp computes z = x⁻¹ mod q = x**(q-2) mod q func (z *Element) inverseExp(x *Element) *Element { - if qMinusTwo == nil { - qMinusTwo = Modulus() - qMinusTwo.Sub(qMinusTwo, big.NewInt(2)) - } + qMinusTwo := Modulus() + qMinusTwo.Sub(qMinusTwo, big.NewInt(2)) return z.Exp(*x, qMinusTwo) } @@ -1519,6 +1439,8 @@ func (z *Element) linearComb(x *Element, xC int64, y *Element, yC int64) { // montReduceSigned z = (xHi * r + x) * r⁻¹ using the SOS algorithm // Requires |xHi| < 2⁶³. Most significant bit of xHi is the sign bit. func (z *Element) montReduceSigned(x *Element, xHi uint64) { + // Used for Montgomery reduction. (qInvNeg) q + r'.r = 1, i.e., qInvNeg = - q⁻¹ mod r + const qInvNegLsw uint64 = 9586122913090633727 const signBitRemover = ^signBitSelector neg := xHi&signBitSelector != 0 @@ -1612,7 +1534,7 @@ func (z *Element) montReduceSigned(x *Element, xHi uint64) { z[5], z[4] = madd2(m, qElementWord5, t[i+5], C) } - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[5] < 121098312706494698 || (z[5] == 121098312706494698 && (z[4] < 14284016967150029115 || (z[4] == 14284016967150029115 && (z[3] < 1883307231910630287 || (z[3] == 1883307231910630287 && (z[2] < 2230234197602682880 || (z[2] == 2230234197602682880 && (z[1] < 1660523435060625408 || (z[1] == 1660523435060625408 && (z[0] < 9586122913090633729))))))))))) { var b uint64 @@ -1670,7 +1592,7 @@ func (z *Element) montReduceSignedSimpleButSlow(x *Element, xHi uint64) { z[4], c = bits.Add64(z[4], 0, c) z[5], _ = bits.Add64(z[5], 0, c) - // if z > q → z -= q + // if z >= q → z -= q // note: this is NOT constant time if !(z[5] < 121098312706494698 || (z[5] == 121098312706494698 && (z[4] < 14284016967150029115 || (z[4] == 14284016967150029115 && (z[3] < 1883307231910630287 || (z[3] == 1883307231910630287 && (z[2] < 2230234197602682880 || (z[2] == 2230234197602682880 && (z[1] < 1660523435060625408 || (z[1] == 
1660523435060625408 && (z[0] < 9586122913090633729))))))))))) { var b uint64 @@ -1721,6 +1643,89 @@ func (z *Element) mulWSigned(x *Element, y int64) { } } +func _mulWGeneric(z, x *Element, y uint64) { + + var t [6]uint64 + { + // round 0 + c1, c0 := bits.Mul64(y, x[0]) + m := c0 * 9586122913090633727 + c2 := madd0(m, 9586122913090633729, c0) + c1, c0 = madd1(y, x[1], c1) + c2, t[0] = madd2(m, 1660523435060625408, c2, c0) + c1, c0 = madd1(y, x[2], c1) + c2, t[1] = madd2(m, 2230234197602682880, c2, c0) + c1, c0 = madd1(y, x[3], c1) + c2, t[2] = madd2(m, 1883307231910630287, c2, c0) + c1, c0 = madd1(y, x[4], c1) + c2, t[3] = madd2(m, 14284016967150029115, c2, c0) + c1, c0 = madd1(y, x[5], c1) + t[5], t[4] = madd3(m, 121098312706494698, c0, c2, c1) + } + { + // round 1 + m := t[0] * 9586122913090633727 + c2 := madd0(m, 9586122913090633729, t[0]) + c2, t[0] = madd2(m, 1660523435060625408, c2, t[1]) + c2, t[1] = madd2(m, 2230234197602682880, c2, t[2]) + c2, t[2] = madd2(m, 1883307231910630287, c2, t[3]) + c2, t[3] = madd2(m, 14284016967150029115, c2, t[4]) + t[5], t[4] = madd2(m, 121098312706494698, t[5], c2) + } + { + // round 2 + m := t[0] * 9586122913090633727 + c2 := madd0(m, 9586122913090633729, t[0]) + c2, t[0] = madd2(m, 1660523435060625408, c2, t[1]) + c2, t[1] = madd2(m, 2230234197602682880, c2, t[2]) + c2, t[2] = madd2(m, 1883307231910630287, c2, t[3]) + c2, t[3] = madd2(m, 14284016967150029115, c2, t[4]) + t[5], t[4] = madd2(m, 121098312706494698, t[5], c2) + } + { + // round 3 + m := t[0] * 9586122913090633727 + c2 := madd0(m, 9586122913090633729, t[0]) + c2, t[0] = madd2(m, 1660523435060625408, c2, t[1]) + c2, t[1] = madd2(m, 2230234197602682880, c2, t[2]) + c2, t[2] = madd2(m, 1883307231910630287, c2, t[3]) + c2, t[3] = madd2(m, 14284016967150029115, c2, t[4]) + t[5], t[4] = madd2(m, 121098312706494698, t[5], c2) + } + { + // round 4 + m := t[0] * 9586122913090633727 + c2 := madd0(m, 9586122913090633729, t[0]) + c2, t[0] = madd2(m, 1660523435060625408, c2, 
t[1]) + c2, t[1] = madd2(m, 2230234197602682880, c2, t[2]) + c2, t[2] = madd2(m, 1883307231910630287, c2, t[3]) + c2, t[3] = madd2(m, 14284016967150029115, c2, t[4]) + t[5], t[4] = madd2(m, 121098312706494698, t[5], c2) + } + { + // round 5 + m := t[0] * 9586122913090633727 + c2 := madd0(m, 9586122913090633729, t[0]) + c2, z[0] = madd2(m, 1660523435060625408, c2, t[1]) + c2, z[1] = madd2(m, 2230234197602682880, c2, t[2]) + c2, z[2] = madd2(m, 1883307231910630287, c2, t[3]) + c2, z[3] = madd2(m, 14284016967150029115, c2, t[4]) + z[5], z[4] = madd2(m, 121098312706494698, t[5], c2) + } + + // if z >= q → z -= q + // note: this is NOT constant time + if !(z[5] < 121098312706494698 || (z[5] == 121098312706494698 && (z[4] < 14284016967150029115 || (z[4] == 14284016967150029115 && (z[3] < 1883307231910630287 || (z[3] == 1883307231910630287 && (z[2] < 2230234197602682880 || (z[2] == 2230234197602682880 && (z[1] < 1660523435060625408 || (z[1] == 1660523435060625408 && (z[0] < 9586122913090633729))))))))))) { + var b uint64 + z[0], b = bits.Sub64(z[0], 9586122913090633729, 0) + z[1], b = bits.Sub64(z[1], 1660523435060625408, b) + z[2], b = bits.Sub64(z[2], 2230234197602682880, b) + z[3], b = bits.Sub64(z[3], 1883307231910630287, b) + z[4], b = bits.Sub64(z[4], 14284016967150029115, b) + z[5], _ = bits.Sub64(z[5], 121098312706494698, b) + } +} + func (z *Element) neg(x *Element, xHi uint64) uint64 { var b uint64 diff --git a/ecc/bw6-761/fr/element_ops_noasm.go b/ecc/bw6-761/fr/element_ops_noasm.go index ec1fac18d6..7891ac9cd3 100644 --- a/ecc/bw6-761/fr/element_ops_noasm.go +++ b/ecc/bw6-761/fr/element_ops_noasm.go @@ -46,7 +46,6 @@ func MulBy13(x *Element) { func Butterfly(a, b *Element) { _butterflyGeneric(a, b) } - func mul(z, x, y *Element) { _mulGeneric(z, x, y) } diff --git a/ecc/bw6-761/fr/element_test.go b/ecc/bw6-761/fr/element_test.go index c396171eb8..cd4a72e3b7 100644 --- a/ecc/bw6-761/fr/element_test.go +++ b/ecc/bw6-761/fr/element_test.go @@ -22,10 +22,12 @@ 
import ( "fmt" "math/big" "math/bits" + + "github.com/consensys/gnark-crypto/field" mrand "math/rand" + "testing" - "github.com/consensys/gnark-crypto/field" "github.com/leanovate/gopter" ggen "github.com/leanovate/gopter/gen" "github.com/leanovate/gopter/prop" @@ -324,7 +326,6 @@ func init() { a[5]-- staticTestValues = append(staticTestValues, a) } - { a := qElement a[5]-- @@ -338,6 +339,12 @@ func init() { staticTestValues = append(staticTestValues, a) } + { + a := qElement + a[5] = 0 + staticTestValues = append(staticTestValues, a) + } + } func TestElementReduce(t *testing.T) { @@ -475,7 +482,6 @@ func TestElementInverseExp(t *testing.T) { properties = gopter.NewProperties(parameters) properties.Property("inv(0) == 0", prop.ForAll(invMatchExp, ggen.OneConstOf(testPairElement{}))) properties.TestingRun(t, gopter.ConsoleReporter(false)) - // if we have ADX instruction enabled, test both path in assembly if supportAdx { t.Log("disabling ADX") @@ -1422,8 +1428,8 @@ func TestElementSquare(t *testing.T) { specialValueTest() // if we have ADX instruction enabled, test both path in assembly if supportAdx { - supportAdx = false t.Log("disabling ADX") + supportAdx = false properties.TestingRun(t, gopter.ConsoleReporter(false)) specialValueTest() supportAdx = true @@ -1501,8 +1507,8 @@ func TestElementInverse(t *testing.T) { specialValueTest() // if we have ADX instruction enabled, test both path in assembly if supportAdx { - supportAdx = false t.Log("disabling ADX") + supportAdx = false properties.TestingRun(t, gopter.ConsoleReporter(false)) specialValueTest() supportAdx = true @@ -1580,8 +1586,8 @@ func TestElementSqrt(t *testing.T) { specialValueTest() // if we have ADX instruction enabled, test both path in assembly if supportAdx { - supportAdx = false t.Log("disabling ADX") + supportAdx = false properties.TestingRun(t, gopter.ConsoleReporter(false)) specialValueTest() supportAdx = true @@ -1676,8 +1682,8 @@ func TestElementDouble(t *testing.T) { specialValueTest() // 
if we have ADX instruction enabled, test both path in assembly if supportAdx { - supportAdx = false t.Log("disabling ADX") + supportAdx = false properties.TestingRun(t, gopter.ConsoleReporter(false)) specialValueTest() supportAdx = true @@ -1772,8 +1778,8 @@ func TestElementNeg(t *testing.T) { specialValueTest() // if we have ADX instruction enabled, test both path in assembly if supportAdx { - supportAdx = false t.Log("disabling ADX") + supportAdx = false properties.TestingRun(t, gopter.ConsoleReporter(false)) specialValueTest() supportAdx = true @@ -2108,6 +2114,134 @@ func TestElementSetInterface(t *testing.T) { properties.TestingRun(t, gopter.ConsoleReporter(false)) } +func TestElementNegativeExp(t *testing.T) { + t.Parallel() + + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := gen() + + properties.Property("x⁻ᵏ == 1/xᵏ", prop.ForAll( + func(a, b testPairElement) bool { + + var nb, d, e big.Int + nb.Neg(&b.bigint) + + var c Element + c.Exp(a.element, &nb) + + d.Exp(&a.bigint, &nb, Modulus()) + + return c.FromMont().ToBigInt(&e).Cmp(&d) == 0 + }, + genA, genA, + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) +} + +func TestElementBatchInvert(t *testing.T) { + assert := require.New(t) + + t.Parallel() + + // ensure batchInvert([x]) == invert(x) + for i := int64(-1); i <= 2; i++ { + var e, eInv Element + e.SetInt64(i) + eInv.Inverse(&e) + + a := []Element{e} + aInv := BatchInvert(a) + + assert.True(aInv[0].Equal(&eInv), "batchInvert != invert") + + } + + // test x * x⁻¹ == 1 + tData := [][]int64{ + {-1, 1, 2, 3}, + {0, -1, 1, 2, 3, 0}, + {0, -1, 1, 0, 2, 3, 0}, + {-1, 1, 0, 2, 3}, + {0, 0, 1}, + {1, 0, 0}, + {0, 0, 0}, + } + + for _, t := range tData { + a := make([]Element, len(t)) + for i := 0; i < len(a); i++ { + a[i].SetInt64(t[i]) + } + + aInv := BatchInvert(a) 
+ + assert.True(len(aInv) == len(a)) + + for i := 0; i < len(a); i++ { + if a[i].IsZero() { + assert.True(aInv[i].IsZero(), "0⁻¹ != 0") + } else { + assert.True(a[i].Mul(&a[i], &aInv[i]).IsOne(), "x * x⁻¹ != 1") + } + } + } + + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := gen() + + properties.Property("batchInvert --> x * x⁻¹ == 1", prop.ForAll( + func(tp testPairElement, r uint8) bool { + + a := make([]Element, r) + if r != 0 { + a[0] = tp.element + + } + one := One() + for i := 1; i < len(a); i++ { + a[i].Add(&a[i-1], &one) + } + + aInv := BatchInvert(a) + + assert.True(len(aInv) == len(a)) + + for i := 0; i < len(a); i++ { + if a[i].IsZero() { + if !aInv[i].IsZero() { + return false + } + } else { + if !a[i].Mul(&a[i], &aInv[i]).IsOne() { + return false + } + } + } + return true + }, + genA, ggen.UInt8(), + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) +} + func TestElementFromMont(t *testing.T) { t.Parallel() @@ -2314,8 +2448,6 @@ func genFull() gopter.Gen { } } -// Some utils - func (z *Element) matchVeryBigInt(aHi uint64, aInt *big.Int) error { var modulus big.Int var aIntMod big.Int diff --git a/ecc/bw6-761/fr/mimc/mimc.go b/ecc/bw6-761/fr/mimc/mimc.go index 5c8bc52e7b..1862c70edd 100644 --- a/ecc/bw6-761/fr/mimc/mimc.go +++ b/ecc/bw6-761/fr/mimc/mimc.go @@ -64,7 +64,7 @@ func NewMiMC() hash.Hash { // Reset resets the Hash to its initial state. func (d *digest) Reset() { d.data = nil - d.h = fr.Element{0, 0, 0, 0} + d.h.SetZero() } // Sum appends the current hash to b and returns the resulting slice. diff --git a/ecc/ecc.go b/ecc/ecc.go index 522ad0c259..6b57127696 100644 --- a/ecc/ecc.go +++ b/ecc/ecc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -// Package ecc provides bls12-381, bls12-377, bls12-378, bn254, bw6-761, bls24-315, bw6-633, bls12-378 and bw6-756 elliptic curves implementation (+pairing). +// Package ecc provides bls12-381, bls12-377, bls12-378, bn254, bls24-315, bls12-39, bw6-761, bw6-633 and bw6-756 elliptic curves implementation (+pairing). // // Also // @@ -39,6 +39,7 @@ type ID uint16 const ( UNKNOWN ID = iota BN254 + BLS12_39 BLS12_377 BLS12_378 BLS12_381 @@ -55,6 +56,8 @@ func Implemented() []ID { func (id ID) String() string { switch id { + case BLS12_39: + return "bls12_39" case BLS12_377: return "bls12_377" case BLS12_378: @@ -81,6 +84,8 @@ func (id ID) Info() Info { // note to avoid circular dependency these are hard coded // values are checked for non regression in code generation switch id { + case BLS12_39: + return newInfo(&config.BLS12_39) case BLS12_377: return newInfo(&config.BLS12_377) case BLS12_378: diff --git a/field/field.go b/field/field.go index e81d2da1f6..dbf4bf387e 100644 --- a/field/field.go +++ b/field/field.go @@ -20,7 +20,6 @@ import ( "fmt" "math/big" "math/bits" - "unicode" "github.com/consensys/gnark-crypto/field/internal/addchain" ) @@ -75,12 +74,7 @@ type Field struct { func NewField(packageName, elementName, modulus string, useAddChain bool) (*Field, error) { // parse modulus var bModulus big.Int - base := 10 - if modulus[0] == '0' && (unicode.ToUpper(rune(modulus[1])) == 'X') { - base = 16 - modulus = modulus[2:] - } - if _, ok := bModulus.SetString(modulus, base); !ok { + if _, ok := bModulus.SetString(modulus, 0); !ok { return nil, errParseModulus } @@ -88,7 +82,7 @@ func NewField(packageName, elementName, modulus string, useAddChain bool) (*Fiel F := &Field{ PackageName: packageName, ElementName: elementName, - Modulus: modulus, + Modulus: bModulus.Text(10), ModulusHex: bModulus.Text(16), ModulusBig: new(big.Int).Set(&bModulus), UseAddChain: useAddChain, @@ -245,7 +239,7 @@ func NewField(packageName, elementName, modulus string, useAddChain bool) 
(*Fiel // note: to simplify output files generated, we generated ASM code only for // moduli that meet the condition F.NoCarry // asm code generation for moduli with more than 6 words can be optimized further - F.ASM = F.NoCarry && F.NbWords <= 12 + F.ASM = F.NoCarry && F.NbWords <= 12 && F.NbWords > 1 return F, nil } diff --git a/field/generator/generator.go b/field/generator/generator.go index 32f7d45818..fc6e5a81a2 100644 --- a/field/generator/generator.go +++ b/field/generator/generator.go @@ -60,10 +60,17 @@ func GenerateFF(F *field.Field, outputDir string) error { // remove old format generated files oldFiles := []string{"_mul.go", "_mul_amd64.go", - "_square.go", "_square_amd64.go", "_ops_decl.go", "_square_amd64.s", "_ops_amd64.go"} + "_square.go", "_square_amd64.go", "_ops_decl.go", "_square_amd64.s", + "_mul_amd64.s", + "_ops_amd64.s", + "_mul_adx_amd64.s", + "_ops_amd64.go"} + for _, of := range oldFiles { _ = os.Remove(filepath.Join(outputDir, eName+of)) } + _ = os.Remove(filepath.Join(outputDir, "asm.go")) + _ = os.Remove(filepath.Join(outputDir, "asm_noadx.go")) funcs := template.FuncMap{} if F.UseAddChain { @@ -192,7 +199,7 @@ func GenerateFF(F *field.Field, outputDir string) error { } - { + if F.ASM { // generate ops_amd64.go src := []string{ element.OpsAMD64, @@ -233,7 +240,7 @@ func GenerateFF(F *field.Field, outputDir string) error { } } - { + if F.ASM { // generate asm.go and asm_noadx.go src := []string{ element.Asm, @@ -246,7 +253,7 @@ func GenerateFF(F *field.Field, outputDir string) error { return err } } - { + if F.ASM { // generate asm.go and asm_noadx.go src := []string{ element.AsmNoAdx, diff --git a/field/goldilocks/arith.go b/field/goldilocks/arith.go new file mode 100644 index 0000000000..ec9b1faf50 --- /dev/null +++ b/field/goldilocks/arith.go @@ -0,0 +1,30 @@ +// Copyright 2020 ConsenSys Software Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by consensys/gnark-crypto DO NOT EDIT + +package goldilocks + +import ( + "math/bits" +) + +// madd0 hi = a*b + c (discards lo bits) +func madd0(a, b, c uint64) (hi uint64) { + var carry, lo uint64 + hi, lo = bits.Mul64(a, b) + _, carry = bits.Add64(lo, c, 0) + hi, _ = bits.Add64(hi, 0, carry) + return +} diff --git a/field/goldilocks/doc.go b/field/goldilocks/doc.go new file mode 100644 index 0000000000..63f0a0e62a --- /dev/null +++ b/field/goldilocks/doc.go @@ -0,0 +1,43 @@ +// Copyright 2020 ConsenSys Software Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by consensys/gnark-crypto DO NOT EDIT + +// Package goldilocks contains field arithmetic operations for modulus = 0xffffff...000001. 
+// +// The API is similar to math/big (big.Int), but the operations are significantly faster (up to 20x for the modular multiplication on amd64, see also https://hackmd.io/@gnark/modular_multiplication) +// +// The modulus is hardcoded in all the operations. +// +// Field elements are represented as an array, and assumed to be in Montgomery form in all methods: +// type Element [1]uint64 +// +// Example API signature +// // Mul z = x * y mod q +// func (z *Element) Mul(x, y *Element) *Element +// +// and can be used like so: +// var a, b Element +// a.SetUint64(2) +// b.SetString("984896738") +// a.Mul(a, b) +// a.Sub(a, a) +// .Add(a, b) +// .Inv(a) +// b.Exp(b, new(big.Int).SetUint64(42)) +// +// Modulus +// 0xffffffff00000001 // base 16 +// 18446744069414584321 // base 10 +package goldilocks diff --git a/field/goldilocks/element.go b/field/goldilocks/element.go new file mode 100644 index 0000000000..669f685218 --- /dev/null +++ b/field/goldilocks/element.go @@ -0,0 +1,972 @@ +// Copyright 2020 ConsenSys Software Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by consensys/gnark-crypto DO NOT EDIT + +package goldilocks + +// /!\ WARNING /!\ +// this code has not been audited and is provided as-is. 
In particular, +// there is no security guarantees such as constant time implementation +// or side-channel attack resistance +// /!\ WARNING /!\ + +import ( + "crypto/rand" + "encoding/binary" + "errors" + "io" + "math/big" + "math/bits" + "reflect" + "strconv" + "strings" + "sync" +) + +// Element represents a field element stored on 1 words (uint64) +// Element are assumed to be in Montgomery form in all methods +// field modulus q = +// +// 18446744069414584321 +type Element [1]uint64 + +// Limbs number of 64 bits words needed to represent Element +const Limbs = 1 + +// Bits number bits needed to represent Element +const Bits = 64 + +// Bytes number bytes needed to represent Element +const Bytes = Limbs * 8 + +// field modulus stored as big.Int +var _modulus big.Int + +// Modulus returns q as a big.Int +// q = +// +// 18446744069414584321 +func Modulus() *big.Int { + return new(big.Int).Set(&_modulus) +} + +// q (modulus) +const qElementWord0 uint64 = 18446744069414584321 +const q uint64 = qElementWord0 + +var qElement = Element{ + qElementWord0, +} + +// rSquare +var rSquare = Element{ + 18446744065119617025, +} + +var bigIntPool = sync.Pool{ + New: func() interface{} { + return new(big.Int) + }, +} + +func init() { + // base10: 18446744069414584321 + _modulus.SetString("ffffffff00000001", 16) +} + +// NewElement returns a new Element from a uint64 value +// +// it is equivalent to +// var v NewElement +// v.SetUint64(...) 
+func NewElement(v uint64) Element { + z := Element{v} + z.Mul(&z, &rSquare) + return z +} + +// SetUint64 sets z to v and returns z +func (z *Element) SetUint64(v uint64) *Element { + // sets z LSB to v (non-Montgomery form) and convert z to Montgomery form + *z = Element{v} + return z.Mul(z, &rSquare) // z.ToMont() +} + +// SetInt64 sets z to v and returns z +func (z *Element) SetInt64(v int64) *Element { + + // absolute value of v + m := v >> 63 + z.SetUint64(uint64((v ^ m) - m)) + + if m != 0 { + // v is negative + z.Neg(z) + } + + return z +} + +// Set z = x +func (z *Element) Set(x *Element) *Element { + z[0] = x[0] + return z +} + +// SetInterface converts provided interface into Element +// returns an error if provided type is not supported +// supported types: Element, *Element, uint64, int, string (interpreted as base10 integer), +// *big.Int, big.Int, []byte +func (z *Element) SetInterface(i1 interface{}) (*Element, error) { + switch c1 := i1.(type) { + case Element: + return z.Set(&c1), nil + case *Element: + if c1 == nil { + return nil, errors.New("can't set goldilocks.Element with ") + } + return z.Set(c1), nil + case uint8: + return z.SetUint64(uint64(c1)), nil + case uint16: + return z.SetUint64(uint64(c1)), nil + case uint32: + return z.SetUint64(uint64(c1)), nil + case uint: + return z.SetUint64(uint64(c1)), nil + case uint64: + return z.SetUint64(c1), nil + case int8: + return z.SetInt64(int64(c1)), nil + case int16: + return z.SetInt64(int64(c1)), nil + case int32: + return z.SetInt64(int64(c1)), nil + case int64: + return z.SetInt64(c1), nil + case int: + return z.SetInt64(int64(c1)), nil + case string: + return z.SetString(c1), nil + case *big.Int: + if c1 == nil { + return nil, errors.New("can't set goldilocks.Element with ") + } + return z.SetBigInt(c1), nil + case big.Int: + return z.SetBigInt(&c1), nil + case []byte: + return z.SetBytes(c1), nil + default: + return nil, errors.New("can't set goldilocks.Element from type " + 
reflect.TypeOf(i1).String()) + } +} + +// SetZero z = 0 +func (z *Element) SetZero() *Element { + z[0] = 0 + return z +} + +// SetOne z = 1 (in Montgomery form) +func (z *Element) SetOne() *Element { + z[0] = 4294967295 + return z +} + +// Div z = x*y^-1 mod q +func (z *Element) Div(x, y *Element) *Element { + var yInv Element + yInv.Inverse(y) + z.Mul(x, &yInv) + return z +} + +// Bit returns the i'th bit, with lsb == bit 0. +// It is the responsibility of the caller to convert from Montgomery to Regular form if needed +func (z *Element) Bit(i uint64) uint64 { + j := i / 64 + if j >= 1 { + return 0 + } + return uint64(z[j] >> (i % 64) & 1) +} + +// Equal returns z == x; constant-time +func (z *Element) Equal(x *Element) bool { + return z.NotEqual(x) == 0 +} + +// NotEqual returns 0 if and only if z == x; constant-time +func (z *Element) NotEqual(x *Element) uint64 { + return (z[0] ^ x[0]) +} + +// IsZero returns z == 0 +func (z *Element) IsZero() bool { + return (z[0]) == 0 +} + +// IsOne returns z == 1 +func (z *Element) IsOne() bool { + return z[0] == 4294967295 +} + +// IsUint64 reports whether z can be represented as an uint64. +func (z *Element) IsUint64() bool { + return true +} + +// Uint64 returns the uint64 representation of x. If x cannot be represented in a uint64, the result is undefined. 
+func (z *Element) Uint64() uint64 { + zz := *z + zz.FromMont() + return zz[0] +} + +// FitsOnOneWord reports whether z words (except the least significant word) are 0 +func (z *Element) FitsOnOneWord() bool { + return true +} + +// Cmp compares (lexicographic order) z and x and returns: +// +// -1 if z < x +// 0 if z == x +// +1 if z > x +// +func (z *Element) Cmp(x *Element) int { + _z := *z + _x := *x + _z.FromMont() + _x.FromMont() + if _z[0] > _x[0] { + return 1 + } else if _z[0] < _x[0] { + return -1 + } + return 0 +} + +// LexicographicallyLargest returns true if this element is strictly lexicographically +// larger than its negation, false otherwise +func (z *Element) LexicographicallyLargest() bool { + // adapted from github.com/zkcrypto/bls12_381 + // we check if the element is larger than (q-1) / 2 + // if z - (((q -1) / 2) + 1) have no underflow, then z > (q-1) / 2 + + _z := *z + _z.FromMont() + + var b uint64 + _, b = bits.Sub64(_z[0], 9223372034707292161, 0) + + return b == 0 +} + +// SetRandom sets z to a random element < q +func (z *Element) SetRandom() (*Element, error) { + var bytes [8]byte + if _, err := io.ReadFull(rand.Reader, bytes[:]); err != nil { + return nil, err + } + z[0] = binary.BigEndian.Uint64(bytes[0:8]) + z[0] %= 18446744069414584321 + + return z, nil +} + +// One returns 1 (in montgommery form) +func One() Element { + var one Element + one.SetOne() + return one +} + +// Halve sets z to z / 2 (mod p) +func (z *Element) Halve() { + var carry uint64 + + if z[0]&1 == 1 { + // z = z + q + z[0], carry = bits.Add64(z[0], 18446744069414584321, 0) + + } + // z = z >> 1 + z[0] >>= 1 + + if carry != 0 { + // when we added q, the result was larger than our avaible limbs + // when we shift right, we need to set the highest bit + z[0] |= (1 << 63) + } + +} + +// Mul z = x * y mod q +// see https://hackmd.io/@gnark/modular_multiplication +func (z *Element) Mul(x, y *Element) *Element { + + // CIOS multiplication + // Used for Montgomery 
reduction. (qInvNeg) q + r'.r = 1, i.e., qInvNeg = - q⁻¹ mod r + const qInvNegLsw uint64 = 18446744069414584319 + + var r uint64 + hi, lo := bits.Mul64(x[0], y[0]) + m := lo * qInvNegLsw + hi2, lo2 := bits.Mul64(m, q) + _, carry := bits.Add64(lo2, lo, 0) + r, carry = bits.Add64(hi2, hi, carry) + + if carry != 0 || r >= q { + // we need to reduce + r -= q + + } + z[0] = r + + return z +} + +// Square z = x * x mod q +// see https://hackmd.io/@gnark/modular_multiplication +func (z *Element) Square(x *Element) *Element { + + // CIOS multiplication + // Used for Montgomery reduction. (qInvNeg) q + r'.r = 1, i.e., qInvNeg = - q⁻¹ mod r + const qInvNegLsw uint64 = 18446744069414584319 + + var r uint64 + hi, lo := bits.Mul64(x[0], x[0]) + m := lo * qInvNegLsw + hi2, lo2 := bits.Mul64(m, q) + _, carry := bits.Add64(lo2, lo, 0) + r, carry = bits.Add64(hi2, hi, carry) + + if carry != 0 || r >= q { + // we need to reduce + r -= q + + } + z[0] = r + + return z +} + +// FromMont converts z in place (i.e. mutates) from Montgomery to regular representation +// sets and returns z = z * 1 +func (z *Element) FromMont() *Element { + fromMont(z) + return z +} + +// Add z = x + y mod q +func (z *Element) Add(x, y *Element) *Element { + add(z, x, y) + return z +} + +// Double z = x + x mod q, aka Lsh 1 +func (z *Element) Double(x *Element) *Element { + double(z, x) + return z +} + +// Sub z = x - y mod q +func (z *Element) Sub(x, y *Element) *Element { + sub(z, x, y) + return z +} + +// Neg z = q - x +func (z *Element) Neg(x *Element) *Element { + neg(z, x) + return z +} + +// Select is a constant-time conditional move. +// If c=0, z = x0. 
Else z = x1 +func (z *Element) Select(c int, x0 *Element, x1 *Element) *Element { + cC := uint64((int64(c) | -int64(c)) >> 63) // "canonicized" into: 0 if c=0, -1 otherwise + z[0] = x0[0] ^ cC&(x0[0]^x1[0]) + return z +} + +// Generic (no ADX instructions, no AMD64) versions of multiplication and squaring algorithms + +func _mulGeneric(z, x, y *Element) { + + // CIOS multiplication + // Used for Montgomery reduction. (qInvNeg) q + r'.r = 1, i.e., qInvNeg = - q⁻¹ mod r + const qInvNegLsw uint64 = 18446744069414584319 + + var r uint64 + hi, lo := bits.Mul64(x[0], y[0]) + m := lo * qInvNegLsw + hi2, lo2 := bits.Mul64(m, q) + _, carry := bits.Add64(lo2, lo, 0) + r, carry = bits.Add64(hi2, hi, carry) + + if carry != 0 || r >= q { + // we need to reduce + r -= q + + } + z[0] = r + +} + +func _fromMontGeneric(z *Element) { + // the following lines implement z = z * 1 + // with a modified CIOS montgomery multiplication + { + // m = z[0]n'[0] mod W + m := z[0] * 18446744069414584319 + C := madd0(m, 18446744069414584321, z[0]) + z[0] = C + } + + // if z >= q → z -= q + // note: this is NOT constant time + if z[0] >= q { + z[0] -= q + } +} + +func _addGeneric(z, x, y *Element) { + + var carry uint64 + z[0], carry = bits.Add64(x[0], y[0], 0) + if carry != 0 || z[0] >= q { + z[0] -= q + } +} + +func _doubleGeneric(z, x *Element) { + if x[0]&(1<<63) == (1 << 63) { + // if highest bit is set, then we have a carry to x + x, we shift and subtract q + z[0] = (x[0] << 1) - q + } else { + // highest bit is not set, but x + x can still be >= q + z[0] = (x[0] << 1) + if z[0] >= q { + z[0] -= q + } + } +} + +func _subGeneric(z, x, y *Element) { + var b uint64 + z[0], b = bits.Sub64(x[0], y[0], 0) + if b != 0 { + z[0] += q + } +} + +func _negGeneric(z, x *Element) { + if x.IsZero() { + z.SetZero() + return + } + z[0] = q - x[0] +} + +func _reduceGeneric(z *Element) { + + // if z >= q → z -= q + // note: this is NOT constant time + if z[0] >= q { + z[0] -= q + } +} + +func mulByConstant(z 
*Element, c uint8) { + switch c { + case 0: + z.SetZero() + return + case 1: + return + case 2: + z.Double(z) + return + case 3: + _z := *z + z.Double(z).Add(z, &_z) + case 5: + _z := *z + z.Double(z).Double(z).Add(z, &_z) + case 11: + _z := *z + z.Double(z).Double(z).Add(z, &_z).Double(z).Add(z, &_z) + default: + var y Element + y.SetUint64(uint64(c)) + z.Mul(z, &y) + } +} + +// BatchInvert returns a new slice with every element inverted. +// Uses Montgomery batch inversion trick +func BatchInvert(a []Element) []Element { + res := make([]Element, len(a)) + if len(a) == 0 { + return res + } + + zeroes := make([]bool, len(a)) + accumulator := One() + + for i := 0; i < len(a); i++ { + if a[i].IsZero() { + zeroes[i] = true + continue + } + res[i] = accumulator + accumulator.Mul(&accumulator, &a[i]) + } + + accumulator.Inverse(&accumulator) + + for i := len(a) - 1; i >= 0; i-- { + if zeroes[i] { + continue + } + res[i].Mul(&res[i], &accumulator) + accumulator.Mul(&accumulator, &a[i]) + } + + return res +} + +func _butterflyGeneric(a, b *Element) { + t := *a + a.Add(a, b) + b.Sub(&t, b) +} + +// BitLen returns the minimum number of bits needed to represent z +// returns 0 if z == 0 +func (z *Element) BitLen() int { + return bits.Len64(z[0]) +} + +// Exp z = xᵏ mod q +func (z *Element) Exp(x Element, k *big.Int) *Element { + if k.IsUint64() && k.Uint64() == 0 { + return z.SetOne() + } + + e := k + if k.Sign() == -1 { + // negative k, we invert + // if k < 0: xᵏ mod q == (x⁻¹)ᵏ mod q + x.Inverse(&x) + + // we negate k in a temp big.Int since + // Int.Bit(_) of k and -k is different + e = bigIntPool.Get().(*big.Int) + defer bigIntPool.Put(e) + e.Neg(k) + } + + z.Set(&x) + + for i := e.BitLen() - 2; i >= 0; i-- { + z.Square(z) + if e.Bit(i) == 1 { + z.Mul(z, &x) + } + } + + return z +} + +// ToMont converts z to Montgomery form +// sets and returns z = z * r² +func (z *Element) ToMont() *Element { + return z.Mul(z, &rSquare) +} + +// ToRegular returns z in regular form 
(doesn't mutate z) +func (z Element) ToRegular() Element { + return *z.FromMont() +} + +// String returns the decimal representation of z as generated by +// z.Text(10). +func (z *Element) String() string { + return z.Text(10) +} + +// Text returns the string representation of z in the given base. +// Base must be between 2 and 36, inclusive. The result uses the +// lower-case letters 'a' to 'z' for digit values 10 to 35. +// No prefix (such as "0x") is added to the string. If z is a nil +// pointer it returns "". +// If base == 10 and -z fits in a uint16 prefix "-" is added to the string. +func (z *Element) Text(base int) string { + if base < 2 || base > 36 { + panic("invalid base") + } + if z == nil { + return "" + } + + const maxUint16 = 65535 + if base == 10 { + var zzNeg Element + zzNeg.Neg(z) + zzNeg.FromMont() + if zzNeg[0] <= maxUint16 && zzNeg[0] != 0 { + return "-" + strconv.FormatUint(zzNeg[0], base) + } + } + zz := *z + zz.FromMont() + return strconv.FormatUint(zz[0], base) +} + +// ToBigInt returns z as a big.Int in Montgomery form +func (z *Element) ToBigInt(res *big.Int) *big.Int { + var b [Limbs * 8]byte + binary.BigEndian.PutUint64(b[0:8], z[0]) + + return res.SetBytes(b[:]) +} + +// ToBigIntRegular returns z as a big.Int in regular form +func (z Element) ToBigIntRegular(res *big.Int) *big.Int { + z.FromMont() + return z.ToBigInt(res) +} + +// Bytes returns the regular (non montgomery) value +// of z as a big-endian byte array. +func (z *Element) Bytes() (res [Limbs * 8]byte) { + _z := z.ToRegular() + binary.BigEndian.PutUint64(res[0:8], _z[0]) + + return +} + +// Marshal returns the regular (non montgomery) value +// of z as a big-endian byte slice. +func (z *Element) Marshal() []byte { + b := z.Bytes() + return b[:] +} + +// SetBytes interprets e as the bytes of a big-endian unsigned integer, +// sets z to that value (in Montgomery form), and returns z. 
+func (z *Element) SetBytes(e []byte) *Element { + if len(e) == 8 { + // fast path + z[0] = binary.BigEndian.Uint64(e) + return z.ToMont() + } + // get a big int from our pool + vv := bigIntPool.Get().(*big.Int) + vv.SetBytes(e) + + // set big int + z.SetBigInt(vv) + + // put temporary object back in pool + bigIntPool.Put(vv) + + return z +} + +// SetBigInt sets z to v (regular form) and returns z in Montgomery form +func (z *Element) SetBigInt(v *big.Int) *Element { + z.SetZero() + + var zero big.Int + + // fast path + c := v.Cmp(&_modulus) + if c == 0 { + // v == 0 + return z + } else if c != 1 && v.Cmp(&zero) != -1 { + // 0 < v < q + return z.setBigInt(v) + } + + // get temporary big int from the pool + vv := bigIntPool.Get().(*big.Int) + + // copy input + modular reduction + vv.Set(v) + vv.Mod(v, &_modulus) + + // set big int byte value + z.setBigInt(vv) + + // release object into pool + bigIntPool.Put(vv) + return z +} + +// setBigInt assumes 0 ⩽ v < q +func (z *Element) setBigInt(v *big.Int) *Element { + vBits := v.Bits() + + if bits.UintSize == 64 { + for i := 0; i < len(vBits); i++ { + z[i] = uint64(vBits[i]) + } + } else { + for i := 0; i < len(vBits); i++ { + if i%2 == 0 { + z[i/2] = uint64(vBits[i]) + } else { + z[i/2] |= uint64(vBits[i]) << 32 + } + } + } + + return z.ToMont() +} + +// SetString creates a big.Int with number and calls SetBigInt on z +// +// The number prefix determines the actual base: A prefix of +// ''0b'' or ''0B'' selects base 2, ''0'', ''0o'' or ''0O'' selects base 8, +// and ''0x'' or ''0X'' selects base 16. Otherwise, the selected base is 10 +// and no prefix is accepted. +// +// For base 16, lower and upper case letters are considered the same: +// The letters 'a' to 'f' and 'A' to 'F' represent digit values 10 to 15. +// +// An underscore character ''_'' may appear between a base +// prefix and an adjacent digit, and between successive digits; such +// underscores do not change the value of the number. 
+// Incorrect placement of underscores is reported as a panic if there +// are no other errors. +// +func (z *Element) SetString(number string) *Element { + // get temporary big int from the pool + vv := bigIntPool.Get().(*big.Int) + + if _, ok := vv.SetString(number, 0); !ok { + panic("Element.SetString failed -> can't parse number into a big.Int " + number) + } + + z.SetBigInt(vv) + + // release object into pool + bigIntPool.Put(vv) + + return z +} + +// MarshalJSON returns json encoding of z (z.Text(10)) +// If z == nil, returns null +func (z *Element) MarshalJSON() ([]byte, error) { + if z == nil { + return []byte("null"), nil + } + const maxSafeBound = 15 // we encode it as number if it's small + s := z.Text(10) + if len(s) <= maxSafeBound { + return []byte(s), nil + } + var sbb strings.Builder + sbb.WriteByte('"') + sbb.WriteString(s) + sbb.WriteByte('"') + return []byte(sbb.String()), nil +} + +// UnmarshalJSON accepts numbers and strings as input +// See Element.SetString for valid prefixes (0x, 0b, ...) +func (z *Element) UnmarshalJSON(data []byte) error { + s := string(data) + if len(s) > Bits*3 { + return errors.New("value too large (max = Element.Bits * 3)") + } + + // we accept numbers and strings, remove leading and trailing quotes if any + if len(s) > 0 && s[0] == '"' { + s = s[1:] + } + if len(s) > 0 && s[len(s)-1] == '"' { + s = s[:len(s)-1] + } + + // get temporary big int from the pool + vv := bigIntPool.Get().(*big.Int) + + if _, ok := vv.SetString(s, 0); !ok { + return errors.New("can't parse into a big.Int: " + s) + } + + z.SetBigInt(vv) + + // release object into pool + bigIntPool.Put(vv) + return nil +} + +// Legendre returns the Legendre symbol of z (either +1, -1, or 0.) 
+func (z *Element) Legendre() int { + var l Element + // z^((q-1)/2) + l.expByLegendreExp(*z) + + if l.IsZero() { + return 0 + } + + // if l == 1 + if l[0] == 4294967295 { + return 1 + } + return -1 +} + +// Sqrt z = √x mod q +// if the square root doesn't exist (x is not a square mod q) +// Sqrt leaves z unchanged and returns nil +func (z *Element) Sqrt(x *Element) *Element { + // q ≡ 1 (mod 4) + // see modSqrtTonelliShanks in math/big/int.go + // using https://www.maa.org/sites/default/files/pdf/upload_library/22/Polya/07468342.di020786.02p0470a.pdf + + var y, b, t, w Element + // w = x^((s-1)/2)) + w.expBySqrtExp(*x) + + // y = x^((s+1)/2)) = w * x + y.Mul(x, &w) + + // b = x^s = w * w * x = y * x + b.Mul(&w, &y) + + // g = nonResidue ^ s + var g = Element{ + 15733474329512464024, + } + r := uint64(32) + + // compute legendre symbol + // t = x^((q-1)/2) = r-1 squaring of x^s + t = b + for i := uint64(0); i < r-1; i++ { + t.Square(&t) + } + if t.IsZero() { + return z.SetZero() + } + if !(t[0] == 4294967295) { + // t != 1, we don't have a square root + return nil + } + for { + var m uint64 + t = b + + // for t != 1 + for !(t[0] == 4294967295) { + t.Square(&t) + m++ + } + + if m == 0 { + return z.Set(&y) + } + // t = g^(2^(r-m-1)) mod q + ge := int(r - m - 1) + t = g + for ge > 0 { + t.Square(&t) + ge-- + } + + g.Square(&t) + y.Mul(&y, &t) + b.Mul(&b, &g) + r = m + } +} + +// Inverse z = x⁻¹ mod q +// Algorithm 16 in "Efficient Software-Implementation of Finite Fields with Applications to Cryptography" +// if x == 0, sets and returns z = x +func (z *Element) Inverse(x *Element) *Element { + const q uint64 = qElementWord0 + if x.IsZero() { + z.SetZero() + return z + } + + var r, s, u, v uint64 + u = q // u = q + s = 18446744065119617025 // s = r^2 + r = 0 + v = x[0] + + var carry, borrow uint64 + + for (u != 1) && (v != 1) { + for v&1 == 0 { + v >>= 1 + if s&1 == 0 { + s >>= 1 + } else { + s, carry = bits.Add64(s, q, 0) + s >>= 1 + if carry != 0 { + s |= (1 << 63) + 
} + } + } + for u&1 == 0 { + u >>= 1 + if r&1 == 0 { + r >>= 1 + } else { + r, carry = bits.Add64(r, q, 0) + r >>= 1 + if carry != 0 { + r |= (1 << 63) + } + } + } + if v >= u { + v -= u + s, borrow = bits.Sub64(s, r, 0) + if borrow == 1 { + s += q + } + } else { + u -= v + r, borrow = bits.Sub64(r, s, 0) + if borrow == 1 { + r += q + } + } + } + + if u == 1 { + z[0] = r + } else { + z[0] = s + } + + return z +} diff --git a/field/goldilocks/element_exp.go b/field/goldilocks/element_exp.go new file mode 100644 index 0000000000..46d391d031 --- /dev/null +++ b/field/goldilocks/element_exp.go @@ -0,0 +1,173 @@ +// Copyright 2020 ConsenSys Software Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by consensys/gnark-crypto DO NOT EDIT + +package goldilocks + +// expBySqrtExp is equivalent to z.Exp(x, 7fffffff) +// +// uses github.com/mmcloughlin/addchain v0.4.0 to generate a shorter addition chain +func (z *Element) expBySqrtExp(x Element) *Element { + // addition chain: + // + // _10 = 2*1 + // _11 = 1 + _10 + // _110 = 2*_11 + // _111 = 1 + _110 + // _111000 = _111 << 3 + // _111111 = _111 + _111000 + // _1111110 = 2*_111111 + // _1111111 = 1 + _1111110 + // x12 = _1111110 << 5 + _111111 + // x24 = x12 << 12 + x12 + // return x24 << 7 + _1111111 + // + // Operations: 30 squares 7 multiplies + + // Allocate Temporaries. 
+ var ( + t0 = new(Element) + t1 = new(Element) + ) + + // var t0,t1 Element + // Step 1: z = x^0x2 + z.Square(&x) + + // Step 2: z = x^0x3 + z.Mul(&x, z) + + // Step 3: z = x^0x6 + z.Square(z) + + // Step 4: z = x^0x7 + z.Mul(&x, z) + + // Step 7: t0 = x^0x38 + t0.Square(z) + for s := 1; s < 3; s++ { + t0.Square(t0) + } + + // Step 8: t0 = x^0x3f + t0.Mul(z, t0) + + // Step 9: t1 = x^0x7e + t1.Square(t0) + + // Step 10: z = x^0x7f + z.Mul(&x, t1) + + // Step 15: t1 = x^0xfc0 + for s := 0; s < 5; s++ { + t1.Square(t1) + } + + // Step 16: t0 = x^0xfff + t0.Mul(t0, t1) + + // Step 28: t1 = x^0xfff000 + t1.Square(t0) + for s := 1; s < 12; s++ { + t1.Square(t1) + } + + // Step 29: t0 = x^0xffffff + t0.Mul(t0, t1) + + // Step 36: t0 = x^0x7fffff80 + for s := 0; s < 7; s++ { + t0.Square(t0) + } + + // Step 37: z = x^0x7fffffff + z.Mul(z, t0) + + return z +} + +// expByLegendreExp is equivalent to z.Exp(x, 7fffffff80000000) +// +// uses github.com/mmcloughlin/addchain v0.4.0 to generate a shorter addition chain +func (z *Element) expByLegendreExp(x Element) *Element { + // addition chain: + // + // _10 = 2*1 + // _11 = 1 + _10 + // _1100 = _11 << 2 + // _1111 = _11 + _1100 + // _11110000 = _1111 << 4 + // _11111111 = _1111 + _11110000 + // x16 = _11111111 << 8 + _11111111 + // x32 = x16 << 16 + x16 + // return x32 << 31 + // + // Operations: 62 squares 5 multiplies + + // Allocate Temporaries. 
+ var ( + t0 = new(Element) + ) + + // var t0 Element + // Step 1: z = x^0x2 + z.Square(&x) + + // Step 2: z = x^0x3 + z.Mul(&x, z) + + // Step 4: t0 = x^0xc + t0.Square(z) + for s := 1; s < 2; s++ { + t0.Square(t0) + } + + // Step 5: z = x^0xf + z.Mul(z, t0) + + // Step 9: t0 = x^0xf0 + t0.Square(z) + for s := 1; s < 4; s++ { + t0.Square(t0) + } + + // Step 10: z = x^0xff + z.Mul(z, t0) + + // Step 18: t0 = x^0xff00 + t0.Square(z) + for s := 1; s < 8; s++ { + t0.Square(t0) + } + + // Step 19: z = x^0xffff + z.Mul(z, t0) + + // Step 35: t0 = x^0xffff0000 + t0.Square(z) + for s := 1; s < 16; s++ { + t0.Square(t0) + } + + // Step 36: z = x^0xffffffff + z.Mul(z, t0) + + // Step 67: z = x^0x7fffffff80000000 + for s := 0; s < 31; s++ { + z.Square(z) + } + + return z +} diff --git a/field/goldilocks/element_fuzz.go b/field/goldilocks/element_fuzz.go new file mode 100644 index 0000000000..1b42870966 --- /dev/null +++ b/field/goldilocks/element_fuzz.go @@ -0,0 +1,113 @@ +//go:build gofuzz +// +build gofuzz + +// Copyright 2020 ConsenSys Software Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Code generated by consensys/gnark-crypto DO NOT EDIT + +package goldilocks + +import ( + "bytes" + "encoding/binary" + "io" + "math/big" + "math/bits" +) + +const ( + fuzzInteresting = 1 + fuzzNormal = 0 + fuzzDiscard = -1 +) + +// Fuzz arithmetic operations fuzzer +func Fuzz(data []byte) int { + r := bytes.NewReader(data) + + var e1, e2 Element + e1.SetRawBytes(r) + e2.SetRawBytes(r) + + { + // mul assembly + + var c, _c Element + a, _a, b, _b := e1, e1, e2, e2 + c.Mul(&a, &b) + _mulGeneric(&_c, &_a, &_b) + + if !c.Equal(&_c) { + panic("mul asm != mul generic on Element") + } + } + + { + // inverse + inv := e1 + inv.Inverse(&inv) + + var bInv, b1, b2 big.Int + e1.ToBigIntRegular(&b1) + bInv.ModInverse(&b1, Modulus()) + inv.ToBigIntRegular(&b2) + + if b2.Cmp(&bInv) != 0 { + panic("inverse operation doesn't match big int result") + } + } + + { + // a + -a == 0 + a, b := e1, e1 + b.Neg(&b) + a.Add(&a, &b) + if !a.IsZero() { + panic("a + -a != 0") + } + } + + return fuzzNormal + +} + +// SetRawBytes reads up to Bytes (bytes needed to represent Element) from reader +// and interpret it as big endian uint64 +// used for fuzzing purposes only +func (z *Element) SetRawBytes(r io.Reader) { + + buf := make([]byte, 8) + + for i := 0; i < len(z); i++ { + if _, err := io.ReadFull(r, buf); err != nil { + goto eof + } + z[i] = binary.BigEndian.Uint64(buf[:]) + } +eof: + z[0] %= qElement[0] + + if z.BiggerModulus() { + var b uint64 + z[0], b = bits.Sub64(z[0], qElement[0], 0) + } + + return +} + +func (z *Element) BiggerModulus() bool { + + return z[0] >= qElement[0] +} diff --git a/field/goldilocks/element_ops_noasm.go b/field/goldilocks/element_ops_noasm.go new file mode 100644 index 0000000000..5069715e74 --- /dev/null +++ b/field/goldilocks/element_ops_noasm.go @@ -0,0 +1,77 @@ +// Copyright 2020 ConsenSys Software Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by consensys/gnark-crypto DO NOT EDIT + +package goldilocks + +// /!\ WARNING /!\ +// this code has not been audited and is provided as-is. In particular, +// there is no security guarantees such as constant time implementation +// or side-channel attack resistance +// /!\ WARNING /!\ + +// MulBy3 x *= 3 +func MulBy3(x *Element) { + var y Element + y.SetUint64(3) + x.Mul(x, &y) +} + +// MulBy5 x *= 5 +func MulBy5(x *Element) { + var y Element + y.SetUint64(5) + x.Mul(x, &y) +} + +// MulBy13 x *= 13 +func MulBy13(x *Element) { + var y Element + y.SetUint64(13) + x.Mul(x, &y) +} + +// Butterfly sets +// a = a + b +// b = a - b +func Butterfly(a, b *Element) { + _butterflyGeneric(a, b) +} + +// FromMont converts z in place (i.e. mutates) from Montgomery to regular representation +// sets and returns z = z * 1 +func fromMont(z *Element) { + _fromMontGeneric(z) +} + +func add(z, x, y *Element) { + _addGeneric(z, x, y) +} + +func double(z, x *Element) { + _doubleGeneric(z, x) +} + +func sub(z, x, y *Element) { + _subGeneric(z, x, y) +} + +func neg(z, x *Element) { + _negGeneric(z, x) +} + +func reduce(z *Element) { + _reduceGeneric(z) +} diff --git a/field/goldilocks/element_test.go b/field/goldilocks/element_test.go new file mode 100644 index 0000000000..a60a5d8a97 --- /dev/null +++ b/field/goldilocks/element_test.go @@ -0,0 +1,2249 @@ +// Copyright 2020 ConsenSys Software Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by consensys/gnark-crypto DO NOT EDIT + +package goldilocks + +import ( + "crypto/rand" + "encoding/json" + "fmt" + "math/big" + "math/bits" + + "testing" + + "github.com/leanovate/gopter" + ggen "github.com/leanovate/gopter/gen" + "github.com/leanovate/gopter/prop" + + "github.com/stretchr/testify/require" +) + +// ------------------------------------------------------------------------------------------------- +// benchmarks +// most benchmarks are rudimentary and should sample a large number of random inputs +// or be run multiple times to ensure it didn't measure the fastest path of the function + +var benchResElement Element + +func BenchmarkElementSelect(b *testing.B) { + var x, y Element + x.SetRandom() + y.SetRandom() + + for i := 0; i < b.N; i++ { + benchResElement.Select(i%3, &x, &y) + } +} + +func BenchmarkElementSetBytes(b *testing.B) { + var x Element + x.SetRandom() + bb := x.Bytes() + b.ResetTimer() + + for i := 0; i < b.N; i++ { + benchResElement.SetBytes(bb[:]) + } + +} + +func BenchmarkElementMulByConstants(b *testing.B) { + b.Run("mulBy3", func(b *testing.B) { + benchResElement.SetRandom() + b.ResetTimer() + for i := 0; i < b.N; i++ { + MulBy3(&benchResElement) + } + }) + b.Run("mulBy5", func(b *testing.B) { + benchResElement.SetRandom() + b.ResetTimer() + for i := 0; i < b.N; i++ { + MulBy5(&benchResElement) + } + }) + b.Run("mulBy13", func(b *testing.B) { + benchResElement.SetRandom() + b.ResetTimer() + for i := 0; i < b.N; i++ { + MulBy13(&benchResElement) + } + }) +} + +func BenchmarkElementInverse(b 
*testing.B) { + var x Element + x.SetRandom() + benchResElement.SetRandom() + b.ResetTimer() + + for i := 0; i < b.N; i++ { + benchResElement.Inverse(&x) + } + +} + +func BenchmarkElementButterfly(b *testing.B) { + var x Element + x.SetRandom() + benchResElement.SetRandom() + b.ResetTimer() + for i := 0; i < b.N; i++ { + Butterfly(&x, &benchResElement) + } +} + +func BenchmarkElementExp(b *testing.B) { + var x Element + x.SetRandom() + benchResElement.SetRandom() + b1, _ := rand.Int(rand.Reader, Modulus()) + b.ResetTimer() + for i := 0; i < b.N; i++ { + benchResElement.Exp(x, b1) + } +} + +func BenchmarkElementDouble(b *testing.B) { + benchResElement.SetRandom() + b.ResetTimer() + for i := 0; i < b.N; i++ { + benchResElement.Double(&benchResElement) + } +} + +func BenchmarkElementAdd(b *testing.B) { + var x Element + x.SetRandom() + benchResElement.SetRandom() + b.ResetTimer() + for i := 0; i < b.N; i++ { + benchResElement.Add(&x, &benchResElement) + } +} + +func BenchmarkElementSub(b *testing.B) { + var x Element + x.SetRandom() + benchResElement.SetRandom() + b.ResetTimer() + for i := 0; i < b.N; i++ { + benchResElement.Sub(&x, &benchResElement) + } +} + +func BenchmarkElementNeg(b *testing.B) { + benchResElement.SetRandom() + b.ResetTimer() + for i := 0; i < b.N; i++ { + benchResElement.Neg(&benchResElement) + } +} + +func BenchmarkElementDiv(b *testing.B) { + var x Element + x.SetRandom() + benchResElement.SetRandom() + b.ResetTimer() + for i := 0; i < b.N; i++ { + benchResElement.Div(&x, &benchResElement) + } +} + +func BenchmarkElementFromMont(b *testing.B) { + benchResElement.SetRandom() + b.ResetTimer() + for i := 0; i < b.N; i++ { + benchResElement.FromMont() + } +} + +func BenchmarkElementToMont(b *testing.B) { + benchResElement.SetRandom() + b.ResetTimer() + for i := 0; i < b.N; i++ { + benchResElement.ToMont() + } +} +func BenchmarkElementSquare(b *testing.B) { + benchResElement.SetRandom() + b.ResetTimer() + for i := 0; i < b.N; i++ { + 
benchResElement.Square(&benchResElement) + } +} + +func BenchmarkElementSqrt(b *testing.B) { + var a Element + a.SetUint64(4) + a.Neg(&a) + b.ResetTimer() + for i := 0; i < b.N; i++ { + benchResElement.Sqrt(&a) + } +} + +func BenchmarkElementMul(b *testing.B) { + x := Element{ + 18446744065119617025, + } + benchResElement.SetOne() + b.ResetTimer() + for i := 0; i < b.N; i++ { + benchResElement.Mul(&benchResElement, &x) + } +} + +func BenchmarkElementCmp(b *testing.B) { + x := Element{ + 18446744065119617025, + } + benchResElement = x + benchResElement[0] = 0 + b.ResetTimer() + for i := 0; i < b.N; i++ { + benchResElement.Cmp(&x) + } +} + +func TestElementCmp(t *testing.T) { + var x, y Element + + if x.Cmp(&y) != 0 { + t.Fatal("x == y") + } + + one := One() + y.Sub(&y, &one) + + if x.Cmp(&y) != -1 { + t.Fatal("x < y") + } + if y.Cmp(&x) != 1 { + t.Fatal("x < y") + } + + x = y + if x.Cmp(&y) != 0 { + t.Fatal("x == y") + } + + x.Sub(&x, &one) + if x.Cmp(&y) != -1 { + t.Fatal("x < y") + } + if y.Cmp(&x) != 1 { + t.Fatal("x < y") + } +} + +func TestElementNegZero(t *testing.T) { + var a, b Element + b.SetZero() + for a.IsZero() { + a.SetRandom() + } + a.Neg(&b) + if !a.IsZero() { + t.Fatal("neg(0) != 0") + } +} + +// ------------------------------------------------------------------------------------------------- +// Gopter tests +// most of them are generated with a template + +const ( + nbFuzzShort = 200 + nbFuzz = 1000 +) + +// special values to be used in tests +var staticTestValues []Element + +func init() { + staticTestValues = append(staticTestValues, Element{}) // zero + staticTestValues = append(staticTestValues, One()) // one + staticTestValues = append(staticTestValues, rSquare) // r² + var e, one Element + one.SetOne() + e.Sub(&qElement, &one) + staticTestValues = append(staticTestValues, e) // q - 1 + e.Double(&one) + staticTestValues = append(staticTestValues, e) // 2 + + { + a := qElement + a[0]-- + staticTestValues = append(staticTestValues, a) + } + 
staticTestValues = append(staticTestValues, Element{0}) + staticTestValues = append(staticTestValues, Element{1}) + staticTestValues = append(staticTestValues, Element{2}) + + { + a := qElement + a[0]-- + staticTestValues = append(staticTestValues, a) + } + + { + a := qElement + a[0] = 0 + staticTestValues = append(staticTestValues, a) + } + + { + a := qElement + a[0] = 0 + staticTestValues = append(staticTestValues, a) + } + +} + +func TestElementReduce(t *testing.T) { + testValues := make([]Element, len(staticTestValues)) + copy(testValues, staticTestValues) + + for _, s := range testValues { + expected := s + reduce(&s) + _reduceGeneric(&expected) + if !s.Equal(&expected) { + t.Fatal("reduce failed: asm and generic impl don't match") + } + } + + t.Parallel() + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := genFull() + + properties.Property("reduce should output a result smaller than modulus", prop.ForAll( + func(a Element) bool { + b := a + reduce(&a) + _reduceGeneric(&b) + return !a.biggerOrEqualModulus() && a.Equal(&b) + }, + genA, + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) + +} + +func TestElementEqual(t *testing.T) { + t.Parallel() + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := gen() + genB := gen() + + properties.Property("x.Equal(&y) iff x == y; likely false for random pairs", prop.ForAll( + func(a testPairElement, b testPairElement) bool { + return a.element.Equal(&b.element) == (a.element == b.element) + }, + genA, + genB, + )) + + properties.Property("x.Equal(&y) if x == y", prop.ForAll( + func(a testPairElement) bool { + b := a.element + return a.element.Equal(&b) 
+ }, + genA, + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) +} + +func TestElementBytes(t *testing.T) { + t.Parallel() + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := gen() + + properties.Property("SetBytes(Bytes()) should stay constant", prop.ForAll( + func(a testPairElement) bool { + var b Element + bytes := a.element.Bytes() + b.SetBytes(bytes[:]) + return a.element.Equal(&b) + }, + genA, + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) +} + +func TestElementInverseExp(t *testing.T) { + // inverse must be equal to exp^-2 + exp := Modulus() + exp.Sub(exp, new(big.Int).SetUint64(2)) + + invMatchExp := func(a testPairElement) bool { + var b Element + b.Set(&a.element) + a.element.Inverse(&a.element) + b.Exp(b, exp) + + return a.element.Equal(&b) + } + + t.Parallel() + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + properties := gopter.NewProperties(parameters) + genA := gen() + properties.Property("inv == exp^-2", prop.ForAll(invMatchExp, genA)) + properties.TestingRun(t, gopter.ConsoleReporter(false)) + + parameters.MinSuccessfulTests = 1 + properties = gopter.NewProperties(parameters) + properties.Property("inv(0) == 0", prop.ForAll(invMatchExp, ggen.OneConstOf(testPairElement{}))) + properties.TestingRun(t, gopter.ConsoleReporter(false)) +} + +func TestElementMulByConstants(t *testing.T) { + + t.Parallel() + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := gen() + + implemented := []uint8{0, 1, 2, 3, 5, 13} + 
properties.Property("mulByConstant", prop.ForAll( + func(a testPairElement) bool { + for _, c := range implemented { + var constant Element + constant.SetUint64(uint64(c)) + + b := a.element + b.Mul(&b, &constant) + + aa := a.element + mulByConstant(&aa, c) + + if !aa.Equal(&b) { + return false + } + } + + return true + }, + genA, + )) + + properties.Property("MulBy3(x) == Mul(x, 3)", prop.ForAll( + func(a testPairElement) bool { + var constant Element + constant.SetUint64(3) + + b := a.element + b.Mul(&b, &constant) + + MulBy3(&a.element) + + return a.element.Equal(&b) + }, + genA, + )) + + properties.Property("MulBy5(x) == Mul(x, 5)", prop.ForAll( + func(a testPairElement) bool { + var constant Element + constant.SetUint64(5) + + b := a.element + b.Mul(&b, &constant) + + MulBy5(&a.element) + + return a.element.Equal(&b) + }, + genA, + )) + + properties.Property("MulBy13(x) == Mul(x, 13)", prop.ForAll( + func(a testPairElement) bool { + var constant Element + constant.SetUint64(13) + + b := a.element + b.Mul(&b, &constant) + + MulBy13(&a.element) + + return a.element.Equal(&b) + }, + genA, + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) + +} + +func TestElementLegendre(t *testing.T) { + t.Parallel() + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := gen() + + properties.Property("legendre should output same result than big.Int.Jacobi", prop.ForAll( + func(a testPairElement) bool { + return a.element.Legendre() == big.Jacobi(&a.bigint, Modulus()) + }, + genA, + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) + +} + +func TestElementButterflies(t *testing.T) { + + t.Parallel() + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + 
properties := gopter.NewProperties(parameters) + + genA := gen() + + properties.Property("butterfly0 == a -b; a +b", prop.ForAll( + func(a, b testPairElement) bool { + a0, b0 := a.element, b.element + + _butterflyGeneric(&a.element, &b.element) + Butterfly(&a0, &b0) + + return a.element.Equal(&a0) && b.element.Equal(&b0) + }, + genA, + genA, + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) + +} + +func TestElementLexicographicallyLargest(t *testing.T) { + t.Parallel() + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := gen() + + properties.Property("element.Cmp should match LexicographicallyLargest output", prop.ForAll( + func(a testPairElement) bool { + var negA Element + negA.Neg(&a.element) + + cmpResult := a.element.Cmp(&negA) + lResult := a.element.LexicographicallyLargest() + + if lResult && cmpResult == 1 { + return true + } + if !lResult && cmpResult != 1 { + return true + } + return false + }, + genA, + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) + +} + +func TestElementAdd(t *testing.T) { + t.Parallel() + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := gen() + genB := gen() + + properties.Property("Add: having the receiver as operand should output the same result", prop.ForAll( + func(a, b testPairElement) bool { + var c, d Element + d.Set(&a.element) + + c.Add(&a.element, &b.element) + a.element.Add(&a.element, &b.element) + b.element.Add(&d, &b.element) + + return a.element.Equal(&b.element) && a.element.Equal(&c) && b.element.Equal(&c) + }, + genA, + genB, + )) + + properties.Property("Add: operation result must match big.Int result", prop.ForAll( + func(a, 
b testPairElement) bool { + { + var c Element + + c.Add(&a.element, &b.element) + + var d, e big.Int + d.Add(&a.bigint, &b.bigint).Mod(&d, Modulus()) + + if c.FromMont().ToBigInt(&e).Cmp(&d) != 0 { + return false + } + } + + // fixed elements + // a is random + // r takes special values + testValues := make([]Element, len(staticTestValues)) + copy(testValues, staticTestValues) + + for _, r := range testValues { + var d, e, rb big.Int + r.ToBigIntRegular(&rb) + + var c Element + c.Add(&a.element, &r) + d.Add(&a.bigint, &rb).Mod(&d, Modulus()) + + // checking generic impl against asm path + var cGeneric Element + _addGeneric(&cGeneric, &a.element, &r) + if !cGeneric.Equal(&c) { + // need to give context to failing error. + return false + } + + if c.FromMont().ToBigInt(&e).Cmp(&d) != 0 { + return false + } + } + return true + }, + genA, + genB, + )) + + properties.Property("Add: operation result must be smaller than modulus", prop.ForAll( + func(a, b testPairElement) bool { + var c Element + + c.Add(&a.element, &b.element) + + return !c.biggerOrEqualModulus() + }, + genA, + genB, + )) + + properties.Property("Add: assembly implementation must be consistent with generic one", prop.ForAll( + func(a, b testPairElement) bool { + var c, d Element + c.Add(&a.element, &b.element) + _addGeneric(&d, &a.element, &b.element) + return c.Equal(&d) + }, + genA, + genB, + )) + + specialValueTest := func() { + // test special values against special values + testValues := make([]Element, len(staticTestValues)) + copy(testValues, staticTestValues) + + for _, a := range testValues { + var aBig big.Int + a.ToBigIntRegular(&aBig) + for _, b := range testValues { + + var bBig, d, e big.Int + b.ToBigIntRegular(&bBig) + + var c Element + c.Add(&a, &b) + d.Add(&aBig, &bBig).Mod(&d, Modulus()) + + // checking asm against generic impl + var cGeneric Element + _addGeneric(&cGeneric, &a, &b) + if !cGeneric.Equal(&c) { + t.Fatal("Add failed special test values: asm and generic impl don't match") + 
} + + if c.FromMont().ToBigInt(&e).Cmp(&d) != 0 { + t.Fatal("Add failed special test values") + } + } + } + } + + properties.TestingRun(t, gopter.ConsoleReporter(false)) + specialValueTest() +} + +func TestElementSub(t *testing.T) { + t.Parallel() + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := gen() + genB := gen() + + properties.Property("Sub: having the receiver as operand should output the same result", prop.ForAll( + func(a, b testPairElement) bool { + var c, d Element + d.Set(&a.element) + + c.Sub(&a.element, &b.element) + a.element.Sub(&a.element, &b.element) + b.element.Sub(&d, &b.element) + + return a.element.Equal(&b.element) && a.element.Equal(&c) && b.element.Equal(&c) + }, + genA, + genB, + )) + + properties.Property("Sub: operation result must match big.Int result", prop.ForAll( + func(a, b testPairElement) bool { + { + var c Element + + c.Sub(&a.element, &b.element) + + var d, e big.Int + d.Sub(&a.bigint, &b.bigint).Mod(&d, Modulus()) + + if c.FromMont().ToBigInt(&e).Cmp(&d) != 0 { + return false + } + } + + // fixed elements + // a is random + // r takes special values + testValues := make([]Element, len(staticTestValues)) + copy(testValues, staticTestValues) + + for _, r := range testValues { + var d, e, rb big.Int + r.ToBigIntRegular(&rb) + + var c Element + c.Sub(&a.element, &r) + d.Sub(&a.bigint, &rb).Mod(&d, Modulus()) + + // checking generic impl against asm path + var cGeneric Element + _subGeneric(&cGeneric, &a.element, &r) + if !cGeneric.Equal(&c) { + // need to give context to failing error. 
+ return false + } + + if c.FromMont().ToBigInt(&e).Cmp(&d) != 0 { + return false + } + } + return true + }, + genA, + genB, + )) + + properties.Property("Sub: operation result must be smaller than modulus", prop.ForAll( + func(a, b testPairElement) bool { + var c Element + + c.Sub(&a.element, &b.element) + + return !c.biggerOrEqualModulus() + }, + genA, + genB, + )) + + properties.Property("Sub: assembly implementation must be consistent with generic one", prop.ForAll( + func(a, b testPairElement) bool { + var c, d Element + c.Sub(&a.element, &b.element) + _subGeneric(&d, &a.element, &b.element) + return c.Equal(&d) + }, + genA, + genB, + )) + + specialValueTest := func() { + // test special values against special values + testValues := make([]Element, len(staticTestValues)) + copy(testValues, staticTestValues) + + for _, a := range testValues { + var aBig big.Int + a.ToBigIntRegular(&aBig) + for _, b := range testValues { + + var bBig, d, e big.Int + b.ToBigIntRegular(&bBig) + + var c Element + c.Sub(&a, &b) + d.Sub(&aBig, &bBig).Mod(&d, Modulus()) + + // checking asm against generic impl + var cGeneric Element + _subGeneric(&cGeneric, &a, &b) + if !cGeneric.Equal(&c) { + t.Fatal("Sub failed special test values: asm and generic impl don't match") + } + + if c.FromMont().ToBigInt(&e).Cmp(&d) != 0 { + t.Fatal("Sub failed special test values") + } + } + } + } + + properties.TestingRun(t, gopter.ConsoleReporter(false)) + specialValueTest() +} + +func TestElementMul(t *testing.T) { + t.Parallel() + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := gen() + genB := gen() + + properties.Property("Mul: having the receiver as operand should output the same result", prop.ForAll( + func(a, b testPairElement) bool { + var c, d Element + d.Set(&a.element) + + c.Mul(&a.element, &b.element) + 
a.element.Mul(&a.element, &b.element) + b.element.Mul(&d, &b.element) + + return a.element.Equal(&b.element) && a.element.Equal(&c) && b.element.Equal(&c) + }, + genA, + genB, + )) + + properties.Property("Mul: operation result must match big.Int result", prop.ForAll( + func(a, b testPairElement) bool { + { + var c Element + + c.Mul(&a.element, &b.element) + + var d, e big.Int + d.Mul(&a.bigint, &b.bigint).Mod(&d, Modulus()) + + if c.FromMont().ToBigInt(&e).Cmp(&d) != 0 { + return false + } + } + + // fixed elements + // a is random + // r takes special values + testValues := make([]Element, len(staticTestValues)) + copy(testValues, staticTestValues) + + for _, r := range testValues { + var d, e, rb big.Int + r.ToBigIntRegular(&rb) + + var c Element + c.Mul(&a.element, &r) + d.Mul(&a.bigint, &rb).Mod(&d, Modulus()) + + // checking generic impl against asm path + var cGeneric Element + _mulGeneric(&cGeneric, &a.element, &r) + if !cGeneric.Equal(&c) { + // need to give context to failing error. 
+ return false + } + + if c.FromMont().ToBigInt(&e).Cmp(&d) != 0 { + return false + } + } + return true + }, + genA, + genB, + )) + + properties.Property("Mul: operation result must be smaller than modulus", prop.ForAll( + func(a, b testPairElement) bool { + var c Element + + c.Mul(&a.element, &b.element) + + return !c.biggerOrEqualModulus() + }, + genA, + genB, + )) + + properties.Property("Mul: assembly implementation must be consistent with generic one", prop.ForAll( + func(a, b testPairElement) bool { + var c, d Element + c.Mul(&a.element, &b.element) + _mulGeneric(&d, &a.element, &b.element) + return c.Equal(&d) + }, + genA, + genB, + )) + + specialValueTest := func() { + // test special values against special values + testValues := make([]Element, len(staticTestValues)) + copy(testValues, staticTestValues) + + for _, a := range testValues { + var aBig big.Int + a.ToBigIntRegular(&aBig) + for _, b := range testValues { + + var bBig, d, e big.Int + b.ToBigIntRegular(&bBig) + + var c Element + c.Mul(&a, &b) + d.Mul(&aBig, &bBig).Mod(&d, Modulus()) + + // checking asm against generic impl + var cGeneric Element + _mulGeneric(&cGeneric, &a, &b) + if !cGeneric.Equal(&c) { + t.Fatal("Mul failed special test values: asm and generic impl don't match") + } + + if c.FromMont().ToBigInt(&e).Cmp(&d) != 0 { + t.Fatal("Mul failed special test values") + } + } + } + } + + properties.TestingRun(t, gopter.ConsoleReporter(false)) + specialValueTest() +} + +func TestElementDiv(t *testing.T) { + t.Parallel() + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := gen() + genB := gen() + + properties.Property("Div: having the receiver as operand should output the same result", prop.ForAll( + func(a, b testPairElement) bool { + var c, d Element + d.Set(&a.element) + + c.Div(&a.element, &b.element) + 
a.element.Div(&a.element, &b.element) + b.element.Div(&d, &b.element) + + return a.element.Equal(&b.element) && a.element.Equal(&c) && b.element.Equal(&c) + }, + genA, + genB, + )) + + properties.Property("Div: operation result must match big.Int result", prop.ForAll( + func(a, b testPairElement) bool { + { + var c Element + + c.Div(&a.element, &b.element) + + var d, e big.Int + d.ModInverse(&b.bigint, Modulus()) + d.Mul(&d, &a.bigint).Mod(&d, Modulus()) + + if c.FromMont().ToBigInt(&e).Cmp(&d) != 0 { + return false + } + } + + // fixed elements + // a is random + // r takes special values + testValues := make([]Element, len(staticTestValues)) + copy(testValues, staticTestValues) + + for _, r := range testValues { + var d, e, rb big.Int + r.ToBigIntRegular(&rb) + + var c Element + c.Div(&a.element, &r) + d.ModInverse(&rb, Modulus()) + d.Mul(&d, &a.bigint).Mod(&d, Modulus()) + + if c.FromMont().ToBigInt(&e).Cmp(&d) != 0 { + return false + } + } + return true + }, + genA, + genB, + )) + + properties.Property("Div: operation result must be smaller than modulus", prop.ForAll( + func(a, b testPairElement) bool { + var c Element + + c.Div(&a.element, &b.element) + + return !c.biggerOrEqualModulus() + }, + genA, + genB, + )) + + specialValueTest := func() { + // test special values against special values + testValues := make([]Element, len(staticTestValues)) + copy(testValues, staticTestValues) + + for _, a := range testValues { + var aBig big.Int + a.ToBigIntRegular(&aBig) + for _, b := range testValues { + + var bBig, d, e big.Int + b.ToBigIntRegular(&bBig) + + var c Element + c.Div(&a, &b) + d.ModInverse(&bBig, Modulus()) + d.Mul(&d, &aBig).Mod(&d, Modulus()) + + if c.FromMont().ToBigInt(&e).Cmp(&d) != 0 { + t.Fatal("Div failed special test values") + } + } + } + } + + properties.TestingRun(t, gopter.ConsoleReporter(false)) + specialValueTest() +} + +func TestElementExp(t *testing.T) { + t.Parallel() + parameters := gopter.DefaultTestParameters() + if testing.Short() { 
+ parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := gen() + genB := gen() + + properties.Property("Exp: having the receiver as operand should output the same result", prop.ForAll( + func(a, b testPairElement) bool { + var c, d Element + d.Set(&a.element) + + c.Exp(a.element, &b.bigint) + a.element.Exp(a.element, &b.bigint) + b.element.Exp(d, &b.bigint) + + return a.element.Equal(&b.element) && a.element.Equal(&c) && b.element.Equal(&c) + }, + genA, + genB, + )) + + properties.Property("Exp: operation result must match big.Int result", prop.ForAll( + func(a, b testPairElement) bool { + { + var c Element + + c.Exp(a.element, &b.bigint) + + var d, e big.Int + d.Exp(&a.bigint, &b.bigint, Modulus()) + + if c.FromMont().ToBigInt(&e).Cmp(&d) != 0 { + return false + } + } + + // fixed elements + // a is random + // r takes special values + testValues := make([]Element, len(staticTestValues)) + copy(testValues, staticTestValues) + + for _, r := range testValues { + var d, e, rb big.Int + r.ToBigIntRegular(&rb) + + var c Element + c.Exp(a.element, &rb) + d.Exp(&a.bigint, &rb, Modulus()) + + if c.FromMont().ToBigInt(&e).Cmp(&d) != 0 { + return false + } + } + return true + }, + genA, + genB, + )) + + properties.Property("Exp: operation result must be smaller than modulus", prop.ForAll( + func(a, b testPairElement) bool { + var c Element + + c.Exp(a.element, &b.bigint) + + return !c.biggerOrEqualModulus() + }, + genA, + genB, + )) + + specialValueTest := func() { + // test special values against special values + testValues := make([]Element, len(staticTestValues)) + copy(testValues, staticTestValues) + + for _, a := range testValues { + var aBig big.Int + a.ToBigIntRegular(&aBig) + for _, b := range testValues { + + var bBig, d, e big.Int + b.ToBigIntRegular(&bBig) + + var c Element + c.Exp(a, &bBig) + d.Exp(&aBig, &bBig, Modulus()) + + if 
c.FromMont().ToBigInt(&e).Cmp(&d) != 0 { + t.Fatal("Exp failed special test values") + } + } + } + } + + properties.TestingRun(t, gopter.ConsoleReporter(false)) + specialValueTest() +} + +func TestElementSquare(t *testing.T) { + t.Parallel() + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := gen() + + properties.Property("Square: having the receiver as operand should output the same result", prop.ForAll( + func(a testPairElement) bool { + + var b Element + + b.Square(&a.element) + a.element.Square(&a.element) + return a.element.Equal(&b) + }, + genA, + )) + + properties.Property("Square: operation result must match big.Int result", prop.ForAll( + func(a testPairElement) bool { + var c Element + c.Square(&a.element) + + var d, e big.Int + d.Mul(&a.bigint, &a.bigint).Mod(&d, Modulus()) + + return c.FromMont().ToBigInt(&e).Cmp(&d) == 0 + }, + genA, + )) + + properties.Property("Square: operation result must be smaller than modulus", prop.ForAll( + func(a testPairElement) bool { + var c Element + c.Square(&a.element) + return !c.biggerOrEqualModulus() + }, + genA, + )) + + specialValueTest := func() { + // test special values + testValues := make([]Element, len(staticTestValues)) + copy(testValues, staticTestValues) + + for _, a := range testValues { + var aBig big.Int + a.ToBigIntRegular(&aBig) + var c Element + c.Square(&a) + + var d, e big.Int + d.Mul(&aBig, &aBig).Mod(&d, Modulus()) + + if c.FromMont().ToBigInt(&e).Cmp(&d) != 0 { + t.Fatal("Square failed special test values") + } + } + } + + properties.TestingRun(t, gopter.ConsoleReporter(false)) + specialValueTest() +} + +func TestElementInverse(t *testing.T) { + t.Parallel() + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = 
nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := gen() + + properties.Property("Inverse: having the receiver as operand should output the same result", prop.ForAll( + func(a testPairElement) bool { + + var b Element + + b.Inverse(&a.element) + a.element.Inverse(&a.element) + return a.element.Equal(&b) + }, + genA, + )) + + properties.Property("Inverse: operation result must match big.Int result", prop.ForAll( + func(a testPairElement) bool { + var c Element + c.Inverse(&a.element) + + var d, e big.Int + d.ModInverse(&a.bigint, Modulus()) + + return c.FromMont().ToBigInt(&e).Cmp(&d) == 0 + }, + genA, + )) + + properties.Property("Inverse: operation result must be smaller than modulus", prop.ForAll( + func(a testPairElement) bool { + var c Element + c.Inverse(&a.element) + return !c.biggerOrEqualModulus() + }, + genA, + )) + + specialValueTest := func() { + // test special values + testValues := make([]Element, len(staticTestValues)) + copy(testValues, staticTestValues) + + for _, a := range testValues { + var aBig big.Int + a.ToBigIntRegular(&aBig) + var c Element + c.Inverse(&a) + + var d, e big.Int + d.ModInverse(&aBig, Modulus()) + + if c.FromMont().ToBigInt(&e).Cmp(&d) != 0 { + t.Fatal("Inverse failed special test values") + } + } + } + + properties.TestingRun(t, gopter.ConsoleReporter(false)) + specialValueTest() +} + +func TestElementSqrt(t *testing.T) { + t.Parallel() + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := gen() + + properties.Property("Sqrt: having the receiver as operand should output the same result", prop.ForAll( + func(a testPairElement) bool { + + b := a.element + + b.Sqrt(&a.element) + a.element.Sqrt(&a.element) + return a.element.Equal(&b) + }, + genA, + )) + + properties.Property("Sqrt: operation result must match big.Int result", 
prop.ForAll( + func(a testPairElement) bool { + var c Element + c.Sqrt(&a.element) + + var d, e big.Int + d.ModSqrt(&a.bigint, Modulus()) + + return c.FromMont().ToBigInt(&e).Cmp(&d) == 0 + }, + genA, + )) + + properties.Property("Sqrt: operation result must be smaller than modulus", prop.ForAll( + func(a testPairElement) bool { + var c Element + c.Sqrt(&a.element) + return !c.biggerOrEqualModulus() + }, + genA, + )) + + specialValueTest := func() { + // test special values + testValues := make([]Element, len(staticTestValues)) + copy(testValues, staticTestValues) + + for _, a := range testValues { + var aBig big.Int + a.ToBigIntRegular(&aBig) + var c Element + c.Sqrt(&a) + + var d, e big.Int + d.ModSqrt(&aBig, Modulus()) + + if c.FromMont().ToBigInt(&e).Cmp(&d) != 0 { + t.Fatal("Sqrt failed special test values") + } + } + } + + properties.TestingRun(t, gopter.ConsoleReporter(false)) + specialValueTest() +} + +func TestElementDouble(t *testing.T) { + t.Parallel() + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := gen() + + properties.Property("Double: having the receiver as operand should output the same result", prop.ForAll( + func(a testPairElement) bool { + + var b Element + + b.Double(&a.element) + a.element.Double(&a.element) + return a.element.Equal(&b) + }, + genA, + )) + + properties.Property("Double: operation result must match big.Int result", prop.ForAll( + func(a testPairElement) bool { + var c Element + c.Double(&a.element) + + var d, e big.Int + d.Lsh(&a.bigint, 1).Mod(&d, Modulus()) + + return c.FromMont().ToBigInt(&e).Cmp(&d) == 0 + }, + genA, + )) + + properties.Property("Double: operation result must be smaller than modulus", prop.ForAll( + func(a testPairElement) bool { + var c Element + c.Double(&a.element) + return !c.biggerOrEqualModulus() + }, + genA, + )) + + 
properties.Property("Double: assembly implementation must be consistent with generic one", prop.ForAll( + func(a testPairElement) bool { + var c, d Element + c.Double(&a.element) + _doubleGeneric(&d, &a.element) + return c.Equal(&d) + }, + genA, + )) + + specialValueTest := func() { + // test special values + testValues := make([]Element, len(staticTestValues)) + copy(testValues, staticTestValues) + + for _, a := range testValues { + var aBig big.Int + a.ToBigIntRegular(&aBig) + var c Element + c.Double(&a) + + var d, e big.Int + d.Lsh(&aBig, 1).Mod(&d, Modulus()) + + // checking asm against generic impl + var cGeneric Element + _doubleGeneric(&cGeneric, &a) + if !cGeneric.Equal(&c) { + t.Fatal("Double failed special test values: asm and generic impl don't match") + } + + if c.FromMont().ToBigInt(&e).Cmp(&d) != 0 { + t.Fatal("Double failed special test values") + } + } + } + + properties.TestingRun(t, gopter.ConsoleReporter(false)) + specialValueTest() +} + +func TestElementNeg(t *testing.T) { + t.Parallel() + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := gen() + + properties.Property("Neg: having the receiver as operand should output the same result", prop.ForAll( + func(a testPairElement) bool { + + var b Element + + b.Neg(&a.element) + a.element.Neg(&a.element) + return a.element.Equal(&b) + }, + genA, + )) + + properties.Property("Neg: operation result must match big.Int result", prop.ForAll( + func(a testPairElement) bool { + var c Element + c.Neg(&a.element) + + var d, e big.Int + d.Neg(&a.bigint).Mod(&d, Modulus()) + + return c.FromMont().ToBigInt(&e).Cmp(&d) == 0 + }, + genA, + )) + + properties.Property("Neg: operation result must be smaller than modulus", prop.ForAll( + func(a testPairElement) bool { + var c Element + c.Neg(&a.element) + return !c.biggerOrEqualModulus() + 
}, + genA, + )) + + properties.Property("Neg: assembly implementation must be consistent with generic one", prop.ForAll( + func(a testPairElement) bool { + var c, d Element + c.Neg(&a.element) + _negGeneric(&d, &a.element) + return c.Equal(&d) + }, + genA, + )) + + specialValueTest := func() { + // test special values + testValues := make([]Element, len(staticTestValues)) + copy(testValues, staticTestValues) + + for _, a := range testValues { + var aBig big.Int + a.ToBigIntRegular(&aBig) + var c Element + c.Neg(&a) + + var d, e big.Int + d.Neg(&aBig).Mod(&d, Modulus()) + + // checking asm against generic impl + var cGeneric Element + _negGeneric(&cGeneric, &a) + if !cGeneric.Equal(&c) { + t.Fatal("Neg failed special test values: asm and generic impl don't match") + } + + if c.FromMont().ToBigInt(&e).Cmp(&d) != 0 { + t.Fatal("Neg failed special test values") + } + } + } + + properties.TestingRun(t, gopter.ConsoleReporter(false)) + specialValueTest() +} + +func TestElementFixedExp(t *testing.T) { + + t.Parallel() + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + var ( + _bLegendreExponentElement *big.Int + _bSqrtExponentElement *big.Int + ) + + _bLegendreExponentElement, _ = new(big.Int).SetString("7fffffff80000000", 16) + const sqrtExponentElement = "7fffffff" + _bSqrtExponentElement, _ = new(big.Int).SetString(sqrtExponentElement, 16) + + genA := gen() + + properties.Property(fmt.Sprintf("expBySqrtExp must match Exp(%s)", sqrtExponentElement), prop.ForAll( + func(a testPairElement) bool { + c := a.element + d := a.element + c.expBySqrtExp(c) + d.Exp(d, _bSqrtExponentElement) + return c.Equal(&d) + }, + genA, + )) + + properties.Property("expByLegendreExp must match Exp(7fffffff80000000)", prop.ForAll( + func(a testPairElement) bool { + c := a.element + d := a.element + c.expByLegendreExp(c) 
+ d.Exp(d, _bLegendreExponentElement) + return c.Equal(&d) + }, + genA, + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) +} + +func TestElementHalve(t *testing.T) { + + t.Parallel() + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := gen() + var twoInv Element + twoInv.SetUint64(2) + twoInv.Inverse(&twoInv) + + properties.Property("z.Halve must match z / 2", prop.ForAll( + func(a testPairElement) bool { + c := a.element + d := a.element + c.Halve() + d.Mul(&d, &twoInv) + return c.Equal(&d) + }, + genA, + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) +} + +func combineSelectionArguments(c int64, z int8) int { + if z%3 == 0 { + return 0 + } + return int(c) +} + +func TestElementSelect(t *testing.T) { + t.Parallel() + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := genFull() + genB := genFull() + genC := ggen.Int64() //the condition + genZ := ggen.Int8() //to make zeros artificially more likely + + properties.Property("Select: must select correctly", prop.ForAll( + func(a, b Element, cond int64, z int8) bool { + condC := combineSelectionArguments(cond, z) + + var c Element + c.Select(condC, &a, &b) + + if condC == 0 { + return c.Equal(&a) + } + return c.Equal(&b) + }, + genA, + genB, + genC, + genZ, + )) + + properties.Property("Select: having the receiver as operand should output the same result", prop.ForAll( + func(a, b Element, cond int64, z int8) bool { + condC := combineSelectionArguments(cond, z) + + var c, d Element + d.Set(&a) + c.Select(condC, &a, &b) + a.Select(condC, &a, &b) + b.Select(condC, &d, &b) + return a.Equal(&b) && a.Equal(&c) && b.Equal(&c) + }, + genA, 
+ genB, + genC, + genZ, + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) +} + +func TestElementSetInt64(t *testing.T) { + + t.Parallel() + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := gen() + + properties.Property("z.SetInt64 must match z.SetString", prop.ForAll( + func(a testPairElement, v int64) bool { + c := a.element + d := a.element + + c.SetInt64(v) + d.SetString(fmt.Sprintf("%v", v)) + + return c.Equal(&d) + }, + genA, ggen.Int64(), + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) +} + +func TestElementSetInterface(t *testing.T) { + + t.Parallel() + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := gen() + genInt := ggen.Int + genInt8 := ggen.Int8 + genInt16 := ggen.Int16 + genInt32 := ggen.Int32 + genInt64 := ggen.Int64 + + genUint := ggen.UInt + genUint8 := ggen.UInt8 + genUint16 := ggen.UInt16 + genUint32 := ggen.UInt32 + genUint64 := ggen.UInt64 + + properties.Property("z.SetInterface must match z.SetString with int8", prop.ForAll( + func(a testPairElement, v int8) bool { + c := a.element + d := a.element + + c.SetInterface(v) + d.SetString(fmt.Sprintf("%v", v)) + + return c.Equal(&d) + }, + genA, genInt8(), + )) + + properties.Property("z.SetInterface must match z.SetString with int16", prop.ForAll( + func(a testPairElement, v int16) bool { + c := a.element + d := a.element + + c.SetInterface(v) + d.SetString(fmt.Sprintf("%v", v)) + + return c.Equal(&d) + }, + genA, genInt16(), + )) + + properties.Property("z.SetInterface must match z.SetString with int32", prop.ForAll( + func(a testPairElement, v int32) bool { + c := a.element + d := a.element + + 
c.SetInterface(v) + d.SetString(fmt.Sprintf("%v", v)) + + return c.Equal(&d) + }, + genA, genInt32(), + )) + + properties.Property("z.SetInterface must match z.SetString with int64", prop.ForAll( + func(a testPairElement, v int64) bool { + c := a.element + d := a.element + + c.SetInterface(v) + d.SetString(fmt.Sprintf("%v", v)) + + return c.Equal(&d) + }, + genA, genInt64(), + )) + + properties.Property("z.SetInterface must match z.SetString with int", prop.ForAll( + func(a testPairElement, v int) bool { + c := a.element + d := a.element + + c.SetInterface(v) + d.SetString(fmt.Sprintf("%v", v)) + + return c.Equal(&d) + }, + genA, genInt(), + )) + + properties.Property("z.SetInterface must match z.SetString with uint8", prop.ForAll( + func(a testPairElement, v uint8) bool { + c := a.element + d := a.element + + c.SetInterface(v) + d.SetString(fmt.Sprintf("%v", v)) + + return c.Equal(&d) + }, + genA, genUint8(), + )) + + properties.Property("z.SetInterface must match z.SetString with uint16", prop.ForAll( + func(a testPairElement, v uint16) bool { + c := a.element + d := a.element + + c.SetInterface(v) + d.SetString(fmt.Sprintf("%v", v)) + + return c.Equal(&d) + }, + genA, genUint16(), + )) + + properties.Property("z.SetInterface must match z.SetString with uint32", prop.ForAll( + func(a testPairElement, v uint32) bool { + c := a.element + d := a.element + + c.SetInterface(v) + d.SetString(fmt.Sprintf("%v", v)) + + return c.Equal(&d) + }, + genA, genUint32(), + )) + + properties.Property("z.SetInterface must match z.SetString with uint64", prop.ForAll( + func(a testPairElement, v uint64) bool { + c := a.element + d := a.element + + c.SetInterface(v) + d.SetString(fmt.Sprintf("%v", v)) + + return c.Equal(&d) + }, + genA, genUint64(), + )) + + properties.Property("z.SetInterface must match z.SetString with uint", prop.ForAll( + func(a testPairElement, v uint) bool { + c := a.element + d := a.element + + c.SetInterface(v) + d.SetString(fmt.Sprintf("%v", v)) + + return 
c.Equal(&d) + }, + genA, genUint(), + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) +} + +func TestElementNegativeExp(t *testing.T) { + t.Parallel() + + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := gen() + + properties.Property("x⁻ᵏ == 1/xᵏ", prop.ForAll( + func(a, b testPairElement) bool { + + var nb, d, e big.Int + nb.Neg(&b.bigint) + + var c Element + c.Exp(a.element, &nb) + + d.Exp(&a.bigint, &nb, Modulus()) + + return c.FromMont().ToBigInt(&e).Cmp(&d) == 0 + }, + genA, genA, + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) +} + +func TestElementBatchInvert(t *testing.T) { + assert := require.New(t) + + t.Parallel() + + // ensure batchInvert([x]) == invert(x) + for i := int64(-1); i <= 2; i++ { + var e, eInv Element + e.SetInt64(i) + eInv.Inverse(&e) + + a := []Element{e} + aInv := BatchInvert(a) + + assert.True(aInv[0].Equal(&eInv), "batchInvert != invert") + + } + + // test x * x⁻¹ == 1 + tData := [][]int64{ + {-1, 1, 2, 3}, + {0, -1, 1, 2, 3, 0}, + {0, -1, 1, 0, 2, 3, 0}, + {-1, 1, 0, 2, 3}, + {0, 0, 1}, + {1, 0, 0}, + {0, 0, 0}, + } + + for _, t := range tData { + a := make([]Element, len(t)) + for i := 0; i < len(a); i++ { + a[i].SetInt64(t[i]) + } + + aInv := BatchInvert(a) + + assert.True(len(aInv) == len(a)) + + for i := 0; i < len(a); i++ { + if a[i].IsZero() { + assert.True(aInv[i].IsZero(), "0⁻¹ != 0") + } else { + assert.True(a[i].Mul(&a[i], &aInv[i]).IsOne(), "x * x⁻¹ != 1") + } + } + } + + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := gen() + + properties.Property("batchInvert --> x * x⁻¹ == 1", prop.ForAll( + func(tp testPairElement, r uint8) 
bool { + + a := make([]Element, r) + if r != 0 { + a[0] = tp.element + + } + one := One() + for i := 1; i < len(a); i++ { + a[i].Add(&a[i-1], &one) + } + + aInv := BatchInvert(a) + + assert.True(len(aInv) == len(a)) + + for i := 0; i < len(a); i++ { + if a[i].IsZero() { + if !aInv[i].IsZero() { + return false + } + } else { + if !a[i].Mul(&a[i], &aInv[i]).IsOne() { + return false + } + } + } + return true + }, + genA, ggen.UInt8(), + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) +} + +func TestElementFromMont(t *testing.T) { + + t.Parallel() + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := gen() + + properties.Property("Assembly implementation must be consistent with generic one", prop.ForAll( + func(a testPairElement) bool { + c := a.element + d := a.element + c.FromMont() + _fromMontGeneric(&d) + return c.Equal(&d) + }, + genA, + )) + + properties.Property("x.FromMont().ToMont() == x", prop.ForAll( + func(a testPairElement) bool { + c := a.element + c.FromMont().ToMont() + return c.Equal(&a.element) + }, + genA, + )) + + properties.TestingRun(t, gopter.ConsoleReporter(false)) +} + +func TestElementJSON(t *testing.T) { + assert := require.New(t) + + type S struct { + A Element + B [3]Element + C *Element + D *Element + } + + // encode to JSON + var s S + s.A.SetString("-1") + s.B[2].SetUint64(42) + s.D = new(Element).SetUint64(8000) + + encoded, err := json.Marshal(&s) + assert.NoError(err) + // since our modulus is on 1 word, we may need to adjust "42" and "8000" values; + formatValue := func(v int64) string { + const maxUint16 = 65535 + var a, aNeg big.Int + a.SetInt64(v) + a.Mod(&a, Modulus()) + aNeg.Neg(&a).Mod(&aNeg, Modulus()) + fmt.Println("aNeg", aNeg.Text(10)) + if aNeg.Uint64() != 0 && aNeg.Uint64() <= maxUint16 { + return "-" + aNeg.Text(10) + } + return 
a.Text(10) + } + expected := fmt.Sprintf("{\"A\":-1,\"B\":[0,0,%s],\"C\":null,\"D\":%s}", formatValue(42), formatValue(8000)) + assert.Equal(expected, string(encoded)) + + // decode valid + var decoded S + err = json.Unmarshal([]byte(expected), &decoded) + assert.NoError(err) + + assert.Equal(s, decoded, "element -> json -> element round trip failed") + + // decode hex and string values + withHexValues := "{\"A\":\"-1\",\"B\":[0,\"0x00000\",\"0x2A\"],\"C\":null,\"D\":\"8000\"}" + + var decodedS S + err = json.Unmarshal([]byte(withHexValues), &decodedS) + assert.NoError(err) + + assert.Equal(s, decodedS, " json with strings -> element failed") + +} + +type testPairElement struct { + element Element + bigint big.Int +} + +func (z *Element) biggerOrEqualModulus() bool { + + return z[0] >= qElement[0] +} + +func gen() gopter.Gen { + return func(genParams *gopter.GenParameters) *gopter.GenResult { + var g testPairElement + + g.element = Element{ + genParams.NextUint64(), + } + if qElement[0] != ^uint64(0) { + g.element[0] %= (qElement[0] + 1) + } + + for g.element.biggerOrEqualModulus() { + g.element = Element{ + genParams.NextUint64(), + } + if qElement[0] != ^uint64(0) { + g.element[0] %= (qElement[0] + 1) + } + } + + g.element.ToBigIntRegular(&g.bigint) + genResult := gopter.NewGenResult(g, gopter.NoShrinker) + return genResult + } +} + +func genFull() gopter.Gen { + return func(genParams *gopter.GenParameters) *gopter.GenResult { + + genRandomFq := func() Element { + var g Element + + g = Element{ + genParams.NextUint64(), + } + + if qElement[0] != ^uint64(0) { + g[0] %= (qElement[0] + 1) + } + + for g.biggerOrEqualModulus() { + g = Element{ + genParams.NextUint64(), + } + if qElement[0] != ^uint64(0) { + g[0] %= (qElement[0] + 1) + } + } + + return g + } + a := genRandomFq() + + var carry uint64 + a[0], _ = bits.Add64(a[0], qElement[0], carry) + + genResult := gopter.NewGenResult(a, gopter.NoShrinker) + return genResult + } +} diff --git 
a/field/goldilocks/internal/addchain/7fffffff b/field/goldilocks/internal/addchain/7fffffff new file mode 100644 index 0000000000..b281f66761 Binary files /dev/null and b/field/goldilocks/internal/addchain/7fffffff differ diff --git a/field/goldilocks/internal/addchain/7fffffff80000000 b/field/goldilocks/internal/addchain/7fffffff80000000 new file mode 100644 index 0000000000..c1d158b8e6 Binary files /dev/null and b/field/goldilocks/internal/addchain/7fffffff80000000 differ diff --git a/field/goldilocks/internal/main.go b/field/goldilocks/internal/main.go new file mode 100644 index 0000000000..9571942d02 --- /dev/null +++ b/field/goldilocks/internal/main.go @@ -0,0 +1,21 @@ +package main + +import ( + "fmt" + + "github.com/consensys/gnark-crypto/field" + "github.com/consensys/gnark-crypto/field/generator" +) + +//go:generate go run main.go +func main() { + const modulus = "0xFFFFFFFF00000001" + goldilocks, err := field.NewField("goldilocks", "Element", modulus, true) + if err != nil { + panic(err) + } + if err := generator.GenerateFF(goldilocks, "../"); err != nil { + panic(err) + } + fmt.Println("successfully generated goldilocks field") +} diff --git a/field/internal/templates/element/arith.go b/field/internal/templates/element/arith.go index 06a7805588..edd98b6197 100644 --- a/field/internal/templates/element/arith.go +++ b/field/internal/templates/element/arith.go @@ -14,6 +14,7 @@ func madd0(a, b, c uint64) (hi uint64) { return } +{{- if ne .NbWords 1}} // madd1 hi, lo = a*b + c func madd1(a, b, c uint64) (hi uint64, lo uint64) { var carry uint64 @@ -45,5 +46,6 @@ func madd3(a, b, c, d, e uint64) (hi uint64, lo uint64) { return } +{{- end}} ` diff --git a/field/internal/templates/element/base.go b/field/internal/templates/element/base.go index 24926e01ae..fef47f6e63 100644 --- a/field/internal/templates/element/base.go +++ b/field/internal/templates/element/base.go @@ -52,15 +52,15 @@ func Modulus() *big.Int { {{- range $i := $.NbWordsIndexesFull}} const 
q{{$.ElementName}}Word{{$i}} uint64 = {{index $.Q $i}} {{- end}} +{{- if eq .NbWords 1}} +const q uint64 = q{{$.ElementName}}Word0 +{{- end}} var q{{.ElementName}} = {{.ElementName}}{ {{- range $i := $.NbWordsIndexesFull}} q{{$.ElementName}}Word{{$i}},{{end}} } -// Used for Montgomery reduction. (qInvNeg) q + r'.r = 1, i.e., qInvNeg = - q⁻¹ mod r -const qInvNegLsw uint64 = {{index .QInverse 0}} - // rSquare var rSquare = {{.ElementName}}{ {{- range $i := .RSquare}} @@ -75,7 +75,8 @@ var bigIntPool = sync.Pool{ } func init() { - _modulus.SetString("{{.Modulus}}", 10) + // base10: {{.Modulus}} + _modulus.SetString("{{.ModulusHex}}", 16) } // New{{.ElementName}} returns a new {{.ElementName}} from a uint64 value @@ -221,14 +222,22 @@ func (z *{{.ElementName}}) IsZero() bool { // IsOne returns z == 1 func (z *{{.ElementName}}) IsOne() bool { + {{- if eq .NbWords 1}} + return z[0] == {{index $.One 0}} + {{- else}} return ( {{- range $i := reverse .NbWordsIndexesNoZero }} z[{{$i}}] ^ {{index $.One $i}} | {{- end}} z[0] ^ {{index $.One 0}} ) == 0 + {{- end}} } // IsUint64 reports whether z can be represented as an uint64. func (z *{{.ElementName}}) IsUint64() bool { - zz := *z - zz.FromMont() - return zz.FitsOnOneWord() + {{- if eq .NbWords 1}} + return true + {{- else}} + zz := *z + zz.FromMont() + return zz.FitsOnOneWord() + {{- end}} } // Uint64 returns the uint64 representation of x. If x cannot be represented in a uint64, the result is undefined. @@ -302,7 +311,9 @@ func (z *{{.ElementName}}) SetRandom() (*{{.ElementName}}, error) { {{- end}} z[{{$.NbWordsLastIndex}}] %= {{index $.Q $.NbWordsLastIndex}} - {{ template "reduce" . }} + {{- if ne .NbWords 1}} + {{ template "reduce" . }} + {{- end}} return z, nil } @@ -316,32 +327,53 @@ func One() {{.ElementName}} { // Halve sets z to z / 2 (mod p) func (z *{{.ElementName}}) Halve() { - {{- if .NoCarry}} - if z[0]&1 == 1 { - {{ template "add_q" dict "all" . 
"V1" "z" }} + {{- if not (and (eq .NbWords 1) (.NoCarry))}} + var carry uint64 + {{- end}} + + if z[0]&1 == 1 { + {{- template "add_q" dict "all" . "V1" "z" }} + } + {{- rsh "z" .NbWords}} + + {{- if not .NoCarry}} + if carry != 0 { + // when we added q, the result was larger than our avaible limbs + // when we shift right, we need to set the highest bit + z[{{.NbWordsLastIndex}}] |= (1 << 63) } - {{ rsh "z" .NbWords}} - {{ else}} - var twoInv {{.ElementName}} - twoInv.SetOne().Double(&twoInv).Inverse(&twoInv) - z.Mul(z, &twoInv) {{end}} } +{{ define "add_q" }} + // {{$.V1}} = {{$.V1}} + q + {{- range $i := $.all.NbWordsIndexesFull }} + {{- $carryIn := ne $i 0}} + {{- $carryOut := or (ne $i $.all.NbWordsLastIndex) (and (eq $i $.all.NbWordsLastIndex) (not $.all.NoCarry))}} + {{$.V1}}[{{$i}}], {{- if $carryOut}}carry{{- else}}_{{- end}} = bits.Add64({{$.V1}}[{{$i}}], {{index $.all.Q $i}}, {{- if $carryIn}}carry{{- else}}0{{- end}}) + {{- end}} +{{ end }} -// API with assembly impl // Mul z = x * y mod q // see https://hackmd.io/@gnark/modular_multiplication func (z *{{.ElementName}}) Mul(x, y *{{.ElementName}}) *{{.ElementName}} { - mul(z, x, y) + {{- if eq $.NbWords 1}} + {{ template "mul_cios_one_limb" dict "all" . "V1" "x" "V2" "y" }} + {{- else }} + mul(z, x, y) + {{- end }} return z } // Square z = x * x mod q // see https://hackmd.io/@gnark/modular_multiplication func (z *{{.ElementName}}) Square(x *{{.ElementName}}) *{{.ElementName}} { - mul(z,x, x) + {{- if eq $.NbWords 1}} + {{ template "mul_cios_one_limb" dict "all" . "V1" "x" "V2" "x" }} + {{- else }} + mul(z, x, x) + {{- end }} return z } @@ -390,17 +422,15 @@ func (z *{{.ElementName}}) Select(c int, x0 *{{.ElementName}}, x1 *{{.ElementNam // Generic (no ADX instructions, no AMD64) versions of multiplication and squaring algorithms func _mulGeneric(z,x,y *{{.ElementName}}) { - {{ if .NoCarry}} + {{ if eq $.NbWords 1}} + {{ template "mul_cios_one_limb" dict "all" . 
"V1" "x" "V2" "y" }} + {{ else if .NoCarry}} {{ template "mul_nocarry" dict "all" . "V1" "x" "V2" "y"}} + {{ template "reduce" . }} {{ else }} - {{ template "mul_cios" dict "all" . "V1" "x" "V2" "y" "NoReturn" true}} + {{ template "mul_cios" dict "all" . "V1" "x" "V2" "y" }} + {{ template "reduce" . }} {{ end }} - {{ template "reduce" . }} -} - -func _mulWGeneric(z,x *{{.ElementName}}, y uint64) { - {{ template "mul_nocarry_v2" dict "all" . "V2" "x"}} - {{ template "reduce" . }} } @@ -433,23 +463,43 @@ func _addGeneric(z, x, y *{{.ElementName}}) { {{- $hasCarry := or (not $.NoCarry) (lt $i $.NbWordsLastIndex)}} z[{{$i}}], {{- if $hasCarry}}carry{{- else}}_{{- end}} = bits.Add64(x[{{$i}}], y[{{$i}}], {{- if eq $i 0}}0{{- else}}carry{{- end}}) {{- end}} - {{- if not .NoCarry}} - // if we overflowed the last addition, z >= q - // if z >= q, z = z - q - if carry != 0 { - // we overflowed, so z >= q - z[0], {{- if gt $.NbWords 1}}carry{{- else}}_{{- end}} = bits.Sub64(z[0], {{index $.Q 0}}, 0) - {{- range $i := .NbWordsIndexesNoZero}} - z[{{$i}}], carry = bits.Sub64(z[{{$i}}], {{index $.Q $i}}, carry) - {{- end}} - return + + {{- if eq $.NbWords 1}} + if {{- if not .NoCarry}} carry != 0 ||{{- end }} z[0] >= q { + z[0] -= q } - {{- end}} + {{- else}} + {{- if not .NoCarry}} + // if we overflowed the last addition, z >= q + // if z >= q, z = z - q + if carry != 0 { + var b uint64 + // we overflowed, so z >= q + {{- range $i := iterate 0 $.NbWords}} + {{- $hasBorrow := lt $i $.NbWordsLastIndex}} + z[{{$i}}], {{- if $hasBorrow}}b{{- else}}_{{- end}} = bits.Sub64(z[{{$i}}], {{index $.Q $i}}, {{- if eq $i 0}}0{{- else}}b{{- end}}) + {{- end}} + return + } + {{- end}} - {{ template "reduce" .}} + {{ template "reduce" .}} + {{- end}} } func _doubleGeneric(z, x *{{.ElementName}}) { + {{- if eq .NbWords 1}} + if x[0] & (1 << 63) == (1 << 63) { + // if highest bit is set, then we have a carry to x + x, we shift and subtract q + z[0] = (x[0] << 1) - q + } else { + // highest bit is 
not set, but x + x can still be >= q + z[0] = (x[0] << 1) + if z[0] >= q { + z[0] -= q + } + } + {{- else}} {{ $hasCarry := or (not $.NoCarry) (gt $.NbWords 1)}} {{- if $hasCarry}} var carry uint64 @@ -462,16 +512,18 @@ func _doubleGeneric(z, x *{{.ElementName}}) { // if we overflowed the last addition, z >= q // if z >= q, z = z - q if carry != 0 { + var b uint64 // we overflowed, so z >= q - z[0], {{- if gt $.NbWords 1}}carry{{- else}}_{{- end}} = bits.Sub64(z[0], {{index $.Q 0}}, 0) - {{- range $i := .NbWordsIndexesNoZero}} - z[{{$i}}], carry = bits.Sub64(z[{{$i}}], {{index $.Q $i}}, carry) + {{- range $i := iterate 0 $.NbWords}} + {{- $hasBorrow := lt $i $.NbWordsLastIndex}} + z[{{$i}}], {{- if $hasBorrow}}b{{- else}}_{{- end}} = bits.Sub64(z[{{$i}}], {{index $.Q $i}}, {{- if eq $i 0}}0{{- else}}b{{- end}}) {{- end}} return } {{- end}} {{ template "reduce" .}} + {{- end}} } @@ -483,7 +535,7 @@ func _subGeneric(z, x, y *{{.ElementName}}) { {{- end}} if b != 0 { {{- if eq .NbWords 1}} - z[0], _ = bits.Add64(z[0], {{index $.Q 0}}, 0) + z[0] += q {{- else}} var c uint64 z[0], c = bits.Add64(z[0], {{index $.Q 0}}, 0) @@ -504,7 +556,7 @@ func _negGeneric(z, x *{{.ElementName}}) { return } {{- if eq .NbWords 1}} - z[0], _ = bits.Sub64({{index $.Q 0}}, x[0], 0) + z[0] = q - x[0] {{- else}} var borrow uint64 z[0], borrow = bits.Sub64({{index $.Q 0}}, x[0], 0) @@ -520,7 +572,7 @@ func _negGeneric(z, x *{{.ElementName}}) { func _reduceGeneric(z *{{.ElementName}}) { - {{ template "reduce" . }} + {{ template "reduce" . 
}} } func mulByConstant(z *{{.ElementName}}, c uint8) { @@ -600,26 +652,11 @@ func (z *{{.ElementName}}) BitLen() int { return bits.Len64(z[0]) } -{{ define "add_q" }} - // {{$.V1}} = {{$.V1}} + q - {{- if eq .all.NbWordsLastIndex 0}} - {{$.V1}}[0], _ = bits.Add64({{$.V1}}[0], {{index $.all.Q 0}}, 0) - {{- else}} - var carry uint64 - {{$.V1}}[0], carry = bits.Add64({{$.V1}}[0], {{index $.all.Q 0}}, 0) - {{- range $i := .all.NbWordsIndexesNoZero}} - {{- if eq $i $.all.NbWordsLastIndex}} - {{$.V1}}[{{$i}}], _ = bits.Add64({{$.V1}}[{{$i}}], {{index $.all.Q $i}}, carry) - {{- else}} - {{$.V1}}[{{$i}}], carry = bits.Add64({{$.V1}}[{{$i}}], {{index $.all.Q $i}}, carry) - {{- end}} - {{- end}} - {{- end}} -{{ end }} + {{ define "rsh V nbWords" }} // {{$.V}} = {{$.V}} >> 1 - {{$lastIndex := sub .nbWords 1}} + {{- $lastIndex := sub .nbWords 1}} {{- range $i := iterate 0 $lastIndex}} {{$.V}}[{{$i}}] = {{$.V}}[{{$i}}] >> 1 | {{$.V}}[{{(add $i 1)}}] << 63 {{- end}} @@ -627,4 +664,5 @@ func (z *{{.ElementName}}) BitLen() int { {{ end }} + ` diff --git a/field/internal/templates/element/conv.go b/field/internal/templates/element/conv.go index dccf2ab5b5..6bf69f2b10 100644 --- a/field/internal/templates/element/conv.go +++ b/field/internal/templates/element/conv.go @@ -112,6 +112,13 @@ func (z *{{.ElementName}}) Marshal() []byte { // SetBytes interprets e as the bytes of a big-endian unsigned integer, // sets z to that value (in Montgomery form), and returns z. 
func (z *{{.ElementName}}) SetBytes(e []byte) *{{.ElementName}} { + {{- if eq .NbWords 1}} + if len(e) == 8 { + // fast path + z[0] = binary.BigEndian.Uint64(e) + return z.ToMont() + } + {{- end}} // get a big int from our pool vv := bigIntPool.Get().(*big.Int) vv.SetBytes(e) diff --git a/field/internal/templates/element/exp.go b/field/internal/templates/element/exp.go index 5221a3ac4d..be5080cfa3 100644 --- a/field/internal/templates/element/exp.go +++ b/field/internal/templates/element/exp.go @@ -1,18 +1,30 @@ package element const Exp = ` -// Exp z = x^exponent mod q -func (z *{{.ElementName}}) Exp(x {{.ElementName}}, exponent *big.Int) *{{.ElementName}} { - var bZero big.Int - if exponent.Cmp(&bZero) == 0 { +// Exp z = xᵏ mod q +func (z *{{.ElementName}}) Exp(x {{.ElementName}}, k *big.Int) *{{.ElementName}} { + if k.IsUint64() && k.Uint64() == 0 { return z.SetOne() } + e := k + if k.Sign() == -1 { + // negative k, we invert + // if k < 0: xᵏ mod q == (x⁻¹)ᵏ mod q + x.Inverse(&x) + + // we negate k in a temp big.Int since + // Int.Bit(_) of k and -k is different + e = bigIntPool.Get().(*big.Int) + defer bigIntPool.Put(e) + e.Neg(k) + } + z.Set(&x) - for i := exponent.BitLen() - 2; i >= 0; i-- { + for i := e.BitLen() - 2; i >= 0; i-- { z.Square(z) - if exponent.Bit(i) == 1 { + if e.Bit(i) == 1 { z.Mul(z, &x) } } diff --git a/field/internal/templates/element/inverse.go b/field/internal/templates/element/inverse.go index bffec9235d..3be7b10539 100644 --- a/field/internal/templates/element/inverse.go +++ b/field/internal/templates/element/inverse.go @@ -22,15 +22,84 @@ if b != 0 { {{/* We use big.Int for Inverse for these type of moduli */}} {{if not $UsingP20Inverse}} +{{- if eq .NbWords 1}} +// Inverse z = x⁻¹ mod q +// Algorithm 16 in "Efficient Software-Implementation of Finite Fields with Applications to Cryptography" +// if x == 0, sets and returns z = x +func (z *{{.ElementName}}) Inverse( x *{{.ElementName}}) *{{.ElementName}} { + const q uint64 = 
q{{.ElementName}}Word0 + if x.IsZero() { + z.SetZero() + return z + } + + var r,s,u,v uint64 + u = q // u = q + s = {{index .RSquare 0}} // s = r^2 + r = 0 + v = x[0] + + var carry, borrow uint64 + + for (u != 1) && (v != 1){ + for v&1 == 0 { + v >>= 1 + if s&1 == 0 { + s >>= 1 + } else { + s, carry = bits.Add64(s, q, 0) + s >>= 1 + if carry != 0 { + s |= (1 << 63) + } + } + } + for u&1 == 0 { + u >>= 1 + if r&1 == 0 { + r >>= 1 + } else { + r, carry = bits.Add64(r, q, 0) + r >>= 1 + if carry != 0 { + r |= (1 << 63) + } + } + } + if v >= u { + v -= u + s, borrow = bits.Sub64(s, r, 0) + if borrow == 1 { + s += q + } + } else { + u -= v + r, borrow = bits.Sub64(r, s, 0) + if borrow == 1 { + r += q + } + } + } + + if u == 1 { + z[0] = r + } else { + z[0] = s + } + + return z +} +{{- else}} // Inverse z = x⁻¹ mod q // note: allocates a big.Int (math/big) func (z *{{.ElementName}}) Inverse( x *{{.ElementName}}) *{{.ElementName}} { var _xNonMont big.Int - x.ToBigIntRegular( &_xNonMont) + x.ToBigIntRegular(&_xNonMont) _xNonMont.ModInverse(&_xNonMont, Modulus()) z.SetBigInt(&_xNonMont) return z } +{{- end}} {{ else }} @@ -225,15 +294,11 @@ func (z *{{.ElementName}}) Inverse(x *{{.ElementName}}) *{{.ElementName}} { return z } -var qMinusTwo *big.Int //test routines can set this to an incorrect value to fail whenever inverseExp was triggered - -// inverseExp is a fallback in case the inversion algorithm failed +// inverseExp computes z = x⁻¹ mod q = x**(q-2) mod q func (z *{{.ElementName}}) inverseExp(x *{{.ElementName}}) *{{.ElementName}} { - if qMinusTwo == nil { - qMinusTwo = Modulus() - qMinusTwo.Sub(qMinusTwo, big.NewInt(2)) - } - return z.Exp(*x, qMinusTwo) + qMinusTwo := Modulus() + qMinusTwo.Sub(qMinusTwo, big.NewInt(2)) + return z.Exp(*x, qMinusTwo) } // approximate a big number x into a single 64 bit word using its uppermost and lowermost bits @@ -274,7 +339,9 @@ func (z *{{.ElementName}}) linearComb(x *{{.ElementName}}, xC int64, y *{{.Eleme // montReduceSigned z 
= (xHi * r + x) * r⁻¹ using the SOS algorithm // Requires |xHi| < 2⁶³. Most significant bit of xHi is the sign bit. func (z *{{.ElementName}}) montReduceSigned(x *{{.ElementName}}, xHi uint64) { - + // Used for Montgomery reduction. (qInvNeg) q + r'.r = 1, i.e., qInvNeg = - q⁻¹ mod r + const qInvNegLsw uint64 = {{index .QInverse 0}} + const signBitRemover = ^signBitSelector neg := xHi & signBitSelector != 0 // the SOS implementation requires that most significant bit is 0 @@ -325,7 +392,7 @@ func (z *{{.ElementName}}) montReduceSigned(x *{{.ElementName}}, xHi uint64) { {{- end}} z[{{.NbWordsLastIndex}}], z[{{sub .NbWordsLastIndex 1}}] = madd2(m, q{{.ElementName}}Word{{.NbWordsLastIndex}}, t[i+{{.NbWordsLastIndex}}], C) } - {{ template "reduce" . }} + {{ template "reduce" . }} // if neg { @@ -357,7 +424,7 @@ func (z *{{.ElementName}}) montReduceSignedSimpleButSlow(x *{{.ElementName}}, xH {{- end }} z[{{.NbWordsLastIndex}}], _ = bits.Add64(z[{{.NbWordsLastIndex}}], 0, c) - {{ template "reduce" . }} + {{ template "reduce" . }} } else { // The real input value is xHi r + x - 2⁶⁴r @@ -381,5 +448,13 @@ func (z *{{.ElementName}}) mulWSigned(x *{{.ElementName}}, y int64) { z.Neg(z) } } + +func _mulWGeneric(z,x *{{.ElementName}}, y uint64) { + {{ template "mul_nocarry_v2" dict "all" . "V2" "x"}} + {{ template "reduce" . }} +} + + {{ end }} + ` diff --git a/field/internal/templates/element/mul_cios.go b/field/internal/templates/element/mul_cios.go index 936e2188d6..be671e9689 100644 --- a/field/internal/templates/element/mul_cios.go +++ b/field/internal/templates/element/mul_cios.go @@ -50,11 +50,7 @@ const MulCIOS = ` z[{{$i}}], b = bits.Sub64(t[{{$i}}], {{index $.all.Q $i}}, b) {{- end}} {{- end}} - {{if $.NoReturn }} return - {{else}} - return z - {{end}} } // copy t into z @@ -63,4 +59,24 @@ const MulCIOS = ` {{- end}} {{ end }} + +{{ define "mul_cios_one_limb" }} + // CIOS multiplication + // Used for Montgomery reduction. 
(qInvNeg) q + r'.r = 1, i.e., qInvNeg = - q⁻¹ mod r + const qInvNegLsw uint64 = {{index $.all.QInverse 0}} + + var r uint64 + hi, lo := bits.Mul64({{$.V1}}[0], {{$.V2}}[0]) + m := lo * qInvNegLsw + hi2, lo2 := bits.Mul64(m, q) + _, carry := bits.Add64(lo2, lo, 0) + r, carry = bits.Add64(hi2, hi, carry) + + if carry != 0 || r >= q { + // we need to reduce + r -= q + + } + z[0] = r +{{ end }} ` diff --git a/field/internal/templates/element/ops_generic.go b/field/internal/templates/element/ops_generic.go index 5546c7e26e..3933ba42b3 100644 --- a/field/internal/templates/element/ops_generic.go +++ b/field/internal/templates/element/ops_generic.go @@ -12,7 +12,13 @@ const OpsNoAsm = ` // MulBy{{$i}} x *= {{$i}} func MulBy{{$i}}(x *{{$.ElementName}}) { + {{- if eq 1 $.NbWords}} + var y {{$.ElementName}} + y.SetUint64({{$i}}) + x.Mul(x, &y) + {{- else}} mulByConstant(x, {{$i}}) + {{- end}} } {{- end}} @@ -25,9 +31,11 @@ func Butterfly(a, b *{{.ElementName}}) { _butterflyGeneric(a, b) } +{{- if ne .NbWords 1}} func mul(z, x, y *{{.ElementName}}) { _mulGeneric(z, x, y) } +{{- end}} // FromMont converts z in place (i.e. 
mutates) from Montgomery to regular representation diff --git a/field/internal/templates/element/reduce.go b/field/internal/templates/element/reduce.go index 605bbc84f3..aefd25bd39 100644 --- a/field/internal/templates/element/reduce.go +++ b/field/internal/templates/element/reduce.go @@ -2,8 +2,13 @@ package element const Reduce = ` {{ define "reduce" }} -// if z > q → z -= q +// if z >= q → z -= q // note: this is NOT constant time +{{- if eq $.NbWords 1}} +if z[0] >= q { + z[0] -= q +} +{{- else}} if !({{- range $i := reverse .NbWordsIndexesNoZero}} z[{{$i}}] < {{index $.Q $i}} || ( z[{{$i}}] == {{index $.Q $i}} && ( {{- end}}z[0] < {{index $.Q 0}} {{- range $i := .NbWordsIndexesNoZero}} )) {{- end}} ){ {{- if eq $.NbWordsLastIndex 0}} @@ -21,5 +26,6 @@ if !({{- range $i := reverse .NbWordsIndexesNoZero}} z[{{$i}}] < {{index $.Q $i} {{- end}} } {{- end }} +{{- end }} ` diff --git a/field/internal/templates/element/tests.go b/field/internal/templates/element/tests.go index 67a825debd..fff1f20cc6 100644 --- a/field/internal/templates/element/tests.go +++ b/field/internal/templates/element/tests.go @@ -11,10 +11,12 @@ import ( "math/big" "math/bits" "fmt" - {{if $UsingP20Inverse}} mrand "math/rand" {{end}} - "testing" - + {{if $UsingP20Inverse}} "github.com/consensys/gnark-crypto/field" + mrand "math/rand" + {{end}} + "testing" + "github.com/leanovate/gopter" "github.com/leanovate/gopter/prop" ggen "github.com/leanovate/gopter/gen" @@ -326,12 +328,20 @@ func init() { staticTestValues = append(staticTestValues, a) } + {{- if ne .NbWords 1}} { a := q{{.ElementName}} a[{{.NbWordsLastIndex}}]-- a[0]++ staticTestValues = append(staticTestValues, a) } + {{- end}} + + { + a := q{{.ElementName}} + a[{{.NbWordsLastIndex}}] = 0 + staticTestValues = append(staticTestValues, a) + } { a := q{{.ElementName}} @@ -378,13 +388,15 @@ func Test{{toTitle .ElementName}}Reduce(t *testing.T) { )) properties.TestingRun(t, gopter.ConsoleReporter(false)) - // if we have ADX instruction 
enabled, test both path in assembly - if supportAdx { - t.Log("disabling ADX") - supportAdx = false - properties.TestingRun(t, gopter.ConsoleReporter(false)) - supportAdx = true - } + {{- if .ASM}} + // if we have ADX instruction enabled, test both path in assembly + if supportAdx { + t.Log("disabling ADX") + supportAdx = false + properties.TestingRun(t, gopter.ConsoleReporter(false)) + supportAdx = true + } + {{- end}} } @@ -478,13 +490,15 @@ func Test{{toTitle .ElementName}}InverseExp(t *testing.T) { properties.Property("inv(0) == 0", prop.ForAll(invMatchExp, ggen.OneConstOf(testPair{{.ElementName}}{}))) properties.TestingRun(t, gopter.ConsoleReporter(false)) - // if we have ADX instruction enabled, test both path in assembly - if supportAdx { - t.Log("disabling ADX") - supportAdx = false - properties.TestingRun(t, gopter.ConsoleReporter(false)) - supportAdx = true - } + {{- if .ASM}} + // if we have ADX instruction enabled, test both path in assembly + if supportAdx { + t.Log("disabling ADX") + supportAdx = false + properties.TestingRun(t, gopter.ConsoleReporter(false)) + supportAdx = true + } + {{- end}} } @@ -572,13 +586,15 @@ func Test{{toTitle .ElementName}}MulByConstants(t *testing.T) { )) properties.TestingRun(t, gopter.ConsoleReporter(false)) - // if we have ADX instruction enabled, test both path in assembly - if supportAdx { - t.Log("disabling ADX") - supportAdx = false - properties.TestingRun(t, gopter.ConsoleReporter(false)) - supportAdx = true - } + {{- if .ASM}} + // if we have ADX instruction enabled, test both path in assembly + if supportAdx { + t.Log("disabling ADX") + supportAdx = false + properties.TestingRun(t, gopter.ConsoleReporter(false)) + supportAdx = true + } + {{- end}} } @@ -603,13 +619,15 @@ func Test{{toTitle .ElementName}}Legendre(t *testing.T) { )) properties.TestingRun(t, gopter.ConsoleReporter(false)) - // if we have ADX instruction enabled, test both path in assembly - if supportAdx { - t.Log("disabling ADX") - supportAdx = 
false - properties.TestingRun(t, gopter.ConsoleReporter(false)) - supportAdx = true - } + {{- if .ASM}} + // if we have ADX instruction enabled, test both path in assembly + if supportAdx { + t.Log("disabling ADX") + supportAdx = false + properties.TestingRun(t, gopter.ConsoleReporter(false)) + supportAdx = true + } + {{- end}} } @@ -642,13 +660,15 @@ func Test{{toTitle .ElementName}}Butterflies(t *testing.T) { )) properties.TestingRun(t, gopter.ConsoleReporter(false)) - // if we have ADX instruction enabled, test both path in assembly - if supportAdx { - t.Log("disabling ADX") - supportAdx = false - properties.TestingRun(t, gopter.ConsoleReporter(false)) - supportAdx = true - } + {{- if .ASM}} + // if we have ADX instruction enabled, test both path in assembly + if supportAdx { + t.Log("disabling ADX") + supportAdx = false + properties.TestingRun(t, gopter.ConsoleReporter(false)) + supportAdx = true + } + {{- end}} } @@ -685,13 +705,15 @@ func Test{{toTitle .ElementName}}LexicographicallyLargest(t *testing.T) { )) properties.TestingRun(t, gopter.ConsoleReporter(false)) - // if we have ADX instruction enabled, test both path in assembly - if supportAdx { - t.Log("disabling ADX") - supportAdx = false - properties.TestingRun(t, gopter.ConsoleReporter(false)) - supportAdx = true - } + {{- if .ASM}} + // if we have ADX instruction enabled, test both path in assembly + if supportAdx { + t.Log("disabling ADX") + supportAdx = false + properties.TestingRun(t, gopter.ConsoleReporter(false)) + supportAdx = true + } + {{- end}} } @@ -890,14 +912,16 @@ func Test{{toTitle .all.ElementName}}{{.Op}}(t *testing.T) { properties.TestingRun(t, gopter.ConsoleReporter(false)) specialValueTest() - // if we have ADX instruction enabled, test both path in assembly - if supportAdx { - t.Log("disabling ADX") - supportAdx = false - properties.TestingRun(t, gopter.ConsoleReporter(false)) - specialValueTest() - supportAdx = true - } + {{- if $.all.ASM}} + // if we have ADX instruction enabled, 
test both path in assembly + if supportAdx { + t.Log("disabling ADX") + supportAdx = false + properties.TestingRun(t, gopter.ConsoleReporter(false)) + specialValueTest() + supportAdx = true + } + {{- end}} } {{ end }} @@ -1022,14 +1046,16 @@ func Test{{toTitle .all.ElementName}}{{.Op}}(t *testing.T) { properties.TestingRun(t, gopter.ConsoleReporter(false)) specialValueTest() - // if we have ADX instruction enabled, test both path in assembly - if supportAdx { - supportAdx = false - t.Log("disabling ADX") - properties.TestingRun(t, gopter.ConsoleReporter(false)) - specialValueTest() - supportAdx = true - } + {{- if $.all.ASM}} + // if we have ADX instruction enabled, test both path in assembly + if supportAdx { + t.Log("disabling ADX") + supportAdx = false + properties.TestingRun(t, gopter.ConsoleReporter(false)) + specialValueTest() + supportAdx = true + } + {{- end}} } {{ end }} @@ -1280,6 +1306,139 @@ properties.Property("z.SetInterface must match z.SetString with {{.tName}}", pro {{end}} +func Test{{toTitle .ElementName}}NegativeExp(t *testing.T) { + t.Parallel() + + parameters := gopter.DefaultTestParameters() + if testing.Short() { + parameters.MinSuccessfulTests = nbFuzzShort + } else { + parameters.MinSuccessfulTests = nbFuzz + } + + properties := gopter.NewProperties(parameters) + + genA := gen() + + properties.Property("x⁻ᵏ == 1/xᵏ", prop.ForAll( + func(a,b testPair{{.ElementName}}) bool { + + var nb, d, e big.Int + nb.Neg(&b.bigint) + + var c {{.ElementName}} + c.Exp(a.element, &nb) + + d.Exp(&a.bigint, &nb, Modulus()) + + return c.FromMont().ToBigInt(&e).Cmp(&d) == 0 + }, + genA, genA, + )) + + + properties.TestingRun(t, gopter.ConsoleReporter(false)) +} + + + +func Test{{toTitle .ElementName}}BatchInvert(t *testing.T) { + assert := require.New(t) + + t.Parallel() + + // ensure batchInvert([x]) == invert(x) + for i:=int64(-1); i <=2; i++ { + var e, eInv {{.ElementName}} + e.SetInt64(i) + eInv.Inverse(&e) + + a := []{{.ElementName}}{e} + aInv := 
BatchInvert(a) + + assert.True(aInv[0].Equal(&eInv), "batchInvert != invert") + + } + + // test x * x⁻¹ == 1 + tData := [][]int64 { + []int64{-1,1,2,3}, + []int64{0, -1,1,2,3, 0}, + []int64{0, -1,1,0, 2,3, 0}, + []int64{-1,1,0, 2,3}, + []int64{0,0,1}, + []int64{1,0,0}, + []int64{0,0,0}, + } + + for _, t := range tData { + a := make([]{{.ElementName}}, len(t)) + for i:=0; i x * x⁻¹ == 1", prop.ForAll( + func(tp testPair{{.ElementName}}, r uint8) bool { + + a := make([]{{.ElementName}}, r) + if r != 0 { + a[0] = tp.element + + } + one := One() + for i:=1; i