diff --git a/accounts/external/backend.go b/accounts/external/backend.go index 52cb7923e242..73b0a0627c13 100644 --- a/accounts/external/backend.go +++ b/accounts/external/backend.go @@ -222,10 +222,14 @@ func (api *ExternalSigner) SignTx(account accounts.Account, tx *types.Transactio args.MaxFeePerGas = (*hexutil.Big)(tx.GasFeeCap()) args.MaxPriorityFeePerGas = (*hexutil.Big)(tx.GasTipCap()) case types.BlobTxType: - hashes, _, blobs := tx.BlobWrapData() + hashes, _, blobs, aggProof := tx.BlobWrapData() if len(hashes) != len(blobs) { return nil, fmt.Errorf("missing blobs data, expected %d blobs", len(hashes)) } + var z types.KZGProof + if aggProof == z { + return nil, fmt.Errorf("missing aggregated proof in blobs") + } args.MaxFeePerGas = (*hexutil.Big)(tx.GasFeeCap()) args.MaxPriorityFeePerGas = (*hexutil.Big)(tx.GasTipCap()) args.Blobs = blobs diff --git a/cmd/devp2p/internal/ethtest/transaction.go b/cmd/devp2p/internal/ethtest/transaction.go index 5d722f417a22..821f7b6ca8e7 100644 --- a/cmd/devp2p/internal/ethtest/transaction.go +++ b/cmd/devp2p/internal/ethtest/transaction.go @@ -66,7 +66,7 @@ func sendSuccessfulTx(s *Suite, tx *types.Transaction, prevTx *types.Transaction return fmt.Errorf("peering failed: %v", err) } // Send the transaction - if err = sendConn.Write(&Transactions{tx}); err != nil { + if err = sendConn.Write(&Transactions{types.NewNetworkTransaction(tx)}); err != nil { return fmt.Errorf("failed to write to connection: %v", err) } // peer receiving connection to node @@ -82,7 +82,7 @@ func sendSuccessfulTx(s *Suite, tx *types.Transaction, prevTx *types.Transaction recTxs := *msg // if you receive an old tx propagation, read from connection again if len(recTxs) == 1 && prevTx != nil { - if recTxs[0] == prevTx { + if recTxs[0].Tx == prevTx { continue } } @@ -168,7 +168,7 @@ func sendMaliciousTx(s *Suite, tx *types.Transaction, isEth66 bool) error { return fmt.Errorf("peering failed: %v", err) } // write malicious tx - if err = conn.Write(&Transactions{tx}); err != nil { + if err = conn.Write(&Transactions{types.NewNetworkTransaction(tx)}); err != nil { return fmt.Errorf("failed to write to connection: %v", err) } return nil @@ -179,7 +179,11 @@ var nonce = uint64(99) // sendMultipleSuccessfulTxs sends the given transactions to the node and // expects the node to accept and propagate them. 
func sendMultipleSuccessfulTxs(t *utesting.T, s *Suite, txs []*types.Transaction) error { - txMsg := Transactions(txs) + ntxs := make([]*types.NetworkTransaction, len(txs)) + for i := range txs { + ntxs[i] = types.NewNetworkTransaction(txs[i]) + } + txMsg := Transactions(ntxs) t.Logf("sending %d txs\n", len(txs)) sendConn, recvConn, err := s.createSendAndRecvConns(true) diff --git a/core/beacon/types.go b/core/beacon/types.go index ecdbd72552db..026a24a37c8a 100644 --- a/core/beacon/types.go +++ b/core/beacon/types.go @@ -17,6 +17,7 @@ package beacon import ( + "errors" "fmt" "math/big" @@ -42,9 +43,10 @@ type payloadAttributesMarshaling struct { // BlobsBundleV1 holds the blobs of an execution payload, to be retrieved separately type BlobsBundleV1 struct { - BlockHash common.Hash `json:"blockHash" gencodec:"required"` - KZGs []types.KZGCommitment `json:"kzgs" gencodec:"required"` - Blobs []types.Blob `json:"blobs" gencodec:"required"` + BlockHash common.Hash `json:"blockHash" gencodec:"required"` + KZGs []types.KZGCommitment `json:"kzgs" gencodec:"required"` + Blobs []types.Blob `json:"blobs" gencodec:"required"` + AggregatedProof types.KZGProof `json:"aggregatedProof" gencodec:"required"` } //go:generate go run github.com/fjl/gencodec -type ExecutableDataV1 -field-override executableDataMarshaling -out gen_ed.go @@ -213,13 +215,18 @@ func BlockToBlobData(block *types.Block) (*BlobsBundleV1, error) { blobsBundle := &BlobsBundleV1{BlockHash: blockHash} for i, tx := range block.Transactions() { if tx.Type() == types.BlobTxType { - versionedHashes, kzgs, blobs := tx.BlobWrapData() + versionedHashes, kzgs, blobs, aggProof := tx.BlobWrapData() if len(versionedHashes) != len(kzgs) || len(versionedHashes) != len(blobs) { return nil, fmt.Errorf("tx %d in block %s has inconsistent blobs (%d) / kzgs (%d)"+ " / versioned hashes (%d)", i, blockHash, len(blobs), len(kzgs), len(versionedHashes)) } + var zProof types.KZGProof + if zProof == aggProof { + return nil, errors.New("aggregated proof is not available in blobs") + } blobsBundle.Blobs = append(blobsBundle.Blobs, blobs...) blobsBundle.KZGs = append(blobsBundle.KZGs, kzgs...) + blobsBundle.AggregatedProof = aggProof } } return blobsBundle, nil diff --git a/core/tx_pool.go b/core/tx_pool.go index 91af2e198f14..93b547fe2458 100644 --- a/core/tx_pool.go +++ b/core/tx_pool.go @@ -30,7 +30,6 @@ import ( "github.com/ethereum/go-ethereum/consensus/misc" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/crypto/kzg" "github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" @@ -1012,24 +1011,9 @@ func (pool *TxPool) filterInvalidTxsLocked(txs []*types.Transaction, errs []erro // filterInvalidBlobTxsLocked marks all blob txs (if any) with an error if the blobs or kzg commitments are invalid func (pool *TxPool) filterInvalidBlobTxsLocked(txs []*types.Transaction, errs []error) { - // We batch multiple transactions together. - var batchVerify kzg.BlobsBatch for i, tx := range txs { - if errs[i] != nil { - continue - } - errs[i] = tx.VerifyBlobsBatched(batchVerify.Join) - } - if err := batchVerify.Verify(); err != nil { - // we'll have to verify each individual blob tx (can still use batch per tx) - // to not throw away the good ones because of some bad tx. 
- for i, tx := range txs { - if errs[i] != nil { - continue - } - // all blobs within the tx can still be batched together - errs[i] = tx.VerifyBlobsBatched(kzg.VerifyBlobs) - } + // all blobs within the tx can still be batched together + errs[i] = tx.VerifyBlobs() } } diff --git a/core/types/data_blob.go b/core/types/data_blob.go index 87f37dd2abd4..dcbf856f5289 100644 --- a/core/types/data_blob.go +++ b/core/types/data_blob.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" "io" + "math/big" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" @@ -71,6 +72,52 @@ func (kzg KZGCommitment) ComputeVersionedHash() common.Hash { return h } +// Compressed BLS12-381 G1 element +type KZGProof [48]byte + +func (p *KZGProof) Deserialize(dr *codec.DecodingReader) error { + if p == nil { + return errors.New("nil pubkey") + } + _, err := dr.Read(p[:]) + return err +} + +func (p *KZGProof) Serialize(w *codec.EncodingWriter) error { + return w.Write(p[:]) +} + +func (KZGProof) ByteLength() uint64 { + return 48 +} + +func (KZGProof) FixedLength() uint64 { + return 48 +} + +func (p KZGProof) HashTreeRoot(hFn tree.HashFn) tree.Root { + var a, b tree.Root + copy(a[:], p[0:32]) + copy(b[:], p[32:48]) + return hFn(a, b) +} + +func (p KZGProof) MarshalText() ([]byte, error) { + return []byte("0x" + hex.EncodeToString(p[:])), nil +} + +func (p KZGProof) String() string { + return "0x" + hex.EncodeToString(p[:]) +} + +func (p *KZGProof) UnmarshalText(text []byte) error { + return hexutil.UnmarshalFixedText("KZGProof", text, p[:]) +} + +func (p *KZGProof) Point() (*bls.G1Point, error) { + return bls.FromCompressedG1(p[:]) +} + type BLSFieldElement [32]byte func (p BLSFieldElement) MarshalText() ([]byte, error) { @@ -306,22 +353,76 @@ func (blobs Blobs) ComputeCommitments() (commitments []KZGCommitment, versionedH return commitments, versionedHashes, true } +// Return KZG commitments, versioned hashes and the aggregated KZG proof that correspond to these blobs +func (blobs Blobs) ComputeCommitmentsAndAggregatedProof() (commitments []KZGCommitment, versionedHashes []common.Hash, aggregatedProof KZGProof, err error) { + commitments = make([]KZGCommitment, len(blobs)) + versionedHashes = make([]common.Hash, len(blobs)) + for i, blob := range blobs { + var ok bool + commitments[i], ok = blob.ComputeCommitment() + if !ok { + return nil, nil, KZGProof{}, errors.New("invalid blob for commitment") + } + versionedHashes[i] = commitments[i].ComputeVersionedHash() + } + + var kzgProof KZGProof + if len(blobs) != 0 { + aggregatePoly, aggregateCommitmentG1, err := computeAggregateKzgCommitment(blobs, commitments) + if err != nil { + return nil, nil, KZGProof{}, err + } + + var aggregateCommitment KZGCommitment + copy(aggregateCommitment[:], bls.ToCompressedG1(aggregateCommitmentG1)) + + var aggregateBlob Blob + for i := range aggregatePoly { + aggregateBlob[i] = bls.FrTo32(&aggregatePoly[i]) + } + root := tree.GetHashFn().HashTreeRoot(&aggregateBlob, &aggregateCommitment) + var z bls.Fr + hashToFr(&z, root) + + var y bls.Fr + kzg.EvaluatePolyInEvaluationForm(&y, aggregatePoly[:], &z) + + aggProofG1, err := kzg.ComputeProof(aggregatePoly, &z) + if err != nil { + return nil, nil, KZGProof{}, err + } + copy(kzgProof[:], bls.ToCompressedG1(aggProofG1)) + } + + return commitments, versionedHashes, kzgProof, nil +} + +type randomChallengeHasher struct { + b Blobs + c BlobKzgs +} + +func (h *randomChallengeHasher) HashTreeRoot(hFn tree.HashFn) tree.Root { + return hFn.HashTreeRoot(&h.b, &h.c) +} + type 
BlobTxWrapper struct { - Tx SignedBlobTx - BlobKzgs BlobKzgs - Blobs Blobs + Tx SignedBlobTx + BlobKzgs BlobKzgs + Blobs Blobs + KzgAggregatedProof KZGProof } func (txw *BlobTxWrapper) Deserialize(dr *codec.DecodingReader) error { - return dr.Container(&txw.Tx, &txw.BlobKzgs, &txw.Blobs) + return dr.Container(&txw.Tx, &txw.BlobKzgs, &txw.Blobs, &txw.KzgAggregatedProof) } func (txw *BlobTxWrapper) Serialize(w *codec.EncodingWriter) error { - return w.Container(&txw.Tx, &txw.BlobKzgs, &txw.Blobs) + return w.Container(&txw.Tx, &txw.BlobKzgs, &txw.Blobs, &txw.KzgAggregatedProof) } func (txw *BlobTxWrapper) ByteLength() uint64 { - return codec.ContainerLength(&txw.Tx, &txw.BlobKzgs, &txw.Blobs) + return codec.ContainerLength(&txw.Tx, &txw.BlobKzgs, &txw.Blobs, &txw.KzgAggregatedProof) } func (txw *BlobTxWrapper) FixedLength() uint64 { @@ -329,19 +430,20 @@ func (txw *BlobTxWrapper) FixedLength() uint64 { } func (txw *BlobTxWrapper) HashTreeRoot(hFn tree.HashFn) tree.Root { - return hFn.HashTreeRoot(&txw.Tx, &txw.BlobKzgs, &txw.Blobs) + return hFn.HashTreeRoot(&txw.Tx, &txw.BlobKzgs, &txw.Blobs, &txw.KzgAggregatedProof) } type BlobTxWrapData struct { - BlobKzgs BlobKzgs - Blobs Blobs + BlobKzgs BlobKzgs + Blobs Blobs + KzgAggregatedProof KZGProof } func (b *BlobTxWrapData) sizeWrapData() common.StorageSize { - return common.StorageSize(4 + 4 + b.BlobKzgs.ByteLength() + b.Blobs.ByteLength()) + return common.StorageSize(4 + 4 + b.BlobKzgs.ByteLength() + b.Blobs.ByteLength() + b.KzgAggregatedProof.ByteLength()) } -func (b *BlobTxWrapData) verifyBlobsBatched(inner TxData, joinBatch JoinBlobBatchVerify) error { +func (b *BlobTxWrapData) verifyVersionedHash(inner TxData) error { blobTx, ok := inner.(*SignedBlobTx) if !ok { return fmt.Errorf("expected signed blob tx, got %T", inner) @@ -360,24 +462,47 @@ func (b *BlobTxWrapData) verifyBlobsBatched(inner TxData, joinBatch JoinBlobBatc return fmt.Errorf("versioned hash %d supposedly %s but does not match computed %s", i, h, computed) } } + return nil +} - // Time to verify that the KZG commitments match the included blobs: - // first extract crypto material out of our types and pass them to the crypto layer - commitments, err := b.BlobKzgs.Parse() +// Blob verification using KZG proofs +func (b *BlobTxWrapData) verifyBlobs(inner TxData) error { + if err := b.verifyVersionedHash(inner); err != nil { + return err + } + + aggregatePoly, aggregateCommitmentG1, err := computeAggregateKzgCommitment(b.Blobs, b.BlobKzgs) if err != nil { - return fmt.Errorf("commitments parse error: %v", err) + return fmt.Errorf("failed to compute aggregate commitment: %v", err) + } + var aggregateBlob Blob + for i := range aggregatePoly { + aggregateBlob[i] = bls.FrTo32(&aggregatePoly[i]) } - blobs, err := b.Blobs.Parse() + var aggregateCommitment KZGCommitment + copy(aggregateCommitment[:], bls.ToCompressedG1(aggregateCommitmentG1)) + root := tree.GetHashFn().HashTreeRoot(&aggregateBlob, &aggregateCommitment) + var z bls.Fr + hashToFr(&z, root) + + var y bls.Fr + kzg.EvaluatePolyInEvaluationForm(&y, aggregatePoly[:], &z) + + aggregateProofG1, err := b.KzgAggregatedProof.Point() if err != nil { - return fmt.Errorf("blobs parse error: %v", err) + return fmt.Errorf("aggregate proof parse error: %v", err) + } + if !kzg.VerifyKzgProof(aggregateCommitmentG1, &z, &y, aggregateProofG1) { + return errors.New("failed to verify kzg") } - return joinBatch(commitments, blobs) + return nil } func (b *BlobTxWrapData) copy() TxWrapData { return &BlobTxWrapData{ - BlobKzgs: b.BlobKzgs.copy(), - 
Blobs: b.Blobs.copy(), + BlobKzgs: b.BlobKzgs.copy(), + Blobs: b.Blobs.copy(), + KzgAggregatedProof: b.KzgAggregatedProof, } } @@ -389,6 +514,10 @@ func (b *BlobTxWrapData) blobs() Blobs { return b.Blobs } +func (b *BlobTxWrapData) aggregatedProof() KZGProof { + return b.KzgAggregatedProof +} + func (b *BlobTxWrapData) encodeTyped(w io.Writer, txdata TxData) error { if _, err := w.Write([]byte{BlobTxType}); err != nil { return err @@ -398,9 +527,52 @@ func (b *BlobTxWrapData) encodeTyped(w io.Writer, txdata TxData) error { return fmt.Errorf("expected signed blob tx, got %T", txdata) } wrapped := BlobTxWrapper{ - Tx: *blobTx, - BlobKzgs: b.BlobKzgs, - Blobs: b.Blobs, + Tx: *blobTx, + BlobKzgs: b.BlobKzgs, + Blobs: b.Blobs, + KzgAggregatedProof: b.KzgAggregatedProof, } return EncodeSSZ(w, &wrapped) } + +func computePowers(r *bls.Fr, n int) []bls.Fr { + var currentPower bls.Fr + bls.AsFr(&currentPower, 1) + powers := make([]bls.Fr, n) + for i := range powers { + powers[i] = currentPower + bls.MulModFr(&currentPower, &currentPower, r) + } + return powers +} + +func computeAggregateKzgCommitment(blobs Blobs, commitments []KZGCommitment) ([]bls.Fr, *bls.G1Point, error) { + // create challenges + hasher := randomChallengeHasher{blobs, commitments} + root := hasher.HashTreeRoot(tree.GetHashFn()) + var r bls.Fr + hashToFr(&r, root) + + powers := computePowers(&r, len(blobs)) + + commitmentsG1 := make([]bls.G1Point, len(commitments)) + for i := 0; i < len(commitmentsG1); i++ { + p, _ := commitments[i].Point() + bls.CopyG1(&commitmentsG1[i], p) + } + aggregateCommitmentG1 := bls.LinCombG1(commitmentsG1, powers) + var aggregateCommitment KZGCommitment + copy(aggregateCommitment[:], bls.ToCompressedG1(aggregateCommitmentG1)) + + polys, err := blobs.Parse() + if err != nil { + return nil, nil, err + } + aggregatePoly := kzg.MatrixLinComb(polys, powers) + return aggregatePoly, aggregateCommitmentG1, nil +} + +func hashToFr(out *bls.Fr, root tree.Root) { + zB := new(big.Int).Mod(new(big.Int).SetBytes(root[:]), kzg.BLSModulus) + kzg.BigToFr(out, zB) +} diff --git a/core/types/transaction.go b/core/types/transaction.go index b109220c9928..2563f8a5ec9b 100644 --- a/core/types/transaction.go +++ b/core/types/transaction.go @@ -25,7 +25,6 @@ import ( "sync/atomic" "time" - "github.com/protolambda/go-kzg/bls" "github.com/protolambda/ztyp/codec" "github.com/ethereum/go-ethereum/common" @@ -91,19 +90,14 @@ func WithTxWrapData(wrapData TxWrapData) TxOption { } } -// JoinBlobBatchVerify adds the kzgs and blobs to a batch for verification. -// The commitments and blobs must have equal length and should be valid points and field elements. -// -// An early error may be returned if the input is verified immediately. -type JoinBlobBatchVerify func(kzgs []*bls.G1Point, blobs [][]bls.Fr) error - type TxWrapData interface { copy() TxWrapData kzgs() BlobKzgs blobs() Blobs + aggregatedProof() KZGProof encodeTyped(w io.Writer, txdata TxData) error sizeWrapData() common.StorageSize - verifyBlobsBatched(inner TxData, joinBatch JoinBlobBatchVerify) error + verifyBlobs(inner TxData) error } // TxData is the underlying data of a transaction.
@@ -273,7 +267,7 @@ func (tx *Transaction) decodeTyped(b []byte) (TxData, TxWrapData, error) { case BlobTxType: var wrapped BlobTxWrapper err := DecodeSSZ(b[1:], &wrapped) - return &wrapped.Tx, &BlobTxWrapData{BlobKzgs: wrapped.BlobKzgs, Blobs: wrapped.Blobs}, err + return &wrapped.Tx, &BlobTxWrapData{BlobKzgs: wrapped.BlobKzgs, Blobs: wrapped.Blobs, KzgAggregatedProof: wrapped.KzgAggregatedProof}, err default: minimal, err := tx.decodeTypedMinimal(b) return minimal, nil, err @@ -516,25 +510,23 @@ func (tx *Transaction) IsIncomplete() bool { return tx.Type() == BlobTxType && tx.wrapData == nil } -// VerifyBlobsBatched runs basic pre-verification and then joins the batch if pre-verification is valid. -// The batch should be verified for the blobs to really be valid. -// A transaction without blobs does not join the batch verification. -func (tx *Transaction) VerifyBlobsBatched(joinBatch JoinBlobBatchVerify) error { +// VerifyBlobs verifies the blob transaction +func (tx *Transaction) VerifyBlobs() error { if tx.wrapData != nil { - return tx.wrapData.verifyBlobsBatched(tx.inner, joinBatch) + return tx.wrapData.verifyBlobs(tx.inner) } return nil } // BlobWrapData returns the blob and kzg data, if any. // kzgs and blobs may be empty if the transaction is not wrapped. -func (tx *Transaction) BlobWrapData() (versionedHashes []common.Hash, kzgs BlobKzgs, blobs Blobs) { +func (tx *Transaction) BlobWrapData() (versionedHashes []common.Hash, kzgs BlobKzgs, blobs Blobs, aggProof KZGProof) { if blobWrap, ok := tx.wrapData.(*BlobTxWrapData); ok { if signedBlobTx, ok := tx.inner.(*SignedBlobTx); ok { - return signedBlobTx.Message.BlobVersionedHashes, blobWrap.BlobKzgs, blobWrap.Blobs + return signedBlobTx.Message.BlobVersionedHashes, blobWrap.BlobKzgs, blobWrap.Blobs, blobWrap.KzgAggregatedProof } } - return nil, nil, nil + return nil, nil, nil, KZGProof{} } // WithSignature returns a new transaction with the given signature. diff --git a/core/types/transaction_marshalling.go b/core/types/transaction_marshalling.go index 0d8212fedcc9..df05e9198e7d 100644 --- a/core/types/transaction_marshalling.go +++ b/core/types/transaction_marshalling.go @@ -22,7 +22,6 @@ import ( "fmt" "math/big" - "github.com/ethereum/go-ethereum/crypto/kzg" "github.com/protolambda/ztyp/view" "github.com/ethereum/go-ethereum/common" @@ -54,6 +53,7 @@ type txJSON struct { BlobVersionedHashes []common.Hash `json:"blobVersionedHashes,omitempty"` Blobs Blobs `json:"blobs,omitempty"` BlobKzgs BlobKzgs `json:"blobKzgs,omitempty"` + KzgAggregatedProof KZGProof `json:"kzgAggregatedProof,omitempty"` // Only used for encoding: Hash common.Hash `json:"hash"` @@ -123,6 +123,7 @@ func (t *Transaction) MarshalJSON() ([]byte, error) { } enc.Blobs = t.wrapData.blobs() enc.BlobKzgs = t.wrapData.kzgs() + enc.KzgAggregatedProof = t.wrapData.aggregatedProof() } return json.Marshal(&enc) } @@ -349,11 +350,12 @@ func (t *Transaction) UnmarshalJSON(input []byte) error { } itx.Message.BlobVersionedHashes = dec.BlobVersionedHashes t.wrapData = &BlobTxWrapData{ - BlobKzgs: dec.BlobKzgs, - Blobs: dec.Blobs, + BlobKzgs: dec.BlobKzgs, + Blobs: dec.Blobs, + KzgAggregatedProof: dec.KzgAggregatedProof, } // Verify that versioned hashes match kzgs, and kzgs match blobs. 
- if err := t.wrapData.verifyBlobsBatched(&itx, kzg.VerifyBlobs); err != nil { + if err := t.wrapData.verifyBlobs(&itx); err != nil { return fmt.Errorf("blob wrapping data is invalid: %v", err) } default: diff --git a/core/types/transaction_test.go b/core/types/transaction_test.go index 06f050de2ca8..341a451453a6 100644 --- a/core/types/transaction_test.go +++ b/core/types/transaction_test.go @@ -493,8 +493,9 @@ func TestTransactionCoding(t *testing.T) { }, } wrapData = &BlobTxWrapData{ - BlobKzgs: BlobKzgs{KZGCommitment{0: 0xc0}}, - Blobs: Blobs{Blob{}}, + BlobKzgs: BlobKzgs{KZGCommitment{0: 0xc0}}, + Blobs: Blobs{Blob{}}, + KzgAggregatedProof: KZGProof{0: 0xd0}, } } tx, err := SignNewTx(key, signer, txdata, WithTxWrapData(wrapData)) diff --git a/core/types/types_test.go b/core/types/types_test.go index f523928d5a87..3db8221d2a8e 100644 --- a/core/types/types_test.go +++ b/core/types/types_test.go @@ -136,8 +136,9 @@ func benchRLP(b *testing.B, encode bool) { BlobVersionedHashes: VersionedHashesView{common.Hash{0xaa}}, }, }, WithTxWrapData(&BlobTxWrapData{ - BlobKzgs: BlobKzgs{KZGCommitment{0xbb}}, - Blobs: Blobs{Blob{}}, + BlobKzgs: BlobKzgs{KZGCommitment{0xbb}}, + Blobs: Blobs{Blob{}}, + KzgAggregatedProof: KZGProof{0xbc}, })), }, } { diff --git a/crypto/kzg/kzg.go b/crypto/kzg/kzg.go index d745ab58635e..3ae17e55438b 100644 --- a/crypto/kzg/kzg.go +++ b/crypto/kzg/kzg.go @@ -4,9 +4,11 @@ import ( "encoding/json" "errors" "fmt" + "math/big" "sync" "github.com/ethereum/go-ethereum/params" + "github.com/protolambda/go-kzg/bls" ) @@ -114,7 +116,7 @@ func (batch *BlobsBatch) Verify() error { // By regrouping the above equation around the `L` points we can reduce the length of the MSM further // (down to just `n` scalar multiplications) by making it look like this: // (r_0*b0_0 + r_1*b1_0 + r_2*b2_0) * L_0 + (r_0*b0_1 + r_1*b1_1 + r_2*b2_1) * L_1 -func VerifyBlobs(commitments []*bls.G1Point, blobs [][]bls.Fr) error { +func VerifyBlobsLegacy(commitments []*bls.G1Point, blobs [][]bls.Fr) error { // Prepare objects to hold our two MSMs lPoints := make([]bls.G1Point, params.FieldElementsPerBlob) lScalars := make([]bls.Fr, params.FieldElementsPerBlob) @@ -163,6 +165,50 @@ func VerifyBlobs(commitments []*bls.G1Point, blobs [][]bls.Fr) error { return nil } +// ComputeProof returns KZG Proof of polynomial in evaluation form at point z +func ComputeProof(eval []bls.Fr, z *bls.Fr) (*bls.G1Point, error) { + if len(eval) != params.FieldElementsPerBlob { + return nil, errors.New("invalid eval polynomial for proof") + } + + // To avoid overflow/underflow, convert elements into int + var poly [params.FieldElementsPerBlob]big.Int + for i := range poly { + frToBig(&poly[i], &eval[i]) + } + var zB big.Int + frToBig(&zB, z) + + // Shift our polynomial first (in evaluation form we can't handle the division remainder) + var yB big.Int + var y bls.Fr + EvaluatePolyInEvaluationForm(&y, eval, z) + frToBig(&yB, &y) + var polyShifted [params.FieldElementsPerBlob]big.Int + + for i := range polyShifted { + polyShifted[i].Mod(new(big.Int).Sub(&poly[i], &yB), BLSModulus) + } + + var denomPoly [params.FieldElementsPerBlob]big.Int + for i := range denomPoly { + // Make sure we won't induce a division by zero later. 
Shouldn't happen if using Fiat-Shamir challenges + if Domain[i].Cmp(&zB) == 0 { + return nil, errors.New("invalid z challenge") + } + denomPoly[i].Mod(new(big.Int).Sub(Domain[i], &zB), BLSModulus) + } + + // Calculate quotient polynomial by doing point-by-point division + var quotientPoly [params.FieldElementsPerBlob]bls.Fr + for i := range quotientPoly { + var tmp big.Int + blsDiv(&tmp, &polyShifted[i], &denomPoly[i]) + BigToFr(&quotientPoly[i], &tmp) + } + return bls.LinCombG1(kzgSetupLagrange, quotientPoly[:]), nil +} + type JSONTrustedSetup struct { SetupG1 []bls.G1Point SetupG2 []bls.G2Point @@ -182,4 +228,6 @@ func init() { kzgSetupG2 = parsedSetup.SetupG2 kzgSetupLagrange = parsedSetup.SetupLagrange KzgSetupG1 = parsedSetup.SetupG1 + + initDomain() } diff --git a/crypto/kzg/util.go b/crypto/kzg/util.go new file mode 100644 index 000000000000..7ed17bfd1da8 --- /dev/null +++ b/crypto/kzg/util.go @@ -0,0 +1,115 @@ +package kzg + +import ( + "math/big" + + "github.com/ethereum/go-ethereum/params" + "github.com/protolambda/go-kzg/bls" +) + +var ( + BLSModulus *big.Int + Domain [params.FieldElementsPerBlob]*big.Int + DomainFr [params.FieldElementsPerBlob]bls.Fr +) + +func initDomain() { + BLSModulus = new(big.Int) + BLSModulus.SetString("0x73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000001", 0) + + // ROOT_OF_UNITY = pow(PRIMITIVE_ROOT, (MODULUS - 1) // WIDTH, MODULUS) + primitiveRoot := big.NewInt(7) + width := big.NewInt(int64(params.FieldElementsPerBlob)) + exp := new(big.Int).Div(new(big.Int).Sub(BLSModulus, big.NewInt(1)), width) + rootOfUnity := new(big.Int).Exp(primitiveRoot, exp, BLSModulus) + for i := 0; i < params.FieldElementsPerBlob; i++ { + Domain[i] = new(big.Int).Exp(rootOfUnity, big.NewInt(int64(i)), BLSModulus) + BigToFr(&DomainFr[i], Domain[i]) + } +} + +func MatrixLinComb(vectors [][]bls.Fr, scalars []bls.Fr) []bls.Fr { + r := make([]bls.Fr, len(vectors[0])) + for i := 0; i < len(vectors); i++ { + var tmp bls.Fr + for j := 0; j < len(r); j++ { + bls.MulModFr(&tmp, &vectors[i][j], &scalars[i]) + bls.AddModFr(&r[j], &r[j], &tmp) + } + } + return r +} + +// EvaluatePolyInEvaluationForm evaluates the polynomial using the barycentric formula: +// f(x) = (x**WIDTH - 1) / WIDTH * sum_(i=0)^WIDTH (f(DOMAIN[i]) * DOMAIN[i]) / (x - DOMAIN[i]) +func EvaluatePolyInEvaluationForm(yFr *bls.Fr, poly []bls.Fr, x *bls.Fr) { + if len(poly) != params.FieldElementsPerBlob { + panic("invalid polynomial length") + } + + width := big.NewInt(int64(params.FieldElementsPerBlob)) + var inverseWidth big.Int + blsModInv(&inverseWidth, width) + + var y bls.Fr + for i := 0; i < params.FieldElementsPerBlob; i++ { + var num bls.Fr + bls.MulModFr(&num, &poly[i], &DomainFr[i]) + + var denom bls.Fr + bls.SubModFr(&denom, x, &DomainFr[i]) + + var div bls.Fr + bls.DivModFr(&div, &num, &denom) + + var tmp bls.Fr + bls.AddModFr(&tmp, &y, &div) + bls.CopyFr(&y, &tmp) + } + + xB := new(big.Int) + frToBig(xB, x) + powB := new(big.Int).Exp(xB, width, BLSModulus) + powB.Sub(powB, big.NewInt(1)) + + // TODO: add ExpModFr to go-kzg + var yB big.Int + frToBig(&yB, &y) + yB.Mul(&yB, new(big.Int).Mul(powB, &inverseWidth)) + yB.Mod(&yB, BLSModulus) + bls.SetFr(yFr, yB.String()) +} + +func frToBig(b *big.Int, val *bls.Fr) { + //b.SetBytes((*kilicbls.Fr)(val).RedToBytes()) + // silly double conversion + v := bls.FrTo32(val) + for i := 0; i < 16; i++ { + v[31-i], v[i] = v[i], v[31-i] + } + b.SetBytes(v[:]) +} + +func BigToFr(out *bls.Fr, in *big.Int) { + var b [32]byte + inb := in.Bytes() + copy(b[32-len(inb):], 
inb) + // again, we have to double convert as go-kzg only accepts little-endian + for i := 0; i < 16; i++ { + b[31-i], b[i] = b[i], b[31-i] + } + bls.FrFrom32(out, b) +} + +func blsModInv(out *big.Int, x *big.Int) { + if len(x.Bits()) != 0 { // if non-zero + out.ModInverse(x, BLSModulus) + } +} + +// faster than using big.Int ModDiv +func blsDiv(out *big.Int, a *big.Int, b *big.Int) { + var bInv big.Int + blsModInv(&bInv, b) + out.Mod(new(big.Int).Mul(a, &bInv), BLSModulus) +} diff --git a/eth/handler_eth_test.go b/eth/handler_eth_test.go index 9cf10d6bc77d..c04de73e3837 100644 --- a/eth/handler_eth_test.go +++ b/eth/handler_eth_test.go @@ -446,8 +446,9 @@ func testTransactionPropagation(t *testing.T, protocol uint) { }, } wrapData := &types.BlobTxWrapData{ - BlobKzgs: types.BlobKzgs{types.KZGCommitment{0: 0xc0}}, - Blobs: types.Blobs{types.Blob{}}, + BlobKzgs: types.BlobKzgs{types.KZGCommitment{0: 0xc0}}, + Blobs: types.Blobs{types.Blob{}}, + KzgAggregatedProof: types.KZGProof{0: 0xd0}, } blobTx, err := types.SignNewTx(testKey, types.NewDankSigner(common.Big1), txdata, types.WithTxWrapData(wrapData)) if err != nil { diff --git a/eth/protocols/eth/protocol_test.go b/eth/protocols/eth/protocol_test.go index 5ca895774121..025d80a6f9f0 100644 --- a/eth/protocols/eth/protocol_test.go +++ b/eth/protocols/eth/protocol_test.go @@ -108,7 +108,7 @@ func TestEth66EmptyMessages(t *testing.T) { ReceiptsPacket66{1111, ReceiptsPacket([][]*types.Receipt{})}, // Transactions GetPooledTransactionsPacket66{1111, GetPooledTransactionsPacket([]common.Hash{})}, - PooledTransactionsPacket66{1111, PooledTransactionsPacket([]*types.Transaction{})}, + PooledTransactionsPacket66{1111, PooledTransactionsPacket([]*types.NetworkTransaction{})}, PooledTransactionsRLPPacket66{1111, PooledTransactionsRLPPacket([]rlp.RawValue{})}, } { if have, _ := rlp.EncodeToBytes(msg); !bytes.Equal(have, want) { @@ -127,6 +127,7 @@ func TestEth66Messages(t *testing.T) { blockBody *BlockBody blockBodyRlp rlp.RawValue txs []*types.Transaction + ntxs []*types.NetworkTransaction txRlps []rlp.RawValue hashes []common.Hash receipts []*types.Receipt @@ -154,6 +155,7 @@ func TestEth66Messages(t *testing.T) { t.Fatal(err) } txs = append(txs, tx) + ntxs = append(ntxs, types.NewNetworkTransaction(tx)) txRlps = append(txRlps, rlpdata) } } @@ -253,7 +255,7 @@ func TestEth66Messages(t *testing.T) { common.FromHex("f847820457f842a000000000000000000000000000000000000000000000000000000000deadc0dea000000000000000000000000000000000000000000000000000000000feedbeef"), }, { - PooledTransactionsPacket66{1111, PooledTransactionsPacket(txs)}, + PooledTransactionsPacket66{1111, PooledTransactionsPacket(ntxs)}, common.FromHex("f8d7820457f8d2f867088504a817c8088302e2489435353535353535353535353535353535353535358202008025a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c12a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10f867098504a817c809830334509435353535353535353535353535353535353535358202d98025a052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afba052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afb"), }, { diff --git a/internal/ethapi/transaction_args.go b/internal/ethapi/transaction_args.go index cf89bf57a913..1111906af123 100644 --- a/internal/ethapi/transaction_args.go +++ b/internal/ethapi/transaction_args.go @@ -273,12 +273,13 @@ func (args *TransactionArgs) toTransaction() *types.Transaction { msg.Value.SetFromBig((*big.Int)(args.Value)) msg.Data = args.data() msg.AccessList = types.AccessListView(al) 
- commitments, versionedHashes, ok := types.Blobs(args.Blobs).ComputeCommitments() + commitments, versionedHashes, aggregatedProof, err := types.Blobs(args.Blobs).ComputeCommitmentsAndAggregatedProof() // XXX if blobs are invalid we will omit the wrap-data (and an error will pop-up later) - if ok { + if err == nil { opts = append(opts, types.WithTxWrapData(&types.BlobTxWrapData{ - BlobKzgs: commitments, - Blobs: args.Blobs, + BlobKzgs: commitments, + Blobs: args.Blobs, + KzgAggregatedProof: aggregatedProof, })) msg.BlobVersionedHashes = versionedHashes } diff --git a/signer/core/apitypes/types.go b/signer/core/apitypes/types.go index a94c13c60ffb..68de9c463a7b 100644 --- a/signer/core/apitypes/types.go +++ b/signer/core/apitypes/types.go @@ -159,6 +159,10 @@ func (args *SendTxArgs) ToTransaction() *types.Transaction { wrapData.BlobKzgs = append(wrapData.BlobKzgs, commitment) wrapData.Blobs = append(wrapData.Blobs, bl) } + _, _, aggProof, err := types.Blobs(args.Blobs).ComputeCommitmentsAndAggregatedProof() + if err == nil { + wrapData.KzgAggregatedProof = aggProof + } data = &types.SignedBlobTx{Message: msg} return types.NewTx(data, types.WithTxWrapData(&wrapData)) case args.MaxFeePerGas != nil: diff --git a/tests/kzg_bench_test.go b/tests/kzg_bench_test.go index 6391c3e61d0b..c4b16eb82ab9 100644 --- a/tests/kzg_bench_test.go +++ b/tests/kzg_bench_test.go @@ -1,13 +1,18 @@ package tests import ( + "fmt" "math" "testing" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto/kzg" "github.com/ethereum/go-ethereum/params" + "github.com/holiman/uint256" gokzg "github.com/protolambda/go-kzg" "github.com/protolambda/go-kzg/bls" + "github.com/protolambda/ztyp/view" ) func randomBlob() []bls.Fr { @@ -26,7 +31,7 @@ func BenchmarkBlobToKzg(b *testing.B) { } } -func BenchmarkVerifyBlobs(b *testing.B) { +func BenchmarkVerifyBlobsWithoutKZGProof(b *testing.B) { var blobs [][]bls.Fr var commitments []*bls.G1Point for i := 0; i < 16; i++ { @@ -36,7 +41,52 @@ func BenchmarkVerifyBlobs(b *testing.B) { } b.ResetTimer() for i := 0; i < b.N; i++ { - kzg.VerifyBlobs(commitments, blobs) + kzg.VerifyBlobsLegacy(commitments, blobs) + } +} + +func BenchmarkVerifyBlobs(b *testing.B) { + blobs := make([]types.Blob, params.MaxBlobsPerTx) + var commitments []types.KZGCommitment + var hashes []common.Hash + for i := 0; i < len(blobs); i++ { + tmp := randomBlob() + for j := range tmp { + blobs[i][j] = bls.FrTo32(&tmp[j]) + } + c, ok := blobs[i].ComputeCommitment() + if !ok { + b.Fatal("Could not compute commitment") + } + commitments = append(commitments, c) + hashes = append(hashes, c.ComputeVersionedHash()) + } + txData := &types.SignedBlobTx{ + Message: types.BlobTxMessage{ + ChainID: view.Uint256View(*uint256.NewInt(1)), + Nonce: view.Uint64View(0), + Gas: view.Uint64View(123457), + GasTipCap: view.Uint256View(*uint256.NewInt(42)), + GasFeeCap: view.Uint256View(*uint256.NewInt(10)), + BlobVersionedHashes: hashes, + }, + } + _, _, aggregatedProof, err := types.Blobs(blobs).ComputeCommitmentsAndAggregatedProof() + if err != nil { + b.Fatal(err) + } + wrapData := &types.BlobTxWrapData{ + BlobKzgs: commitments, + Blobs: blobs, + KzgAggregatedProof: aggregatedProof, + } + tx := types.NewTx(txData, types.WithTxWrapData(wrapData)) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + if err := tx.VerifyBlobs(); err != nil { + b.Fatal(err) + } } } @@ -78,3 +128,115 @@ func BenchmarkVerifyKzgProof(b *testing.B) { } } } + +func BenchmarkVerifyMultiple(b 
*testing.B) { + runBenchmark := func(siz int) { + b.Run(fmt.Sprintf("%d", siz), func(b *testing.B) { + var blobsSet [][]types.Blob + var commitmentsSet [][]types.KZGCommitment + var hashesSet [][]common.Hash + for i := 0; i < 10; i++ { + var blobs []types.Blob + var commitments []types.KZGCommitment + var hashes []common.Hash + for i := 0; i < params.MaxBlobsPerTx; i++ { + var blobElements types.Blob + blob := randomBlob() + for j := range blob { + blobElements[j] = bls.FrTo32(&blob[j]) + } + blobs = append(blobs, blobElements) + c, ok := blobElements.ComputeCommitment() + if !ok { + b.Fatal("Could not compute commitment") + } + commitments = append(commitments, c) + hashes = append(hashes, c.ComputeVersionedHash()) + } + blobsSet = append(blobsSet, blobs) + commitmentsSet = append(commitmentsSet, commitments) + hashesSet = append(hashesSet, hashes) + } + + var txs []*types.Transaction + for i := range blobsSet { + blobs := blobsSet[i] + commitments := commitmentsSet[i] + hashes := hashesSet[i] + + txData := &types.SignedBlobTx{ + Message: types.BlobTxMessage{ + ChainID: view.Uint256View(*uint256.NewInt(1)), + Nonce: view.Uint64View(0), + Gas: view.Uint64View(123457), + GasTipCap: view.Uint256View(*uint256.NewInt(42)), + GasFeeCap: view.Uint256View(*uint256.NewInt(10)), + BlobVersionedHashes: hashes, + }, + } + _, _, aggregatedProof, err := types.Blobs(blobs).ComputeCommitmentsAndAggregatedProof() + if err != nil { + b.Fatal(err) + } + wrapData := &types.BlobTxWrapData{ + BlobKzgs: commitments, + Blobs: blobs, + KzgAggregatedProof: aggregatedProof, + } + txs = append(txs, types.NewTx(txData, types.WithTxWrapData(wrapData))) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + for _, tx := range txs { + if err := tx.VerifyBlobs(); err != nil { + b.Fatal(err) + } + } + } + }) + } + + //runBenchmark(2) + //runBenchmark(4) + runBenchmark(8) + //runBenchmark(16) +} + +func BenchmarkBatchVerifyWithoutKZGProofs(b *testing.B) { + runBenchmark := func(siz int) { + b.Run(fmt.Sprintf("%d", siz), func(b *testing.B) { + var blobsSet [][][]bls.Fr + var commitmentsSet [][]*bls.G1Point + for i := 0; i < siz; i++ { + var blobs [][]bls.Fr + var commitments []*bls.G1Point + for i := 0; i < params.MaxBlobsPerTx; i++ { + blob := randomBlob() + blobs = append(blobs, blob) + commitments = append(commitments, kzg.BlobToKzg(blob)) + } + blobsSet = append(blobsSet, blobs) + commitmentsSet = append(commitmentsSet, commitments) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + var batchVerify kzg.BlobsBatch + for i := range blobsSet { + if err := batchVerify.Join(commitmentsSet[i], blobsSet[i]); err != nil { + b.Fatalf("unable to join: %v", err) + } + } + if err := batchVerify.Verify(); err != nil { + b.Fatalf("batch verify failed: %v", err) + } + } + }) + } + + runBenchmark(2) + runBenchmark(4) + runBenchmark(8) + runBenchmark(16) +} diff --git a/tests/kzg_test.go b/tests/kzg_test.go index 510a3a7c10e3..18b138944288 100644 --- a/tests/kzg_test.go +++ b/tests/kzg_test.go @@ -208,31 +208,37 @@ func TestVerifyBlobs(t *testing.T) { Blobs: []types.Blob{blob1, blob2}, } - // Extract cryptographic material out of the blobs/commitments - commitments, err := blobData.BlobKzgs.Parse() - if err != nil { - t.Fatalf("failed to parse commitments: %v", err) + var hashes []common.Hash + for i := 0; i < len(blobData.BlobKzgs); i++ { + hashes = append(hashes, blobData.BlobKzgs[i].ComputeVersionedHash()) + } + txData := &types.SignedBlobTx{ + Message: types.BlobTxMessage{ + BlobVersionedHashes: hashes, + }, } - blobs, err := 
blobData.Blobs.Parse() + _, _, aggregatedProof, err := blobData.Blobs.ComputeCommitmentsAndAggregatedProof() if err != nil { - t.Fatalf("failed to parse blobs: %v", err) + t.Fatalf("bad CommitmentsAndAggregatedProof: %v", err) + } + wrapData := &types.BlobTxWrapData{ + BlobKzgs: blobData.BlobKzgs, + Blobs: blobData.Blobs, + KzgAggregatedProof: aggregatedProof, } + tx := types.NewTx(txData, types.WithTxWrapData(wrapData)) // Verify the blobs against the commitments!! - err = kzg.VerifyBlobs(commitments, blobs) + err = tx.VerifyBlobs() if err != nil { t.Fatalf("bad verifyBlobs: %v", err) } // Now let's do a bad case: // mutate a single chunk of a single blob and VerifyBlobs() must fail - blobData.Blobs[0][42][1] = 0x42 - blobs, err = blobData.Blobs.Parse() - if err != nil { - t.Fatalf("internal blobs: %v", err) - } - - err = kzg.VerifyBlobs(commitments, blobs) + wrapData.Blobs[0][42][1] = 0x42 + tx = types.NewTx(txData, types.WithTxWrapData(wrapData)) + err = tx.VerifyBlobs() if err == nil { t.Fatal("bad VerifyBlobs actually succeeded, expected error") }
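For reviewers, a minimal end-to-end sketch of the new aggregated-proof flow, mirroring TestVerifyBlobs above. The package and helper names (blobexample, wrapAndVerify) are illustrative only; the calls themselves are the API introduced in this diff.

package blobexample

import "github.com/ethereum/go-ethereum/core/types"

// wrapAndVerify builds a wrapped blob transaction from caller-supplied blobs
// and runs the new aggregated-proof verification path.
func wrapAndVerify(blobs types.Blobs) error {
	// One call now yields the per-blob commitments, their versioned hashes and
	// the single aggregated KZG proof that is carried in the wrap data.
	commitments, hashes, aggProof, err := blobs.ComputeCommitmentsAndAggregatedProof()
	if err != nil {
		return err
	}
	txData := &types.SignedBlobTx{
		Message: types.BlobTxMessage{BlobVersionedHashes: hashes},
	}
	wrapData := &types.BlobTxWrapData{
		BlobKzgs:           commitments,
		Blobs:              blobs,
		KzgAggregatedProof: aggProof,
	}
	tx := types.NewTx(txData, types.WithTxWrapData(wrapData))
	// VerifyBlobs replaces the old batched path: it checks the versioned hashes,
	// recomputes the aggregate commitment/polynomial, and verifies one KZG proof.
	return tx.VerifyBlobs()
}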