From 68ca293cf4f571bb14925256908d0216b1eb6cbc Mon Sep 17 00:00:00 2001 From: Hussein Ait Lahcen Date: Sat, 20 Jul 2024 22:15:39 +0200 Subject: [PATCH 01/52] feat: upgrade gnark-crypto/gnark --- galoisd/go.mod | 6 +- galoisd/pkg/lightclient/nonadjacent/ptau.go | 786 ++++++++++++++++++++ 2 files changed, 789 insertions(+), 3 deletions(-) create mode 100644 galoisd/pkg/lightclient/nonadjacent/ptau.go diff --git a/galoisd/go.mod b/galoisd/go.mod index d7a8d07bb1..8029b4b9e9 100644 --- a/galoisd/go.mod +++ b/galoisd/go.mod @@ -7,7 +7,7 @@ require ( github.com/cometbft/cometbft v1.0.0-rc1.0.20240908111210-ab0be101882f github.com/cometbft/cometbft/api v1.0.0-rc.1 github.com/consensys/gnark v0.7.2-0.20230418172633-f83323bdf138 - github.com/consensys/gnark-crypto v0.12.2-0.20240215234832-d72fcb379d3e + github.com/consensys/gnark-crypto v0.12.2-0.20240703135258-5d8b5fab1afb github.com/cosmos/cosmos-sdk v0.52.0 github.com/rs/zerolog v1.33.0 github.com/spf13/cobra v1.8.1 @@ -151,9 +151,9 @@ replace ( replace ( github.com/cometbft/cometbft => github.com/unionlabs/cometbls v0.0.0-20241021101406-df0586cc2041 github.com/cometbft/cometbft/api => github.com/unionlabs/cometbls/api v0.0.0-20241021101406-df0586cc2041 - github.com/consensys/gnark => github.com/consensys/gnark v0.9.2-0.20240312175655-ce0186ef32c1 + github.com/consensys/gnark => github.com/unionlabs/gnark v0.0.0-20240723153903-9d859afe4c14 // Fork of gnark crypto until https://github.com/ConsenSys/gnark-crypto/pull/314 is merged - github.com/consensys/gnark-crypto => github.com/unionlabs/gnark-crypto v0.0.0-20240112093739-635c1b6963c6 + github.com/consensys/gnark-crypto => github.com/unionlabs/gnark-crypto v0.0.0-20240720201413-c0383b2a80e9 github.com/cosmos/cosmos-sdk => github.com/unionlabs/cosmos-sdk v0.0.0-20241018173625-c2982236c557 github.com/tunabay/go-bitarray => github.com/poisonphang/go-bitarray v0.0.0-20240912214703-d6127bb4d1bd ) diff --git a/galoisd/pkg/lightclient/nonadjacent/ptau.go b/galoisd/pkg/lightclient/nonadjacent/ptau.go new file mode 100644 index 0000000000..2bcdfe00c7 --- /dev/null +++ b/galoisd/pkg/lightclient/nonadjacent/ptau.go @@ -0,0 +1,786 @@ +package nonadjacent + +import ( + "encoding/binary" + "errors" + "fmt" + "io" + "math" + "math/big" + "os" + + "github.com/consensys/gnark-crypto/ecc/bn254" + "github.com/consensys/gnark-crypto/ecc/bn254/fp" +) + +/////////////////////////////////////////////////////////////////// +/// PTAU /// +/////////////////////////////////////////////////////////////////// + +// Format +// Taken from the iden3/snarkjs repo powersoftau_new.js file +// https://github.com/iden3/snarkjs/blob/master/src/powersoftau_new.js +/* +Header(1) + n8 + prime + power +tauG1(2) + {(2 ** power)*2-1} [ + G1, tau*G1, tau^2 * G1, .... + ] +tauG2(3) + {2 ** power}[ + G2, tau*G2, tau^2 * G2, ... + ] +alphaTauG1(4) + {2 ** power}[ + alpha*G1, alpha*tau*G1, alpha*tau^2*G1,.... + ] +betaTauG1(5) + {2 ** power} [] + beta*G1, beta*tau*G1, beta*tau^2*G1, .... 
+ ] +betaG2(6) + {1}[ + beta*G2 + ] +contributions(7) - Ignore contributions, users can verify using snarkjs + NContributions + {NContributions}[ + tau*G1 + tau*G2 + alpha*G1 + beta*G1 + beta*G2 + pubKey + tau_g1s + tau_g1sx + tau_g2spx + alpha_g1s + alpha_g1sx + alpha_g1spx + beta_g1s + beta_g1sx + beta_g1spx + partialHash (216 bytes) See https://github.com/mafintosh/blake2b-wasm/blob/23bee06945806309977af802bc374727542617c7/blake2b.wat#L9 + hashNewChallenge + ] +*/ + +// in bytes +const BN254_FIELD_ELEMENT_SIZE = 32 + +type G1 [2]big.Int +type G2 [4]big.Int + +type PtauHeader struct { + N8 uint32 + Prime big.Int + Power uint32 +} + +type Ptau struct { + Header PtauHeader + PTauPubKey PtauPubKey +} + +type PtauPubKey struct { + TauG1 []G1 + TauG2 []G2 + AlphaTauG1 []G1 + BetaTauG1 []G1 + BetaG2 G2 +} + +type PtauFile struct { + Header PtauHeader + Sections [][]SectionSegment + Reader *os.File +} + +func InitPtau(path string) (*PtauFile, error) { + reader, err := os.Open(path) + + if err != nil { + return nil, err + } + + var ptauStr = make([]byte, 4) + _, err = reader.Read(ptauStr) + + fmt.Printf("zkeyStr: %s \n", string(ptauStr)) + + // version + _, err = readULE32(reader) + + // number of sections + _, err = readULE32(reader) + + numSections := uint32(7) + fmt.Printf("num sections: %v \n", numSections) + + // in practice, all sections have only one segment, but who knows... + // 1-based indexing, so we need to allocate one more than the number of sections + sections := make([][]SectionSegment, numSections+1) + for i := uint32(0); i < numSections; i++ { + ht, _ := readULE32(reader) + hl, _ := readULE64(reader) + fmt.Printf("ht: %v \n", ht) + fmt.Printf("hl: %v \n", hl) + if sections[ht] == nil { + sections[ht] = make([]SectionSegment, 0) + } + pos, _ := reader.Seek(0, io.SeekCurrent) + sections[ht] = append(sections[ht], SectionSegment{pos: uint64(pos), size: hl}) + reader.Seek(int64(hl), io.SeekCurrent) + } + + fmt.Printf("sections: %v \n", sections) + + // section size + _, err = readBigInt(reader, 8) + + // Header (1) + seekToUniqueSection(reader, sections, 1) + + // Read header + header, err := readPtauHeader(reader) + + if err != nil { + return nil, err + } + + return &PtauFile{Header: header, Sections: sections, Reader: reader}, nil +} + +func (ptauFile *PtauFile) Close() error { + return ptauFile.Reader.Close() +} + +func (ptauFile *PtauFile) DomainSize() int { + return 1 << ptauFile.Header.Power +} + +func (ptauFile *PtauFile) readG1s(out chan bn254.G1Affine, count int) error { + for i := 0; i < count; i++ { + g1, err := readG1(ptauFile.Reader) + if err != nil { + return err + } + g1Affine := bn254.G1Affine{} + x := bytesToElement(g1[0].Bytes()) + g1Affine.X = x + y := bytesToElement(g1[1].Bytes()) + g1Affine.Y = y + if !g1Affine.IsOnCurve() { + fmt.Printf("readG1s: \n index: %v g1Affine.X: %v \n g1Affine.Y: %v \n", i, g1Affine.X.String(), g1Affine.Y.String()) + panic("g1Affine is not on curve") + } + out <- g1Affine + } + return nil +} + +func (ptauFile *PtauFile) readG2() (bn254.G2Affine, error) { + g2, err := readG2(ptauFile.Reader) + if err != nil { + return bn254.G2Affine{}, err + } + g2Affine := bn254.G2Affine{} + x0 := bytesToElement(g2[0].Bytes()) + x1 := bytesToElement(g2[1].Bytes()) + g2Affine.X.A0 = x0 + g2Affine.X.A1 = x1 + y0 := bytesToElement(g2[2].Bytes()) + y1 := bytesToElement(g2[3].Bytes()) + g2Affine.Y.A0 = y0 + g2Affine.Y.A1 = y1 + if !g2Affine.IsOnCurve() { + + fmt.Printf("readG2s: \n, g2Affine.X.A0: %v \n g2Affine.X.A1: %v \n g2Affine.Y.A0: %v \n 
g2Affine.Y.A1 %v \n", g2Affine.X.A0.String(), g2Affine.X.A1.String(), g2Affine.Y.A0.String(), g2Affine.Y.A1.String()) + panic("g2Affine is not on curve") + } + return g2Affine, nil +} + +func (ptauFile *PtauFile) readG2s(out chan bn254.G2Affine, count int) error { + for i := 0; i < count; i++ { + g2Affine, err := ptauFile.readG2() + if err != nil { + return err + } + out <- g2Affine + } + return nil +} + +func (ptauFile *PtauFile) ReadTauG1(out chan bn254.G1Affine) error { + defer close(out) + seekToUniqueSection(ptauFile.Reader, ptauFile.Sections, 2) + numPoints := ptauFile.DomainSize()*2 - 1 + fmt.Printf("tauG1 numPoints: %v \n", numPoints) + ptauFile.readG1s(out, numPoints) + return nil +} + +func (ptauFile *PtauFile) ReadTauG2(out chan bn254.G2Affine) error { + defer close(out) + seekToUniqueSection(ptauFile.Reader, ptauFile.Sections, 3) + numPoints := ptauFile.DomainSize() + fmt.Printf("tauG2 numPoints: %v \n", numPoints) + ptauFile.readG2s(out, numPoints) + return nil +} + +func (ptauFile *PtauFile) ReadAlphaTauG1(out chan bn254.G1Affine) error { + defer close(out) + seekToUniqueSection(ptauFile.Reader, ptauFile.Sections, 4) + numPoints := ptauFile.DomainSize() + fmt.Printf("alphaTauG1 numPoints: %v \n", numPoints) + ptauFile.readG1s(out, numPoints) + return nil +} + +func (ptauFile *PtauFile) ReadBetaTauG1(out chan bn254.G1Affine) error { + defer close(out) + seekToUniqueSection(ptauFile.Reader, ptauFile.Sections, 5) + numPoints := ptauFile.DomainSize() + fmt.Printf("betaTauG1 numPoints: %v \n", numPoints) + ptauFile.readG1s(out, numPoints) + return nil +} + +func (ptauFile *PtauFile) ReadBetaG2() (bn254.G2Affine, error) { + fmt.Printf("betaG2: \n") + seekToUniqueSection(ptauFile.Reader, ptauFile.Sections, 6) + return ptauFile.readG2() +} + +func ReadPtau(zkeyPath string) (Ptau, error) { + reader, err := os.Open(zkeyPath) + + if err != nil { + return Ptau{}, err + } + + defer reader.Close() + + var ptauStr = make([]byte, 4) + _, err = reader.Read(ptauStr) + + fmt.Printf("zkeyStr: %s \n", string(ptauStr)) + + // version + _, err = readULE32(reader) + + // number of sections + _, err = readULE32(reader) + + numSections := uint32(7) + fmt.Printf("num sections: %v \n", numSections) + + // in practice, all sections have only one segment, but who knows... 
+ // 1-based indexing, so we need to allocate one more than the number of sections + sections := make([][]SectionSegment, numSections+1) + for i := uint32(0); i < numSections; i++ { + ht, _ := readULE32(reader) + hl, _ := readULE64(reader) + fmt.Printf("ht: %v \n", ht) + fmt.Printf("hl: %v \n", hl) + if sections[ht] == nil { + sections[ht] = make([]SectionSegment, 0) + } + pos, _ := reader.Seek(0, io.SeekCurrent) + sections[ht] = append(sections[ht], SectionSegment{pos: uint64(pos), size: hl}) + reader.Seek(int64(hl), io.SeekCurrent) + } + + fmt.Printf("sections: %v \n", sections) + + // section size + _, err = readBigInt(reader, 8) + + // Header (1) + seekToUniqueSection(reader, sections, 1) + + // Read header + header, err := readPtauHeader(reader) + + if err != nil { + return Ptau{}, err + } + + // TauG1 (2) + seekToUniqueSection(reader, sections, 2) + + var PtauPubKey PtauPubKey + + twoToPower := uint32(1 << header.Power) + + fmt.Printf("tauG1: \n") + + PtauPubKey.TauG1, err = readG1Array(reader, twoToPower*2-1) + + if err != nil { + return Ptau{}, err + } + + // TauG2 (3) + seekToUniqueSection(reader, sections, 3) + + fmt.Printf("tauG2: \n") + + PtauPubKey.TauG2, err = readG2Array(reader, twoToPower) + + if err != nil { + return Ptau{}, err + } + + // AlphaTauG1 (4) + seekToUniqueSection(reader, sections, 4) + + fmt.Printf("alphaTauG1: \n") + + PtauPubKey.AlphaTauG1, err = readG1Array(reader, twoToPower) + + if err != nil { + return Ptau{}, err + } + + // BetaTauG1 (5) + seekToUniqueSection(reader, sections, 5) + + fmt.Printf("betaTauG1: \n") + + PtauPubKey.BetaTauG1, err = readG1Array(reader, twoToPower) + + if err != nil { + return Ptau{}, err + } + + // BetaG2 (6) + seekToUniqueSection(reader, sections, 6) + + fmt.Printf("betaG2: \n") + + PtauPubKey.BetaG2, err = readG2(reader) + + if err != nil { + return Ptau{}, err + } + + return Ptau{Header: header, PTauPubKey: PtauPubKey}, nil +} + +func readPtauHeader(reader io.ReadSeeker) (PtauHeader, error) { + var header PtauHeader + + n8, err := readULE32(reader) + + if err != nil { + return PtauHeader{}, err + } + + header.N8 = n8 + + prime, err := readBigInt(reader, n8) + + if err != nil { + return PtauHeader{}, err + } + + header.Prime = prime + + power, err := readULE32(reader) + + if err != nil { + return PtauHeader{}, err + } + + header.Power = power + + return header, nil +} + +func readG1Array(reader io.ReadSeeker, numPoints uint32) ([]G1, error) { + g1s := make([]G1, numPoints) + for i := uint32(0); i < numPoints; i++ { + g1, err := readG1(reader) + + if err != nil { + return []G1{}, err + } + + g1s[i] = g1 + } + return g1s, nil +} + +func readG2Array(reader io.ReadSeeker, numPoints uint32) ([]G2, error) { + g2s := make([]G2, numPoints) + + for i := uint32(0); i < numPoints; i++ { + g2, err := readG2(reader) + + if err != nil { + return []G2{}, err + } + + g2s[i] = g2 + } + + return g2s, nil +} + +func readTauG2(reader io.ReadSeeker) ([]G2, error) { + tauG2_s, err := readG2(reader) + + if err != nil { + return []G2{}, err + } + + tauG2_sx, err := readG2(reader) + + if err != nil { + return []G2{}, err + } + + return []G2{tauG2_s, tauG2_sx}, nil +} + +func readG1(reader io.ReadSeeker) (G1, error) { + var g1 G1 + + x, err := readBigInt(reader, BN254_FIELD_ELEMENT_SIZE) + + if err != nil { + return G1{}, err + } + + g1[0] = x + + y, err := readBigInt(reader, BN254_FIELD_ELEMENT_SIZE) + + if err != nil { + return G1{}, err + } + + g1[1] = y + + return g1, nil +} + +func readG2(reader io.ReadSeeker) (G2, error) { + var g2 G2 + + x0, 
err := readBigInt(reader, BN254_FIELD_ELEMENT_SIZE) + + if err != nil { + return G2{}, err + } + + g2[0] = x0 + + x1, err := readBigInt(reader, BN254_FIELD_ELEMENT_SIZE) + + if err != nil { + return G2{}, err + } + + g2[1] = x1 + + y0, err := readBigInt(reader, BN254_FIELD_ELEMENT_SIZE) + + if err != nil { + return G2{}, err + } + + g2[2] = y0 + + y1, err := readBigInt(reader, BN254_FIELD_ELEMENT_SIZE) + + if err != nil { + return G2{}, err + } + + g2[3] = y1 + + return g2, nil +} + +func readULE32(reader io.Reader) (uint32, error) { + var buffer = make([]byte, 4) + + _, err := reader.Read(buffer) + + if err != nil { + return 0, err + } + + return binary.LittleEndian.Uint32(buffer), nil +} + +func readULE64(reader io.Reader) (uint64, error) { + var buffer = make([]byte, 8) + + _, err := reader.Read(buffer) + + if err != nil { + return 0, err + } + + return binary.LittleEndian.Uint64(buffer), nil +} + +func readBigInt(reader io.Reader, n8 uint32) (big.Int, error) { + var buffer = make([]byte, n8) + + _, err := reader.Read(buffer) + reverseSlice(buffer) + + if err != nil { + return *big.NewInt(0), err + } + + bigInt := big.NewInt(0).SetBytes(buffer) + + return *bigInt, nil +} + +func reverseSlice(slice []byte) []byte { + for i := 0; i < len(slice)/2; i++ { + j := len(slice) - i - 1 + slice[i], slice[j] = slice[j], slice[i] + } + return slice +} + +func bytesToElement(b []byte) fp.Element { + var z fp.Element + reverseSlice(b) + if len(b) < 32 { + b = append(b, make([]byte, 32-len(b))...) + } + + z[0] = binary.LittleEndian.Uint64(b[0:8]) + z[1] = binary.LittleEndian.Uint64(b[8:16]) + z[2] = binary.LittleEndian.Uint64(b[16:24]) + z[3] = binary.LittleEndian.Uint64(b[24:32]) + + return z +} + +/////////////////////////////////////////////////////////////////// +/// ZKEY /// +/////////////////////////////////////////////////////////////////// + +// Taken from the iden3/snarkjs repo, zkey_utils.js +// (https://github.com/iden3/snarkjs/blob/fb144555d8ce4779ad79e707f269771c672a8fb7/src/zkey_utils.js#L20-L45) +// Format +// ====== +// 4 bytes, zket +// 4 bytes, version +// 4 bytes, number of sections +// 4 bytes, section number +// 8 bytes, section size +// Header(1) +// 4 bytes, Prover Type 1 Groth +// HeaderGroth(2) +// 4 bytes, n8q +// n8q bytes, q +// 4 bytes, n8r +// n8r bytes, r +// 4 bytes, NVars +// 4 bytes, NPub +// 4 bytes, DomainSize (multiple of 2) +// alpha1 +// beta1 +// delta1 +// beta2 +// gamma2 +// delta2 + +const GROTH_16_PROTOCOL_ID = uint32(1) + +type NotGroth16 struct { + Err error +} + +func (r *NotGroth16) Error() string { + return fmt.Sprintf("Groth16 is the only supported protocol at this time (PLONK and FFLONK are not): %v", r.Err) +} + +// Incomplete (only extracts necessary fields for conversion to .ph1 format) +type Zkey struct { + ZkeyHeader ZkeyHeader + protocolHeader HeaderGroth +} + +type ZkeyHeader struct { + ProtocolID uint32 + protocolHeader HeaderGroth +} + +type HeaderGroth struct { + n8q uint32 + q big.Int + n8r uint32 + r big.Int + nVars uint32 + nPublic uint32 + domainSize uint32 + power uint32 +} + +type SectionSegment struct { + pos uint64 + size uint64 +} + +func ReadZkey(zkeyPath string) (Zkey, error) { + reader, err := os.Open(zkeyPath) + + if err != nil { + return Zkey{}, err + } + + defer reader.Close() + + // zkey + var zkeyStr = make([]byte, 4) + _, err = reader.Read(zkeyStr) + fmt.Printf("zkeyStr: %s \n", string(zkeyStr)) + + // version + _, err = readULE32(reader) + + // number of sections + numSections, err := readULE32(reader) + fmt.Printf("num 
sections: %v \n", numSections) + + // in practice, all sections have only one segment, but who knows... + // 1-based indexing, so we need to allocate one more than the number of sections + sections := make([][]SectionSegment, numSections+1) + for i := uint32(0); i < numSections; i++ { + ht, _ := readULE32(reader) + hl, _ := readULE64(reader) + fmt.Printf("ht: %v \n", ht) + fmt.Printf("hl: %v \n", hl) + if sections[ht] == nil { + sections[ht] = make([]SectionSegment, 0) + } + pos, _ := reader.Seek(0, io.SeekCurrent) + sections[ht] = append(sections[ht], SectionSegment{pos: uint64(pos), size: hl}) + reader.Seek(int64(hl), io.SeekCurrent) + } + + fmt.Printf("sections: %v \n", sections) + + // section size + _, err = readBigInt(reader, 8) + + seekToUniqueSection(reader, sections, 1) + header, err := readHeader(reader, sections) + + if err != nil { + return Zkey{}, err + } + + zkey := Zkey{ZkeyHeader: header, protocolHeader: header.protocolHeader} + + return zkey, nil +} + +func seekToUniqueSection(reader io.ReadSeeker, sections [][]SectionSegment, sectionId uint32) { + section := sections[sectionId] + + if len(section) > 1 { + panic("Section has more than one segment") + } + + reader.Seek(int64(section[0].pos), io.SeekStart) +} + +func readHeader(reader io.ReadSeeker, sections [][]SectionSegment) (ZkeyHeader, error) { + var header = ZkeyHeader{} + + protocolID, err := readULE32(reader) + + if err != nil { + return header, err + } + + // if groth16 + if protocolID == GROTH_16_PROTOCOL_ID { + seekToUniqueSection(reader, sections, 2) + headerGroth, err := readHeaderGroth16(reader) + + if err != nil { + return header, err + } + + header = ZkeyHeader{ProtocolID: protocolID, protocolHeader: headerGroth} + + } else { + return header, &NotGroth16{Err: errors.New("ProtocolID is not Groth16")} + } + + return header, nil +} + +func readHeaderGroth16(reader io.ReadSeeker) (HeaderGroth, error) { + var header = HeaderGroth{} + + n8q, err := readULE32(reader) + + fmt.Printf("n8q is: %v \n", n8q) + + if err != nil { + return header, err + } + + q, err := readBigInt(reader, n8q) + + if err != nil { + return header, err + } + + n8r, err := readULE32(reader) + + if err != nil { + return header, err + } + + r, err := readBigInt(reader, n8r) + + if err != nil { + return header, err + } + + nVars, err := readULE32(reader) + + if err != nil { + return header, err + } + + nPublic, err := readULE32(reader) + + if err != nil { + return header, err + } + + domainSize, err := readULE32(reader) + + if err != nil { + return header, err + } + + power := math.Log2(float64(domainSize)) + + power_int := uint32(math.Ceil(power)) + + header = HeaderGroth{n8q: n8q, q: q, n8r: n8r, r: r, nVars: nVars, nPublic: nPublic, domainSize: domainSize, power: power_int} + + return header, nil +} From 450cef091b4d6dbd26be2b47ffbdd73c64e4f9ad Mon Sep 17 00:00:00 2001 From: Hussein Ait Lahcen Date: Fri, 26 Jul 2024 17:04:16 +0200 Subject: [PATCH 02/52] feat(mpc): initial shot --- Cargo.toml | 5 + flake.nix | 1 + .../galoisd/cmd/phase1_init.go} | 158 +++++-- galoisd/cmd/galoisd/cmd/phase2_contribute.go | 26 ++ galoisd/cmd/galoisd/cmd/phase2_extract.go | 50 +++ galoisd/cmd/galoisd/cmd/phase2_init.go | 38 ++ galoisd/cmd/galoisd/cmd/phase2_verify.go | 30 ++ galoisd/cmd/galoisd/cmd/utils.go | 31 ++ galoisd/cmd/galoisd/lib.go | 83 ++++ galoisd/cmd/galoisd/main.go | 10 + galoisd/galoisd.nix | 143 ++++--- .../lightclient/nonadjacent/circuit_test.go | 369 +++++++++++++++- mpc/client/Cargo.toml | 17 + mpc/client/src/main.rs | 263 ++++++++++++ 
mpc/coordinator/Cargo.toml | 16 + mpc/coordinator/src/fsm.rs | 22 + mpc/coordinator/src/main.rs | 404 ++++++++++++++++++ mpc/mpc.nix | 31 ++ mpc/shared/Cargo.toml | 8 + mpc/shared/src/lib.rs | 88 ++++ mpc/shared/src/types.rs | 8 + tools/rust/rust.nix | 7 +- 22 files changed, 1706 insertions(+), 102 deletions(-) rename galoisd/{pkg/lightclient/nonadjacent/ptau.go => cmd/galoisd/cmd/phase1_init.go} (80%) create mode 100644 galoisd/cmd/galoisd/cmd/phase2_contribute.go create mode 100644 galoisd/cmd/galoisd/cmd/phase2_extract.go create mode 100644 galoisd/cmd/galoisd/cmd/phase2_init.go create mode 100644 galoisd/cmd/galoisd/cmd/phase2_verify.go create mode 100644 galoisd/cmd/galoisd/cmd/utils.go create mode 100644 galoisd/cmd/galoisd/lib.go create mode 100644 mpc/client/Cargo.toml create mode 100644 mpc/client/src/main.rs create mode 100644 mpc/coordinator/Cargo.toml create mode 100644 mpc/coordinator/src/fsm.rs create mode 100644 mpc/coordinator/src/main.rs create mode 100644 mpc/mpc.nix create mode 100644 mpc/shared/Cargo.toml create mode 100644 mpc/shared/src/lib.rs create mode 100644 mpc/shared/src/types.rs diff --git a/Cargo.toml b/Cargo.toml index 33b7f21d58..4b522d323d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -153,6 +153,10 @@ members = [ "lib/cometbft-types", "lib/galois-rpc", "lib/beacon-api-types", + + "mpc/shared", + "mpc/client", + "mpc/coordinator", ] [workspace.package] @@ -222,6 +226,7 @@ ibc-vm-rs = { path = "lib/ibc-vm-rs", default-features = fa ics008-wasm-client = { path = "cosmwasm/ics08-light-clients/interface", default-features = false } ics23 = { path = "lib/ics23", default-features = false } macros = { path = "lib/macros", default-features = false } +mpc-shared = { path = "mpc/shared", default-features = false } move-bindgen = { path = "tools/move-bindgen", default-features = false } move-bindgen-derive = { path = "lib/move-bindgen-derive", default-features = false } pg-queue = { path = "lib/pg-queue", default-features = false } diff --git a/flake.nix b/flake.nix index 3b4daa4be2..621cefbfe8 100644 --- a/flake.nix +++ b/flake.nix @@ -211,6 +211,7 @@ ./galoisd/galoisd.nix ./unionvisor/unionvisor.nix ./voyager/voyager.nix + ./mpc/mpc.nix ./lib/ics23/ics23.nix ./lib/ssz/ssz.nix ./lib/unionlabs/unionlabs.nix diff --git a/galoisd/pkg/lightclient/nonadjacent/ptau.go b/galoisd/cmd/galoisd/cmd/phase1_init.go similarity index 80% rename from galoisd/pkg/lightclient/nonadjacent/ptau.go rename to galoisd/cmd/galoisd/cmd/phase1_init.go index 2bcdfe00c7..ed31856d4b 100644 --- a/galoisd/pkg/lightclient/nonadjacent/ptau.go +++ b/galoisd/cmd/galoisd/cmd/phase1_init.go @@ -1,6 +1,8 @@ -package nonadjacent +package cmd import ( + "github.com/spf13/cobra" + "encoding/binary" "errors" "fmt" @@ -11,12 +13,34 @@ import ( "github.com/consensys/gnark-crypto/ecc/bn254" "github.com/consensys/gnark-crypto/ecc/bn254/fp" + mpc "github.com/consensys/gnark/backend/groth16/bn254/mpcsetup" ) +func Phase1InitCmd() *cobra.Command { + var cmd = &cobra.Command{ + Short: "Initialize the phase 1 of the groth16 multi-party computation.", + Use: "mpc-phase1-init [ptau] [phase1FinalOutput]", + Args: cobra.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + ptauPath := args[0] + ptau, err := ReadPtau(ptauPath) + if err != nil { + return err + } + srs1, err := convertPtauToPhase1(ptau) + if err != nil { + return err + } + phase1FinalPath := args[1] + return saveTo(phase1FinalPath, &srs1) + }, + } + return cmd +} + /////////////////////////////////////////////////////////////////// /// PTAU 
/// /////////////////////////////////////////////////////////////////// - // Format // Taken from the iden3/snarkjs repo powersoftau_new.js file // https://github.com/iden3/snarkjs/blob/master/src/powersoftau_new.js @@ -109,8 +133,6 @@ func InitPtau(path string) (*PtauFile, error) { var ptauStr = make([]byte, 4) _, err = reader.Read(ptauStr) - fmt.Printf("zkeyStr: %s \n", string(ptauStr)) - // version _, err = readULE32(reader) @@ -118,7 +140,6 @@ func InitPtau(path string) (*PtauFile, error) { _, err = readULE32(reader) numSections := uint32(7) - fmt.Printf("num sections: %v \n", numSections) // in practice, all sections have only one segment, but who knows... // 1-based indexing, so we need to allocate one more than the number of sections @@ -126,8 +147,6 @@ func InitPtau(path string) (*PtauFile, error) { for i := uint32(0); i < numSections; i++ { ht, _ := readULE32(reader) hl, _ := readULE64(reader) - fmt.Printf("ht: %v \n", ht) - fmt.Printf("hl: %v \n", hl) if sections[ht] == nil { sections[ht] = make([]SectionSegment, 0) } @@ -136,8 +155,6 @@ func InitPtau(path string) (*PtauFile, error) { reader.Seek(int64(hl), io.SeekCurrent) } - fmt.Printf("sections: %v \n", sections) - // section size _, err = readBigInt(reader, 8) @@ -174,7 +191,6 @@ func (ptauFile *PtauFile) readG1s(out chan bn254.G1Affine, count int) error { y := bytesToElement(g1[1].Bytes()) g1Affine.Y = y if !g1Affine.IsOnCurve() { - fmt.Printf("readG1s: \n index: %v g1Affine.X: %v \n g1Affine.Y: %v \n", i, g1Affine.X.String(), g1Affine.Y.String()) panic("g1Affine is not on curve") } out <- g1Affine @@ -198,7 +214,6 @@ func (ptauFile *PtauFile) readG2() (bn254.G2Affine, error) { g2Affine.Y.A1 = y1 if !g2Affine.IsOnCurve() { - fmt.Printf("readG2s: \n, g2Affine.X.A0: %v \n g2Affine.X.A1: %v \n g2Affine.Y.A0: %v \n g2Affine.Y.A1 %v \n", g2Affine.X.A0.String(), g2Affine.X.A1.String(), g2Affine.Y.A0.String(), g2Affine.Y.A1.String()) panic("g2Affine is not on curve") } return g2Affine, nil @@ -219,7 +234,6 @@ func (ptauFile *PtauFile) ReadTauG1(out chan bn254.G1Affine) error { defer close(out) seekToUniqueSection(ptauFile.Reader, ptauFile.Sections, 2) numPoints := ptauFile.DomainSize()*2 - 1 - fmt.Printf("tauG1 numPoints: %v \n", numPoints) ptauFile.readG1s(out, numPoints) return nil } @@ -228,7 +242,6 @@ func (ptauFile *PtauFile) ReadTauG2(out chan bn254.G2Affine) error { defer close(out) seekToUniqueSection(ptauFile.Reader, ptauFile.Sections, 3) numPoints := ptauFile.DomainSize() - fmt.Printf("tauG2 numPoints: %v \n", numPoints) ptauFile.readG2s(out, numPoints) return nil } @@ -237,7 +250,6 @@ func (ptauFile *PtauFile) ReadAlphaTauG1(out chan bn254.G1Affine) error { defer close(out) seekToUniqueSection(ptauFile.Reader, ptauFile.Sections, 4) numPoints := ptauFile.DomainSize() - fmt.Printf("alphaTauG1 numPoints: %v \n", numPoints) ptauFile.readG1s(out, numPoints) return nil } @@ -246,13 +258,11 @@ func (ptauFile *PtauFile) ReadBetaTauG1(out chan bn254.G1Affine) error { defer close(out) seekToUniqueSection(ptauFile.Reader, ptauFile.Sections, 5) numPoints := ptauFile.DomainSize() - fmt.Printf("betaTauG1 numPoints: %v \n", numPoints) ptauFile.readG1s(out, numPoints) return nil } func (ptauFile *PtauFile) ReadBetaG2() (bn254.G2Affine, error) { - fmt.Printf("betaG2: \n") seekToUniqueSection(ptauFile.Reader, ptauFile.Sections, 6) return ptauFile.readG2() } @@ -269,8 +279,6 @@ func ReadPtau(zkeyPath string) (Ptau, error) { var ptauStr = make([]byte, 4) _, err = reader.Read(ptauStr) - fmt.Printf("zkeyStr: %s \n", string(ptauStr)) - // 
version _, err = readULE32(reader) @@ -278,7 +286,6 @@ func ReadPtau(zkeyPath string) (Ptau, error) { _, err = readULE32(reader) numSections := uint32(7) - fmt.Printf("num sections: %v \n", numSections) // in practice, all sections have only one segment, but who knows... // 1-based indexing, so we need to allocate one more than the number of sections @@ -286,8 +293,6 @@ func ReadPtau(zkeyPath string) (Ptau, error) { for i := uint32(0); i < numSections; i++ { ht, _ := readULE32(reader) hl, _ := readULE64(reader) - fmt.Printf("ht: %v \n", ht) - fmt.Printf("hl: %v \n", hl) if sections[ht] == nil { sections[ht] = make([]SectionSegment, 0) } @@ -296,8 +301,6 @@ func ReadPtau(zkeyPath string) (Ptau, error) { reader.Seek(int64(hl), io.SeekCurrent) } - fmt.Printf("sections: %v \n", sections) - // section size _, err = readBigInt(reader, 8) @@ -318,8 +321,6 @@ func ReadPtau(zkeyPath string) (Ptau, error) { twoToPower := uint32(1 << header.Power) - fmt.Printf("tauG1: \n") - PtauPubKey.TauG1, err = readG1Array(reader, twoToPower*2-1) if err != nil { @@ -329,8 +330,6 @@ func ReadPtau(zkeyPath string) (Ptau, error) { // TauG2 (3) seekToUniqueSection(reader, sections, 3) - fmt.Printf("tauG2: \n") - PtauPubKey.TauG2, err = readG2Array(reader, twoToPower) if err != nil { @@ -340,8 +339,6 @@ func ReadPtau(zkeyPath string) (Ptau, error) { // AlphaTauG1 (4) seekToUniqueSection(reader, sections, 4) - fmt.Printf("alphaTauG1: \n") - PtauPubKey.AlphaTauG1, err = readG1Array(reader, twoToPower) if err != nil { @@ -351,8 +348,6 @@ func ReadPtau(zkeyPath string) (Ptau, error) { // BetaTauG1 (5) seekToUniqueSection(reader, sections, 5) - fmt.Printf("betaTauG1: \n") - PtauPubKey.BetaTauG1, err = readG1Array(reader, twoToPower) if err != nil { @@ -362,8 +357,6 @@ func ReadPtau(zkeyPath string) (Ptau, error) { // BetaG2 (6) seekToUniqueSection(reader, sections, 6) - fmt.Printf("betaG2: \n") - PtauPubKey.BetaG2, err = readG2(reader) if err != nil { @@ -650,14 +643,12 @@ func ReadZkey(zkeyPath string) (Zkey, error) { // zkey var zkeyStr = make([]byte, 4) _, err = reader.Read(zkeyStr) - fmt.Printf("zkeyStr: %s \n", string(zkeyStr)) // version _, err = readULE32(reader) // number of sections numSections, err := readULE32(reader) - fmt.Printf("num sections: %v \n", numSections) // in practice, all sections have only one segment, but who knows... 
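	// (Same section-table scan as the ptau reader: 4-byte id, 8-byte length,
	// then the payload itself; only sections 1 (header) and 2 (Groth16
	// header) are consumed below.)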
// 1-based indexing, so we need to allocate one more than the number of sections @@ -665,8 +656,6 @@ func ReadZkey(zkeyPath string) (Zkey, error) { for i := uint32(0); i < numSections; i++ { ht, _ := readULE32(reader) hl, _ := readULE64(reader) - fmt.Printf("ht: %v \n", ht) - fmt.Printf("hl: %v \n", hl) if sections[ht] == nil { sections[ht] = make([]SectionSegment, 0) } @@ -675,8 +664,6 @@ func ReadZkey(zkeyPath string) (Zkey, error) { reader.Seek(int64(hl), io.SeekCurrent) } - fmt.Printf("sections: %v \n", sections) - // section size _, err = readBigInt(reader, 8) @@ -734,8 +721,6 @@ func readHeaderGroth16(reader io.ReadSeeker) (HeaderGroth, error) { n8q, err := readULE32(reader) - fmt.Printf("n8q is: %v \n", n8q) - if err != nil { return header, err } @@ -784,3 +769,94 @@ func readHeaderGroth16(reader io.ReadSeeker) (HeaderGroth, error) { return header, nil } + +func convertPtauToPhase1(ptau Ptau) (phase1 mpc.Phase1, err error) { + tauG1 := make([]bn254.G1Affine, len(ptau.PTauPubKey.TauG1)) + for i, g1 := range ptau.PTauPubKey.TauG1 { + g1Affine := bn254.G1Affine{} + x := bytesToElement(g1[0].Bytes()) + g1Affine.X = x + y := bytesToElement(g1[1].Bytes()) + g1Affine.Y = y + if !g1Affine.IsOnCurve() { + fmt.Printf("tauG1: \n index: %v g1Affine.X: %v \n g1Affine.Y: %v \n", i, g1Affine.X.String(), g1Affine.Y.String()) + panic("g1Affine is not on curve") + } + tauG1[i] = g1Affine + } + + alphaTauG1 := make([]bn254.G1Affine, len(ptau.PTauPubKey.AlphaTauG1)) + for i, g1 := range ptau.PTauPubKey.AlphaTauG1 { + g1Affine := bn254.G1Affine{} + x := bytesToElement(g1[0].Bytes()) + g1Affine.X = x + y := bytesToElement(g1[1].Bytes()) + g1Affine.Y = y + if !g1Affine.IsOnCurve() { + fmt.Printf("alphaTauG1: \n index: %v g1Affine.X: %v \n g1Affine.Y: %v \n", i, g1Affine.X.String(), g1Affine.Y.String()) + panic("g1Affine is not on curve") + } + alphaTauG1[i] = g1Affine + } + // fmt.Printf("alphaTauG1: %v \n", alphaTauG1) + + betaTauG1 := make([]bn254.G1Affine, len(ptau.PTauPubKey.BetaTauG1)) + + for i, g1 := range ptau.PTauPubKey.BetaTauG1 { + g1Affine := bn254.G1Affine{} + x := bytesToElement(g1[0].Bytes()) + g1Affine.X = x + y := bytesToElement(g1[1].Bytes()) + g1Affine.Y = y + if !g1Affine.IsOnCurve() { + fmt.Printf("betaTauG1: \n index: %v, g1Affine.X: %v \n g1Affine.Y: %v \n", i, g1Affine.X.String(), g1Affine.Y.String()) + panic("g1Affine is not on curve") + } + betaTauG1[i] = g1Affine + } + tauG2 := make([]bn254.G2Affine, len(ptau.PTauPubKey.TauG2)) + for i, g2 := range ptau.PTauPubKey.TauG2 { + g2Affine := bn254.G2Affine{} + x0 := bytesToElement(g2[0].Bytes()) + x1 := bytesToElement(g2[1].Bytes()) + g2Affine.X.A0 = x0 + g2Affine.X.A1 = x1 + y0 := bytesToElement(g2[2].Bytes()) + y1 := bytesToElement(g2[3].Bytes()) + g2Affine.Y.A0 = y0 + g2Affine.Y.A1 = y1 + if !g2Affine.IsOnCurve() { + fmt.Printf("tauG2: \n index: %v, g2Affine.X.A0: %v \n g2Affine.X.A1: %v \n g2Affine.Y.A0: %v \n g2Affine.Y.A1 %v \n", i, g2Affine.X.A0.String(), g2Affine.X.A1.String(), g2Affine.Y.A0.String(), g2Affine.Y.A1.String()) + panic("g2Affine is not on curve") + } + tauG2[i] = g2Affine + } + + betaG2 := bn254.G2Affine{} + { + g2 := ptau.PTauPubKey.BetaG2 + + x0 := bytesToElement(g2[0].Bytes()) + x1 := bytesToElement(g2[1].Bytes()) + betaG2.X.A0 = x0 + betaG2.X.A1 = x1 + y0 := bytesToElement(g2[2].Bytes()) + y1 := bytesToElement(g2[3].Bytes()) + betaG2.Y.A0 = y0 + betaG2.Y.A1 = y1 + + if !betaG2.IsOnCurve() { + fmt.Printf("g2Affine.X.A0: %v \n g2Affine.X.A1: %v \n g2Affine.Y.A0: %v \n g2Affine.Y.A1 %v \n", betaG2.X.A0.String(), 
betaG2.X.A1.String(), betaG2.Y.A0.String(), betaG2.Y.A1.String())
+			panic("g2Affine is not on curve")
+		}
+	}
+
+	phase1.Parameters.G1.Tau = tauG1
+	phase1.Parameters.G1.AlphaTau = alphaTauG1
+	phase1.Parameters.G1.BetaTau = betaTauG1
+
+	phase1.Parameters.G2.Tau = tauG2
+	phase1.Parameters.G2.Beta = betaG2
+
+	return phase1, nil
+}
diff --git a/galoisd/cmd/galoisd/cmd/phase2_contribute.go b/galoisd/cmd/galoisd/cmd/phase2_contribute.go
new file mode 100644
index 0000000000..517e7a0fef
--- /dev/null
+++ b/galoisd/cmd/galoisd/cmd/phase2_contribute.go
@@ -0,0 +1,26 @@
+package cmd
+
+import (
+	mpc "github.com/consensys/gnark/backend/groth16/bn254/mpcsetup"
+	"github.com/spf13/cobra"
+)
+
+func Phase2ContributeCmd() *cobra.Command {
+	var cmd = &cobra.Command{
+		Short: "Contribute to the phase 2 of the groth16 multi-party computation.",
+		Use:   "mpc-phase2-contrib [phase2] [phase2Output]",
+		Args:  cobra.ExactArgs(2),
+		RunE: func(cmd *cobra.Command, args []string) error {
+			phase2Path := args[0]
+			var srs2 mpc.Phase2
+			err := readFrom(phase2Path, &srs2)
+			if err != nil {
+				return err
+			}
+			srs2.Contribute()
+			phase2Output := args[1]
+			return saveTo(phase2Output, &srs2)
+		},
+	}
+	return cmd
+}
diff --git a/galoisd/cmd/galoisd/cmd/phase2_extract.go b/galoisd/cmd/galoisd/cmd/phase2_extract.go
new file mode 100644
index 0000000000..d0464f6c51
--- /dev/null
+++ b/galoisd/cmd/galoisd/cmd/phase2_extract.go
@@ -0,0 +1,50 @@
+package cmd
+
+import (
+	mpc "github.com/consensys/gnark/backend/groth16/bn254/mpcsetup"
+	bn254 "github.com/consensys/gnark/constraint/bn254"
+	"github.com/spf13/cobra"
+)
+
+func Phase2ExtractCmd() *cobra.Command {
+	var cmd = &cobra.Command{
+		Short: "Extract the proving and verifying keys from the groth16 multi-party computation.",
+		Use:   "mpc-phase2-extract [r1cs] [phase1Final] [phase2Final] [phase2Evals] [provingKeyOutput] [verifyingKeyOutput]",
+		Args:  cobra.ExactArgs(6),
+		RunE: func(cmd *cobra.Command, args []string) error {
+			r1csPath := args[0]
+			var r1cs bn254.R1CS
+			err := readFrom(r1csPath, &r1cs)
+			if err != nil {
+				return err
+			}
+			phase1Path := args[1]
+			var srs1 mpc.Phase1
+			err = readFrom(phase1Path, &srs1)
+			if err != nil {
+				return err
+			}
+			phase2Path := args[2]
+			var srs2 mpc.Phase2
+			err = readFrom(phase2Path, &srs2)
+			if err != nil {
+				return err
+			}
+			phase2EvalsPath := args[3]
+			var evals mpc.Phase2Evaluations
+			err = readFrom(phase2EvalsPath, &evals)
+			if err != nil {
+				return err
+			}
+			pk, vk := mpc.ExtractKeys(&r1cs, &srs1, &srs2, &evals)
+			pkOutput := args[4]
+			err = saveTo(pkOutput, &pk)
+			if err != nil {
+				return err
+			}
+			vkOutput := args[5]
+			return saveTo(vkOutput, &vk)
+		},
+	}
+	return cmd
+}
diff --git a/galoisd/cmd/galoisd/cmd/phase2_init.go b/galoisd/cmd/galoisd/cmd/phase2_init.go
new file mode 100644
index 0000000000..6b9eba35f6
--- /dev/null
+++ b/galoisd/cmd/galoisd/cmd/phase2_init.go
@@ -0,0 +1,38 @@
+package cmd
+
+import (
+	mpc "github.com/consensys/gnark/backend/groth16/bn254/mpcsetup"
+	bn254 "github.com/consensys/gnark/constraint/bn254"
+	"github.com/spf13/cobra"
+)
+
+func Phase2InitCmd() *cobra.Command {
+	var cmd = &cobra.Command{
+		Short: "Initialize the phase 2 of the groth16 multi-party computation.",
+		Use:   "mpc-phase2-init [r1cs] [phase1Final] [phase2InitOutput] [phase2EvalsOutput]",
+		Args:  cobra.ExactArgs(4),
+		RunE: func(cmd *cobra.Command, args []string) error {
+			r1csPath := args[0]
+			var r1cs bn254.R1CS
+			err := readFrom(r1csPath, &r1cs)
+			if err != nil {
+				return err
+			}
+			phase1Path := args[1]
+			var srs1 mpc.Phase1
+			err = 
readFrom(phase1Path, &srs1) + if err != nil { + return err + } + srs2, evals := mpc.InitPhase2(&r1cs, &srs1) + phase2InitPath := args[2] + err = saveTo(phase2InitPath, &srs2) + if err != nil { + return err + } + phase2EvalsOutput := args[3] + return saveTo(phase2EvalsOutput, &evals) + }, + } + return cmd +} diff --git a/galoisd/cmd/galoisd/cmd/phase2_verify.go b/galoisd/cmd/galoisd/cmd/phase2_verify.go new file mode 100644 index 0000000000..8256732d58 --- /dev/null +++ b/galoisd/cmd/galoisd/cmd/phase2_verify.go @@ -0,0 +1,30 @@ +package cmd + +import ( + mpc "github.com/consensys/gnark/backend/groth16/bn254/mpcsetup" + "github.com/spf13/cobra" +) + +func Phase2VerifyCmd() *cobra.Command { + var cmd = &cobra.Command{ + Short: "Verify a phase 2 contribution of the groth16 multi-party computation.", + Use: "mpc-phase2-verify [phase2Previous] [phase2Contrib]", + Args: cobra.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + phase2Previous := args[0] + var prev mpc.Phase2 + err := readFrom(phase2Previous, &prev) + if err != nil { + return err + } + phase2Contrib := args[1] + var contrib mpc.Phase2 + err = readFrom(phase2Contrib, &contrib) + if err != nil { + return err + } + return mpc.VerifyPhase2(&prev, &contrib) + }, + } + return cmd +} diff --git a/galoisd/cmd/galoisd/cmd/utils.go b/galoisd/cmd/galoisd/cmd/utils.go new file mode 100644 index 0000000000..208387715e --- /dev/null +++ b/galoisd/cmd/galoisd/cmd/utils.go @@ -0,0 +1,31 @@ +package cmd + +import ( + "bufio" + "io" + "os" +) + +func saveTo(file string, x io.WriterTo) error { + f, err := os.Create(file) + if err != nil { + return err + } + defer f.Close() + w := bufio.NewWriter(f) + _, err = x.WriteTo(w) + if err != nil { + return err + } + return w.Flush() +} + +func readFrom(file string, obj io.ReaderFrom) error { + f, err := os.OpenFile(file, os.O_RDONLY, os.ModePerm) + if err != nil { + return err + } + defer f.Close() + _, err = obj.ReadFrom(f) + return err +} diff --git a/galoisd/cmd/galoisd/lib.go b/galoisd/cmd/galoisd/lib.go new file mode 100644 index 0000000000..75adc8fc1b --- /dev/null +++ b/galoisd/cmd/galoisd/lib.go @@ -0,0 +1,83 @@ +//go:build library +// +build library + +package main + +/* + #include + #include + #include +*/ +import "C" + +import ( + "bytes" + "io" + "log" + "unsafe" + + mpc "github.com/consensys/gnark/backend/groth16/bn254/mpcsetup" +) + +//export Phase2Verify +func Phase2Verify(phase2PreviousRaw *C.char, phase2ContribRaw *C.char, l C.int) bool { + phase2PreviousPayload := C.GoBytes(unsafe.Pointer(phase2PreviousRaw), l) + var previous mpc.Phase2 + err := readFromBuffer(phase2PreviousPayload, &previous) + if err != nil { + log.Printf("Failed to read phase2PreviousPayload: %v\n", err) + return false + } + phase2ContribPayload := C.GoBytes(unsafe.Pointer(phase2ContribRaw), l) + var contrib mpc.Phase2 + err = readFromBuffer(phase2ContribPayload, &contrib) + if err != nil { + log.Printf("Failed to read phase2ContribPayload: %v\n", err) + return false + } + err = mpc.VerifyPhase2(&previous, &contrib) + if err != nil { + log.Printf("Failed to verify phase2 contribution: %v\n", err) + return false + } + return true +} + +//export Phase2Contribute +func Phase2Contribute(phase2PayloadRaw *C.char, phase2ContribRaw *C.char, l C.int) bool { + phase2Payload := C.GoBytes(unsafe.Pointer(phase2PayloadRaw), l) + phase2Contrib, err := phase2Contribute(phase2Payload) + if err != nil { + log.Printf("Failed to contribute %v\n", err) + return false + } + phase2ContribOutput := 
unsafe.Slice(phase2ContribRaw, l)
+	for i := 0; i < int(l); i++ {
+		phase2ContribOutput[i] = C.char(phase2Contrib[i])
+	}
+	return true
+}
+
+func phase2Contribute(phase2Payload []byte) ([]byte, error) {
+	var srs2 mpc.Phase2
+	err := readFromBuffer(phase2Payload, &srs2)
+	if err != nil {
+		log.Printf("Failed to read phase2Payload: %v\n", err)
+		return nil, err
+	}
+	srs2.Contribute()
+	var phase2Output bytes.Buffer
+	_, err = srs2.WriteTo(&phase2Output)
+	if err != nil {
+		log.Printf("Failed to write phase2Payload: %v\n", err)
+		return nil, err
+	}
+	return phase2Output.Bytes(), nil
+}
+
+func readFromBuffer(buffer []byte, obj io.ReaderFrom) error {
+	_, err := obj.ReadFrom(bytes.NewReader(buffer))
+	return err
+}
+
+func main() {}
diff --git a/galoisd/cmd/galoisd/main.go b/galoisd/cmd/galoisd/main.go
index 56b51aebd9..fdfa06fd2c 100644
--- a/galoisd/cmd/galoisd/main.go
+++ b/galoisd/cmd/galoisd/main.go
@@ -1,3 +1,6 @@
+//go:build binary
+// +build binary
+
 package main
 
 import (
@@ -13,5 +16,12 @@ func main() {
 	rootCmd.AddCommand(cmd.ExampleVerifyCmd())
 	rootCmd.AddCommand(cmd.QueryStats())
 	rootCmd.AddCommand(cmd.QueryStatsHealth())
+	rootCmd.AddCommand(
+		cmd.Phase1InitCmd(),
+		cmd.Phase2InitCmd(),
+		cmd.Phase2ContributeCmd(),
+		cmd.Phase2VerifyCmd(),
+		cmd.Phase2ExtractCmd(),
+	)
 	rootCmd.Execute()
 }
diff --git a/galoisd/galoisd.nix b/galoisd/galoisd.nix
index 8bc73b422d..0e05f67a6f 100644
--- a/galoisd/galoisd.nix
+++ b/galoisd/galoisd.nix
@@ -12,32 +12,6 @@
 }: {
   packages = {
-    galoisd = goPkgs.pkgsStatic.buildGo123Module (
-      {
-        name = "galoisd";
-        src = ./.;
-        vendorHash = "sha256-wZSsLqnNi38rZL2oJ+GpnMWuo/5ydTJ80ebHQ/SXtis=";
-        meta = {
-          mainProgram = "galoisd";
-        };
-        doCheck = true;
-      }
-      // (
-        if pkgs.stdenv.isLinux then
-          {
-            nativeBuildInputs = [ pkgs.musl ];
-            CGO_ENABLED = 1;
-            ldflags = [
-              "-checklinkname=0"
-              "-linkmode external"
-              "-extldflags '-static -L${pkgs.musl}/lib -s -w'"
-            ];
-          }
-        else
-          { }
-      )
-    );
     galoisd-image = pkgs.dockerTools.buildImage {
       name = "${self'.packages.galoisd.name}-image";
       copyToRoot = pkgs.buildEnv {
@@ -78,33 +52,98 @@
       }
     );
-    download-circuit =
-      let
-        files = pkgs.writeText "files.txt" ''
-          /circuit.zip
-        '';
-      in
-      mkCi false (
-        pkgs.writeShellApplication {
-          name = "download-circuit";
-          runtimeInputs = [
-            pkgs.rclone
-            pkgs.zip
-            pkgs.unzip
-          ];
-          text = ''
-            if [[ "$#" -ne 1 ]]; then
-              echo "Invalid arguments, must be: download-circuit [path]"
-              exit 1
-            fi
-            rclone --progress --no-traverse --http-url "https://circuit.cryptware.io" copy :http:/ "$1" --files-from=${files}
-            unzip "$1"/circuit.zip
-            rm "$1"/circuit.zip
-          '';
-        }
-      );
+    galoisd = goPkgs.pkgsStatic.buildGo123Module ({
+      name = "galoisd";
+      src = ./.;
+      vendorHash = "sha256-wZSsLqnNi38rZL2oJ+GpnMWuo/5ydTJ80ebHQ/SXtis=";
+      meta = { mainProgram = "galoisd"; };
+      tags = [ "binary" ];
+      doCheck = true;
+    } // (if pkgs.stdenv.isLinux then {
+      nativeBuildInputs = [ pkgs.musl ];
+      CGO_ENABLED = 0;
+      ldflags = [
+        "-extldflags '-static -L${pkgs.musl}/lib -s -w'"
+      ];
+    } else
+      { }));
+
+    galoisd-library = goPkgs.pkgsStatic.buildGo123Module ({
+      name = "libgalois";
+      src = ./.;
+      vendorHash = "sha256-wZSsLqnNi38rZL2oJ+GpnMWuo/5ydTJ80ebHQ/SXtis=";
+      tags = [ "library" ];
+    } // (if pkgs.stdenv.isLinux then {
+      nativeBuildInputs = [ goPkgs.musl goPkgs.pkgsStatic.binutils ];
+      doCheck = false;
+      CGO_ENABLED = 1;
+      GOBIN = "${placeholder "out"}/lib";
+      postInstall = ''
+        mv $out/lib/galoisd $out/lib/libgalois.a
+      '';
+      ldflags = [
+        "-s"
+        "-w"
+        "-buildmode c-archive"
+      ];
+    } else
{ })); + + + galoisd-image = pkgs.dockerTools.buildImage { + name = "${self'.packages.galoisd.name}-image"; + copyToRoot = pkgs.buildEnv { + name = "image-root"; + paths = [ pkgs.coreutils-full pkgs.cacert ]; + pathsToLink = [ "/bin" ]; + }; + config = { + Entrypoint = [ (pkgs.lib.getExe self'.packages.galoisd) ]; + Env = [ "SSL_CERT_FILE=${pkgs.cacert}/etc/ssl/certs/ca-bundle.crt" ]; + }; + }; + + generate-prover-proto = mkCi false (pkgs.writeShellApplication { + name = "generate-prover-proto"; + runtimeInputs = + [ pkgs.protobuf pkgs.protoc-gen-go pkgs.protoc-gen-go-grpc ]; + text = '' + find ${proto.galoisd} -type f -regex ".*proto" |\ + while read -r file; do + echo "Generating $file" + protoc \ + -I"${proto.cometbls}/proto" \ + -I"${proto.gogoproto}" \ + -I"${proto.galoisd}" \ + --go_out=./grpc --go_opt=paths=source_relative \ + --go-grpc_out=./grpc --go-grpc_opt=paths=source_relative \ + "$file" + done + ''; + }); + + download-circuit = + let + files = pkgs.writeText "files.txt" '' + /circuit.zip + ''; + in + mkCi false (pkgs.writeShellApplication { + name = "download-circuit"; + runtimeInputs = [ pkgs.rclone pkgs.zip pkgs.unzip ]; + text = '' + if [[ "$#" -ne 1 ]]; then + echo "Invalid arguments, must be: download-circuit [path]" + exit 1 + fi + rclone --progress --no-traverse --http-url "https://circuit.cryptware.io" copy :http:/ "$1" --files-from=${files} + unzip "$1"/circuit.zip + rm "$1"/circuit.zip + ''; + }); - download-circuit-devnet = pkgs.writeShellApplication { + download-circuit-devnet = + pkgs.writeShellApplication { name = "download-circuit-devnet"; runtimeInputs = [ pkgs.coreutils diff --git a/galoisd/pkg/lightclient/nonadjacent/circuit_test.go b/galoisd/pkg/lightclient/nonadjacent/circuit_test.go index d48ddb8a64..2e16e4da83 100644 --- a/galoisd/pkg/lightclient/nonadjacent/circuit_test.go +++ b/galoisd/pkg/lightclient/nonadjacent/circuit_test.go @@ -1,27 +1,25 @@ package nonadjacent import ( + "cosmossdk.io/math" "crypto/sha256" "encoding/hex" "fmt" "galois/pkg/lightclient" - "math/big" - "math/rand" - "time" - - tmtypes "github.com/cometbft/cometbft/api/cometbft/types/v1" - version "github.com/cometbft/cometbft/api/cometbft/version/v1" cometbn254 "github.com/cometbft/cometbft/crypto/bn254" ce "github.com/cometbft/cometbft/crypto/encoding" "github.com/cometbft/cometbft/crypto/merkle" + tmtypes "github.com/cometbft/cometbft/proto/tendermint/types" + "github.com/cometbft/cometbft/proto/tendermint/version" "github.com/cometbft/cometbft/types" comettypes "github.com/cometbft/cometbft/types" - "github.com/consensys/gnark-crypto/ecc" curve "github.com/consensys/gnark-crypto/ecc/bn254" "github.com/consensys/gnark-crypto/ecc/bn254/fr" - + "github.com/consensys/gnark/backend/witness" + bn254 "github.com/consensys/gnark/constraint/bn254" "github.com/consensys/gnark/frontend" + "github.com/consensys/gnark/frontend/cs/r1cs" gadget "github.com/consensys/gnark/std/algebra/emulated/sw_bn254" "github.com/consensys/gnark/test" @@ -627,3 +625,358 @@ func TestCantSelectPaddedValidator(t *testing.T) { func TestCantSelectPaddedPower(t *testing.T) { AttackFailing(t, AttackSelectPaddedPower) } + +func buildCircuit(t *testing.T) (*bn254.R1CS, *witness.Witness, error) { + r := rand.New(rand.NewSource(0xCAFEBABE)) + + nbOfValidators := 1 + r.Uint32()%lightclient.MaxVal + + privKeys := make([]cometbn254.PrivKey, nbOfValidators) + validators := make([]*tmtypes.SimpleValidator, nbOfValidators) + totalPower := int64(0) + for i := 0; i < len(validators); i++ { + privKeys[i] = 
cometbn254.GenPrivKey() + val, err := toValidator(privKeys[i].PubKey().Bytes(), 100000000+r.Int63n(100000000)) + if err != nil { + t.Fatal(err) + } + totalPower += val.VotingPower + validators[i] = val + } + + trustedValidators := validators + untrustedValidators := validators + + trustedValidatorsInput, trustedValidatorsRoot, err := marshalValidators(trustedValidators) + if err != nil { + t.Fatal(err) + } + + untrustedValidatorsInput, untrustedValidatorsRoot, err := marshalValidators(untrustedValidators) + if err != nil { + t.Fatal(err) + } + + header, vote, cometblsHeader, cometblsVote := getBlockHeader(r, trustedValidatorsRoot, untrustedValidatorsRoot) + + signedBytes := comettypes.VoteSignBytes(cometblsHeader.ChainID, cometblsVote) + + var signatures [][]byte + var bitmap big.Int + votingPower := 0 + + for true { + if votingPower > int(totalPower)/3*2+1 { + break + } + index := uint32(rand.Int31n(int32(nbOfValidators) - 1)) + i := index + for bitmap.Bit(int(i)) == 1 { + i = (i + 1) % nbOfValidators + } + votingPower += int(validators[i].VotingPower) + bitmap.SetBit(&bitmap, int(i), 1) + sig, err := privKeys[i].Sign(signedBytes) + if err != nil { + t.Fatal(err) + } + signatures = append(signatures, sig) + } + + trustedSignatures := signatures + untrustedSignatures := signatures + + trustedAggregatedSignature, err := aggregateSignatures(trustedSignatures) + if err != nil { + t.Fatal(err) + } + + untrustedAggregatedSignature, err := aggregateSignatures(untrustedSignatures) + if err != nil { + t.Fatal(err) + } + + trustedBitmap := bitmap + untrustedBitmap := bitmap + + trustedInput := TendermintNonAdjacentLightClientInput{ + Sig: gadget.NewG2Affine(trustedAggregatedSignature), + Validators: trustedValidatorsInput, + NbOfVal: nbOfValidators, + NbOfSignature: len(trustedSignatures), + Bitmap: trustedBitmap, + } + + untrustedInput := TendermintNonAdjacentLightClientInput{ + Sig: gadget.NewG2Affine(untrustedAggregatedSignature), + Validators: untrustedValidatorsInput, + NbOfVal: nbOfValidators, + NbOfSignature: len(untrustedSignatures), + Bitmap: untrustedBitmap, + } + + circuit := Circuit{ + DomainSeparationTag: []byte(cometbn254.CometblsSigDST), + TrustedInput: trustedInput, + TrustedValRoot: trustedValidatorsRoot, + UntrustedInput: untrustedInput, + Vote: *vote, + Header: *header, + InputsHash: inputsHash(cometblsHeader), + } + + err = test.IsSolved( + &Circuit{}, + &circuit, + ecc.BN254.ScalarField(), + ) + assert.NoError(t, err) + + witness, err := frontend.NewWitness(&circuit, ecc.BN254.ScalarField()) + if err != nil { + return nil, nil, err + } + + r1cs, err := frontend.Compile(ecc.BN254.ScalarField(), r1cs.NewBuilder, &circuit) + if err != nil { + return nil, nil, err + } + + return r1cs.(*bn254.R1CS), &witness, err +} + +// func TestNonAdjacentSetup(t *testing.T) { +// const ( +// nContributionsPhase1 = 1 +// nContributionsPhase2 = 1 +// power = 22 +// ) + +// assert := require.New(t) + +// t.Log("InitPhase1") +// start := time.Now() +// var srs1 mpc.Phase1 +// err := readFrom("final.ph1", &srs1) +// assert.NoError(err) +// t.Logf("InitPhase1: %v", time.Since(start)) + +// t.Log("Building circuit...") +// start = time.Now() +// r1cs, witness, err := buildCircuit(t) +// assert.NoError(err) +// t.Logf("Built in: %v", time.Since(start)) + +// t.Log("InitPhase2") +// start = time.Now() +// var srs2 mpc.Phase2 +// err = readFrom("init.ph2", &srs2) +// assert.NoError(err) +// var evals mpc.Phase2Evaluations +// err = readFrom("evals.ph2", &evals) +// assert.NoError(err) +// 
t.Logf("InitPhase2: %v", time.Since(start)) + +// err = saveTo("init.ph2", &srs2) +// assert.NoError(err) +// err = saveTo("evals.ph2", &evals) +// assert.NoError(err) + +// err = saveTo("contrib.ph2", &srs2) +// assert.NoError(err) + +// // Make and verify contributions for phase1 +// for i := 0; i < nContributionsPhase2; i++ { +// // we clone for test purposes; but in practice, participant will receive a []byte, deserialize it, +// // add his contribution and send back to coordinator. +// t.Log("ContributePhase2") +// assert.NoError(err) +// start = time.Now() +// srs2.Contribute() +// t.Logf("ContributePhase2: %v", time.Since(start)) +// t.Log("VerifyPhase2") +// var prev mpc.Phase2 +// start = time.Now() +// err = readFrom("contrib.ph2", &prev) +// t.Logf("VerifyPhase2Read: %v", time.Since(start)) +// start = time.Now() +// assert.NoError(mpc.VerifyPhase2(&prev, &srs2)) +// err = saveTo("contrib.ph2", &srs2) +// assert.NoError(err) +// t.Logf("VerifyPhase2: %v", time.Since(start)) +// } + +// t.Log("ExtractKeys") +// // Extract the proving and verifying keys +// pk, vk := mpc.ExtractKeys(r1cs, &srs1, &srs2, &evals) + +// t.Log("Save") +// err = saveTo("r1cs.bin", r1cs) +// assert.NoError(err) +// err = saveTo("pk.bin", &pk) +// assert.NoError(err) +// err = saveTo("vk.bin", &vk) +// assert.NoError(err) + +// t.Log("Prove...") +// proof, err := groth16.Prove(r1cs, &pk, *witness) +// assert.NoError(err) + +// pubWitness, err := (*witness).Public() +// assert.NoError(err) + +// t.Log("Verify...") +// err = groth16.Verify(proof, &vk, pubWitness) +// assert.NoError(err) +// } + +// func readFrom(file string, obj io.ReaderFrom) error { +// f, err := os.OpenFile(file, os.O_RDONLY, os.ModePerm) +// if err != nil { +// return err +// } +// defer f.Close() +// obj.ReadFrom(f) +// return nil +// } + +// func saveTo(file string, x io.WriterTo) error { +// f, err := os.Create(file) +// if err != nil { +// return err +// } +// defer f.Close() +// w := bufio.NewWriter(f) +// _, err = x.WriteTo(w) +// if err != nil { +// return err +// } +// w.Flush() +// return nil +// } + +// func clonePhase1(phase1 *mpc.Phase1) mpc.Phase1 { +// r := mpc.Phase1{} +// r.Parameters.G1.Tau = append(r.Parameters.G1.Tau, phase1.Parameters.G1.Tau...) +// r.Parameters.G1.AlphaTau = append(r.Parameters.G1.AlphaTau, phase1.Parameters.G1.AlphaTau...) +// r.Parameters.G1.BetaTau = append(r.Parameters.G1.BetaTau, phase1.Parameters.G1.BetaTau...) + +// r.Parameters.G2.Tau = append(r.Parameters.G2.Tau, phase1.Parameters.G2.Tau...) +// r.Parameters.G2.Beta = phase1.Parameters.G2.Beta + +// r.PublicKeys = phase1.PublicKeys +// r.Hash = append(r.Hash, phase1.Hash...) + +// return r +// } + +// func clonePhase2(phase2 *mpc.Phase2) mpc.Phase2 { +// r := mpc.Phase2{} +// r.Parameters.G1.BasisExpSigma = make([][]curve.G1Affine, len(r.Parameters.G1.BasisExpSigma)) +// for i := 0; i < len(r.Parameters.G1.BasisExpSigma); i++ { +// r.Parameters.G1.BasisExpSigma[i] = append( +// r.Parameters.G1.BasisExpSigma[i], +// phase2.Parameters.G1.BasisExpSigma[i]..., +// ) +// } +// r.Parameters.G1.Delta = phase2.Parameters.G1.Delta +// r.Parameters.G1.L = append(r.Parameters.G1.L, phase2.Parameters.G1.L...) +// r.Parameters.G1.Z = append(r.Parameters.G1.Z, phase2.Parameters.G1.Z...) +// r.Parameters.G2.Delta = phase2.Parameters.G2.Delta +// r.Parameters.G2.GRootSigmaNeg = phase2.Parameters.G2.GRootSigmaNeg +// r.PublicKey = phase2.PublicKey +// r.Hash = append(r.Hash, phase2.Hash...) 
+ +// return r +// } + +// func convertPtauToPhase1(ptau Ptau) (phase1 mpc.Phase1, err error) { +// tauG1 := make([]curve.G1Affine, len(ptau.PTauPubKey.TauG1)) +// for i, g1 := range ptau.PTauPubKey.TauG1 { +// g1Affine := curve.G1Affine{} +// x := bytesToElement(g1[0].Bytes()) +// g1Affine.X = x +// y := bytesToElement(g1[1].Bytes()) +// g1Affine.Y = y +// if !g1Affine.IsOnCurve() { +// fmt.Printf("tauG1: \n index: %v g1Affine.X: %v \n g1Affine.Y: %v \n", i, g1Affine.X.String(), g1Affine.Y.String()) +// panic("g1Affine is not on curve") +// } +// tauG1[i] = g1Affine +// } + +// alphaTauG1 := make([]curve.G1Affine, len(ptau.PTauPubKey.AlphaTauG1)) +// for i, g1 := range ptau.PTauPubKey.AlphaTauG1 { +// g1Affine := curve.G1Affine{} +// x := bytesToElement(g1[0].Bytes()) +// g1Affine.X = x +// y := bytesToElement(g1[1].Bytes()) +// g1Affine.Y = y +// if !g1Affine.IsOnCurve() { +// fmt.Printf("alphaTauG1: \n index: %v g1Affine.X: %v \n g1Affine.Y: %v \n", i, g1Affine.X.String(), g1Affine.Y.String()) +// panic("g1Affine is not on curve") +// } +// alphaTauG1[i] = g1Affine +// } +// // fmt.Printf("alphaTauG1: %v \n", alphaTauG1) + +// betaTauG1 := make([]curve.G1Affine, len(ptau.PTauPubKey.BetaTauG1)) + +// for i, g1 := range ptau.PTauPubKey.BetaTauG1 { +// g1Affine := curve.G1Affine{} +// x := bytesToElement(g1[0].Bytes()) +// g1Affine.X = x +// y := bytesToElement(g1[1].Bytes()) +// g1Affine.Y = y +// if !g1Affine.IsOnCurve() { +// fmt.Printf("betaTauG1: \n index: %v, g1Affine.X: %v \n g1Affine.Y: %v \n", i, g1Affine.X.String(), g1Affine.Y.String()) +// panic("g1Affine is not on curve") +// } +// betaTauG1[i] = g1Affine +// } +// tauG2 := make([]curve.G2Affine, len(ptau.PTauPubKey.TauG2)) +// for i, g2 := range ptau.PTauPubKey.TauG2 { +// g2Affine := curve.G2Affine{} +// x0 := bytesToElement(g2[0].Bytes()) +// x1 := bytesToElement(g2[1].Bytes()) +// g2Affine.X.A0 = x0 +// g2Affine.X.A1 = x1 +// y0 := bytesToElement(g2[2].Bytes()) +// y1 := bytesToElement(g2[3].Bytes()) +// g2Affine.Y.A0 = y0 +// g2Affine.Y.A1 = y1 +// if !g2Affine.IsOnCurve() { +// fmt.Printf("tauG2: \n index: %v, g2Affine.X.A0: %v \n g2Affine.X.A1: %v \n g2Affine.Y.A0: %v \n g2Affine.Y.A1 %v \n", i, g2Affine.X.A0.String(), g2Affine.X.A1.String(), g2Affine.Y.A0.String(), g2Affine.Y.A1.String()) +// panic("g2Affine is not on curve") +// } +// tauG2[i] = g2Affine +// } + +// betaG2 := curve.G2Affine{} +// { +// g2 := ptau.PTauPubKey.BetaG2 + +// x0 := bytesToElement(g2[0].Bytes()) +// x1 := bytesToElement(g2[1].Bytes()) +// betaG2.X.A0 = x0 +// betaG2.X.A1 = x1 +// y0 := bytesToElement(g2[2].Bytes()) +// y1 := bytesToElement(g2[3].Bytes()) +// betaG2.Y.A0 = y0 +// betaG2.Y.A1 = y1 + +// if !betaG2.IsOnCurve() { +// fmt.Printf("g2Affine.X.A0: %v \n g2Affine.X.A1: %v \n g2Affine.Y.A0: %v \n g2Affine.Y.A1 %v \n", betaG2.X.A0.String(), betaG2.X.String(), betaG2.Y.A0.String(), betaG2.Y.A1.String()) +// panic("g2Affine is not on curve") +// } +// } + +// phase1.Parameters.G1.Tau = tauG1 +// phase1.Parameters.G1.AlphaTau = alphaTauG1 +// phase1.Parameters.G1.BetaTau = betaTauG1 + +// phase1.Parameters.G2.Tau = tauG2 +// phase1.Parameters.G2.Beta = betaG2 + +// return phase1, nil +// } diff --git a/mpc/client/Cargo.toml b/mpc/client/Cargo.toml new file mode 100644 index 0000000000..d076ebe5e5 --- /dev/null +++ b/mpc/client/Cargo.toml @@ -0,0 +1,17 @@ +[package] +edition = { workspace = true } +name = "mpc-client" +version = "0.1.0" + +[dependencies] +clap = { version = "4.5", features = ["derive"] } +crossterm = "0.27.0" +mpc-shared = 
{ workspace = true } +rand = "0.8.5" +ratatui = "0.27.0" +reqwest = { workspace = true, features = ["json"] } +serde = { workspace = true, features = ["derive"] } +serde_json = { workspace = true } +sha2 = { workspace = true } +thiserror = { workspace = true } +tokio = { workspace = true, features = ["full"] } diff --git a/mpc/client/src/main.rs b/mpc/client/src/main.rs new file mode 100644 index 0000000000..ad58024b87 --- /dev/null +++ b/mpc/client/src/main.rs @@ -0,0 +1,263 @@ +use std::{io::SeekFrom, path::Path, str::FromStr}; + +use clap::{Parser, Subcommand}; +use mpc_shared::{ + phase2_contribute, phase2_verify, types::Contribution, CONTRIBUTION_CHUNKS, + CONTRIBUTION_CHUNK_SIZE, CONTRIBUTION_SIZE, +}; +use reqwest::{ + header::{ + HeaderMap, HeaderValue, AUTHORIZATION, CONTENT_ENCODING, CONTENT_LENGTH, RANGE, + TRANSFER_ENCODING, + }, + StatusCode, +}; +use sha2::Digest; +use tokio::io::{AsyncSeekExt, AsyncWriteExt}; + +#[derive(Parser, Debug)] +#[command(version, about, long_about = None)] +struct Args { + #[command(subcommand)] + command: Command, +} + +#[derive(Subcommand, Debug, Clone)] +enum Command { + Contribute { + #[arg(short, long)] + token: String, + }, + Verify { + #[arg(short, long)] + phase2_payload_path: String, + #[arg(short, long)] + phase2_contrib_path: String, + }, +} + +#[derive(thiserror::Error, Debug, Copy, Clone)] +enum Error { + #[error(transparent)] + Phase2ContributionFailed(#[from] mpc_shared::Phase2ContributionError), + #[error(transparent)] + Phase2VerificationFailed(#[from] mpc_shared::Phase2VerificationError), + #[error("Failed to read current state file.")] + FailedToReadPhase2Payload, + #[error("Failed to read contrib state file.")] + FailedToReadPhase2Contrib, + #[error("Failed to write the contribution file.")] + FailedToWriteContribution, +} + +async fn get_state_file(path: &Path) -> (Vec, [u8; 32]) { + if !tokio::fs::try_exists(path).await.unwrap() { + tokio::fs::write(path, []).await.unwrap(); + } + let content = tokio::fs::read(path).await.unwrap(); + let hash = sha2::Sha256::new().chain_update(&content).finalize(); + (content, hash.into()) +} + +#[derive(Debug)] +pub enum CompressionType { + Gzip, + Deflate, + Brotli, + Zstd, +} + +impl FromStr for CompressionType { + type Err = (); + fn from_str(value: &str) -> Result { + match value { + "gzip" => Ok(CompressionType::Gzip), + "deflate" => Ok(CompressionType::Deflate), + "br" => Ok(CompressionType::Brotli), + "zstd" => Ok(CompressionType::Zstd), + _ => Err(()), + } + } +} + +fn get_compression_type(headers: &HeaderMap) -> Option { + let mut compression_type = headers + .get_all(CONTENT_ENCODING) + .iter() + .find_map(|value| value.to_str().ok().and_then(|value| value.parse().ok())); + + if compression_type.is_none() { + compression_type = headers + .get_all(TRANSFER_ENCODING) + .iter() + .find_map(|value| value.to_str().ok().and_then(|value| value.parse().ok())); + } + + if compression_type.is_some() { + if let Some(content_length) = headers.get(CONTENT_LENGTH) { + if content_length == "0" { + return None; + } + } + } + + compression_type +} + +#[tokio::main] +async fn main() -> Result<(), Error> { + let args = Args::parse(); + match args.command { + Command::Contribute { token } => { + let url = |path: String| format!("http://localhost:8000/{}", path); + println!("client token {}", token); + let client = reqwest::ClientBuilder::new() + .default_headers(HeaderMap::from_iter([( + AUTHORIZATION, + HeaderValue::from_str(&format!("Bearer {}", token)).unwrap(), + )])) + .build() + .unwrap(); + 
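+            // Flow sketch (as implied by the calls below): join the queue once,
+            // then poll `/me` until the coordinator reports it is our turn. `/me`
+            // returns the SHA-256 hash of the current state file, which is later
+            // used to integrity-check the downloaded payload before contributing.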
println!("joining the queue"); + client + .post(url("join".into())) + .send() + .await + .unwrap() + .error_for_status() + .unwrap(); + println!("waiting our turn..."); + let expected_state_file_hash = loop { + let response = client.get(url("me".into())).send().await.unwrap(); + if response.status().is_success() { + println!("finally our turn!"); + let expected_hash = response.bytes().await.unwrap(); + break expected_hash; + } + let response = client.get(url("contribution".into())).send().await.unwrap(); + match response.status() { + StatusCode::OK => { + println!( + "contribution done: {:?}", + response.json::().await.unwrap() + ); + return Ok(()); + } + _ => {} + } + println!("checking contribution status..."); + tokio::time::sleep(std::time::Duration::from_millis(1000)).await; + }; + println!("verifying state file integrity..."); + enum StateFileAction { + Download(usize), + Done(Vec), + } + let state_path = Path::new("./state.ph2"); + let action = match get_state_file(&state_path).await { + (content, _) if content.len() < CONTRIBUTION_SIZE => { + println!("partial download, continuing from {}...", content.len()); + StateFileAction::Download(content.len()) + } + (content, hash) + if content.len() == CONTRIBUTION_SIZE && hash == *expected_state_file_hash => + { + println!("integrity verified, download complete."); + StateFileAction::Done(content) + } + _ => { + println!("invalid size or invalid hash detected, redownloading..."); + StateFileAction::Download(0) + } + }; + let payload = match action { + StateFileAction::Download(start_position) => { + let mut response = client + .get(url("state".into())) + .header(RANGE, format!("bytes={}-", start_position)) + .send() + .await + .unwrap() + .error_for_status() + .unwrap(); + let headers = response.headers(); + assert!( + get_compression_type(headers).is_none(), + "compression not supported." + ); + let total_length = start_position + + u64::from_str(headers.get(CONTENT_LENGTH).unwrap().to_str().unwrap()) + .unwrap() as usize; + println!("state file length: {}", total_length); + assert!( + total_length == CONTRIBUTION_SIZE, + "contribution length mismatch." + ); + let mut state_file = tokio::fs::OpenOptions::new() + .write(true) + .create(false) + .open(state_path) + .await + .unwrap(); + state_file.set_len(start_position as u64).await.unwrap(); + state_file + .seek(SeekFrom::Start(start_position as u64)) + .await + .unwrap(); + let mut i = 0; + while let Some(chunk) = response.chunk().await.unwrap() { + if i % 10 == 0 { + println!("eta: chunk {}.", i); + } + let written = state_file.write(&chunk).await.unwrap(); + assert!(written == chunk.len(), "couldn't write chunk."); + state_file.sync_data().await.unwrap(); + i += 1; + } + println!("download complete"); + println!("verifying integrity..."); + let final_content = tokio::fs::read(state_path).await.unwrap(); + let final_content_hash = sha2::Sha256::new() + .chain_update(&final_content) + .finalize() + .to_vec(); + assert!( + &final_content_hash == expected_state_file_hash.as_ref(), + "invalid file hash after download." 
+ ); + println!("integrity verified"); + final_content + } + StateFileAction::Done(content) => content, + }; + println!("generating contribution, may take some time..."); + let phase2_contribution = phase2_contribute(&payload).unwrap(); + println!("uploading contribution..."); + for i in 0..=CONTRIBUTION_CHUNKS { + println!("eta: chunk {}.", i); + let chunk = &phase2_contribution[i * CONTRIBUTION_CHUNK_SIZE + ..std::cmp::min((i + 1) * CONTRIBUTION_CHUNK_SIZE, CONTRIBUTION_SIZE)]; + client + .post(url(format!("contribute/{}", i))) + .body(chunk.to_vec()) + .send() + .await + .unwrap() + .error_for_status() + .unwrap(); + } + Ok(()) + } + Command::Verify { + phase2_payload_path, + phase2_contrib_path, + } => { + // let phase2_payload = + // std::fs::read(phase2_payload_path).map_err(|_| Error::FailedToReadPhase2Payload)?; + // let phase2_contrib = + // std::fs::read(phase2_contrib_path).map_err(|_| Error::FailedToReadPhase2Contrib)?; + // phase2_verify(&phase2_payload, &phase2_contrib)?; + Ok(()) + } + } +} diff --git a/mpc/coordinator/Cargo.toml b/mpc/coordinator/Cargo.toml new file mode 100644 index 0000000000..9ed39e7388 --- /dev/null +++ b/mpc/coordinator/Cargo.toml @@ -0,0 +1,16 @@ +[package] +edition = { workspace = true } +name = "mpc-coordinator" +version = "0.1.0" + +[dependencies] +clap = { version = "4.5", features = ["derive"] } +futures.workspace = true +mpc-shared = { workspace = true } +priority-queue = "2.0.3" +rocket = { version = "0.5.1", features = ["json", "msgpack", "uuid"] } +rocket-authorization = "1.0.0" +rocket_seek_stream = "0.2.6" +rust-fsm = { version = "0.7.0", features = ["diagram"] } +sha2 = { workspace = true } +thiserror = { workspace = true } diff --git a/mpc/coordinator/src/fsm.rs b/mpc/coordinator/src/fsm.rs new file mode 100644 index 0000000000..b5ca516fb2 --- /dev/null +++ b/mpc/coordinator/src/fsm.rs @@ -0,0 +1,22 @@ +use rust_fsm::state_machine; + +state_machine! 
{ + #[derive(Debug, Copy, Clone, PartialEq, Eq)] + pub union_mpc(AwaitContribution) + InitContributor => { + NoContributor => AwaitContributor, + ContributorSet => AwaitContribution, + }, + AwaitContributor => { + Join => InitContributor, + }, + AwaitContribution => { + Contribute => Verify, + Join => AwaitContribution, + SetPriority => AwaitContribution, + }, + Verify => { + Valid => InitContributor, + Invalid => InitContributor, + }, +} diff --git a/mpc/coordinator/src/main.rs b/mpc/coordinator/src/main.rs new file mode 100644 index 0000000000..272612446d --- /dev/null +++ b/mpc/coordinator/src/main.rs @@ -0,0 +1,404 @@ +pub mod fsm; + +use std::{ + collections::{HashMap, HashSet}, + io::Cursor, + marker::PhantomData, + sync::{ + mpsc::{channel, Sender, TryRecvError}, + Arc, RwLock, + }, + time::{SystemTime, UNIX_EPOCH}, +}; + +use clap::{Parser, Subcommand}; +use mpc_shared::{ + phase2_verify, types::Contribution, CONTRIBUTION_CHUNKS, CONTRIBUTION_CHUNK_SIZE, + CONTRIBUTION_CHUNK_SIZE_FINAL, +}; +use priority_queue::PriorityQueue; +use rocket::{ + data::{Limits, ToByteUnit}, + fs::TempFile, + get, + http::{ + hyper::{header::AUTHORIZATION, HeaderValue}, + ContentType, Cookie, CookieJar, Header, Status, + }, + post, + request::{FromParam, FromRequest, Outcome, Request}, + response::status::{self, Forbidden, NotFound, Unauthorized}, + routes, + serde::{ + json::{json, Json, Value}, + Deserialize, Serialize, + }, + Response, State, +}; +use rocket_authorization::{AuthError, Authorization, Credential}; +use rocket_seek_stream::SeekStream; +use sha2::Digest; + +#[derive(Parser, Debug)] +#[command(version, about, long_about = None)] +struct Args { + #[command(subcommand)] + command: Command, +} + +#[derive(Subcommand, Debug, Clone)] +enum Command { + Start { + #[arg(short, long)] + phase2_payload_path: String, + }, +} + +#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize)] +#[serde(crate = "rocket::serde")] +struct Contributor { + token: String, +} + +#[derive(Debug)] +struct AppState { + queue: PriorityQueue, + processed: HashMap, + payload: Vec, + payload_hash: Vec, + contributor: Contributor, + upload_index: Index, + contribution_payload: Vec, + machine: fsm::union_mpc::StateMachine, +} + +#[derive(Debug, Clone, PartialEq, Eq)] +enum AppMessage { + Join { + token: String, + }, + ContributePartial { + token: String, + index: Index, + payload_fraction: Vec, + }, +} + +struct Current; +struct Any; +struct AuthContributor(String, PhantomData); +#[rocket::async_trait] +impl Authorization for AuthContributor { + const KIND: &'static str = "Bearer"; + async fn parse(_: &str, token: &str, request: &Request) -> Result { + let app_state = request + .rocket() + .state::>>() + .unwrap() + .read() + .unwrap(); + if app_state.machine.state() == &fsm::union_mpc::State::AwaitContribution + && app_state.contributor.token == token + { + Ok(Self(token.to_string(), PhantomData)) + } else { + Err(AuthError::Unauthorized) + } + } +} +#[rocket::async_trait] +impl Authorization for AuthContributor { + const KIND: &'static str = "Bearer"; + async fn parse(_: &str, token: &str, request: &Request) -> Result { + Ok(Self(token.to_string(), PhantomData)) + } +} + +#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash, Serialize)] +#[serde(crate = "rocket::serde")] +struct Index(u8); +impl<'a> FromParam<'a> for Index { + type Error = (); + fn from_param(param: &'a str) -> Result { + let result = u8::from_param(param).map_err(|_| ())?; + if result as usize <= CONTRIBUTION_CHUNKS { + Ok(Index(result)) + } else { + Err(()) + 
} + } +} + +#[get("/contribution")] +async fn contribution( + c: Credential>, + app_state: &State>>, +) -> Result, NotFound<()>> { + match app_state + .read() + .unwrap() + .processed + .get(&Contributor { token: c.0 .0 }) + { + Some(contribution) => Ok(Json(contribution.clone())), + None => Err(NotFound(())), + } +} + +#[post("/contribute/", data = "")] +async fn contribute( + c: Credential>, + index: Index, + payload: Vec, + app_state: &State>>, + tx: &State>, +) -> Result<(), Forbidden<()>> { + tx.send(AppMessage::ContributePartial { + token: c.0 .0, + index, + payload_fraction: payload, + }) + .unwrap(); + Ok(()) +} + +#[get("/me")] +async fn me( + c: Credential>, + app_state: &State>>, +) -> Result, Forbidden<()>> { + let hash = { app_state.read().unwrap().payload_hash.clone() }; + Ok(hash) +} + +#[get("/state")] +async fn state<'a>( + c: Credential>, + app_state: &State>>, +) -> Result, Forbidden<()>> { + let bytes = { app_state.read().unwrap().payload.clone() }; + let len = bytes.len() as u64; + Ok(SeekStream::with_opts( + Cursor::new(bytes), + len, + "application/octet-stream", + )) +} + +#[post("/join")] +async fn join(c: Credential>, tx: &State>) -> Status { + // TODO: verify token + tx.send(AppMessage::Join { token: c.0 .0 }).unwrap(); + Status::Ok +} + +#[get("/")] +fn index() -> &'static str { + " + Bruh u ain't mess with the kraken + " +} + +#[rocket::main] +async fn main() -> std::io::Result<()> { + let args = Args::parse(); + match args.command { + Command::Start { + phase2_payload_path, + } => { + let payload = std::fs::read(phase2_payload_path).unwrap(); + let payload_hash = sha2::Sha256::new() + .chain_update(&payload) + .finalize() + .to_vec(); + let (tx, rx) = channel::(); + let initial_contributor = Contributor { + token: "union".into(), + }; + let app_state = Arc::new(RwLock::new(AppState { + queue: PriorityQueue::with_capacity(4096), + processed: HashMap::with_capacity(4096), + contributor: initial_contributor, + payload, + payload_hash, + upload_index: Index(0), + contribution_payload: Vec::new(), + machine: fsm::union_mpc::StateMachine::new(), + })); + let app_state_clone = app_state.clone(); + rocket::tokio::spawn(async move { + let figment = rocket::Config::figment() + .merge(("limits", Limits::new().limit("bytes", 12.mebibytes()))); + rocket::custom(figment) + .manage(app_state_clone) + .manage(tx) + .mount( + "/", + routes![index, join, me, state, contribute, contribution], + ) + .launch() + .await + .unwrap(); + }); + let (tx_verify, rx_verify) = channel::<(Vec, Vec)>(); + let (tx_verify_result, rx_verify_result) = channel::(); + rocket::tokio::spawn(async move { + loop { + let (payload, contribution_payload) = rx_verify.recv().unwrap(); + println!("verifying contribution payloads"); + if phase2_verify(&payload, &contribution_payload).is_ok() { + println!("valid"); + tx_verify_result.send(true).unwrap(); + } else { + println!("invalid"); + tx_verify_result.send(false).unwrap(); + } + } + }); + + loop { + { + let mut app_state = app_state.write().unwrap(); + let machine_state = *app_state.machine.state(); + let mut join = |token: String| { + let new_contributor = Contributor { token }; + if app_state.queue.get(&new_contributor).is_none() + && app_state.processed.get(&new_contributor).is_none() + && app_state.contributor != new_contributor + { + let queue_len = app_state.queue.len(); + const BASE_PRORITY: usize = 1000000; + let priority = BASE_PRORITY - queue_len; + println!( + "contributor joined: {} with priority {}", + new_contributor.token, priority + ); + 
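                            // Priority sketch: a joiner gets the base priority minus the
                            // queue length at join time, so, while the queue only grows,
                            // earlier joiners keep a strictly higher priority and popping
                            // the max-priority entry behaves as first-come-first-served.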
app_state.queue.push(new_contributor, priority); + app_state + .machine + .consume(&fsm::union_mpc::Input::Join) + .unwrap(); + } + }; + match machine_state { + fsm::union_mpc::State::InitContributor => { + let input = match app_state.queue.pop() { + Some((contributor, _)) => { + println!("new contributor slot"); + app_state.contributor = contributor; + app_state.upload_index = Index(0); + app_state.contribution_payload.clear(); + fsm::union_mpc::Input::ContributorSet + } + None => { + println!("no contributor"); + fsm::union_mpc::Input::NoContributor + } + }; + app_state.machine.consume(&input).unwrap(); + } + fsm::union_mpc::State::AwaitContributor => match rx.try_recv() { + Ok(message) => match message { + AppMessage::Join { token } => { + join(token); + } + _ => {} + }, + Err(TryRecvError::Empty) => {} + Err(e) => { + println!("error in awaiting contributor {:?}", e); + break; + } + }, + fsm::union_mpc::State::AwaitContribution => match rx.try_recv() { + Ok(message) => match message { + AppMessage::Join { token } => { + join(token); + } + AppMessage::ContributePartial { + token, + index: Index(index), + mut payload_fraction, + } => { + if app_state.contributor.token == token + && app_state.upload_index == Index(index) + { + let expected_len = if (index as usize) < CONTRIBUTION_CHUNKS + { + CONTRIBUTION_CHUNK_SIZE + } else { + CONTRIBUTION_CHUNK_SIZE_FINAL + }; + if payload_fraction.len() == expected_len { + println!("partial contribution chunk {}", index); + app_state.upload_index = Index(index + 1); + app_state + .contribution_payload + .append(&mut payload_fraction); + if index as usize == CONTRIBUTION_CHUNKS { + println!("contribution complete"); + tx_verify + .send(( + app_state.payload.clone(), + app_state.contribution_payload.clone(), + )) + .unwrap(); + app_state.payload = + app_state.contribution_payload.clone(); + app_state + .machine + .consume(&fsm::union_mpc::Input::Contribute) + .unwrap(); + } + } else { + println!("invalid chunk length {}", index); + } + } + } + }, + Err(TryRecvError::Empty) => {} + Err(e) => { + println!("error in await {:?}", e); + break; + } + }, + fsm::union_mpc::State::Verify => match rx_verify_result.try_recv() { + Ok(result) => { + let timestamp = SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap() + .as_secs(); + let contribution = Contribution { + success: result, + timestamp, + }; + let contributor = app_state.contributor.clone(); + app_state.processed.insert(contributor, contribution); + if result { + println!("verification succeeded."); + app_state + .machine + .consume(&fsm::union_mpc::Input::Valid) + .unwrap(); + } else { + println!("verification failed."); + app_state + .machine + .consume(&fsm::union_mpc::Input::Invalid) + .unwrap(); + } + } + Err(TryRecvError::Empty) => {} + Err(e) => { + println!("error in verify {:?}", e); + break; + } + }, + }; + } + rocket::tokio::time::sleep(std::time::Duration::from_millis(1000 / 30)).await; + } + Ok(()) + } + } +} diff --git a/mpc/mpc.nix b/mpc/mpc.nix new file mode 100644 index 0000000000..206acf4665 --- /dev/null +++ b/mpc/mpc.nix @@ -0,0 +1,31 @@ +{ self, ... }: { + perSystem = { self', pkgs, crane, ... 
}:
+    let
+      attrs = {
+        rustflags = "-L${self'.packages.galoisd-library}/lib";
+      };
+      mpc-client = crane.buildWorkspaceMember (attrs // {
+        crateDirFromRoot = "mpc/client";
+      });
+      mpc-coordinator = crane.buildWorkspaceMember (attrs // {
+        crateDirFromRoot = "mpc/coordinator";
+      });
+    in
+    {
+      packages = mpc-coordinator.packages // mpc-client.packages // {
+        mpc-image = pkgs.dockerTools.buildImage {
+          name = "${self'.packages.mpc-client.name}-image";
+          copyToRoot = pkgs.buildEnv {
+            name = "image-root";
+            paths = [ pkgs.coreutils-full pkgs.cacert ];
+            pathsToLink = [ "/bin" ];
+          };
+          config = {
+            Entrypoint = [ (pkgs.lib.getExe self'.packages.mpc-client) ];
+            Env = [ "SSL_CERT_FILE=${pkgs.cacert}/etc/ssl/certs/ca-bundle.crt" ];
+          };
+        };
+      };
+      checks = mpc-coordinator.checks // mpc-client.checks;
+    };
+}
diff --git a/mpc/shared/Cargo.toml b/mpc/shared/Cargo.toml
new file mode 100644
index 0000000000..44a4b1f80f
--- /dev/null
+++ b/mpc/shared/Cargo.toml
@@ -0,0 +1,8 @@
+[package]
+edition = { workspace = true }
+name = "mpc-shared"
+version = "0.1.0"
+
+[dependencies]
+rocket = { version = "0.5.1", features = ["json", "msgpack", "uuid"] }
+thiserror = { workspace = true }
diff --git a/mpc/shared/src/lib.rs b/mpc/shared/src/lib.rs
new file mode 100644
index 0000000000..ee50387ef9
--- /dev/null
+++ b/mpc/shared/src/lib.rs
@@ -0,0 +1,88 @@
+pub mod types;
+
+use std::ffi::{c_char, c_int};
+
+pub const CONTRIBUTION_SIZE: usize = 306032532;
+pub const CONTRIBUTION_CHUNKS: usize = 30;
+pub const CONTRIBUTION_CHUNK_SIZE: usize = CONTRIBUTION_SIZE / CONTRIBUTION_CHUNKS;
+pub const CONTRIBUTION_CHUNK_SIZE_FINAL: usize =
+    CONTRIBUTION_SIZE - (CONTRIBUTION_CHUNK_SIZE * CONTRIBUTION_CHUNKS);
+
+#[link(name = "galois")]
+extern "C" {
+    fn Phase2Contribute(
+        phase2_payload_raw: *const c_char,
+        phase2_contrib_raw: *mut c_char,
+        len: c_int,
+    ) -> bool;
+
+    fn Phase2Verify(
+        phase2_previous_raw: *const c_char,
+        phase2_contrib_raw: *const c_char,
+        len: c_int,
+    ) -> bool;
+}
+
+#[derive(thiserror::Error, Debug, Copy, Clone)]
+pub enum Phase2ContributionError {
+    #[error("Failed to convert initial payload size, based af if you reached this.")]
+    FailedToConvertPayloadSize,
+    #[error("Looks like you spent time contributing for no reason because it failed.")]
+    FailedToContribute,
+}
+
+#[derive(thiserror::Error, Debug, Copy, Clone)]
+pub enum Phase2VerificationError {
+    #[error("Failed to convert contribution payload size, based af if you reached this.")]
+    FailedToConvertPayloadSize,
+    #[error("1 BTC = 1 BTC, what are you trying to achieve?")]
+    InconsistentPayloadSize,
+    #[error("Cheating is great, but not allowed.
You may lose your slot if the coordinator chose to :'(.")] + Phase2VerificationFailed, +} + +pub fn phase2_contribute(phase2_payload: &[u8]) -> Result, Phase2ContributionError> { + let payload_len = phase2_payload + .len() + .try_into() + .map_err(|_| Phase2ContributionError::FailedToConvertPayloadSize)?; + let mut phase2_contrib_raw = vec![0u8; phase2_payload.len()]; + let result = unsafe { + Phase2Contribute( + phase2_payload.as_ptr() as *const _, + phase2_contrib_raw.as_mut_ptr() as *mut _, + payload_len, + ) + }; + if result { + Ok(phase2_contrib_raw) + } else { + Err(Phase2ContributionError::FailedToContribute) + } +} + +pub fn phase2_verify( + phase2_payload: &[u8], + phase2_contrib: &[u8], +) -> Result<(), Phase2VerificationError> { + let payload_len = phase2_payload + .len() + .try_into() + .map_err(|_| Phase2VerificationError::FailedToConvertPayloadSize)?; + if phase2_contrib.len() != phase2_payload.len() { + Err(Phase2VerificationError::InconsistentPayloadSize) + } else { + let result = unsafe { + Phase2Verify( + phase2_payload.as_ptr() as *const _, + phase2_contrib.as_ptr() as *mut _, + payload_len, + ) + }; + if result { + Ok(()) + } else { + Err(Phase2VerificationError::Phase2VerificationFailed) + } + } +} diff --git a/mpc/shared/src/types.rs b/mpc/shared/src/types.rs new file mode 100644 index 0000000000..9e6e7e1a69 --- /dev/null +++ b/mpc/shared/src/types.rs @@ -0,0 +1,8 @@ +use rocket::serde::{Deserialize, Serialize}; + +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[serde(crate = "rocket::serde")] +pub struct Contribution { + pub success: bool, + pub timestamp: u64, +} diff --git a/tools/rust/rust.nix b/tools/rust/rust.nix index 6f6f28ad1c..eb50aff572 100644 --- a/tools/rust/rust.nix +++ b/tools/rust/rust.nix @@ -124,7 +124,12 @@ _: { inherit mkBuildStdToolchain mkNightly rustSrc; toolchains = { - nightly = mkNightly { }; + nightly = mkNightly { + targets = [ + "wasm32-unknown-unknown" + "x86_64-unknown-linux-musl" + ]; + }; # for use in the devShell dev = pkgs.rust-bin.nightly.${nightlyVersion}.default.override { From d3f6709a52257fb721b20b57fe10124a3609a01c Mon Sep 17 00:00:00 2001 From: Hussein Ait Lahcen Date: Fri, 26 Jul 2024 21:30:47 +0200 Subject: [PATCH 03/52] feat: mpc schema --- mpc/coordinator/database.sql | 99 ++++++++++++++++++++++++++++++++++++ 1 file changed, 99 insertions(+) create mode 100644 mpc/coordinator/database.sql diff --git a/mpc/coordinator/database.sql b/mpc/coordinator/database.sql new file mode 100644 index 0000000000..ce8fe9b172 --- /dev/null +++ b/mpc/coordinator/database.sql @@ -0,0 +1,99 @@ +CREATE TABLE queue ( + id uuid PRIMARY KEY, + payload_id uuid NOT NULL DEFAULT(gen_random_uuid()), + joined timestamptz NOT NULL DEFAULT (now()), + score integer NOT NULL +); + +CREATE OR REPLACE FUNCTION min_score() RETURNS INTEGER AS $$ + SELECT COALESCE(MIN(score) - 1, 1000000) FROM queue +$$ LANGUAGE SQL; + +CREATE OR REPLACE FUNCTION set_initial_score_trigger() RETURNS TRIGGER AS $$ +BEGIN + NEW.score := min_score(); + RETURN NEW; +END +$$ LANGUAGE plpgsql; + +CREATE TRIGGER queue_set_initial_score BEFORE INSERT ON queue FOR EACH ROW EXECUTE FUNCTION set_initial_score_trigger(); + +CREATE UNIQUE INDEX idx_queue_score_id ON queue(score, id); +CREATE UNIQUE INDEX idx_queue_score ON queue(score); + +ALTER TABLE queue ADD FOREIGN KEY (id) REFERENCES users(id); + +CREATE TABLE contribution_status( + id uuid PRIMARY KEY, + started timestamptz NOT NULL DEFAULT(now()), + expire timestamptz NOT NULL DEFAULT(now() + INTERVAL '30 minutes') 
+);
+
+CREATE UNIQUE INDEX idx_contribution_status_id_expire ON contribution_status(id, expire);
+
+ALTER TABLE contribution_status ADD FOREIGN KEY (id) REFERENCES queue(id);
+
+CREATE TABLE contribution(
+    id uuid PRIMARY KEY,
+    seq SMALLSERIAL NOT NULL,
+    created_at timestamptz NOT NULL DEFAULT(now()),
+    success boolean
+);
+
+ALTER TABLE contribution ADD FOREIGN KEY (id) REFERENCES queue(id);
+
+CREATE UNIQUE INDEX idx_contribution_seq ON contribution(seq);
+CREATE UNIQUE INDEX idx_contribution_seq_success ON contribution(seq, success);
+
+-- The next contributor is the one with the highest score that didn't contribute yet.
+CREATE OR REPLACE FUNCTION set_next_contributor_trigger() RETURNS TRIGGER AS $$
+BEGIN
+    CALL set_next_contributor();
+    RETURN NEW;
+END
+$$ LANGUAGE plpgsql;
+
+-- Rotate the current contributor whenever a contribution is done.
+CREATE TRIGGER contribution_added AFTER INSERT ON contribution FOR EACH ROW EXECUTE FUNCTION set_next_contributor_trigger();
+
+-- Current contributor is the highest score in the queue with the contribution
+-- not done yet and its status not expired.
+CREATE OR REPLACE VIEW current_contributor_id AS
+    SELECT q.id
+    FROM queue q
+    WHERE q.score = (
+        SELECT MAX(qq.score)
+        FROM queue qq
+        WHERE NOT EXISTS (
+            SELECT * FROM contribution c WHERE c.id = qq.id
+        ) AND EXISTS (
+            SELECT cs.expire FROM contribution_status cs WHERE cs.id = qq.id AND cs.expire > now()
+        )
+    );
+
+-- The current payload is from the latest successful contribution
+CREATE OR REPLACE VIEW current_payload_id AS
+    SELECT q.payload_id
+    FROM contribution c
+    INNER JOIN queue q ON q.id = c.id
+    WHERE c.seq = (
+        SELECT MAX(cc.seq) FROM contribution cc WHERE cc.success
+    );
+
+CREATE OR REPLACE PROCEDURE set_next_contributor() AS $$
+BEGIN
+    IF (SELECT COUNT(*) FROM current_contributor_id) = 0 THEN
+        INSERT INTO contribution_status(id) SELECT q.id FROM queue q WHERE q.score = (
+            SELECT MAX(qq.score)
+            FROM queue qq
+            WHERE NOT EXISTS (
+                SELECT * FROM contribution c WHERE c.id = qq.id
+            ) AND NOT EXISTS (
+                SELECT cs.expire FROM contribution_status cs WHERE cs.id = qq.id AND cs.expire < now()
+            )
+        );
+    END IF;
+END
+$$ LANGUAGE plpgsql;
+
+SELECT cron.schedule('update-contributor', '10 seconds', 'CALL set_next_contributor()');
From 4eae4cd18b54cc3dc4a8517f6693b84f8478cd2e Mon Sep 17 00:00:00 2001
From: Hussein Ait Lahcen 
Date: Mon, 29 Jul 2024 23:05:11 +0200
Subject: [PATCH 04/52] feat: mpc use supa

---
 app/src/routes/ceremony/+page.svelte | 195 +++++++++
 mpc/client/Cargo.toml                |  28 +-
 mpc/client/src/main.rs               | 494 ++++++++++++++---------
 mpc/coordinator/Cargo.toml           |  19 +-
 mpc/coordinator/database.sql         | 218 ++++++++--
 mpc/coordinator/src/fsm.rs           |  22 --
 mpc/coordinator/src/main.rs          | 572 ++++++++++-----------------
 mpc/shared/Cargo.toml                |   2 +-
 mpc/shared/src/lib.rs                |   4 -
 mpc/shared/src/types.rs              |  16 +-
 10 files changed, 947 insertions(+), 623 deletions(-)
 create mode 100644 app/src/routes/ceremony/+page.svelte
 delete mode 100644 mpc/coordinator/src/fsm.rs

diff --git a/app/src/routes/ceremony/+page.svelte b/app/src/routes/ceremony/+page.svelte
new file mode 100644
index 0000000000..e30a462c79
--- /dev/null
+++ b/app/src/routes/ceremony/+page.svelte
@@ -0,0 +1,195 @@
+
+
+
+ Union | Ceremony
+
+
+
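+<!-- Assumed wiring (the script section is not shown here): queuePosition is
+     polled from the coordinator's current_queue_position view and stays null
+     until the first response arrives. -->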
+ {#if queuePosition != null} + Queue position: {queuePosition} + {:else} +
+ Queue position: +
+ {/if} + + {#each messages as message} +
+ {@html message} +
+ {/each} +
diff --git a/mpc/client/Cargo.toml b/mpc/client/Cargo.toml index d076ebe5e5..17a4dae0ce 100644 --- a/mpc/client/Cargo.toml +++ b/mpc/client/Cargo.toml @@ -4,14 +4,20 @@ name = "mpc-client" version = "0.1.0" [dependencies] -clap = { version = "4.5", features = ["derive"] } -crossterm = "0.27.0" -mpc-shared = { workspace = true } -rand = "0.8.5" -ratatui = "0.27.0" -reqwest = { workspace = true, features = ["json"] } -serde = { workspace = true, features = ["derive"] } -serde_json = { workspace = true } -sha2 = { workspace = true } -thiserror = { workspace = true } -tokio = { workspace = true, features = ["full"] } +async-sqlite = "0.2.2" +base64 = { workspace = true } +clap = { version = "4.5", features = ["derive"] } +crossterm = "0.27.0" +http-body-util = "0.1" +httpdate = "1.0" +hyper = { version = "1", features = ["full"] } +hyper-util = { version = "0.1", features = ["full"] } +mpc-shared = { workspace = true } +postgrest = "1.0" +rand = "0.8.5" +ratatui = "0.27.0" +reqwest = { workspace = true, features = ["json"] } +serde = { workspace = true, features = ["derive"] } +serde_json = { workspace = true } +thiserror = { workspace = true } +tokio = { workspace = true, features = ["full"] } diff --git a/mpc/client/src/main.rs b/mpc/client/src/main.rs index ad58024b87..f05a298301 100644 --- a/mpc/client/src/main.rs +++ b/mpc/client/src/main.rs @@ -1,193 +1,159 @@ -use std::{io::SeekFrom, path::Path, str::FromStr}; +use std::{ + convert::Infallible, io::SeekFrom, net::SocketAddr, os::unix::fs::MetadataExt, path::Path, + str::FromStr, time::UNIX_EPOCH, +}; +use async_sqlite::{rusqlite::params, JournalMode, Pool, PoolBuilder}; +use base64::{prelude::BASE64_STANDARD, Engine}; use clap::{Parser, Subcommand}; +use http_body_util::{BodyExt, Full}; +use httpdate::parse_http_date; +use hyper::{body::Buf, service::service_fn, Method}; +use hyper_util::rt::TokioIo; use mpc_shared::{ - phase2_contribute, phase2_verify, types::Contribution, CONTRIBUTION_CHUNKS, - CONTRIBUTION_CHUNK_SIZE, CONTRIBUTION_SIZE, + phase2_contribute, phase2_verify, + types::{Contribution, ContributorId, PayloadId}, + CONTRIBUTION_CHUNKS, CONTRIBUTION_CHUNK_SIZE, CONTRIBUTION_SIZE, }; +use postgrest::Postgrest; use reqwest::{ header::{ - HeaderMap, HeaderValue, AUTHORIZATION, CONTENT_ENCODING, CONTENT_LENGTH, RANGE, - TRANSFER_ENCODING, + HeaderMap, HeaderName, HeaderValue, AUTHORIZATION, CONTENT_ENCODING, CONTENT_LENGTH, + LOCATION, RANGE, TRANSFER_ENCODING, }, StatusCode, }; -use sha2::Digest; -use tokio::io::{AsyncSeekExt, AsyncWriteExt}; +use serde::{Deserialize, Serialize}; +use tokio::{ + io::{empty, AsyncSeekExt, AsyncWriteExt}, + net::TcpListener, +}; -#[derive(Parser, Debug)] -#[command(version, about, long_about = None)] -struct Args { - #[command(subcommand)] - command: Command, -} +static INTERNAL_SERVER_ERROR: &[u8] = b"Internal Server Error"; +static NOTFOUND: &[u8] = b"Not Found"; -#[derive(Subcommand, Debug, Clone)] -enum Command { - Contribute { - #[arg(short, long)] - token: String, - }, - Verify { - #[arg(short, long)] - phase2_payload_path: String, - #[arg(short, long)] - phase2_contrib_path: String, - }, +#[derive(PartialEq, Eq, Debug, Clone, Deserialize)] +#[serde(rename_all = "camelCase")] +struct Contribute { + bucket: String, + jwt: String, + api_key: String, + contributor_id: String, + payload_id: String, } -#[derive(thiserror::Error, Debug, Copy, Clone)] +#[derive(thiserror::Error, Debug, Clone)] enum Error { + #[error("couldn't find expected header: {0}")] + HeaderNotFound(String), + #[error("current 
contributor not found.")] + ContributorNotFound, + #[error("current payload not found.")] + PayloadNotFound, #[error(transparent)] Phase2ContributionFailed(#[from] mpc_shared::Phase2ContributionError), #[error(transparent)] Phase2VerificationFailed(#[from] mpc_shared::Phase2VerificationError), - #[error("Failed to read current state file.")] - FailedToReadPhase2Payload, - #[error("Failed to read contrib state file.")] - FailedToReadPhase2Contrib, - #[error("Failed to write the contribution file.")] - FailedToWriteContribution, } -async fn get_state_file(path: &Path) -> (Vec, [u8; 32]) { +async fn get_state_file(path: &str) -> Vec { if !tokio::fs::try_exists(path).await.unwrap() { tokio::fs::write(path, []).await.unwrap(); } - let content = tokio::fs::read(path).await.unwrap(); - let hash = sha2::Sha256::new().chain_update(&content).finalize(); - (content, hash.into()) + tokio::fs::read(path).await.unwrap() } -#[derive(Debug)] -pub enum CompressionType { - Gzip, - Deflate, - Brotli, - Zstd, -} +type BoxBody = http_body_util::combinators::BoxBody; -impl FromStr for CompressionType { - type Err = (); - fn from_str(value: &str) -> Result { - match value { - "gzip" => Ok(CompressionType::Gzip), - "deflate" => Ok(CompressionType::Deflate), - "br" => Ok(CompressionType::Brotli), - "zstd" => Ok(CompressionType::Zstd), - _ => Err(()), - } - } -} - -fn get_compression_type(headers: &HeaderMap) -> Option { - let mut compression_type = headers - .get_all(CONTENT_ENCODING) - .iter() - .find_map(|value| value.to_str().ok().and_then(|value| value.parse().ok())); - - if compression_type.is_none() { - compression_type = headers - .get_all(TRANSFER_ENCODING) - .iter() - .find_map(|value| value.to_str().ok().and_then(|value| value.parse().ok())); - } - - if compression_type.is_some() { - if let Some(content_length) = headers.get(CONTENT_LENGTH) { - if content_length == "0" { - return None; - } - } - } - - compression_type -} - -#[tokio::main] -async fn main() -> Result<(), Error> { - let args = Args::parse(); - match args.command { - Command::Contribute { token } => { - let url = |path: String| format!("http://localhost:8000/{}", path); - println!("client token {}", token); +async fn handle( + req: hyper::Request, +) -> Result, Box> { + match (req.method(), req.uri().path()) { + (&Method::POST, "/contribute") => { + let whole_body = req.collect().await?.aggregate(); + let Contribute { + bucket, + jwt, + api_key, + contributor_id, + payload_id, + } = serde_json::from_reader(whole_body.reader())?; + const SUPABASE_PROJECT: &str = "https://bffcolwcakqrhlznyjns.supabase.co"; + const APIKEY: &str = "apikey"; + let authorization_header = format!("Bearer {}", jwt); + let client = Postgrest::new(format!("{SUPABASE_PROJECT}/rest/v1")) + .insert_header(APIKEY, api_key) + .insert_header(AUTHORIZATION, authorization_header.clone()); + let current_contributor = client + .from("current_contributor_id") + .select("id") + .execute() + .await? + .json::>() + .await? + .first() + .cloned() + .ok_or(Error::ContributorNotFound)?; + assert!( + current_contributor.id == contributor_id, + "not current contributor." + ); + let current_payload = client + .from("current_payload_id") + .select("payload_id") + .execute() + .await? + .json::>() + .await? 
+                .first()
+                .cloned()
+                .ok_or(Error::PayloadNotFound)?;
+            let current_payload_download_url = format!(
+                "{SUPABASE_PROJECT}/storage/v1/object/contributions/{}",
+                &current_payload.id
+            );
             let client = reqwest::ClientBuilder::new()
                 .default_headers(HeaderMap::from_iter([(
                     AUTHORIZATION,
-                    HeaderValue::from_str(&format!("Bearer {}", token)).unwrap(),
+                    HeaderValue::from_str(&authorization_header)?,
                 )]))
-                .build()
-                .unwrap();
-            println!("joining the queue");
-            client
-                .post(url("join".into()))
-                .send()
-                .await
-                .unwrap()
-                .error_for_status()
-                .unwrap();
-            println!("waiting our turn...");
-            let expected_state_file_hash = loop {
-                let response = client.get(url("me".into())).send().await.unwrap();
-                if response.status().is_success() {
-                    println!("finally our turn!");
-                    let expected_hash = response.bytes().await.unwrap();
-                    break expected_hash;
-                }
-                let response = client.get(url("contribution".into())).send().await.unwrap();
-                match response.status() {
-                    StatusCode::OK => {
-                        println!(
-                            "contribution done: {:?}",
-                            response.json::().await.unwrap()
-                        );
-                        return Ok(());
-                    }
-                    _ => {}
-                }
-                println!("checking contribution status...");
-                tokio::time::sleep(std::time::Duration::from_millis(1000)).await;
-            };
-            println!("verifying state file integrity...");
+                .build()?;
+            println!("checking payload file...");
             enum StateFileAction {
                 Download(usize),
                 Done(Vec),
             }
-            let state_path = Path::new("./state.ph2");
+            let state_path = current_payload.id;
             let action = match get_state_file(&state_path).await {
-                (content, _) if content.len() < CONTRIBUTION_SIZE => {
+                content if content.len() < CONTRIBUTION_SIZE => {
                     println!("partial download, continuing from {}...", content.len());
                     StateFileAction::Download(content.len())
                 }
-                (content, hash)
-                    if content.len() == CONTRIBUTION_SIZE && hash == *expected_state_file_hash =>
-                {
-                    println!("integrity verified, download complete.");
+                content if content.len() == CONTRIBUTION_SIZE => {
+                    println!("download complete.");
                     StateFileAction::Done(content)
                 }
                 _ => {
-                    println!("invalid size or invalid hash detected, redownloading...");
+                    println!("invalid size detected, redownloading...");
                     StateFileAction::Download(0)
                 }
             };
             let payload = match action {
                 StateFileAction::Download(start_position) => {
                     let mut response = client
-                        .get(url("state".into()))
+                        .get(current_payload_download_url)
                         .header(RANGE, format!("bytes={}-", start_position))
                         .send()
-                        .await
-                        .unwrap()
-                        .error_for_status()
-                        .unwrap();
+                        .await?
+                        .error_for_status()?;
                     let headers = response.headers();
-                    assert!(
-                        get_compression_type(headers).is_none(),
-                        "compression not supported."
-                    );
                     let total_length = start_position
-                        + u64::from_str(headers.get(CONTENT_LENGTH).unwrap().to_str().unwrap())
-                            .unwrap() as usize;
+                        + u64::from_str(
+                            headers
+                                .get(CONTENT_LENGTH)
+                                .ok_or(Error::HeaderNotFound(CONTENT_LENGTH.as_str().into()))?
+                                .to_str()?,
+                        )? as usize;
                     println!("state file length: {}", total_length);
                     assert!(
                         total_length == CONTRIBUTION_SIZE,
@@ -196,68 +162,230 @@ async fn main() -> Result<(), Error> {
                     let mut state_file = tokio::fs::OpenOptions::new()
                         .write(true)
                         .create(false)
-                        .open(state_path)
-                        .await
-                        .unwrap();
-                    state_file.set_len(start_position as u64).await.unwrap();
+                        .open(&state_path)
+                        .await?;
+                    state_file.set_len(start_position as u64).await?;
                     state_file
                         .seek(SeekFrom::Start(start_position as u64))
-                        .await
-                        .unwrap();
+                        .await?;
                     let mut i = 0;
-                    while let Some(chunk) = response.chunk().await.unwrap() {
+                    while let Some(chunk) = response.chunk().await?
{ if i % 10 == 0 { println!("eta: chunk {}.", i); } - let written = state_file.write(&chunk).await.unwrap(); + let written = state_file.write(&chunk).await?; assert!(written == chunk.len(), "couldn't write chunk."); - state_file.sync_data().await.unwrap(); + state_file.sync_data().await?; i += 1; } println!("download complete"); - println!("verifying integrity..."); - let final_content = tokio::fs::read(state_path).await.unwrap(); - let final_content_hash = sha2::Sha256::new() - .chain_update(&final_content) - .finalize() - .to_vec(); - assert!( - &final_content_hash == expected_state_file_hash.as_ref(), - "invalid file hash after download." - ); - println!("integrity verified"); + let final_content = tokio::fs::read(&state_path).await?; final_content } StateFileAction::Done(content) => content, }; - println!("generating contribution, may take some time..."); - let phase2_contribution = phase2_contribute(&payload).unwrap(); + let phase2_contribution = if let Ok(true) = tokio::fs::metadata(&payload_id) + .await + .map(|meta| meta.size() as usize == CONTRIBUTION_SIZE) + { + println!("loading completed contribution..."); + tokio::fs::read(&payload_id).await? + } else { + println!("generating contribution, may take some time..."); + let phase2_contribution = phase2_contribute(&payload)?; + tokio::fs::write(&payload_id, &phase2_contribution).await?; + phase2_contribution + }; println!("uploading contribution..."); - for i in 0..=CONTRIBUTION_CHUNKS { - println!("eta: chunk {}.", i); - let chunk = &phase2_contribution[i * CONTRIBUTION_CHUNK_SIZE - ..std::cmp::min((i + 1) * CONTRIBUTION_CHUNK_SIZE, CONTRIBUTION_SIZE)]; - client - .post(url(format!("contribute/{}", i))) - .body(chunk.to_vec()) + let pool = PoolBuilder::new() + .path("db.sqlite3") + .journal_mode(JournalMode::Wal) + .open() + .await?; + + pool.conn(|conn| { + conn.execute( + "CREATE TABLE IF NOT EXISTS resumable_upload ( + location TEXT PRIMARY KEY NOT NULL, + create_at TIMESTAMPTZ NOT NULL DEFAULT(unixepoch()), + expire TIMSTAMPTZ NOT NULL + )", + (), // empty list of parameters. + )?; + Ok(()) + }) + .await?; + let mut upload_location = pool + .conn(move |conn| { + let mut stmt = conn.prepare_cached( + "SELECT location FROM resumable_upload WHERE expire > unixepoch() LIMIT 1", + )?; + let mut rows = stmt.query(())?; + if let Some(row) = rows.next()? { + Ok(Some(row.get::<_, String>(0)?)) + } else { + Ok(None) + } + }) + .await?; + if let Some(ref location) = upload_location { + if client + .head(location) + .header("Tus-Resumable", "1.0.0") .send() - .await - .unwrap() + .await? 
.error_for_status() - .unwrap(); + .is_err() + { + upload_location = None; + } } - Ok(()) - } - Command::Verify { - phase2_payload_path, - phase2_contrib_path, - } => { - // let phase2_payload = - // std::fs::read(phase2_payload_path).map_err(|_| Error::FailedToReadPhase2Payload)?; - // let phase2_contrib = - // std::fs::read(phase2_contrib_path).map_err(|_| Error::FailedToReadPhase2Contrib)?; - // phase2_verify(&phase2_payload, &phase2_contrib)?; - Ok(()) + let upload_location = match upload_location { + Some(location) => { + println!("location already stored in db."); + location + } + None => { + println!("location not found, generating a new one..."); + // ===================================================== + // https://tus.io/protocols/resumable-upload#creation == + // ===================================================== + let response = client + .post(format!("{SUPABASE_PROJECT}/storage/v1/upload/resumable")) + .header("Tus-Resumable", "1.0.0") + .header("Upload-Length", CONTRIBUTION_SIZE.to_string()) + .header( + "Upload-Metadata", + format!( + "bucketName {},objectName {}", + BASE64_STANDARD.encode(&bucket), + BASE64_STANDARD.encode(&payload_id) + ), + ) + .send() + .await?; + let location = response + .headers() + .get(LOCATION) + .ok_or(Error::HeaderNotFound(LOCATION.as_str().into()))? + .to_str()? + .to_string(); + let expire = response + .headers() + .get("Upload-Expires") + .ok_or(Error::HeaderNotFound("Upload-Expires".into()))? + .to_str()? + .into(); + let expire = parse_http_date(expire)?; + let expire_timestamp = expire.duration_since(UNIX_EPOCH)?.as_secs(); + let location_clone = location.clone(); + pool.conn(move |conn| { + let mut stmt = conn.prepare_cached( + "INSERT INTO resumable_upload (location, expire) VALUES (?, ?)", + )?; + let r = stmt.execute((location_clone, expire_timestamp))?; + assert!(r == 1); + Ok(()) + }) + .await?; + location + } + }; + + println!("upload location: {upload_location}"); + + // ================================================= + // https://tus.io/protocols/resumable-upload#head == + // ================================================= + let response = client + .head(&upload_location) + .header("Tus-Resumable", "1.0.0") + .send() + .await? + .error_for_status()?; + let upload_length = usize::from_str( + response + .headers() + .get("Upload-Length") + .ok_or(Error::HeaderNotFound("Upload-Length".into()))? + .to_str()?, + )?; + let upload_offset = usize::from_str( + response + .headers() + .get("Upload-Offset") + .ok_or(Error::HeaderNotFound("Upload-Offset".into()))? + .to_str()?, + )?; + assert!(upload_length == CONTRIBUTION_SIZE, "invalid upload-length."); + println!("upload-offset: {}", upload_offset); + if upload_offset < upload_length { + println!("uploading contribution..."); + // ================================================== + // https://tus.io/protocols/resumable-upload#patch == + // ================================================== + client + .patch(&upload_location) + .header("Tus-Resumable", "1.0.0") + .header("Content-Type", "application/offset+octet-stream") + .header("Upload-Offset", upload_offset.to_string()) + .body( + phase2_contribution + .into_iter() + .skip(upload_offset) + .collect::>(), + ) + .send() + .await? 
+ .error_for_status()?; + } + println!("upload complete."); + Ok(hyper::Response::builder() + .header(hyper::header::ACCESS_CONTROL_ALLOW_ORIGIN, "*") + .header(hyper::header::CONTENT_TYPE, "application/json") + .status(hyper::StatusCode::OK) + .body(BoxBody::default()) + .unwrap()) } + // Preflight options request from the browser. + (&Method::OPTIONS, "/contribute") => Ok(hyper::Response::builder() + .header(hyper::header::ACCESS_CONTROL_ALLOW_ORIGIN, "*") + .header( + hyper::header::ACCESS_CONTROL_ALLOW_HEADERS, + hyper::header::CONTENT_TYPE, + ) + .header(hyper::header::ACCESS_CONTROL_ALLOW_METHODS, "POST, OPTIONS") + .status(hyper::StatusCode::OK) + .body(BoxBody::default()) + .unwrap()), + _ => Ok(hyper::Response::builder() + .status(hyper::StatusCode::NOT_FOUND) + .body(full(NOTFOUND)) + .unwrap()), + } +} + +fn full>(chunk: T) -> BoxBody { + Full::new(chunk.into()) + .map_err(|never| match never {}) + .boxed() +} + +#[tokio::main] +async fn main() -> Result<(), Box> { + let addr = SocketAddr::from(([127, 0, 0, 1], 0x1337)); + let listener = TcpListener::bind(addr).await?; + loop { + let (stream, _) = listener.accept().await?; + let io = TokioIo::new(stream); + tokio::task::spawn(async move { + // Finally, we bind the incoming connection to our `hello` service + if let Err(err) = hyper::server::conn::http1::Builder::new() + .serve_connection(io, service_fn(handle)) + .await + { + eprintln!("Error serving connection: {:?}", err); + } + }); } } diff --git a/mpc/coordinator/Cargo.toml b/mpc/coordinator/Cargo.toml index 9ed39e7388..55eb53abe0 100644 --- a/mpc/coordinator/Cargo.toml +++ b/mpc/coordinator/Cargo.toml @@ -4,13 +4,12 @@ name = "mpc-coordinator" version = "0.1.0" [dependencies] -clap = { version = "4.5", features = ["derive"] } -futures.workspace = true -mpc-shared = { workspace = true } -priority-queue = "2.0.3" -rocket = { version = "0.5.1", features = ["json", "msgpack", "uuid"] } -rocket-authorization = "1.0.0" -rocket_seek_stream = "0.2.6" -rust-fsm = { version = "0.7.0", features = ["diagram"] } -sha2 = { workspace = true } -thiserror = { workspace = true } +async-sqlite = "0.2.2" +clap = { version = "4.5", features = ["derive"] } +futures.workspace = true +mpc-shared = { workspace = true } +postgrest = "1.0" +reqwest = { workspace = true, features = ["json"] } +serde_json = { workspace = true } +thiserror = { workspace = true } +tokio = { workspace = true, features = ["full"] } diff --git a/mpc/coordinator/database.sql b/mpc/coordinator/database.sql index ce8fe9b172..43a932db82 100644 --- a/mpc/coordinator/database.sql +++ b/mpc/coordinator/database.sql @@ -1,3 +1,6 @@ +----------- +-- Queue -- +----------- CREATE TABLE queue ( id uuid PRIMARY KEY, payload_id uuid NOT NULL DEFAULT(gen_random_uuid()), @@ -5,9 +8,34 @@ CREATE TABLE queue ( score integer NOT NULL ); +ALTER TABLE queue ENABLE ROW LEVEL SECURITY; +ALTER TABLE queue ADD FOREIGN KEY (id) REFERENCES auth.users(id); +CREATE UNIQUE INDEX idx_queue_score_id ON queue(score, id); +CREATE UNIQUE INDEX idx_queue_score ON queue(score); +CREATE UNIQUE INDEX idx_queue_id_payload ON queue(id, payload_id); + +CREATE POLICY view_all + ON queue + FOR SELECT + TO authenticated + USING ( + true + ); + +CREATE OR REPLACE VIEW current_queue_position AS + SELECT COUNT(*) AS position + FROM queue q + WHERE q.score > ( + SELECT qq.score FROM queue qq WHERE qq.id = auth.uid() + ) AND NOT EXISTS (SELECT cs.id FROM contribution_status cs WHERE cs.id = q.id); + +ALTER VIEW current_queue_position SET (security_invoker = on); + CREATE 
OR REPLACE FUNCTION min_score() RETURNS INTEGER AS $$
-    SELECT COALESCE(MIN(score) - 1, 1000000) FROM queue
-$$ LANGUAGE SQL;
+BEGIN
+    RETURN (SELECT COALESCE(MIN(score) - 1, 1000000) FROM queue);
+END
+$$ LANGUAGE plpgsql;
 
 CREATE OR REPLACE FUNCTION set_initial_score_trigger() RETURNS TRIGGER AS $$
 BEGIN
@@ -18,32 +46,68 @@ $$ LANGUAGE plpgsql;
 
 CREATE TRIGGER queue_set_initial_score BEFORE INSERT ON queue FOR EACH ROW EXECUTE FUNCTION set_initial_score_trigger();
 
-CREATE UNIQUE INDEX idx_queue_score_id ON queue(score, id);
-CREATE UNIQUE INDEX idx_queue_score ON queue(score);
-
-ALTER TABLE queue ADD FOREIGN KEY (id) REFERENCES users(id);
-
+-------------------------
+-- Contribution Status --
+-------------------------
 CREATE TABLE contribution_status(
     id uuid PRIMARY KEY,
     started timestamptz NOT NULL DEFAULT(now()),
     expire timestamptz NOT NULL DEFAULT(now() + INTERVAL '30 minutes')
 );
 
+ALTER TABLE contribution_status ENABLE ROW LEVEL SECURITY;
+ALTER TABLE contribution_status ADD FOREIGN KEY (id) REFERENCES queue(id);
 CREATE UNIQUE INDEX idx_contribution_status_id_expire ON contribution_status(id, expire);
 
-ALTER TABLE contribution_status ADD FOREIGN KEY (id) REFERENCES queue(id);
+CREATE POLICY view_all
+    ON contribution_status
+    FOR SELECT
+    TO authenticated
+    USING (
+        true
+    );
+
+----------------------------
+-- Contribution Submitted --
+----------------------------
+CREATE TABLE contribution_submitted(
+    id uuid PRIMARY KEY,
+    created_at timestamptz NOT NULL DEFAULT(now())
+);
+
+ALTER TABLE contribution_submitted ENABLE ROW LEVEL SECURITY;
+ALTER TABLE contribution_submitted ADD FOREIGN KEY (id) REFERENCES queue(id);
+
+CREATE POLICY view_all
+    ON contribution_submitted
+    FOR SELECT
+    TO authenticated
+    USING (
+        true
+    );
+------------------
+-- Contribution --
+------------------
 CREATE TABLE contribution(
     id uuid PRIMARY KEY,
-    seq SMALLSERIAL NOT NULL,
+    seq smallserial NOT NULL,
     created_at timestamptz NOT NULL DEFAULT(now()),
     success boolean
 );
 
+ALTER TABLE contribution ENABLE ROW LEVEL SECURITY;
 ALTER TABLE contribution ADD FOREIGN KEY (id) REFERENCES queue(id);
-
 CREATE UNIQUE INDEX idx_contribution_seq ON contribution(seq);
-CREATE UNIQUE INDEX idx_contribution_seq_success ON contribution(seq, success);
+CREATE UNIQUE INDEX idx_contribution_seq_success ON contribution(success, seq);
+
+CREATE POLICY view_all
+    ON contribution
+    FOR SELECT
+    TO authenticated
+    USING (
+        true
+    );
 
 -- The next contributor is the one with the highest score that didn't contribute yet.
 CREATE OR REPLACE FUNCTION set_next_contributor_trigger() RETURNS TRIGGER AS $$
@@ -57,7 +121,7 @@ $$ LANGUAGE plpgsql;
 CREATE TRIGGER contribution_added AFTER INSERT ON contribution FOR EACH ROW EXECUTE FUNCTION set_next_contributor_trigger();
 
 -- Current contributor is the highest score in the queue with the contribution
--- not done yet and its status not expired.
+-- not done yet, whose status is either unexpired or expired with a payload submitted.
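+-- Concretely: a contributor stays current while their 30-minute status window
+-- is open, or, once it has expired, only if they already submitted a payload
+-- (contribution_submitted) and the verification result has not yet landed.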
CREATE OR REPLACE VIEW current_contributor_id AS
     SELECT q.id
     FROM queue q
@@ -65,20 +129,34 @@ CREATE OR REPLACE VIEW current_contributor_id AS
         SELECT MAX(qq.score)
         FROM queue qq
         WHERE NOT EXISTS (
-            SELECT * FROM contribution c WHERE c.id = qq.id
-        ) AND EXISTS (
-            SELECT cs.expire FROM contribution_status cs WHERE cs.id = qq.id AND cs.expire > now()
+            SELECT c.id FROM contribution c WHERE c.id = qq.id
+        ) AND (
+            EXISTS (SELECT cs.expire FROM contribution_status cs WHERE cs.id = qq.id AND cs.expire > now())
+            OR
+            (
+                EXISTS (SELECT cs.expire FROM contribution_status cs WHERE cs.id = qq.id AND cs.expire <= now())
+                AND
+                EXISTS (SELECT cs.id FROM contribution_submitted cs WHERE cs.id = qq.id)
+            )
         )
     );
 
+ALTER VIEW current_contributor_id SET (security_invoker = on);
+
 -- The current payload is from the latest successful contribution
 CREATE OR REPLACE VIEW current_payload_id AS
-    SELECT q.payload_id
-    FROM contribution c
-    INNER JOIN queue q ON q.id = c.id
-    WHERE c.seq = (
-        SELECT MAX(cc.seq) FROM contribution cc WHERE cc.success
-    );
+    SELECT COALESCE(
+        (SELECT q.payload_id
+         FROM contribution c
+         INNER JOIN queue q USING(id)
+         WHERE c.seq = (
+            SELECT MAX(cc.seq) FROM contribution cc WHERE cc.success
+         )
+        ),
+        uuid_nil()
+    ) AS payload_id;
+
+ALTER VIEW current_payload_id SET (security_invoker = on);
 
 CREATE OR REPLACE PROCEDURE set_next_contributor() AS $$
 BEGIN
@@ -87,8 +165,8 @@ BEGIN
         SELECT MAX(qq.score)
         FROM queue qq
         WHERE NOT EXISTS (
-            SELECT * FROM contribution c WHERE c.id = qq.id
-        ) AND NOT EXISTS(
+            SELECT c.id FROM contribution c WHERE c.id = qq.id
+        ) AND NOT EXISTS (
             SELECT cs.expire FROM contribution_status cs WHERE cs.id = qq.id AND cs.expire < now()
         )
     );
@@ -96,4 +174,98 @@ BEGIN
 END
 $$ LANGUAGE plpgsql;
 
+-- On expiry, rotate the contributor
 SELECT cron.schedule('update-contributor', '10 seconds', 'CALL set_next_contributor()');
+
+CREATE OR REPLACE FUNCTION can_upload(name varchar) RETURNS BOOLEAN AS $$
+BEGIN
+    RETURN (
+        -- User must be the current contributor.
+        (SELECT cci.id FROM current_contributor_id cci) = auth.uid()
+        AND
+        -- User is only allowed to submit the expected payload.
+        storage.filename(name) = (SELECT q.payload_id::text FROM queue q WHERE q.id = auth.uid())
+        AND
+        -- Do not allow the user to interact with the file after it's been submitted.
+        NOT EXISTS (SELECT * FROM contribution_submitted cs WHERE cs.id = auth.uid())
+    );
+END
+$$ LANGUAGE plpgsql;
+
+CREATE POLICY allow_authenticated_contributor_upload_insert
+    ON storage.objects
+    FOR INSERT
+    TO authenticated
+    WITH CHECK (
+        bucket_id = 'contributions'
+        AND
+        can_upload(name)
+    );
+
+CREATE OR REPLACE FUNCTION can_download(name varchar) RETURNS BOOLEAN AS $$
+BEGIN
+    RETURN (
+        -- User must be the current contributor.
+        (SELECT cci.id FROM current_contributor_id cci) = auth.uid()
+        AND
+        -- User is only allowed to download the last verified contribution.
+        storage.filename(name) = (SELECT cpi.payload_id::text FROM current_payload_id cpi)
+        AND
+        -- Do not allow the user to interact with the file after its contribution has been submitted.
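+        -- (Mirrors the guard in can_upload above: once the payload is submitted,
+        -- the slot is frozen until the verification outcome is recorded.)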
+ NOT EXISTS (SELECT * FROM contribution_submitted cs WHERE cs.id = auth.uid()) + ); +END +$$ LANGUAGE plpgsql; + +CREATE POLICY allow_authenticated_contributor_download + ON storage.objects + FOR SELECT + TO authenticated + USING ( + bucket_id = 'contributions' + AND + can_download(name) + ); + +CREATE OR REPLACE PROCEDURE set_contribution_submitted(queue_id uuid) AS $$ +BEGIN + INSERT INTO contribution_submitted(id) VALUES(queue_id); +END +$$ LANGUAGE plpgsql; + +-- Phase 2 contribution payload is constant size +CREATE OR REPLACE FUNCTION expected_payload_size() RETURNS INTEGER AS $$ +BEGIN + RETURN 306032532; +END +$$ LANGUAGE plpgsql; + +-- Metadata pushed on upload. +-- { +-- "eTag": "\"c019643e056d8d687086c1e125f66ad8-1\"", +-- "size": 1000, +-- "mimetype": "binary/octet-stream", +-- "cacheControl": "no-cache", +-- "lastModified": "2024-07-27T23:03:32.000Z", +-- "contentLength": 1000, +-- "httpStatusCode": 200 +-- } +CREATE OR REPLACE FUNCTION set_contribution_submitted_trigger() RETURNS TRIGGER AS $$ +DECLARE + file_size integer; +BEGIN + IF (NEW.metadata IS NOT NULL) THEN + file_size := (NEW.metadata->>'size')::integer; + CASE + WHEN file_size = expected_payload_size() + THEN CALL set_contribution_submitted(uuid(NEW.owner_id)); + ELSE + RAISE EXCEPTION 'invalid file size, name: %, got: %, expected: %, meta: %', NEW.name, file_size, expected_payload_size(), NEW.metadata; + END CASE; + END IF; + RETURN NEW; +END +$$ LANGUAGE plpgsql; + +-- Rotate the current contributor whenever a contribution is done. +CREATE TRIGGER contribution_payload_uploaded AFTER INSERT OR UPDATE ON storage.objects FOR EACH ROW EXECUTE FUNCTION set_contribution_submitted_trigger(); diff --git a/mpc/coordinator/src/fsm.rs b/mpc/coordinator/src/fsm.rs deleted file mode 100644 index b5ca516fb2..0000000000 --- a/mpc/coordinator/src/fsm.rs +++ /dev/null @@ -1,22 +0,0 @@ -use rust_fsm::state_machine; - -state_machine! 
{ - #[derive(Debug, Copy, Clone, PartialEq, Eq)] - pub union_mpc(AwaitContribution) - InitContributor => { - NoContributor => AwaitContributor, - ContributorSet => AwaitContribution, - }, - AwaitContributor => { - Join => InitContributor, - }, - AwaitContribution => { - Contribute => Verify, - Join => AwaitContribution, - SetPriority => AwaitContribution, - }, - Verify => { - Valid => InitContributor, - Invalid => InitContributor, - }, -} diff --git a/mpc/coordinator/src/main.rs b/mpc/coordinator/src/main.rs index 272612446d..1f0dd7f371 100644 --- a/mpc/coordinator/src/main.rs +++ b/mpc/coordinator/src/main.rs @@ -1,43 +1,17 @@ -pub mod fsm; - -use std::{ - collections::{HashMap, HashSet}, - io::Cursor, - marker::PhantomData, - sync::{ - mpsc::{channel, Sender, TryRecvError}, - Arc, RwLock, - }, - time::{SystemTime, UNIX_EPOCH}, -}; +use std::{io::SeekFrom, str::FromStr}; use clap::{Parser, Subcommand}; use mpc_shared::{ - phase2_verify, types::Contribution, CONTRIBUTION_CHUNKS, CONTRIBUTION_CHUNK_SIZE, - CONTRIBUTION_CHUNK_SIZE_FINAL, -}; -use priority_queue::PriorityQueue; -use rocket::{ - data::{Limits, ToByteUnit}, - fs::TempFile, - get, - http::{ - hyper::{header::AUTHORIZATION, HeaderValue}, - ContentType, Cookie, CookieJar, Header, Status, - }, - post, - request::{FromParam, FromRequest, Outcome, Request}, - response::status::{self, Forbidden, NotFound, Unauthorized}, - routes, - serde::{ - json::{json, Json, Value}, - Deserialize, Serialize, - }, - Response, State, + phase2_verify, + types::{Contribution, ContributorId, PayloadId}, + CONTRIBUTION_SIZE, }; -use rocket_authorization::{AuthError, Authorization, Credential}; -use rocket_seek_stream::SeekStream; -use sha2::Digest; +use postgrest::Postgrest; +use reqwest::header::{HeaderMap, HeaderValue, AUTHORIZATION, CONTENT_LENGTH, RANGE}; +use tokio::io::{AsyncSeekExt, AsyncWriteExt}; + +const SUPABASE_PROJECT: &str = "https://bffcolwcakqrhlznyjns.supabase.co"; +const APIKEY: &str = "apikey"; #[derive(Parser, Debug)] #[command(version, about, long_about = None)] @@ -50,355 +24,221 @@ struct Args { enum Command { Start { #[arg(short, long)] - phase2_payload_path: String, + jwt: String, + #[arg(short, long)] + api_key: String, }, } -#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize)] -#[serde(crate = "rocket::serde")] -struct Contributor { - token: String, -} - -#[derive(Debug)] -struct AppState { - queue: PriorityQueue, - processed: HashMap, - payload: Vec, - payload_hash: Vec, - contributor: Contributor, - upload_index: Index, - contribution_payload: Vec, - machine: fsm::union_mpc::StateMachine, -} - -#[derive(Debug, Clone, PartialEq, Eq)] -enum AppMessage { - Join { - token: String, - }, - ContributePartial { - token: String, - index: Index, - payload_fraction: Vec, - }, +#[derive(thiserror::Error, Debug, Clone)] +enum Error { + #[error("couldn't find expected header: {0}")] + HeaderNotFound(String), + #[error("current contributor not found.")] + ContributorNotFound, + #[error("current payload not found.")] + CurrentPayloadNotFound, + #[error("next payload not found.")] + NextPayloadNotFound, } -struct Current; -struct Any; -struct AuthContributor(String, PhantomData); -#[rocket::async_trait] -impl Authorization for AuthContributor { - const KIND: &'static str = "Bearer"; - async fn parse(_: &str, token: &str, request: &Request) -> Result { - let app_state = request - .rocket() - .state::>>() - .unwrap() - .read() - .unwrap(); - if app_state.machine.state() == &fsm::union_mpc::State::AwaitContribution - && 
app_state.contributor.token == token - { - Ok(Self(token.to_string(), PhantomData)) - } else { - Err(AuthError::Unauthorized) - } - } -} -#[rocket::async_trait] -impl Authorization for AuthContributor { - const KIND: &'static str = "Bearer"; - async fn parse(_: &str, token: &str, request: &Request) -> Result { - Ok(Self(token.to_string(), PhantomData)) +async fn get_state_file(path: &str) -> Vec { + if !tokio::fs::try_exists(path).await.unwrap() { + tokio::fs::write(path, []).await.unwrap(); } + tokio::fs::read(path).await.unwrap() } -#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash, Serialize)] -#[serde(crate = "rocket::serde")] -struct Index(u8); -impl<'a> FromParam<'a> for Index { - type Error = (); - fn from_param(param: &'a str) -> Result { - let result = u8::from_param(param).map_err(|_| ())?; - if result as usize <= CONTRIBUTION_CHUNKS { - Ok(Index(result)) - } else { - Err(()) - } +async fn download_payload( + authorization_header: String, + payload_id: &str, + payload_output: &str, +) -> Result, Box> { + let current_payload_download_url = format!( + "{SUPABASE_PROJECT}/storage/v1/object/contributions/{}", + &payload_id + ); + let client = reqwest::ClientBuilder::new() + .default_headers(HeaderMap::from_iter([( + AUTHORIZATION, + HeaderValue::from_str(&authorization_header)?, + )])) + .build()?; + println!("checking payload file..."); + enum StateFileAction { + Download(usize), + Done(Vec), } -} - -#[get("/contribution")] -async fn contribution( - c: Credential>, - app_state: &State>>, -) -> Result, NotFound<()>> { - match app_state - .read() - .unwrap() - .processed - .get(&Contributor { token: c.0 .0 }) - { - Some(contribution) => Ok(Json(contribution.clone())), - None => Err(NotFound(())), + let state_path = payload_output; + let action = match get_state_file(&state_path).await { + content if content.len() < CONTRIBUTION_SIZE => { + println!("partial download, continuing from {}...", content.len()); + StateFileAction::Download(content.len()) + } + content if content.len() == CONTRIBUTION_SIZE => { + println!("download complete."); + StateFileAction::Done(content) + } + _ => { + println!("invalid size detected, redownloading..."); + StateFileAction::Download(0) + } + }; + match action { + StateFileAction::Download(start_position) => { + let mut response = client + .get(current_payload_download_url) + .header(RANGE, format!("bytes={}-", start_position)) + .send() + .await? + .error_for_status()?; + let headers = response.headers(); + let total_length = start_position + + u64::from_str( + headers + .get(CONTENT_LENGTH) + .ok_or(Error::HeaderNotFound(CONTENT_LENGTH.as_str().into()))? + .to_str()?, + )? as usize; + println!("state file length: {}", total_length); + assert!( + total_length == CONTRIBUTION_SIZE, + "contribution length mismatch." + ); + let mut state_file = tokio::fs::OpenOptions::new() + .write(true) + .create(false) + .open(&state_path) + .await?; + state_file.set_len(start_position as u64).await?; + state_file + .seek(SeekFrom::Start(start_position as u64)) + .await?; + let mut i = 0; + while let Some(chunk) = response.chunk().await? 
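// For a ranged request (RFC 7233, `Range: bytes=<start>-`), Content-Length is
// the count of *remaining* bytes, which is why total_length above is computed
// as start_position + Content-Length. The loop below then appends chunk by
// chunk and calls sync_data after every write, so a crash leaves a valid
// partial file for the next run to resume from.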
{
+                if i % 10 == 0 {
+                    println!("eta: chunk {}.", i);
+                }
+                let written = state_file.write(&chunk).await?;
+                assert!(written == chunk.len(), "couldn't write chunk.");
+                state_file.sync_data().await?;
+                i += 1;
+            }
+            println!("download complete.");
+            let final_content = tokio::fs::read(&state_path).await?;
+            Ok(final_content)
+        }
+        StateFileAction::Done(content) => Ok(content),
+    }
+}
 
 #[tokio::main]
-async fn main() -> std::io::Result<()> {
+async fn main() -> Result<(), Box<dyn std::error::Error>> {
     let args = Args::parse();
     match args.command {
-        Command::Start {
-            phase2_payload_path,
-        } => {
-            let payload = std::fs::read(phase2_payload_path).unwrap();
-            let payload_hash = sha2::Sha256::new()
-                .chain_update(&payload)
-                .finalize()
-                .to_vec();
-            let (tx, rx) = channel::<AppMessage>();
-            let initial_contributor = Contributor {
-                token: "union".into(),
-            };
-            let app_state = Arc::new(RwLock::new(AppState {
-                queue: PriorityQueue::with_capacity(4096),
-                processed: HashMap::with_capacity(4096),
-                contributor: initial_contributor,
-                payload,
-                payload_hash,
-                upload_index: Index(0),
-                contribution_payload: Vec::new(),
-                machine: fsm::union_mpc::StateMachine::new(),
-            }));
-            let app_state_clone = app_state.clone();
-            rocket::tokio::spawn(async move {
-                let figment = rocket::Config::figment()
-                    .merge(("limits", Limits::new().limit("bytes", 12.mebibytes())));
-                rocket::custom(figment)
-                    .manage(app_state_clone)
-                    .manage(tx)
-                    .mount(
-                        "/",
-                        routes![index, join, me, state, contribute, contribution],
-                    )
-                    .launch()
-                    .await
-                    .unwrap();
-            });
-            let (tx_verify, rx_verify) = channel::<(Vec<u8>, Vec<u8>)>();
-            let (tx_verify_result, rx_verify_result) = channel::<bool>();
-            rocket::tokio::spawn(async move {
+        Command::Start { jwt, api_key } => {
+            let authorization_header = format!("Bearer {}", jwt);
+            let client = Postgrest::new(format!("{SUPABASE_PROJECT}/rest/v1"))
+                .insert_header(APIKEY, api_key)
+                .insert_header(AUTHORIZATION, authorization_header.clone());
+            loop {
+                let current_contributor = {
+                    let contributor = client
+                        .from("current_contributor_id")
+                        .select("id")
+                        .execute()
+                        .await?
+                        .json::<Vec<ContributorId>>()
+                        .await?
+                        .first()
+                        .cloned()
+                        .ok_or(Error::ContributorNotFound);
+                    match contributor {
+                        Ok(contributor) => contributor,
+                        Err(_) => {
+                            println!("no contributor to process yet.");
+                            tokio::time::sleep(std::time::Duration::from_secs(10)).await;
+                            continue;
+                        }
+                    }
+                };
+                let current_payload = client
+                    .from("current_payload_id")
+                    .select("payload_id")
+                    .execute()
+                    .await?
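// current_contributor_id and current_payload_id are the Postgres views
// defined in mpc/coordinator/database.sql, queried through PostgREST; a
// select always comes back as a JSON array of rows, so `.first()` is what
// distinguishes "one contributor" from "no contributor yet".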
+ .json::>() + .await? + .first() + .cloned() + .ok_or(Error::CurrentPayloadNotFound)?; + let payload_current = download_payload( + authorization_header.clone(), + ¤t_payload.id, + ¤t_payload.id, + ) + .await?; + println!("awaiting contribution of {}...", ¤t_contributor.id); loop { - let (payload, contribution_payload) = rx_verify.recv().unwrap(); - println!("verifying contribution payloads"); - if phase2_verify(&payload, &contribution_payload).is_ok() { - println!("valid"); - tx_verify_result.send(true).unwrap(); - } else { - println!("invalid"); - tx_verify_result.send(false).unwrap(); + if client + .from("contribution_submitted") + .eq("id", ¤t_contributor.id) + .select("id") + .execute() + .await? + .json::>() + .await? + .len() + == 1 + { + break; } + tokio::time::sleep(std::time::Duration::from_secs(10)).await; } - }); - - loop { - { - let mut app_state = app_state.write().unwrap(); - let machine_state = *app_state.machine.state(); - let mut join = |token: String| { - let new_contributor = Contributor { token }; - if app_state.queue.get(&new_contributor).is_none() - && app_state.processed.get(&new_contributor).is_none() - && app_state.contributor != new_contributor - { - let queue_len = app_state.queue.len(); - const BASE_PRORITY: usize = 1000000; - let priority = BASE_PRORITY - queue_len; - println!( - "contributor joined: {} with priority {}", - new_contributor.token, priority - ); - app_state.queue.push(new_contributor, priority); - app_state - .machine - .consume(&fsm::union_mpc::Input::Join) - .unwrap(); - } - }; - match machine_state { - fsm::union_mpc::State::InitContributor => { - let input = match app_state.queue.pop() { - Some((contributor, _)) => { - println!("new contributor slot"); - app_state.contributor = contributor; - app_state.upload_index = Index(0); - app_state.contribution_payload.clear(); - fsm::union_mpc::Input::ContributorSet - } - None => { - println!("no contributor"); - fsm::union_mpc::Input::NoContributor - } - }; - app_state.machine.consume(&input).unwrap(); - } - fsm::union_mpc::State::AwaitContributor => match rx.try_recv() { - Ok(message) => match message { - AppMessage::Join { token } => { - join(token); - } - _ => {} - }, - Err(TryRecvError::Empty) => {} - Err(e) => { - println!("error in awaiting contributor {:?}", e); - break; - } - }, - fsm::union_mpc::State::AwaitContribution => match rx.try_recv() { - Ok(message) => match message { - AppMessage::Join { token } => { - join(token); - } - AppMessage::ContributePartial { - token, - index: Index(index), - mut payload_fraction, - } => { - if app_state.contributor.token == token - && app_state.upload_index == Index(index) - { - let expected_len = if (index as usize) < CONTRIBUTION_CHUNKS - { - CONTRIBUTION_CHUNK_SIZE - } else { - CONTRIBUTION_CHUNK_SIZE_FINAL - }; - if payload_fraction.len() == expected_len { - println!("partial contribution chunk {}", index); - app_state.upload_index = Index(index + 1); - app_state - .contribution_payload - .append(&mut payload_fraction); - if index as usize == CONTRIBUTION_CHUNKS { - println!("contribution complete"); - tx_verify - .send(( - app_state.payload.clone(), - app_state.contribution_payload.clone(), - )) - .unwrap(); - app_state.payload = - app_state.contribution_payload.clone(); - app_state - .machine - .consume(&fsm::union_mpc::Input::Contribute) - .unwrap(); - } - } else { - println!("invalid chunk length {}", index); - } - } - } - }, - Err(TryRecvError::Empty) => {} - Err(e) => { - println!("error in await {:?}", e); - break; - } - }, - 
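// This arm was the core of the old chunked-upload protocol: fixed-size POSTs
// tracked by upload_index and stitched back into contribution_payload. The
// rewrite above replaces the whole rocket server with polling against the
// Supabase tables, and uploads move to tus in the follow-up client patch.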
fsm::union_mpc::State::Verify => match rx_verify_result.try_recv() { - Ok(result) => { - let timestamp = SystemTime::now() - .duration_since(UNIX_EPOCH) - .unwrap() - .as_secs(); - let contribution = Contribution { - success: result, - timestamp, - }; - let contributor = app_state.contributor.clone(); - app_state.processed.insert(contributor, contribution); - if result { - println!("verification succeeded."); - app_state - .machine - .consume(&fsm::union_mpc::Input::Valid) - .unwrap(); - } else { - println!("verification failed."); - app_state - .machine - .consume(&fsm::union_mpc::Input::Invalid) - .unwrap(); - } - } - Err(TryRecvError::Empty) => {} - Err(e) => { - println!("error in verify {:?}", e); - break; - } - }, - }; + println!("contribution submitted!"); + let next_payload = client + .from("queue") + .eq("id", ¤t_contributor.id) + .select("payload_id") + .execute() + .await? + .json::>() + .await? + .first() + .cloned() + .ok_or(Error::NextPayloadNotFound)?; + let payload_next = download_payload( + authorization_header.clone(), + &next_payload.id, + &next_payload.id, + ) + .await?; + if phase2_verify(&payload_current, &payload_next).is_ok() { + println!("verification succeeded."); + client + .from("contribution") + .insert(serde_json::to_string(&Contribution { + id: current_contributor.id.clone(), + success: true, + })?) + .execute() + .await? + .error_for_status()?; + tokio::fs::remove_file(¤t_payload.id).await?; + } else { + println!("verification failed."); + client + .from("contribution") + .insert(serde_json::to_string(&Contribution { + id: current_contributor.id.clone(), + success: false, + })?) + .execute() + .await? + .error_for_status()?; } - rocket::tokio::time::sleep(std::time::Duration::from_millis(1000 / 30)).await; + tokio::time::sleep(std::time::Duration::from_secs(10)).await; } - Ok(()) } } } diff --git a/mpc/shared/Cargo.toml b/mpc/shared/Cargo.toml index 44a4b1f80f..1c8b4846f3 100644 --- a/mpc/shared/Cargo.toml +++ b/mpc/shared/Cargo.toml @@ -4,5 +4,5 @@ name = "mpc-shared" version = "0.1.0" [dependencies] -rocket = { version = "0.5.1", features = ["json", "msgpack", "uuid"] } +serde = { workspace = true, features = ["derive"] } thiserror = { workspace = true } diff --git a/mpc/shared/src/lib.rs b/mpc/shared/src/lib.rs index ee50387ef9..ae058ef5b2 100644 --- a/mpc/shared/src/lib.rs +++ b/mpc/shared/src/lib.rs @@ -3,10 +3,6 @@ pub mod types; use std::ffi::{c_char, c_int}; pub const CONTRIBUTION_SIZE: usize = 306032532; -pub const CONTRIBUTION_CHUNKS: usize = 30; -pub const CONTRIBUTION_CHUNK_SIZE: usize = CONTRIBUTION_SIZE / CONTRIBUTION_CHUNKS; -pub const CONTRIBUTION_CHUNK_SIZE_FINAL: usize = - CONTRIBUTION_SIZE - (CONTRIBUTION_CHUNK_SIZE * CONTRIBUTION_CHUNKS); #[link(name = "galois")] extern "C" { diff --git a/mpc/shared/src/types.rs b/mpc/shared/src/types.rs index 9e6e7e1a69..72bdf1ba1f 100644 --- a/mpc/shared/src/types.rs +++ b/mpc/shared/src/types.rs @@ -1,8 +1,18 @@ -use rocket::serde::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; + +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct ContributorId { + pub id: String, +} + +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct PayloadId { + #[serde(rename = "payload_id")] + pub id: String, +} #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -#[serde(crate = "rocket::serde")] pub struct Contribution { + pub id: String, pub success: bool, - pub timestamp: u64, } From 78bdbb68111af90ce54eb45dc977c87b367ea0e1 Mon Sep 17 00:00:00 2001 
From: Hussein Ait Lahcen Date: Tue, 30 Jul 2024 11:25:41 +0200 Subject: [PATCH 05/52] feat(mpc): better sql and client handling --- mpc/client/src/main.rs | 605 ++++++++++++++++++----------------- mpc/coordinator/database.sql | 27 +- 2 files changed, 332 insertions(+), 300 deletions(-) diff --git a/mpc/client/src/main.rs b/mpc/client/src/main.rs index f05a298301..8fce0b901c 100644 --- a/mpc/client/src/main.rs +++ b/mpc/client/src/main.rs @@ -1,37 +1,34 @@ use std::{ - convert::Infallible, io::SeekFrom, net::SocketAddr, os::unix::fs::MetadataExt, path::Path, - str::FromStr, time::UNIX_EPOCH, + io::SeekFrom, + net::SocketAddr, + os::unix::fs::MetadataExt, + str::FromStr, + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, + }, + time::UNIX_EPOCH, }; -use async_sqlite::{rusqlite::params, JournalMode, Pool, PoolBuilder}; +use async_sqlite::{JournalMode, PoolBuilder}; use base64::{prelude::BASE64_STANDARD, Engine}; -use clap::{Parser, Subcommand}; -use http_body_util::{BodyExt, Full}; +use http_body_util::BodyExt; use httpdate::parse_http_date; use hyper::{body::Buf, service::service_fn, Method}; use hyper_util::rt::TokioIo; use mpc_shared::{ - phase2_contribute, phase2_verify, - types::{Contribution, ContributorId, PayloadId}, - CONTRIBUTION_CHUNKS, CONTRIBUTION_CHUNK_SIZE, CONTRIBUTION_SIZE, + phase2_contribute, + types::{ContributorId, PayloadId}, + CONTRIBUTION_SIZE, }; use postgrest::Postgrest; -use reqwest::{ - header::{ - HeaderMap, HeaderName, HeaderValue, AUTHORIZATION, CONTENT_ENCODING, CONTENT_LENGTH, - LOCATION, RANGE, TRANSFER_ENCODING, - }, - StatusCode, -}; -use serde::{Deserialize, Serialize}; +use reqwest::header::{HeaderMap, HeaderValue, AUTHORIZATION, CONTENT_LENGTH, LOCATION, RANGE}; +use serde::Deserialize; use tokio::{ - io::{empty, AsyncSeekExt, AsyncWriteExt}, + io::{AsyncSeekExt, AsyncWriteExt}, net::TcpListener, }; -static INTERNAL_SERVER_ERROR: &[u8] = b"Internal Server Error"; -static NOTFOUND: &[u8] = b"Not Found"; - #[derive(PartialEq, Eq, Debug, Clone, Deserialize)] #[serde(rename_all = "camelCase")] struct Contribute { @@ -65,281 +62,302 @@ async fn get_state_file(path: &str) -> Vec { type BoxBody = http_body_util::combinators::BoxBody; -async fn handle( - req: hyper::Request, -) -> Result, Box> { - match (req.method(), req.uri().path()) { - (&Method::POST, "/contribute") => { - let whole_body = req.collect().await?.aggregate(); - let Contribute { - bucket, - jwt, - api_key, - contributor_id, - payload_id, - } = serde_json::from_reader(whole_body.reader())?; - const SUPABASE_PROJECT: &str = "https://bffcolwcakqrhlznyjns.supabase.co"; - const APIKEY: &str = "apikey"; - let authorization_header = format!("Bearer {}", jwt); - let client = Postgrest::new(format!("{SUPABASE_PROJECT}/rest/v1")) - .insert_header(APIKEY, api_key) - .insert_header(AUTHORIZATION, authorization_header.clone()); - let current_contributor = client - .from("current_contributor_id") - .select("id") - .execute() - .await? 
- .json::>() +type DynError = Box; + +async fn contribute( + Contribute { + bucket, + jwt, + api_key, + contributor_id, + payload_id, + }: Contribute, +) -> Result<(), DynError> { + const SUPABASE_PROJECT: &str = "https://bffcolwcakqrhlznyjns.supabase.co"; + const APIKEY: &str = "apikey"; + let authorization_header = format!("Bearer {}", jwt); + let client = Postgrest::new(format!("{SUPABASE_PROJECT}/rest/v1")) + .insert_header(APIKEY, api_key) + .insert_header(AUTHORIZATION, authorization_header.clone()); + let current_contributor = client + .from("current_contributor_id") + .select("id") + .execute() + .await? + .json::>() + .await? + .first() + .cloned() + .ok_or(Error::ContributorNotFound)?; + assert!( + current_contributor.id == contributor_id, + "not current contributor." + ); + let current_payload = client + .from("current_payload_id") + .select("payload_id") + .execute() + .await? + .json::>() + .await? + .first() + .cloned() + .ok_or(Error::PayloadNotFound)?; + let current_payload_download_url = format!( + "{SUPABASE_PROJECT}/storage/v1/object/contributions/{}", + ¤t_payload.id + ); + let client = reqwest::ClientBuilder::new() + .default_headers(HeaderMap::from_iter([( + AUTHORIZATION, + HeaderValue::from_str(&authorization_header)?, + )])) + .build()?; + println!("checking payload file..."); + enum StateFileAction { + Download(usize), + Done(Vec), + } + let state_path = current_payload.id; + let action = match get_state_file(&state_path).await { + content if content.len() < CONTRIBUTION_SIZE => { + println!("partial download, continuing from {}...", content.len()); + StateFileAction::Download(content.len()) + } + content if content.len() == CONTRIBUTION_SIZE => { + println!("download complete."); + StateFileAction::Done(content) + } + _ => { + println!("invalid size detected, redownloading..."); + StateFileAction::Download(0) + } + }; + let payload = match action { + StateFileAction::Download(start_position) => { + let mut response = client + .get(current_payload_download_url) + .header(RANGE, format!("bytes={}-", start_position)) + .send() .await? - .first() - .cloned() - .ok_or(Error::ContributorNotFound)?; + .error_for_status()?; + let headers = response.headers(); + let total_length = start_position + + u64::from_str( + headers + .get(CONTENT_LENGTH) + .ok_or(Error::HeaderNotFound(CONTENT_LENGTH.as_str().into()))? + .to_str()?, + )? as usize; + println!("state file length: {}", total_length); assert!( - current_contributor.id == contributor_id, - "not current contributor." - ); - let current_payload = client - .from("current_payload_id") - .select("payload_id") - .execute() - .await? - .json::>() - .await? - .first() - .cloned() - .ok_or(Error::PayloadNotFound)?; - let current_payload_download_url = format!( - "{SUPABASE_PROJECT}/storage/v1/object/contributions/{}", - ¤t_payload.id + total_length == CONTRIBUTION_SIZE, + "contribution length mismatch." 
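// A server that ignored the Range header and replayed the whole payload would
// make total_length overshoot CONTRIBUTION_SIZE, so this assert fails fast
// before any corrupt bytes reach the state file.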
); - let client = reqwest::ClientBuilder::new() - .default_headers(HeaderMap::from_iter([( - AUTHORIZATION, - HeaderValue::from_str(&authorization_header)?, - )])) - .build()?; - println!("checking payload file..."); - enum StateFileAction { - Download(usize), - Done(Vec), - } - let state_path = current_payload.id; - let action = match get_state_file(&state_path).await { - content if content.len() < CONTRIBUTION_SIZE => { - println!("partial download, continuing from {}...", content.len()); - StateFileAction::Download(content.len()) - } - content if content.len() == CONTRIBUTION_SIZE => { - println!("download complete."); - StateFileAction::Done(content) - } - _ => { - println!("invalid size detected, redownloading..."); - StateFileAction::Download(0) - } - }; - let payload = match action { - StateFileAction::Download(start_position) => { - let mut response = client - .get(current_payload_download_url) - .header(RANGE, format!("bytes={}-", start_position)) - .send() - .await? - .error_for_status()?; - let headers = response.headers(); - let total_length = start_position - + u64::from_str( - headers - .get(CONTENT_LENGTH) - .ok_or(Error::HeaderNotFound(CONTENT_LENGTH.as_str().into()))? - .to_str()?, - )? as usize; - println!("state file length: {}", total_length); - assert!( - total_length == CONTRIBUTION_SIZE, - "contribution length mismatch." - ); - let mut state_file = tokio::fs::OpenOptions::new() - .write(true) - .create(false) - .open(&state_path) - .await?; - state_file.set_len(start_position as u64).await?; - state_file - .seek(SeekFrom::Start(start_position as u64)) - .await?; - let mut i = 0; - while let Some(chunk) = response.chunk().await? { - if i % 10 == 0 { - println!("eta: chunk {}.", i); - } - let written = state_file.write(&chunk).await?; - assert!(written == chunk.len(), "couldn't write chunk."); - state_file.sync_data().await?; - i += 1; - } - println!("download complete"); - let final_content = tokio::fs::read(&state_path).await?; - final_content - } - StateFileAction::Done(content) => content, - }; - let phase2_contribution = if let Ok(true) = tokio::fs::metadata(&payload_id) - .await - .map(|meta| meta.size() as usize == CONTRIBUTION_SIZE) - { - println!("loading completed contribution..."); - tokio::fs::read(&payload_id).await? - } else { - println!("generating contribution, may take some time..."); - let phase2_contribution = phase2_contribute(&payload)?; - tokio::fs::write(&payload_id, &phase2_contribution).await?; - phase2_contribution - }; - println!("uploading contribution..."); - let pool = PoolBuilder::new() - .path("db.sqlite3") - .journal_mode(JournalMode::Wal) - .open() + let mut state_file = tokio::fs::OpenOptions::new() + .write(true) + .create(false) + .open(&state_path) .await?; + state_file.set_len(start_position as u64).await?; + state_file + .seek(SeekFrom::Start(start_position as u64)) + .await?; + let mut i = 0; + while let Some(chunk) = response.chunk().await? 
{
+                if i % 10 == 0 {
+                    println!("eta: chunk {}.", i);
+                }
+                let written = state_file.write(&chunk).await?;
+                assert!(written == chunk.len(), "couldn't write chunk.");
+                state_file.sync_data().await?;
+                i += 1;
+            }
+            println!("download complete.");
+            let final_content = tokio::fs::read(&state_path).await?;
+            final_content
+        }
+        StateFileAction::Done(content) => content,
+    };
+    let phase2_contribution = if let Ok(true) = tokio::fs::metadata(&payload_id)
+        .await
+        .map(|meta| meta.size() as usize == CONTRIBUTION_SIZE)
+    {
+        println!("loading completed contribution...");
+        tokio::fs::read(&payload_id).await?
+    } else {
+        println!("generating contribution, may take some time...");
+        let phase2_contribution = phase2_contribute(&payload)?;
+        tokio::fs::write(&payload_id, &phase2_contribution).await?;
+        phase2_contribution
+    };
+    println!("uploading contribution...");
+    let pool = PoolBuilder::new()
+        .path("db.sqlite3")
+        .journal_mode(JournalMode::Wal)
+        .open()
+        .await?;
-            pool.conn(|conn| {
-                conn.execute(
-                    "CREATE TABLE IF NOT EXISTS resumable_upload (
+    pool.conn(|conn| {
+        conn.execute(
+            "CREATE TABLE IF NOT EXISTS resumable_upload (
                        location TEXT PRIMARY KEY NOT NULL,
                        create_at TIMESTAMPTZ NOT NULL DEFAULT(unixepoch()),
                        expire TIMESTAMPTZ NOT NULL
                     )",
-                    (), // empty list of parameters.
+            (), // empty list of parameters.
+        )?;
+        Ok(())
+    })
+    .await?;
+    let mut upload_location = pool
+        .conn(move |conn| {
+            let mut stmt = conn.prepare_cached(
+                "SELECT location FROM resumable_upload WHERE expire > unixepoch() LIMIT 1",
+            )?;
+            let mut rows = stmt.query(())?;
+            if let Some(row) = rows.next()? {
+                Ok(Some(row.get::<_, String>(0)?))
+            } else {
+                Ok(None)
+            }
+        })
+        .await?;
+    if let Some(ref location) = upload_location {
+        if client
+            .head(location)
+            .header("Tus-Resumable", "1.0.0")
+            .send()
+            .await?
+            .error_for_status()
+            .is_err()
+        {
+            upload_location = None;
+        }
+    }
+    let upload_location = match upload_location {
+        Some(location) => {
+            println!("location already stored in db.");
+            location
+        }
+        None => {
+            println!("location not found, generating a new one...");
+            // =====================================================
+            // https://tus.io/protocols/resumable-upload#creation ==
+            // =====================================================
+            let response = client
+                .post(format!("{SUPABASE_PROJECT}/storage/v1/upload/resumable"))
+                .header("Tus-Resumable", "1.0.0")
+                .header("Upload-Length", CONTRIBUTION_SIZE.to_string())
+                .header(
+                    "Upload-Metadata",
+                    format!(
+                        "bucketName {},objectName {}",
+                        BASE64_STANDARD.encode(&bucket),
+                        BASE64_STANDARD.encode(&payload_id)
+                    ),
+                )
+                .send()
+                .await?;
+            let location = response
+                .headers()
+                .get(LOCATION)
+                .ok_or(Error::HeaderNotFound(LOCATION.as_str().into()))?
+                .to_str()?
+                .to_string();
+            let expire = response
+                .headers()
+                .get("Upload-Expires")
+                .ok_or(Error::HeaderNotFound("Upload-Expires".into()))?
+                .to_str()?
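// tus v1.0.0 in three steps (https://tus.io/protocols/resumable-upload):
// the POST above created the upload and returned Location and Upload-Expires;
// a HEAD on that location yields Upload-Offset/Upload-Length; PATCH with
// application/offset+octet-stream then sends bytes from that offset. The
// expiry parsed just below bounds how long the sqlite-cached location is
// trusted before a fresh upload is created.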
+ .into(); + let expire = parse_http_date(expire)?; + let expire_timestamp = expire.duration_since(UNIX_EPOCH)?.as_secs(); + let location_clone = location.clone(); + pool.conn(move |conn| { + let mut stmt = conn.prepare_cached( + "INSERT INTO resumable_upload (location, expire) VALUES (?, ?)", )?; + let r = stmt.execute((location_clone, expire_timestamp))?; + assert!(r == 1); Ok(()) }) .await?; - let mut upload_location = pool - .conn(move |conn| { - let mut stmt = conn.prepare_cached( - "SELECT location FROM resumable_upload WHERE expire > unixepoch() LIMIT 1", - )?; - let mut rows = stmt.query(())?; - if let Some(row) = rows.next()? { - Ok(Some(row.get::<_, String>(0)?)) - } else { - Ok(None) - } - }) - .await?; - if let Some(ref location) = upload_location { - if client - .head(location) - .header("Tus-Resumable", "1.0.0") - .send() - .await? - .error_for_status() - .is_err() - { - upload_location = None; - } - } - let upload_location = match upload_location { - Some(location) => { - println!("location already stored in db."); - location - } - None => { - println!("location not found, generating a new one..."); - // ===================================================== - // https://tus.io/protocols/resumable-upload#creation == - // ===================================================== - let response = client - .post(format!("{SUPABASE_PROJECT}/storage/v1/upload/resumable")) - .header("Tus-Resumable", "1.0.0") - .header("Upload-Length", CONTRIBUTION_SIZE.to_string()) - .header( - "Upload-Metadata", - format!( - "bucketName {},objectName {}", - BASE64_STANDARD.encode(&bucket), - BASE64_STANDARD.encode(&payload_id) - ), - ) - .send() - .await?; - let location = response - .headers() - .get(LOCATION) - .ok_or(Error::HeaderNotFound(LOCATION.as_str().into()))? - .to_str()? - .to_string(); - let expire = response - .headers() - .get("Upload-Expires") - .ok_or(Error::HeaderNotFound("Upload-Expires".into()))? - .to_str()? - .into(); - let expire = parse_http_date(expire)?; - let expire_timestamp = expire.duration_since(UNIX_EPOCH)?.as_secs(); - let location_clone = location.clone(); - pool.conn(move |conn| { - let mut stmt = conn.prepare_cached( - "INSERT INTO resumable_upload (location, expire) VALUES (?, ?)", - )?; - let r = stmt.execute((location_clone, expire_timestamp))?; - assert!(r == 1); - Ok(()) - }) - .await?; - location - } - }; + location + } + }; - println!("upload location: {upload_location}"); + println!("upload location: {upload_location}"); - // ================================================= - // https://tus.io/protocols/resumable-upload#head == - // ================================================= - let response = client - .head(&upload_location) - .header("Tus-Resumable", "1.0.0") - .send() - .await? - .error_for_status()?; - let upload_length = usize::from_str( - response - .headers() - .get("Upload-Length") - .ok_or(Error::HeaderNotFound("Upload-Length".into()))? - .to_str()?, - )?; - let upload_offset = usize::from_str( - response - .headers() - .get("Upload-Offset") - .ok_or(Error::HeaderNotFound("Upload-Offset".into()))? 
- .to_str()?, - )?; - assert!(upload_length == CONTRIBUTION_SIZE, "invalid upload-length."); - println!("upload-offset: {}", upload_offset); - if upload_offset < upload_length { - println!("uploading contribution..."); - // ================================================== - // https://tus.io/protocols/resumable-upload#patch == - // ================================================== - client - .patch(&upload_location) - .header("Tus-Resumable", "1.0.0") - .header("Content-Type", "application/offset+octet-stream") - .header("Upload-Offset", upload_offset.to_string()) - .body( - phase2_contribution - .into_iter() - .skip(upload_offset) - .collect::>(), - ) - .send() - .await? - .error_for_status()?; - } - println!("upload complete."); + // ================================================= + // https://tus.io/protocols/resumable-upload#head == + // ================================================= + let response = client + .head(&upload_location) + .header("Tus-Resumable", "1.0.0") + .send() + .await? + .error_for_status()?; + let upload_length = usize::from_str( + response + .headers() + .get("Upload-Length") + .ok_or(Error::HeaderNotFound("Upload-Length".into()))? + .to_str()?, + )?; + let upload_offset = usize::from_str( + response + .headers() + .get("Upload-Offset") + .ok_or(Error::HeaderNotFound("Upload-Offset".into()))? + .to_str()?, + )?; + assert!(upload_length == CONTRIBUTION_SIZE, "invalid upload-length."); + println!("upload-offset: {}", upload_offset); + if upload_offset < upload_length { + println!("uploading contribution..."); + // ================================================== + // https://tus.io/protocols/resumable-upload#patch == + // ================================================== + client + .patch(&upload_location) + .header("Tus-Resumable", "1.0.0") + .header("Content-Type", "application/offset+octet-stream") + .header("Upload-Offset", upload_offset.to_string()) + .body( + phase2_contribution + .into_iter() + .skip(upload_offset) + .collect::>(), + ) + .send() + .await? + .error_for_status()?; + } + println!("upload complete."); + Ok(()) +} + +async fn handle( + handling: Arc, + req: hyper::Request, +) -> Result, DynError> { + match (req.method(), req.uri().path()) { + (&Method::POST, "/contribute") + if handling + .compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst) + .is_ok() => + { + let result = (|| async { + let whole_body = req.collect().await?.aggregate(); + contribute(serde_json::from_reader(whole_body.reader())?).await?; + Ok::<_, DynError>(()) + })() + .await; + handling + .compare_exchange(true, false, Ordering::SeqCst, Ordering::SeqCst) + .expect("impossible"); + result?; Ok(hyper::Response::builder() .header(hyper::header::ACCESS_CONTROL_ALLOW_ORIGIN, "*") .header(hyper::header::CONTENT_TYPE, "application/json") @@ -347,7 +365,14 @@ async fn handle( .body(BoxBody::default()) .unwrap()) } - // Preflight options request from the browser. + // Busy building + (&Method::POST, "/contribute") => Ok(hyper::Response::builder() + .header(hyper::header::ACCESS_CONTROL_ALLOW_ORIGIN, "*") + .header(hyper::header::CONTENT_TYPE, "application/json") + .status(hyper::StatusCode::TOO_MANY_REQUESTS) + .body(BoxBody::default()) + .unwrap()), + // CORS preflight request. 
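// The browser always sends an OPTIONS probe before a cross-origin POST, so
// the Access-Control-Allow-* headers answered in this arm are what let a web
// frontend (assumed to drive this localhost service) reach /contribute.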
(&Method::OPTIONS, "/contribute") => Ok(hyper::Response::builder() .header(hyper::header::ACCESS_CONTROL_ALLOW_ORIGIN, "*") .header( @@ -360,28 +385,28 @@ async fn handle( .unwrap()), _ => Ok(hyper::Response::builder() .status(hyper::StatusCode::NOT_FOUND) - .body(full(NOTFOUND)) + .body(BoxBody::default()) .unwrap()), } } -fn full>(chunk: T) -> BoxBody { - Full::new(chunk.into()) - .map_err(|never| match never {}) - .boxed() -} - #[tokio::main] -async fn main() -> Result<(), Box> { +async fn main() -> Result<(), DynError> { let addr = SocketAddr::from(([127, 0, 0, 1], 0x1337)); let listener = TcpListener::bind(addr).await?; + let handling = Arc::new(AtomicBool::new(false)); loop { let (stream, _) = listener.accept().await?; let io = TokioIo::new(stream); + // TODO: can't we avoid the clone tower? + let handling_clone = handling.clone(); tokio::task::spawn(async move { // Finally, we bind the incoming connection to our `hello` service if let Err(err) = hyper::server::conn::http1::Builder::new() - .serve_connection(io, service_fn(handle)) + .serve_connection( + io, + service_fn(move |req| handle(handling_clone.clone(), req)), + ) .await { eprintln!("Error serving connection: {:?}", err); diff --git a/mpc/coordinator/database.sql b/mpc/coordinator/database.sql index 43a932db82..0265b1947a 100644 --- a/mpc/coordinator/database.sql +++ b/mpc/coordinator/database.sql @@ -22,12 +22,23 @@ CREATE POLICY view_all true ); +-- Materialized ? CREATE OR REPLACE VIEW current_queue_position AS - SELECT COUNT(*) AS position - FROM queue q - WHERE q.score > ( - SELECT qq.score FROM queue qq WHERE qq.id = auth.uid() - ) AND NOT EXISTS (SELECT cs.id FROM contribution_status cs WHERE cs.id = q.id); + SELECT + CASE WHEN (SELECT cci.id FROM current_contributor_id cci) = auth.uid() THEN + 0 + ELSE + ( + SELECT COUNT(*) + 1 + FROM queue q + WHERE + -- Better score + q.score > (SELECT qq.score FROM queue qq WHERE qq.id = auth.uid()) + AND + -- Contribution round not started + NOT EXISTS (SELECT cs.id FROM contribution_status cs WHERE cs.id = q.id) + ) + END AS position; ALTER VIEW current_queue_position SET (security_invoker = on); @@ -133,11 +144,7 @@ CREATE OR REPLACE VIEW current_contributor_id AS ) AND ( EXISTS (SELECT cs.expire FROM contribution_status cs WHERE cs.id = qq.id AND cs.expire > now()) OR - ( - EXISTS (SELECT cs.expire FROM contribution_status cs WHERE cs.id = qq.id AND cs.expire <= now()) - AND - EXISTS (SELECT cs.id FROM contribution_submitted cs WHERE cs.id = qq.id) - ) + EXISTS (SELECT cs.id FROM contribution_submitted cs WHERE cs.id = qq.id) ) ); From 93ce5ee09287c437d0d00b0f37b3b6a18fd9f518 Mon Sep 17 00:00:00 2001 From: Hussein Ait Lahcen Date: Tue, 30 Jul 2024 12:08:47 +0200 Subject: [PATCH 06/52] fix(mpc): non-static --- tools/rust/rust.nix | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/tools/rust/rust.nix b/tools/rust/rust.nix index eb50aff572..6f6f28ad1c 100644 --- a/tools/rust/rust.nix +++ b/tools/rust/rust.nix @@ -124,12 +124,7 @@ _: { inherit mkBuildStdToolchain mkNightly rustSrc; toolchains = { - nightly = mkNightly { - targets = [ - "wasm32-unknown-unknown" - "x86_64-unknown-linux-musl" - ]; - }; + nightly = mkNightly { }; # for use in the devShell dev = pkgs.rust-bin.nightly.${nightlyVersion}.default.override { From 59b56c428c951d7f6f6fb32b00b9526469a0417a Mon Sep 17 00:00:00 2001 From: Hussein Ait Lahcen Date: Tue, 30 Jul 2024 12:16:24 +0200 Subject: [PATCH 07/52] fix(galois): properly convert ptau to phase1 --- galoisd/cmd/galoisd/cmd/phase1_init.go | 75 
+++++++++++++---------- galoisd/cmd/galoisd/cmd/phase2_extract.go | 12 ++-- 2 files changed, 48 insertions(+), 39 deletions(-) diff --git a/galoisd/cmd/galoisd/cmd/phase1_init.go b/galoisd/cmd/galoisd/cmd/phase1_init.go index ed31856d4b..bb20d0719f 100644 --- a/galoisd/cmd/galoisd/cmd/phase1_init.go +++ b/galoisd/cmd/galoisd/cmd/phase1_init.go @@ -1,6 +1,8 @@ package cmd import ( + "crypto/sha256" + "github.com/spf13/cobra" "encoding/binary" @@ -269,7 +271,6 @@ func (ptauFile *PtauFile) ReadBetaG2() (bn254.G2Affine, error) { func ReadPtau(zkeyPath string) (Ptau, error) { reader, err := os.Open(zkeyPath) - if err != nil { return Ptau{}, err } @@ -278,12 +279,21 @@ func ReadPtau(zkeyPath string) (Ptau, error) { var ptauStr = make([]byte, 4) _, err = reader.Read(ptauStr) + if err != nil { + return Ptau{}, err + } // version _, err = readULE32(reader) + if err != nil { + return Ptau{}, err + } // number of sections _, err = readULE32(reader) + if err != nil { + return Ptau{}, err + } numSections := uint32(7) @@ -291,25 +301,36 @@ func ReadPtau(zkeyPath string) (Ptau, error) { // 1-based indexing, so we need to allocate one more than the number of sections sections := make([][]SectionSegment, numSections+1) for i := uint32(0); i < numSections; i++ { - ht, _ := readULE32(reader) - hl, _ := readULE64(reader) + ht, err := readULE32(reader) + if err != nil { + return Ptau{}, err + } + hl, err := readULE64(reader) + if err != nil { + return Ptau{}, err + } if sections[ht] == nil { sections[ht] = make([]SectionSegment, 0) } - pos, _ := reader.Seek(0, io.SeekCurrent) + pos, err := reader.Seek(0, io.SeekCurrent) + if err != nil { + return Ptau{}, err + } sections[ht] = append(sections[ht], SectionSegment{pos: uint64(pos), size: hl}) reader.Seek(int64(hl), io.SeekCurrent) } // section size _, err = readBigInt(reader, 8) + if err != nil { + return Ptau{}, err + } // Header (1) seekToUniqueSection(reader, sections, 1) // Read header header, err := readPtauHeader(reader) - if err != nil { return Ptau{}, err } @@ -322,7 +343,6 @@ func ReadPtau(zkeyPath string) (Ptau, error) { twoToPower := uint32(1 << header.Power) PtauPubKey.TauG1, err = readG1Array(reader, twoToPower*2-1) - if err != nil { return Ptau{}, err } @@ -331,7 +351,6 @@ func ReadPtau(zkeyPath string) (Ptau, error) { seekToUniqueSection(reader, sections, 3) PtauPubKey.TauG2, err = readG2Array(reader, twoToPower) - if err != nil { return Ptau{}, err } @@ -340,7 +359,6 @@ func ReadPtau(zkeyPath string) (Ptau, error) { seekToUniqueSection(reader, sections, 4) PtauPubKey.AlphaTauG1, err = readG1Array(reader, twoToPower) - if err != nil { return Ptau{}, err } @@ -349,7 +367,6 @@ func ReadPtau(zkeyPath string) (Ptau, error) { seekToUniqueSection(reader, sections, 5) PtauPubKey.BetaTauG1, err = readG1Array(reader, twoToPower) - if err != nil { return Ptau{}, err } @@ -358,7 +375,6 @@ func ReadPtau(zkeyPath string) (Ptau, error) { seekToUniqueSection(reader, sections, 6) PtauPubKey.BetaG2, err = readG2(reader) - if err != nil { return Ptau{}, err } @@ -368,9 +384,7 @@ func ReadPtau(zkeyPath string) (Ptau, error) { func readPtauHeader(reader io.ReadSeeker) (PtauHeader, error) { var header PtauHeader - n8, err := readULE32(reader) - if err != nil { return PtauHeader{}, err } @@ -378,7 +392,6 @@ func readPtauHeader(reader io.ReadSeeker) (PtauHeader, error) { header.N8 = n8 prime, err := readBigInt(reader, n8) - if err != nil { return PtauHeader{}, err } @@ -386,13 +399,11 @@ func readPtauHeader(reader io.ReadSeeker) (PtauHeader, error) { header.Prime = 
prime power, err := readULE32(reader) - if err != nil { return PtauHeader{}, err } header.Power = power - return header, nil } @@ -400,7 +411,6 @@ func readG1Array(reader io.ReadSeeker, numPoints uint32) ([]G1, error) { g1s := make([]G1, numPoints) for i := uint32(0); i < numPoints; i++ { g1, err := readG1(reader) - if err != nil { return []G1{}, err } @@ -428,13 +438,11 @@ func readG2Array(reader io.ReadSeeker, numPoints uint32) ([]G2, error) { func readTauG2(reader io.ReadSeeker) ([]G2, error) { tauG2_s, err := readG2(reader) - if err != nil { return []G2{}, err } tauG2_sx, err := readG2(reader) - if err != nil { return []G2{}, err } @@ -446,7 +454,6 @@ func readG1(reader io.ReadSeeker) (G1, error) { var g1 G1 x, err := readBigInt(reader, BN254_FIELD_ELEMENT_SIZE) - if err != nil { return G1{}, err } @@ -454,7 +461,6 @@ func readG1(reader io.ReadSeeker) (G1, error) { g1[0] = x y, err := readBigInt(reader, BN254_FIELD_ELEMENT_SIZE) - if err != nil { return G1{}, err } @@ -468,7 +474,6 @@ func readG2(reader io.ReadSeeker) (G2, error) { var g2 G2 x0, err := readBigInt(reader, BN254_FIELD_ELEMENT_SIZE) - if err != nil { return G2{}, err } @@ -476,7 +481,6 @@ func readG2(reader io.ReadSeeker) (G2, error) { g2[0] = x0 x1, err := readBigInt(reader, BN254_FIELD_ELEMENT_SIZE) - if err != nil { return G2{}, err } @@ -484,7 +488,6 @@ func readG2(reader io.ReadSeeker) (G2, error) { g2[1] = x1 y0, err := readBigInt(reader, BN254_FIELD_ELEMENT_SIZE) - if err != nil { return G2{}, err } @@ -492,7 +495,6 @@ func readG2(reader io.ReadSeeker) (G2, error) { g2[2] = y0 y1, err := readBigInt(reader, BN254_FIELD_ELEMENT_SIZE) - if err != nil { return G2{}, err } @@ -506,7 +508,6 @@ func readULE32(reader io.Reader) (uint32, error) { var buffer = make([]byte, 4) _, err := reader.Read(buffer) - if err != nil { return 0, err } @@ -518,7 +519,6 @@ func readULE64(reader io.Reader) (uint64, error) { var buffer = make([]byte, 8) _, err := reader.Read(buffer) - if err != nil { return 0, err } @@ -643,9 +643,15 @@ func ReadZkey(zkeyPath string) (Zkey, error) { // zkey var zkeyStr = make([]byte, 4) _, err = reader.Read(zkeyStr) + if err != nil { + return Zkey{}, err + } // version _, err = readULE32(reader) + if err != nil { + return Zkey{}, err + } // number of sections numSections, err := readULE32(reader) @@ -666,10 +672,12 @@ func ReadZkey(zkeyPath string) (Zkey, error) { // section size _, err = readBigInt(reader, 8) + if err != nil { + return Zkey{}, err + } seekToUniqueSection(reader, sections, 1) header, err := readHeader(reader, sections) - if err != nil { return Zkey{}, err } @@ -720,43 +728,36 @@ func readHeaderGroth16(reader io.ReadSeeker) (HeaderGroth, error) { var header = HeaderGroth{} n8q, err := readULE32(reader) - if err != nil { return header, err } q, err := readBigInt(reader, n8q) - if err != nil { return header, err } n8r, err := readULE32(reader) - if err != nil { return header, err } r, err := readBigInt(reader, n8r) - if err != nil { return header, err } nVars, err := readULE32(reader) - if err != nil { return header, err } nPublic, err := readULE32(reader) - if err != nil { return header, err } domainSize, err := readULE32(reader) - if err != nil { return header, err } @@ -851,6 +852,8 @@ func convertPtauToPhase1(ptau Ptau) (phase1 mpc.Phase1, err error) { } } + phase1 = mpc.InitPhase1(int(ptau.Header.Power)); + phase1.Parameters.G1.Tau = tauG1 phase1.Parameters.G1.AlphaTau = alphaTauG1 phase1.Parameters.G1.BetaTau = betaTauG1 @@ -858,5 +861,9 @@ func convertPtauToPhase1(ptau Ptau) (phase1 
mpc.Phase1, err error) {
 	phase1.Parameters.G2.Tau = tauG2
 	phase1.Parameters.G2.Beta = betaG2
 
+	sha := sha256.New()
+	phase1.WriteTo(sha)
+	phase1.Hash = sha.Sum(nil)
+
 	return phase1, nil
 }
diff --git a/galoisd/cmd/galoisd/cmd/phase2_extract.go b/galoisd/cmd/galoisd/cmd/phase2_extract.go
index d0464f6c51..d58438b9cf 100644
--- a/galoisd/cmd/galoisd/cmd/phase2_extract.go
+++ b/galoisd/cmd/galoisd/cmd/phase2_extract.go
@@ -1,6 +1,8 @@
 package cmd
 
 import (
+	"fmt"
+
 	mpc "github.com/consensys/gnark/backend/groth16/bn254/mpcsetup"
 	bn254 "github.com/consensys/gnark/constraint/bn254"
 	"github.com/spf13/cobra"
@@ -16,31 +18,31 @@ func Phase2ExtractCmd() *cobra.Command {
 			var r1cs bn254.R1CS
 			err := readFrom(r1csPath, &r1cs)
 			if err != nil {
-				return err
+				return fmt.Errorf("failed to read r1cs: %v", err)
 			}
 			phase1Path := args[1]
 			var srs1 mpc.Phase1
 			err = readFrom(phase1Path, &srs1)
 			if err != nil {
-				return err
+				return fmt.Errorf("failed to read phase1: %v", err)
 			}
 			phase2Path := args[2]
 			var srs2 mpc.Phase2
 			err = readFrom(phase2Path, &srs2)
 			if err != nil {
-				return err
+				return fmt.Errorf("failed to read phase2: %v", err)
 			}
 			phase2EvalsPath := args[3]
 			var evals mpc.Phase2Evaluations
 			err = readFrom(phase2EvalsPath, &evals)
 			if err != nil {
-				return err
+				return fmt.Errorf("failed to read phase2 evals: %v", err)
 			}
 			pk, vk := mpc.ExtractKeys(&r1cs, &srs1, &srs2, &evals)
 			pkOutput := args[4]
 			err = saveTo(pkOutput, &pk)
 			if err != nil {
-				return err
+				return fmt.Errorf("failed to write pk: %v", err)
 			}
 			vkOutput := args[5]
 			return saveTo(vkOutput, &vk)
From 4bd1cc801fbc9fa75274e8012421af4cde726a6e Mon Sep 17 00:00:00 2001
From: Hussein Ait Lahcen
Date: Tue, 30 Jul 2024 19:20:59 +0200
Subject: [PATCH 08/52] fix(mpc): refactor in shared

---
 mpc/client/src/main.rs      | 134 +++-------------------
 mpc/coordinator/src/main.rs | 193 ++++---------------------------
 mpc/shared/Cargo.toml       |   4 +
 mpc/shared/src/lib.rs       |   1 +
 mpc/shared/src/supabase.rs  | 223 ++++++++++++++++++++++++++++++++++++
 5 files changed, 266 insertions(+), 289 deletions(-)
 create mode 100644 mpc/shared/src/supabase.rs

diff --git a/mpc/client/src/main.rs b/mpc/client/src/main.rs
index 8fce0b901c..8e1e1ddad8 100644
--- a/mpc/client/src/main.rs
+++ b/mpc/client/src/main.rs
@@ -1,5 +1,4 @@
 use std::{
-    io::SeekFrom,
     net::SocketAddr,
     os::unix::fs::MetadataExt,
     str::FromStr,
@@ -16,18 +15,10 @@ use http_body_util::BodyExt;
 use httpdate::parse_http_date;
 use hyper::{body::Buf, service::service_fn, Method};
 use hyper_util::rt::TokioIo;
-use mpc_shared::{
-    phase2_contribute,
-    types::{ContributorId, PayloadId},
-    CONTRIBUTION_SIZE,
-};
-use postgrest::Postgrest;
-use reqwest::header::{HeaderMap, HeaderValue, AUTHORIZATION, CONTENT_LENGTH, LOCATION, RANGE};
+use mpc_shared::{phase2_contribute, supabase::SupabaseMPCApi, CONTRIBUTION_SIZE};
+use reqwest::header::LOCATION;
 use serde::Deserialize;
-use tokio::{
-    io::{AsyncSeekExt, AsyncWriteExt},
-    net::TcpListener,
-};
+use tokio::net::TcpListener;
 
 #[derive(PartialEq, Eq, Debug, Clone, Deserialize)]
 #[serde(rename_all = "camelCase")]
 struct Contribute {
@@ -53,13 +44,6 @@ enum Error {
     Phase2VerificationFailed(#[from] mpc_shared::Phase2VerificationError),
 }
 
-async fn get_state_file(path: &str) -> Vec<u8> {
-    if !tokio::fs::try_exists(path).await.unwrap() {
-        tokio::fs::write(path, []).await.unwrap();
-    }
-    tokio::fs::read(path).await.unwrap()
-}
-
 type BoxBody = http_body_util::combinators::BoxBody;
 
 type DynError = Box;
 
@@ -74,111 +58,22 @@ async fn contribute(
     }: Contribute,
 ) -> Result<(), DynError> {
     const SUPABASE_PROJECT: &str =
"https://bffcolwcakqrhlznyjns.supabase.co"; - const APIKEY: &str = "apikey"; - let authorization_header = format!("Bearer {}", jwt); - let client = Postgrest::new(format!("{SUPABASE_PROJECT}/rest/v1")) - .insert_header(APIKEY, api_key) - .insert_header(AUTHORIZATION, authorization_header.clone()); + let client = SupabaseMPCApi::new(SUPABASE_PROJECT.into(), api_key, jwt); let current_contributor = client - .from("current_contributor_id") - .select("id") - .execute() - .await? - .json::>() + .current_contributor() .await? - .first() - .cloned() .ok_or(Error::ContributorNotFound)?; assert!( current_contributor.id == contributor_id, "not current contributor." ); let current_payload = client - .from("current_payload_id") - .select("payload_id") - .execute() + .current_payload() .await? - .json::>() - .await? - .first() - .cloned() .ok_or(Error::PayloadNotFound)?; - let current_payload_download_url = format!( - "{SUPABASE_PROJECT}/storage/v1/object/contributions/{}", - ¤t_payload.id - ); - let client = reqwest::ClientBuilder::new() - .default_headers(HeaderMap::from_iter([( - AUTHORIZATION, - HeaderValue::from_str(&authorization_header)?, - )])) - .build()?; - println!("checking payload file..."); - enum StateFileAction { - Download(usize), - Done(Vec), - } - let state_path = current_payload.id; - let action = match get_state_file(&state_path).await { - content if content.len() < CONTRIBUTION_SIZE => { - println!("partial download, continuing from {}...", content.len()); - StateFileAction::Download(content.len()) - } - content if content.len() == CONTRIBUTION_SIZE => { - println!("download complete."); - StateFileAction::Done(content) - } - _ => { - println!("invalid size detected, redownloading..."); - StateFileAction::Download(0) - } - }; - let payload = match action { - StateFileAction::Download(start_position) => { - let mut response = client - .get(current_payload_download_url) - .header(RANGE, format!("bytes={}-", start_position)) - .send() - .await? - .error_for_status()?; - let headers = response.headers(); - let total_length = start_position - + u64::from_str( - headers - .get(CONTENT_LENGTH) - .ok_or(Error::HeaderNotFound(CONTENT_LENGTH.as_str().into()))? - .to_str()?, - )? as usize; - println!("state file length: {}", total_length); - assert!( - total_length == CONTRIBUTION_SIZE, - "contribution length mismatch." - ); - let mut state_file = tokio::fs::OpenOptions::new() - .write(true) - .create(false) - .open(&state_path) - .await?; - state_file.set_len(start_position as u64).await?; - state_file - .seek(SeekFrom::Start(start_position as u64)) - .await?; - let mut i = 0; - while let Some(chunk) = response.chunk().await? 
{ - if i % 10 == 0 { - println!("eta: chunk {}.", i); - } - let written = state_file.write(&chunk).await?; - assert!(written == chunk.len(), "couldn't write chunk."); - state_file.sync_data().await?; - i += 1; - } - println!("download complete"); - let final_content = tokio::fs::read(&state_path).await?; - final_content - } - StateFileAction::Done(content) => content, - }; + let payload = client + .download_payload(¤t_payload.id, ¤t_payload.id) + .await?; let phase2_contribution = if let Ok(true) = tokio::fs::metadata(&payload_id) .await .map(|meta| meta.size() as usize == CONTRIBUTION_SIZE) @@ -197,7 +92,6 @@ async fn contribute( .journal_mode(JournalMode::Wal) .open() .await?; - pool.conn(|conn| { conn.execute( "CREATE TABLE IF NOT EXISTS resumable_upload ( @@ -223,8 +117,9 @@ async fn contribute( } }) .await?; + let upload_client = client.new_reqwest_builder()?.build()?; if let Some(ref location) = upload_location { - if client + if upload_client .head(location) .header("Tus-Resumable", "1.0.0") .send() @@ -232,6 +127,7 @@ async fn contribute( .error_for_status() .is_err() { + println!("upload location expired, removing it..."); upload_location = None; } } @@ -245,7 +141,7 @@ async fn contribute( // ===================================================== // https://tus.io/protocols/resumable-upload#creation == // ===================================================== - let response = client + let response = upload_client .post(format!("{SUPABASE_PROJECT}/storage/v1/upload/resumable")) .header("Tus-Resumable", "1.0.0") .header("Upload-Length", CONTRIBUTION_SIZE.to_string()) @@ -292,7 +188,7 @@ async fn contribute( // ================================================= // https://tus.io/protocols/resumable-upload#head == // ================================================= - let response = client + let response = upload_client .head(&upload_location) .header("Tus-Resumable", "1.0.0") .send() @@ -319,7 +215,7 @@ async fn contribute( // ================================================== // https://tus.io/protocols/resumable-upload#patch == // ================================================== - client + upload_client .patch(&upload_location) .header("Tus-Resumable", "1.0.0") .header("Content-Type", "application/offset+octet-stream") diff --git a/mpc/coordinator/src/main.rs b/mpc/coordinator/src/main.rs index 1f0dd7f371..0a79dec8b9 100644 --- a/mpc/coordinator/src/main.rs +++ b/mpc/coordinator/src/main.rs @@ -1,17 +1,7 @@ -use std::{io::SeekFrom, str::FromStr}; - use clap::{Parser, Subcommand}; -use mpc_shared::{ - phase2_verify, - types::{Contribution, ContributorId, PayloadId}, - CONTRIBUTION_SIZE, -}; -use postgrest::Postgrest; -use reqwest::header::{HeaderMap, HeaderValue, AUTHORIZATION, CONTENT_LENGTH, RANGE}; -use tokio::io::{AsyncSeekExt, AsyncWriteExt}; +use mpc_shared::{phase2_verify, supabase::SupabaseMPCApi}; const SUPABASE_PROJECT: &str = "https://bffcolwcakqrhlznyjns.supabase.co"; -const APIKEY: &str = "apikey"; #[derive(Parser, Debug)] #[command(version, about, long_about = None)] @@ -32,8 +22,6 @@ enum Command { #[derive(thiserror::Error, Debug, Clone)] enum Error { - #[error("couldn't find expected header: {0}")] - HeaderNotFound(String), #[error("current contributor not found.")] ContributorNotFound, #[error("current payload not found.")] @@ -42,200 +30,65 @@ enum Error { NextPayloadNotFound, } -async fn get_state_file(path: &str) -> Vec { - if !tokio::fs::try_exists(path).await.unwrap() { - tokio::fs::write(path, []).await.unwrap(); - } - tokio::fs::read(path).await.unwrap() -} 
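// get_state_file and download_payload now live in mpc_shared::supabase as
// methods on SupabaseMPCApi, shared by client and coordinator. A minimal
// sketch of the new call pattern (mirroring the code later in this diff):
//
//     let client = SupabaseMPCApi::new(SUPABASE_PROJECT.into(), api_key, jwt);
//     let payload = client
//         .current_payload()
//         .await?
//         .ok_or(Error::CurrentPayloadNotFound)?;
//     let bytes = client.download_payload(&payload.id, &payload.id).await?;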
- -async fn download_payload( - authorization_header: String, - payload_id: &str, - payload_output: &str, -) -> Result, Box> { - let current_payload_download_url = format!( - "{SUPABASE_PROJECT}/storage/v1/object/contributions/{}", - &payload_id - ); - let client = reqwest::ClientBuilder::new() - .default_headers(HeaderMap::from_iter([( - AUTHORIZATION, - HeaderValue::from_str(&authorization_header)?, - )])) - .build()?; - println!("checking payload file..."); - enum StateFileAction { - Download(usize), - Done(Vec), - } - let state_path = payload_output; - let action = match get_state_file(&state_path).await { - content if content.len() < CONTRIBUTION_SIZE => { - println!("partial download, continuing from {}...", content.len()); - StateFileAction::Download(content.len()) - } - content if content.len() == CONTRIBUTION_SIZE => { - println!("download complete."); - StateFileAction::Done(content) - } - _ => { - println!("invalid size detected, redownloading..."); - StateFileAction::Download(0) - } - }; - match action { - StateFileAction::Download(start_position) => { - let mut response = client - .get(current_payload_download_url) - .header(RANGE, format!("bytes={}-", start_position)) - .send() - .await? - .error_for_status()?; - let headers = response.headers(); - let total_length = start_position - + u64::from_str( - headers - .get(CONTENT_LENGTH) - .ok_or(Error::HeaderNotFound(CONTENT_LENGTH.as_str().into()))? - .to_str()?, - )? as usize; - println!("state file length: {}", total_length); - assert!( - total_length == CONTRIBUTION_SIZE, - "contribution length mismatch." - ); - let mut state_file = tokio::fs::OpenOptions::new() - .write(true) - .create(false) - .open(&state_path) - .await?; - state_file.set_len(start_position as u64).await?; - state_file - .seek(SeekFrom::Start(start_position as u64)) - .await?; - let mut i = 0; - while let Some(chunk) = response.chunk().await? { - if i % 10 == 0 { - println!("Eta: chunk {}.", i); - } - let written = state_file.write(&chunk).await?; - assert!(written == chunk.len(), "couldn't write chunk."); - state_file.sync_data().await?; - i += 1; - } - println!("download complete"); - let final_content = tokio::fs::read(&state_path).await?; - Ok(final_content) - } - StateFileAction::Done(content) => Ok(content), - } -} - #[tokio::main] async fn main() -> Result<(), Box> { let args = Args::parse(); match args.command { Command::Start { jwt, api_key } => { - let authorization_header = format!("Bearer {}", jwt); - let client = Postgrest::new(format!("{SUPABASE_PROJECT}/rest/v1")) - .insert_header(APIKEY, api_key) - .insert_header(AUTHORIZATION, authorization_header.clone()); + let client = SupabaseMPCApi::new(SUPABASE_PROJECT.into(), api_key, jwt); loop { + println!("awaiting current contributor slot..."); let current_contributor = { - let contributor = client - .from("current_contributor_id") - .select("id") - .execute() + match client + .current_contributor() .await? - .json::>() - .await? - .first() - .cloned() - .ok_or(Error::ContributorNotFound); - match contributor { + .ok_or(Error::ContributorNotFound) + { Ok(contributor) => contributor, Err(_) => { - println!("no more contributor to process."); tokio::time::sleep(std::time::Duration::from_secs(10)).await; continue; } } }; + println!("current contributor slot: {}", ¤t_contributor.id); let current_payload = client - .from("current_payload_id") - .select("payload_id") - .execute() + .current_payload() .await? - .json::>() - .await? 
- .first() - .cloned() .ok_or(Error::CurrentPayloadNotFound)?; - let payload_current = download_payload( - authorization_header.clone(), - ¤t_payload.id, - ¤t_payload.id, - ) - .await?; + let payload_current = client + .download_payload(¤t_payload.id, ¤t_payload.id) + .await?; println!("awaiting contribution of {}...", ¤t_contributor.id); loop { if client - .from("contribution_submitted") - .eq("id", ¤t_contributor.id) - .select("id") - .execute() - .await? - .json::>() + .contribution_submitted(¤t_contributor.id) .await? - .len() - == 1 { break; } tokio::time::sleep(std::time::Duration::from_secs(10)).await; } - println!("contribution submitted!"); + println!("detected contribution submission, downloading..."); let next_payload = client - .from("queue") - .eq("id", ¤t_contributor.id) - .select("payload_id") - .execute() - .await? - .json::>() + .contributor_payload(¤t_contributor.id) .await? - .first() - .cloned() .ok_or(Error::NextPayloadNotFound)?; - let payload_next = download_payload( - authorization_header.clone(), - &next_payload.id, - &next_payload.id, - ) - .await?; + let payload_next = client + .download_payload(&next_payload.id, &next_payload.id) + .await?; + println!("verifying payload..."); if phase2_verify(&payload_current, &payload_next).is_ok() { println!("verification succeeded."); client - .from("contribution") - .insert(serde_json::to_string(&Contribution { - id: current_contributor.id.clone(), - success: true, - })?) - .execute() - .await? - .error_for_status()?; + .insert_contribution(current_contributor.id.clone(), true) + .await?; tokio::fs::remove_file(¤t_payload.id).await?; } else { println!("verification failed."); client - .from("contribution") - .insert(serde_json::to_string(&Contribution { - id: current_contributor.id.clone(), - success: false, - })?) - .execute() - .await? 
-                        .error_for_status()?;
+                        .insert_contribution(current_contributor.id.clone(), false)
+                        .await?;
                 }
                 tokio::time::sleep(std::time::Duration::from_secs(10)).await;
             }
diff --git a/mpc/shared/Cargo.toml b/mpc/shared/Cargo.toml
index 1c8b4846f3..c642afe909 100644
--- a/mpc/shared/Cargo.toml
+++ b/mpc/shared/Cargo.toml
@@ -6,3 +6,7 @@ version = "0.1.0"
 [dependencies]
 serde = { workspace = true, features = ["derive"] }
 thiserror = { workspace = true }
+postgrest = "1.0"
+reqwest = { workspace = true, features = ["json"] }
+serde_json = { workspace = true }
+tokio = { workspace = true, features = ["full"] }
\ No newline at end of file
diff --git a/mpc/shared/src/lib.rs b/mpc/shared/src/lib.rs
index ae058ef5b2..c3618fb265 100644
--- a/mpc/shared/src/lib.rs
+++ b/mpc/shared/src/lib.rs
@@ -1,4 +1,5 @@
 pub mod types;
+pub mod supabase;
 
 use std::ffi::{c_char, c_int};
 
diff --git a/mpc/shared/src/supabase.rs b/mpc/shared/src/supabase.rs
new file mode 100644
index 0000000000..2dc6089767
--- /dev/null
+++ b/mpc/shared/src/supabase.rs
@@ -0,0 +1,223 @@
+use std::{io::SeekFrom, str::FromStr};
+
+use postgrest::Postgrest;
+use reqwest::{
+    header::{HeaderMap, HeaderValue, AUTHORIZATION, CONTENT_LENGTH, RANGE},
+    ClientBuilder,
+};
+use tokio::io::{AsyncSeekExt, AsyncWriteExt};
+
+use crate::{
+    types::{Contribution, ContributorId, PayloadId},
+    CONTRIBUTION_SIZE,
+};
+
+const API_KEY: &str = "apikey";
+
+pub type DynError = Box<dyn std::error::Error + Send + Sync>;
+
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub enum StateFileAction {
+    Download(usize),
+    Done(Vec<u8>),
+}
+
+#[derive(thiserror::Error, Debug, Clone)]
+pub enum Error {
+    #[error("couldn't find expected header: {0}")]
+    HeaderNotFound(String),
+    #[error("current contributor not found.")]
+    ContributorNotFound,
+    #[error("current payload not found.")]
+    CurrentPayloadNotFound,
+    #[error("next payload not found.")]
+    NextPayloadNotFound,
+}
+
+pub struct SupabaseMPCApi {
+    project_url: String,
+    jwt: String,
+    client: Postgrest,
+}
+
+impl SupabaseMPCApi {
+    pub fn new(project_url: String, api_key: String, jwt: String) -> Self {
+        let client = Postgrest::new(format!("{project_url}/rest/v1"))
+            .insert_header(API_KEY, &api_key)
+            .insert_header(AUTHORIZATION, format!("Bearer {}", &jwt));
+        Self {
+            project_url,
+            jwt,
+            client,
+        }
+    }
+
+    pub fn new_reqwest_builder(&self) -> Result<ClientBuilder, DynError> {
+        Ok(ClientBuilder::new().default_headers(HeaderMap::from_iter([(
+            AUTHORIZATION,
+            HeaderValue::from_str(&format!("Bearer {}", &self.jwt))?,
+        )])))
+    }
+
+    pub async fn current_contributor(&self) -> Result<Option<ContributorId>, DynError> {
+        Ok(self
+            .client
+            .from("current_contributor_id")
+            .select("id")
+            .execute()
+            .await?
+            .json::<Vec<ContributorId>>()
+            .await?
+            .first()
+            .cloned())
+    }
+
+    pub async fn current_payload(&self) -> Result<Option<PayloadId>, DynError> {
+        Ok(self
+            .client
+            .from("current_payload_id")
+            .select("payload_id")
+            .execute()
+            .await?
+            .json::<Vec<PayloadId>>()
+            .await?
+            .first()
+            .cloned())
+    }
+
+    pub async fn contribution_submitted(&self, contributor_id: &str) -> Result<bool, DynError> {
+        Ok(self
+            .client
+            .from("contribution_submitted")
+            .eq("id", &contributor_id)
+            .select("id")
+            .execute()
+            .await?
+            .json::<Vec<ContributorId>>()
+            .await?
+            .len()
+            == 1)
+    }
+
+    pub async fn contributor_payload(
+        &self,
+        contributor_id: &str,
+    ) -> Result<Option<PayloadId>, DynError> {
+        Ok(self
+            .client
+            .from("queue")
+            .eq("id", &contributor_id)
+            .select("payload_id")
+            .execute()
+            .await?
+            .json::<Vec<PayloadId>>()
+            .await?
+            .first()
+            .cloned())
+    }
+
+    pub async fn insert_contribution(
+        &self,
+        contributor_id: String,
+        success: bool,
+    ) -> Result<(), DynError> {
+        self.client
+            .from("contribution")
+            .insert(serde_json::to_string(&Contribution {
+                id: contributor_id,
+                success,
+            })?)
+            .execute()
+            .await?
+            .error_for_status()?;
+        Ok(())
+    }
+
+    pub async fn download_payload(
+        &self,
+        payload_id: &str,
+        payload_output: &str,
+    ) -> Result<Vec<u8>, Box<dyn std::error::Error + Send + Sync>> {
+        let current_payload_download_url = format!(
+            "{}/storage/v1/object/contributions/{}",
+            &self.project_url, &payload_id
+        );
+        let client = ClientBuilder::new()
+            .default_headers(HeaderMap::from_iter([(
+                AUTHORIZATION,
+                HeaderValue::from_str(&format!("Bearer {}", &self.jwt))?,
+            )]))
+            .build()?;
+        println!("checking payload file...");
+        let state_path = payload_output;
+        let action = match get_state_file(&state_path).await {
+            content if content.len() < CONTRIBUTION_SIZE => {
+                println!("partial download, continuing from {}...", content.len());
+                StateFileAction::Download(content.len())
+            }
+            content if content.len() == CONTRIBUTION_SIZE => {
+                println!("download complete.");
+                StateFileAction::Done(content)
+            }
+            _ => {
+                println!("invalid size detected, redownloading...");
+                StateFileAction::Download(0)
+            }
+        };
+        match action {
+            StateFileAction::Download(start_position) => {
+                let mut response = client
+                    .get(current_payload_download_url)
+                    .header(RANGE, format!("bytes={}-", start_position))
+                    .send()
+                    .await?
+                    .error_for_status()?;
+                let headers = response.headers();
+                let total_length = start_position
+                    + u64::from_str(
+                        headers
+                            .get(CONTENT_LENGTH)
+                            .ok_or(Error::HeaderNotFound(CONTENT_LENGTH.as_str().into()))?
+                            .to_str()?,
+                    )? as usize;
+                println!("state file length: {}", total_length);
+                assert!(
+                    total_length == CONTRIBUTION_SIZE,
+                    "contribution length mismatch."
+                );
+                let mut state_file = tokio::fs::OpenOptions::new()
+                    .write(true)
+                    .create(false)
+                    .open(&state_path)
+                    .await?;
+                state_file.set_len(start_position as u64).await?;
+                state_file
+                    .seek(SeekFrom::Start(start_position as u64))
+                    .await?;
+                let mut i = 0;
+                while let Some(chunk) = response.chunk().await?
{ + let k = CONTRIBUTION_SIZE / i; + if k > 10 { + println!("downloaded: {}%", i); + i = 0; + } + let written = state_file.write(&chunk).await?; + assert!(written == chunk.len(), "couldn't write chunk."); + state_file.sync_data().await?; + i += written; + } + println!("download complete"); + let final_content = tokio::fs::read(&state_path).await?; + Ok(final_content) + } + StateFileAction::Done(content) => Ok(content), + } + } +} + +async fn get_state_file(path: &str) -> Vec { + if !tokio::fs::try_exists(path).await.unwrap() { + tokio::fs::write(path, []).await.unwrap(); + } + tokio::fs::read(path).await.unwrap() +} From 33657619d4687eb8d9013633df29cccab60c7b16 Mon Sep 17 00:00:00 2001 From: Hussein Ait Lahcen Date: Wed, 31 Jul 2024 13:27:36 +0200 Subject: [PATCH 09/52] feat(mpc): ui for cli --- mpc/client/Cargo.toml | 1 + mpc/client/src/main.rs | 316 +++++++++++++++++++++++++--------- mpc/client/src/types.rs | 17 ++ mpc/client/src/ui.rs | 321 +++++++++++++++++++++++++++++++++++ mpc/coordinator/database.sql | 2 +- mpc/coordinator/src/main.rs | 41 +++-- mpc/shared/src/supabase.rs | 37 ++-- 7 files changed, 612 insertions(+), 123 deletions(-) create mode 100644 mpc/client/src/types.rs create mode 100644 mpc/client/src/ui.rs diff --git a/mpc/client/Cargo.toml b/mpc/client/Cargo.toml index 17a4dae0ce..dafe51e673 100644 --- a/mpc/client/Cargo.toml +++ b/mpc/client/Cargo.toml @@ -16,6 +16,7 @@ mpc-shared = { workspace = true } postgrest = "1.0" rand = "0.8.5" ratatui = "0.27.0" +throbber-widgets-tui = "0.6" reqwest = { workspace = true, features = ["json"] } serde = { workspace = true, features = ["derive"] } serde_json = { workspace = true } diff --git a/mpc/client/src/main.rs b/mpc/client/src/main.rs index 8e1e1ddad8..2083c98f3d 100644 --- a/mpc/client/src/main.rs +++ b/mpc/client/src/main.rs @@ -1,4 +1,8 @@ +mod types; +mod ui; + use std::{ + io, net::SocketAddr, os::unix::fs::MetadataExt, str::FromStr, @@ -6,19 +10,35 @@ use std::{ atomic::{AtomicBool, Ordering}, Arc, }, - time::UNIX_EPOCH, + time::{Duration, Instant, UNIX_EPOCH}, }; use async_sqlite::{JournalMode, PoolBuilder}; use base64::{prelude::BASE64_STANDARD, Engine}; -use http_body_util::BodyExt; +use crossterm::event; +use http_body_util::{BodyExt, Full}; use httpdate::parse_http_date; -use hyper::{body::Buf, service::service_fn, Method}; -use hyper_util::rt::TokioIo; +use hyper::{ + body::{Buf, Bytes}, + service::service_fn, + Method, +}; +use hyper_util::{rt::TokioIo, server::graceful::GracefulShutdown}; use mpc_shared::{phase2_contribute, supabase::SupabaseMPCApi, CONTRIBUTION_SIZE}; +use ratatui::{backend::CrosstermBackend, Terminal, Viewport}; use reqwest::header::LOCATION; use serde::Deserialize; -use tokio::net::TcpListener; +use tokio::{ + net::TcpListener, + sync::{ + broadcast::{self, Receiver, Sender}, + mpsc, oneshot, RwLock, + }, +}; +use types::Status; + +const SUPABASE_PROJECT: &str = "https://bffcolwcakqrhlznyjns.supabase.co"; +const ENDPOINT: &str = "/contribute"; #[derive(PartialEq, Eq, Debug, Clone, Deserialize)] #[serde(rename_all = "camelCase")] @@ -32,6 +52,8 @@ struct Contribute { #[derive(thiserror::Error, Debug, Clone)] enum Error { + #[error("we are not the current contributor.")] + NotCurrentContributor, #[error("couldn't find expected header: {0}")] HeaderNotFound(String), #[error("current contributor not found.")] @@ -44,11 +66,12 @@ enum Error { Phase2VerificationFailed(#[from] mpc_shared::Phase2VerificationError), } -type BoxBody = http_body_util::combinators::BoxBody; +type BoxBody = 
http_body_util::combinators::BoxBody; type DynError = Box; async fn contribute( + tx_status: Sender, Contribute { bucket, jwt, @@ -57,36 +80,58 @@ async fn contribute( payload_id, }: Contribute, ) -> Result<(), DynError> { - const SUPABASE_PROJECT: &str = "https://bffcolwcakqrhlznyjns.supabase.co"; let client = SupabaseMPCApi::new(SUPABASE_PROJECT.into(), api_key, jwt); let current_contributor = client .current_contributor() .await? .ok_or(Error::ContributorNotFound)?; - assert!( - current_contributor.id == contributor_id, - "not current contributor." - ); + if current_contributor.id != contributor_id { + return Err(Error::NotCurrentContributor.into()); + } let current_payload = client .current_payload() .await? .ok_or(Error::PayloadNotFound)?; + tx_status + .send(Status::DownloadStarted(current_payload.id.clone())) + .expect("impossible"); let payload = client - .download_payload(¤t_payload.id, ¤t_payload.id) + .download_payload(¤t_payload.id, ¤t_payload.id, |percent| { + let tx_status = tx_status.clone(); + let current_payload_clone = current_payload.id.clone(); + async move { + tx_status + .send(Status::Downloading(current_payload_clone, percent as u8)) + .expect("impossible"); + } + }) .await?; + tx_status + .send(Status::DownloadEnded(current_payload.id.clone())) + .expect("impossible"); let phase2_contribution = if let Ok(true) = tokio::fs::metadata(&payload_id) .await .map(|meta| meta.size() as usize == CONTRIBUTION_SIZE) { - println!("loading completed contribution..."); tokio::fs::read(&payload_id).await? } else { - println!("generating contribution, may take some time..."); - let phase2_contribution = phase2_contribute(&payload)?; + tx_status + .send(Status::ContributionStarted) + .expect("impossible"); + let (tx_contrib, rx_contrib) = oneshot::channel(); + let handle = tokio::task::spawn_blocking(move || { + tx_contrib + .send(phase2_contribute(&payload)) + .expect("impossible"); + }); + let phase2_contribution = rx_contrib.await??; + handle.await?; + tx_status + .send(Status::ContributionEnded) + .expect("impossible"); tokio::fs::write(&payload_id, &phase2_contribution).await?; phase2_contribution }; - println!("uploading contribution..."); let pool = PoolBuilder::new() .path("db.sqlite3") .journal_mode(JournalMode::Wal) @@ -127,17 +172,12 @@ async fn contribute( .error_for_status() .is_err() { - println!("upload location expired, removing it..."); upload_location = None; } } let upload_location = match upload_location { - Some(location) => { - println!("location already stored in db."); - location - } + Some(location) => location, None => { - println!("location not found, generating a new one..."); // ===================================================== // https://tus.io/protocols/resumable-upload#creation == // ===================================================== @@ -182,9 +222,6 @@ async fn contribute( location } }; - - println!("upload location: {upload_location}"); - // ================================================= // https://tus.io/protocols/resumable-upload#head == // ================================================= @@ -209,9 +246,10 @@ async fn contribute( .to_str()?, )?; assert!(upload_length == CONTRIBUTION_SIZE, "invalid upload-length."); - println!("upload-offset: {}", upload_offset); if upload_offset < upload_length { - println!("uploading contribution..."); + tx_status + .send(Status::UploadStarted(payload_id.clone())) + .expect("impossible"); // ================================================== // https://tus.io/protocols/resumable-upload#patch == // 
================================================== @@ -229,84 +267,202 @@ async fn contribute( .send() .await? .error_for_status()?; + tx_status + .send(Status::UploadEnded(payload_id.clone())) + .expect("impossible"); } - println!("upload complete."); Ok(()) } +fn full>(chunk: T) -> BoxBody { + Full::new(chunk.into()) + .map_err(|never| match never {}) + .boxed() +} + async fn handle( - handling: Arc, + lock: Arc, + tx_status: Sender, + latest_status: Arc>, req: hyper::Request, ) -> Result, DynError> { + let response = |status, body| { + Ok(hyper::Response::builder() + .header(hyper::header::ACCESS_CONTROL_ALLOW_ORIGIN, "*") + .header(hyper::header::CONTENT_TYPE, "application/json") + .status(status) + .body(body) + .unwrap()) + }; + let response_empty = |status| response(status, BoxBody::default()); match (req.method(), req.uri().path()) { - (&Method::POST, "/contribute") - if handling + (&Method::POST, ENDPOINT) + if lock .compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst) .is_ok() => { - let result = (|| async { - let whole_body = req.collect().await?.aggregate(); - contribute(serde_json::from_reader(whole_body.reader())?).await?; - Ok::<_, DynError>(()) - })() - .await; - handling - .compare_exchange(true, false, Ordering::SeqCst, Ordering::SeqCst) - .expect("impossible"); - result?; - Ok(hyper::Response::builder() - .header(hyper::header::ACCESS_CONTROL_ALLOW_ORIGIN, "*") - .header(hyper::header::CONTENT_TYPE, "application/json") - .status(hyper::StatusCode::OK) - .body(BoxBody::default()) - .unwrap()) + tx_status.send(Status::Initializing).expect("impossible"); + tokio::spawn(async move { + let result = (|| async { + let whole_body = req.collect().await?.aggregate(); + contribute( + tx_status.clone(), + serde_json::from_reader(whole_body.reader())?, + ) + .await?; + Ok::<_, DynError>(()) + })() + .await; + match result { + Ok(_) => { + lock.compare_exchange(true, false, Ordering::SeqCst, Ordering::SeqCst) + .expect("impossible"); + tx_status.send(Status::Successful).expect("impossible") + } + Err(e) => tx_status + .send(Status::Failed(format!("{:#?}", e))) + .expect("impossible"), + } + }); + response_empty(hyper::StatusCode::ACCEPTED) } - // Busy building - (&Method::POST, "/contribute") => Ok(hyper::Response::builder() - .header(hyper::header::ACCESS_CONTROL_ALLOW_ORIGIN, "*") - .header(hyper::header::CONTENT_TYPE, "application/json") - .status(hyper::StatusCode::TOO_MANY_REQUESTS) - .body(BoxBody::default()) - .unwrap()), + // FE must poll GET and dispatch accordingly. + (&Method::POST, ENDPOINT) => response_empty(hyper::StatusCode::SERVICE_UNAVAILABLE), + (&Method::GET, ENDPOINT) => match latest_status.read().await.clone() { + Status::Failed(e) => { + lock.compare_exchange(true, false, Ordering::SeqCst, Ordering::SeqCst) + .expect("impossible"); + // Only idle if the FE poll after a failure. + tx_status.send(Status::Idle).expect("impossible"); + response( + hyper::StatusCode::INTERNAL_SERVER_ERROR, + full(serde_json::to_vec(&format!("{:#?}", e)).expect("impossible")), + ) + } + x => response( + hyper::StatusCode::OK, + full(serde_json::to_vec(&x).expect("impossible")), + ), + }, // CORS preflight request. 
- (&Method::OPTIONS, "/contribute") => Ok(hyper::Response::builder() + (&Method::OPTIONS, ENDPOINT) => Ok(hyper::Response::builder() .header(hyper::header::ACCESS_CONTROL_ALLOW_ORIGIN, "*") .header( hyper::header::ACCESS_CONTROL_ALLOW_HEADERS, hyper::header::CONTENT_TYPE, ) - .header(hyper::header::ACCESS_CONTROL_ALLOW_METHODS, "POST, OPTIONS") + .header( + hyper::header::ACCESS_CONTROL_ALLOW_METHODS, + format!( + "{}, {}, {}", + Method::OPTIONS.as_str(), + Method::GET.as_str(), + Method::POST.as_str() + ), + ) .status(hyper::StatusCode::OK) .body(BoxBody::default()) .unwrap()), - _ => Ok(hyper::Response::builder() - .status(hyper::StatusCode::NOT_FOUND) - .body(BoxBody::default()) - .unwrap()), + _ => response_empty(hyper::StatusCode::NOT_FOUND), } } +async fn input_and_status_handling( + latest_status: Arc>, + mut rx_status: Receiver, + tx_ui: mpsc::UnboundedSender, +) { + let tx_ui_clone = tx_ui.clone(); + tokio::spawn(async move { + while let Ok(status) = rx_status.recv().await { + *latest_status.write().await = status.clone(); + tx_ui_clone + .send(ui::Event::NewStatus(status)) + .expect("impossible"); + } + }); + tokio::spawn(async move { + let tick_rate = Duration::from_millis(1000 / 60); + let mut last_tick = Instant::now(); + loop { + // poll for tick rate duration, if no events, sent tick event. + let timeout = tick_rate.saturating_sub(last_tick.elapsed()); + if event::poll(timeout).unwrap() { + match event::read().unwrap() { + event::Event::Key(key) => tx_ui.send(ui::Event::Input(key)).unwrap(), + event::Event::Resize(_, _) => tx_ui.send(ui::Event::Resize).unwrap(), + _ => {} + }; + } + if last_tick.elapsed() >= tick_rate { + if let Err(_) = tx_ui.send(ui::Event::Tick) { + break; + } + last_tick = Instant::now(); + } + } + }); +} + #[tokio::main] async fn main() -> Result<(), DynError> { - let addr = SocketAddr::from(([127, 0, 0, 1], 0x1337)); - let listener = TcpListener::bind(addr).await?; - let handling = Arc::new(AtomicBool::new(false)); - loop { - let (stream, _) = listener.accept().await?; - let io = TokioIo::new(stream); - // TODO: can't we avoid the clone tower? - let handling_clone = handling.clone(); - tokio::task::spawn(async move { - // Finally, we bind the incoming connection to our `hello` service - if let Err(err) = hyper::server::conn::http1::Builder::new() - .serve_connection( - io, - service_fn(move |req| handle(handling_clone.clone(), req)), - ) - .await - { - eprintln!("Error serving connection: {:?}", err); + let status = Arc::new(RwLock::new(Status::Idle)); + let lock = Arc::new(AtomicBool::new(false)); + let (tx_status, rx_status) = broadcast::channel(64); + let graceful = GracefulShutdown::new(); + let (tx_shutdown, mut rx_shutdown) = oneshot::channel::<()>(); + let status_clone = status.clone(); + let handle = tokio::spawn(async move { + let addr = SocketAddr::from(([127, 0, 0, 1], 0x1337)); + let listener = TcpListener::bind(addr).await.unwrap(); + loop { + tokio::select! 
{ + Ok((stream, _)) = listener.accept() => { + let io = TokioIo::new(stream); + let status_clone = status_clone.clone(); + let tx_status_clone = tx_status.clone(); + let lock_clone = lock.clone(); + let conn = hyper::server::conn::http1::Builder::new().serve_connection( + io, + service_fn(move |req| { + handle( + lock_clone.clone(), + tx_status_clone.clone(), + status_clone.clone(), + req, + ) + }), + ); + let fut = graceful.watch(conn); + tokio::task::spawn(async move { + if let Err(err) = fut.await { + eprintln!("error serving connection: {:?}", err); + } + }); + } + _ = &mut rx_shutdown => { + graceful.shutdown().await; + break + } } - }); - } + } + }); + // Dispatch terminal + let (tx_ui, rx_ui) = mpsc::unbounded_channel(); + crossterm::terminal::enable_raw_mode()?; + let stdout = io::stdout(); + let backend = CrosstermBackend::new(stdout); + let mut terminal = Terminal::with_options( + backend, + ratatui::TerminalOptions { + viewport: Viewport::Inline(8), + }, + )?; + input_and_status_handling(status, rx_status, tx_ui).await; + ui::run_ui(&mut terminal, rx_ui).await?; + crossterm::terminal::disable_raw_mode()?; + terminal.clear()?; + tx_shutdown.send(()).expect("impossible"); + handle.await.expect("impossible"); + Ok(()) } diff --git a/mpc/client/src/types.rs b/mpc/client/src/types.rs new file mode 100644 index 0000000000..c224eb7452 --- /dev/null +++ b/mpc/client/src/types.rs @@ -0,0 +1,17 @@ +use serde::Serialize; + +#[derive(PartialEq, Eq, Debug, Clone, Serialize)] +#[serde(rename_all = "camelCase")] +pub enum Status { + Idle, + Initializing, + DownloadStarted(String), + Downloading(String, u8), + DownloadEnded(String), + ContributionStarted, + ContributionEnded, + UploadStarted(String), + UploadEnded(String), + Failed(String), + Successful, +} diff --git a/mpc/client/src/ui.rs b/mpc/client/src/ui.rs new file mode 100644 index 0000000000..ba76a7b8ba --- /dev/null +++ b/mpc/client/src/ui.rs @@ -0,0 +1,321 @@ +use std::time::Instant; + +use ratatui::{ + crossterm::event::{self, KeyEvent}, + prelude::{ + symbols, Alignment, Backend, Color, Constraint, Frame, Layout, Line, Modifier, Rect, Span, + Style, Terminal, Widget, + }, + widgets::{block, Block, Gauge, LineGauge, List, ListItem, Paragraph}, +}; +use throbber_widgets_tui::ThrobberState; +use tokio::sync::mpsc::{self, error::TryRecvError}; + +use crate::types::Status; + +pub enum Event { + Input(KeyEvent), + Tick, + NewStatus(Status), + Resize, +} + +enum UiState { + Idle, + Downloading(String, u8, Instant), + Contributing(Instant), + Uploading(String, u8, Instant), + Successful, + Failed(String), +} + +fn ui(f: &mut Frame, state: &UiState, throbber_state: &mut ThrobberState) { + let area = f.size(); + + let block = + Block::new().title(block::Title::from("Contribution Steps").alignment(Alignment::Center)); + f.render_widget(block, area); + + let vertical = Layout::vertical([Constraint::Length(2), Constraint::Length(4)]).margin(1); + let horizontal = Layout::horizontal([Constraint::Percentage(20), Constraint::Percentage(80)]); + let [progress_area, main] = vertical.areas(area); + let [list_area, gauge_area] = horizontal.areas(main); + let chunks = ratatui::layout::Layout::default() + .direction(ratatui::layout::Direction::Horizontal) + .constraints( + [ + ratatui::layout::Constraint::Min(10), + ratatui::layout::Constraint::Percentage(100), + ] + .as_ref(), + ) + .split(list_area); + + // total progress + let steps_done = match state { + UiState::Idle => 0, + UiState::Downloading(_, _, _) => 0, + UiState::Contributing(_) => 1, + 
UiState::Uploading(_, _, _) => 2, + UiState::Successful => 3, + UiState::Failed(_) => 3, + }; + let num_steps = 3; + #[allow(clippy::cast_precision_loss)] + let progress = LineGauge::default() + .filled_style(Style::default().fg(Color::Blue)) + .label(format!("{steps_done}/{num_steps}")) + .ratio(steps_done as f64 / num_steps as f64); + f.render_widget(progress, progress_area); + + match state { + UiState::Idle => { + // Set full with state + let full = throbber_widgets_tui::Throbber::default() + .label("Awaiting...") + .style(ratatui::style::Style::default().fg(ratatui::style::Color::Cyan)) + .throbber_style( + ratatui::style::Style::default().add_modifier(ratatui::style::Modifier::BOLD), + ) + .throbber_set(throbber_widgets_tui::CLOCK) + .use_type(throbber_widgets_tui::WhichUse::Spin); + f.render_stateful_widget(full, chunks[0], throbber_state); + } + UiState::Downloading(name, progress, started_at) => { + // in progress download + let item = ListItem::new(Line::from(vec![ + Span::raw(symbols::DOT), + Span::styled( + format!(" download {:>2}", name), + Style::default() + .fg(Color::LightGreen) + .add_modifier(Modifier::BOLD), + ), + Span::raw(format!(" ({}s)", started_at.elapsed().as_secs())), + ])); + + let list = List::new(vec![item]); + f.render_widget(list, list_area); + + let gauge = Gauge::default() + .gauge_style(Style::default().fg(Color::Yellow)) + .ratio(*progress as f64 / 100.0); + if gauge_area.top().saturating_add(0 as u16) > area.bottom() { + return; + } + f.render_widget( + gauge, + Rect { + x: gauge_area.left(), + y: gauge_area.top().saturating_add(0 as u16), + width: gauge_area.width, + height: 1, + }, + ); + } + UiState::Contributing(_) => { + let full = throbber_widgets_tui::Throbber::default() + .label("Your contribution is being computed, please be patient.") + .style(ratatui::style::Style::default().fg(ratatui::style::Color::Cyan)) + .throbber_style( + ratatui::style::Style::default().add_modifier(ratatui::style::Modifier::BOLD), + ) + .throbber_set(throbber_widgets_tui::CLOCK) + .use_type(throbber_widgets_tui::WhichUse::Spin); + f.render_stateful_widget(full, chunks[0], throbber_state); + } + UiState::Uploading(name, progress, started_at) => { + let item = ListItem::new(Line::from(vec![ + Span::raw(symbols::DOT), + Span::styled( + format!(" upload {:>2}", name), + Style::default() + .fg(Color::LightGreen) + .add_modifier(Modifier::BOLD), + ), + Span::raw(format!(" ({}s)", started_at.elapsed().as_secs())), + ])); + + let list = List::new(vec![item]); + f.render_widget(list, list_area); + + let gauge = Gauge::default() + .gauge_style(Style::default().fg(Color::Yellow)) + .ratio(*progress as f64 / 100.0); + if gauge_area.top().saturating_add(0 as u16) > area.bottom() { + return; + } + f.render_widget( + gauge, + Rect { + x: gauge_area.left(), + y: gauge_area.top().saturating_add(0 as u16), + width: gauge_area.width, + height: 1, + }, + ); + } + UiState::Successful => { + // Set full with state + let full = throbber_widgets_tui::Throbber::default() + .label("Contribution successfully upload...") + .style(ratatui::style::Style::default().fg(ratatui::style::Color::Cyan)) + .throbber_style( + ratatui::style::Style::default().add_modifier(ratatui::style::Modifier::BOLD), + ) + .throbber_set(throbber_widgets_tui::CLOCK) + .use_type(throbber_widgets_tui::WhichUse::Spin); + f.render_stateful_widget(full, chunks[0], throbber_state); + } + UiState::Failed(error) => { + // Set full with state + let full = throbber_widgets_tui::Throbber::default() + .label(format!("Failed to 
contribute: {}", error)) + .style(ratatui::style::Style::default().fg(ratatui::style::Color::Red)) + .throbber_style( + ratatui::style::Style::default().add_modifier(ratatui::style::Modifier::BOLD), + ) + .throbber_set(throbber_widgets_tui::CLOCK) + .use_type(throbber_widgets_tui::WhichUse::Spin); + f.render_stateful_widget(full, chunks[0], throbber_state); + } + } +} + +pub async fn run_ui( + terminal: &mut Terminal, + mut rx: mpsc::UnboundedReceiver, +) -> Result<(), Box> { + let mut state = UiState::Idle; + // let mut download_name = "".into(); + // let mut download_started_at = Instant::now(); + // let mut upload_name = "".into(); + // let mut upload_started_at = Instant::now(); + let mut throbber_state = ThrobberState::default(); + let mut redraw = false; + loop { + if redraw { + throbber_state.calc_next(); + terminal.draw(|f| ui(f, &state, &mut throbber_state))?; + redraw = false; + } + match rx.try_recv() { + Ok(e) => match e { + Event::Input(event) => { + if event.code == event::KeyCode::Char('q') { + break; + } + } + Event::Resize => { + terminal.autoresize()?; + } + Event::Tick => { + redraw = true; + } + Event::NewStatus(new_status) => { + state = match (new_status, state) { + (Status::Idle, _) => UiState::Idle, + (Status::DownloadStarted(name), _) => { + terminal.insert_before(1, |buf| { + Paragraph::new(Line::from(vec![ + Span::from("Started "), + Span::styled( + format!("downloading checkpoint {}", &name), + Style::default().add_modifier(Modifier::BOLD), + ), + ])) + .render(buf.area, buf); + })?; + UiState::Downloading(name, 0, Instant::now()) + } + ( + Status::Downloading(name, progress), + UiState::Downloading(_, _, started_at), + ) => UiState::Downloading(name, progress, started_at), + ( + Status::DownloadEnded(_), + UiState::Downloading(name, progress, started_at), + ) => { + terminal.insert_before(1, |buf| { + Paragraph::new(Line::from(vec![ + Span::from("Finished "), + Span::styled( + format!("downloading checkpoint {}", &name), + Style::default().add_modifier(Modifier::BOLD), + ), + Span::from(format!(" in {}s", started_at.elapsed().as_secs())), + ])) + .render(buf.area, buf); + })?; + UiState::Downloading(name, progress, started_at) + } + (Status::ContributionStarted, _) => { + terminal.insert_before(1, |buf| { + Paragraph::new(Line::from(vec![ + Span::from("Started "), + Span::styled( + "contribution computation", + Style::default().add_modifier(Modifier::BOLD), + ), + ])) + .render(buf.area, buf); + })?; + UiState::Contributing(Instant::now()) + } + (Status::ContributionEnded, UiState::Contributing(started_at)) => { + terminal.insert_before(1, |buf| { + Paragraph::new(Line::from(vec![ + Span::from("Finished "), + Span::styled( + "contribution computation", + Style::default().add_modifier(Modifier::BOLD), + ), + Span::from(format!(" in {}s", started_at.elapsed().as_secs())), + ])) + .render(buf.area, buf); + })?; + UiState::Contributing(started_at) + } + (Status::UploadStarted(name), _) => { + terminal.insert_before(1, |buf| { + Paragraph::new(Line::from(vec![ + Span::from("Started "), + Span::styled( + format!("uploading contribution {}", &name), + Style::default().add_modifier(Modifier::BOLD), + ), + ])) + .render(buf.area, buf); + })?; + UiState::Uploading(name, 0, Instant::now()) + } + ( + Status::UploadEnded(_), + UiState::Uploading(name, progress, started_at), + ) => { + terminal.insert_before(1, |buf| { + Paragraph::new(Line::from(vec![ + Span::from("Finished "), + Span::styled( + format!("uploading contribution {}", &name), + 
+                                        Style::default().add_modifier(Modifier::BOLD),
+                                    ),
+                                    Span::from(format!(" in {}s", started_at.elapsed().as_secs())),
+                                ]))
+                                .render(buf.area, buf);
+                            })?;
+                            UiState::Uploading(name, progress, started_at)
+                        }
+                        (Status::Successful, _) => UiState::Successful,
+                        (Status::Failed(err), _) => UiState::Failed(err),
+                        (_, s) => s,
+                    };
+                }
+            },
+            Err(TryRecvError::Empty) => {}
+            _ => panic!("impossible"),
+        };
+        tokio::time::sleep(std::time::Duration::from_millis(10)).await;
+    }
+    Ok(())
+}
diff --git a/mpc/coordinator/database.sql b/mpc/coordinator/database.sql
index 0265b1947a..15c10b54f3 100644
--- a/mpc/coordinator/database.sql
+++ b/mpc/coordinator/database.sql
@@ -38,7 +38,7 @@ CREATE OR REPLACE VIEW current_queue_position AS
         -- Contribution round not started
         NOT EXISTS (SELECT cs.id FROM contribution_status cs WHERE cs.id = q.id)
       )
-  END AS position;
+  END AS position;
 
 ALTER VIEW current_queue_position SET (security_invoker = on);
 
diff --git a/mpc/coordinator/src/main.rs b/mpc/coordinator/src/main.rs
index 0a79dec8b9..69e652feaf 100644
--- a/mpc/coordinator/src/main.rs
+++ b/mpc/coordinator/src/main.rs
@@ -36,46 +36,45 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
     match args.command {
         Command::Start { jwt, api_key } => {
             let client = SupabaseMPCApi::new(SUPABASE_PROJECT.into(), api_key, jwt);
+            let progress = |percent| async move { println!("downloaded: {:.2}%", percent) };
             loop {
-                println!("awaiting current contributor slot...");
+                println!("downloading current payload...");
+                let current_payload = client
+                    .current_payload()
+                    .await?
+                    .ok_or(Error::CurrentPayloadNotFound)?;
+                let payload_current = client
+                    .download_payload(&current_payload.id, &current_payload.id, progress)
+                    .await?;
                 let current_contributor = {
                     match client
                         .current_contributor()
                         .await?
                         .ok_or(Error::ContributorNotFound)
                     {
-                        Ok(contributor) => contributor,
+                        Ok(contributor) => {
+                            println!("awaiting contribution of {}...", &contributor.id);
+                            if client.contribution_submitted(&contributor.id).await? {
+                                contributor
+                            } else {
+                                tokio::time::sleep(std::time::Duration::from_secs(10)).await;
+                                continue;
+                            }
+                        }
                         Err(_) => {
+                            println!("awaiting contributor to join queue...");
                             tokio::time::sleep(std::time::Duration::from_secs(10)).await;
                             continue;
                         }
                     }
                 };
-                println!("current contributor slot: {}", &current_contributor.id);
-                let current_payload = client
-                    .current_payload()
-                    .await?
-                    .ok_or(Error::CurrentPayloadNotFound)?;
-                let payload_current = client
-                    .download_payload(&current_payload.id, &current_payload.id)
-                    .await?;
-                println!("awaiting contribution of {}...", &current_contributor.id);
-                loop {
-                    if client
-                        .contribution_submitted(&current_contributor.id)
-                        .await?
-                    {
-                        break;
-                    }
-                    tokio::time::sleep(std::time::Duration::from_secs(10)).await;
-                }
                 println!("detected contribution submission, downloading...");
                 let next_payload = client
                     .contributor_payload(&current_contributor.id)
                     .await?
                     .ok_or(Error::NextPayloadNotFound)?;
                 let payload_next = client
-                    .download_payload(&next_payload.id, &next_payload.id)
+                    .download_payload(&next_payload.id, &next_payload.id, progress)
                     .await?;
                 println!("verifying payload...");
                 if phase2_verify(&payload_current, &payload_next).is_ok() {
diff --git a/mpc/shared/src/supabase.rs b/mpc/shared/src/supabase.rs
index 2dc6089767..4996a7353a 100644
--- a/mpc/shared/src/supabase.rs
+++ b/mpc/shared/src/supabase.rs
@@ -1,4 +1,4 @@
-use std::{io::SeekFrom, str::FromStr};
+use std::{future::Future, io::SeekFrom, str::FromStr};
 
 use postgrest::Postgrest;
 use reqwest::{
@@ -133,11 +133,15 @@ impl SupabaseMPCApi {
         Ok(())
     }
 
-    pub async fn download_payload(
+    pub async fn download_payload<F>(
         &self,
         payload_id: &str,
         payload_output: &str,
-    ) -> Result<Vec<u8>, Box<dyn std::error::Error + Send + Sync>> {
+        mut progress: impl FnMut(f64) -> F,
+    ) -> Result<Vec<u8>, Box<dyn std::error::Error + Send + Sync>>
+    where
+        F: Future<Output = ()>,
+    {
         let current_payload_download_url = format!(
             "{}/storage/v1/object/contributions/{}",
             &self.project_url, &payload_id
@@ -148,21 +152,13 @@ impl SupabaseMPCApi {
                 HeaderValue::from_str(&format!("Bearer {}", &self.jwt))?,
             )]))
             .build()?;
-        println!("checking payload file...");
         let state_path = payload_output;
         let action = match get_state_file(&state_path).await {
             content if content.len() < CONTRIBUTION_SIZE => {
-                println!("partial download, continuing from {}...", content.len());
                 StateFileAction::Download(content.len())
             }
-            content if content.len() == CONTRIBUTION_SIZE => {
-                println!("download complete.");
-                StateFileAction::Done(content)
-            }
-            _ => {
-                println!("invalid size detected, redownloading...");
-                StateFileAction::Download(0)
-            }
+            content if content.len() == CONTRIBUTION_SIZE => StateFileAction::Done(content),
+            _ => StateFileAction::Download(0),
         };
         match action {
             StateFileAction::Download(start_position) => {
@@ -180,7 +176,6 @@ impl SupabaseMPCApi {
                             .ok_or(Error::HeaderNotFound(CONTENT_LENGTH.as_str().into()))?
                             .to_str()?,
                     )? as usize;
-                println!("state file length: {}", total_length);
                 assert!(
                     total_length == CONTRIBUTION_SIZE,
                     "contribution length mismatch."
@@ -194,19 +189,19 @@ impl SupabaseMPCApi {
                 state_file
                     .seek(SeekFrom::Start(start_position as u64))
                     .await?;
-                let mut i = 0;
+                let mut i = start_position;
+                let mut freq = 0;
                 while let Some(chunk) = response.chunk().await?
{ - let k = CONTRIBUTION_SIZE / i; - if k > 10 { - println!("downloaded: {}%", i); - i = 0; + let k = (i as f64 / CONTRIBUTION_SIZE as f64) * 100.; + if freq % 200 == 0 { + progress(k).await; } let written = state_file.write(&chunk).await?; assert!(written == chunk.len(), "couldn't write chunk."); - state_file.sync_data().await?; i += written; + freq += 1; } - println!("download complete"); + state_file.sync_data().await?; let final_content = tokio::fs::read(&state_path).await?; Ok(final_content) } From 9364788989f8a378615c6f3077387ec071a99612 Mon Sep 17 00:00:00 2001 From: Hussein Ait Lahcen Date: Wed, 31 Jul 2024 15:23:13 +0200 Subject: [PATCH 10/52] feat(mpc): better ui and messages --- mpc/client/src/main.rs | 2 +- mpc/client/src/ui.rs | 130 +++++++++++++++++++++++------------ mpc/coordinator/database.sql | 4 +- 3 files changed, 88 insertions(+), 48 deletions(-) diff --git a/mpc/client/src/main.rs b/mpc/client/src/main.rs index 2083c98f3d..f9adfefc33 100644 --- a/mpc/client/src/main.rs +++ b/mpc/client/src/main.rs @@ -320,7 +320,7 @@ async fn handle( tx_status.send(Status::Successful).expect("impossible") } Err(e) => tx_status - .send(Status::Failed(format!("{:#?}", e))) + .send(Status::Failed(format!("{:?}", e))) .expect("impossible"), } }); diff --git a/mpc/client/src/ui.rs b/mpc/client/src/ui.rs index ba76a7b8ba..c8de9e57b4 100644 --- a/mpc/client/src/ui.rs +++ b/mpc/client/src/ui.rs @@ -23,7 +23,9 @@ pub enum Event { enum UiState { Idle, Downloading(String, u8, Instant), + DownloadEnded, Contributing(Instant), + ContributionEnded, Uploading(String, u8, Instant), Successful, Failed(String), @@ -32,12 +34,13 @@ enum UiState { fn ui(f: &mut Frame, state: &UiState, throbber_state: &mut ThrobberState) { let area = f.size(); - let block = - Block::new().title(block::Title::from("Contribution Steps").alignment(Alignment::Center)); + let block = Block::new().title( + block::Title::from("Contribution Steps (press `q` to exit)").alignment(Alignment::Center), + ); f.render_widget(block, area); let vertical = Layout::vertical([Constraint::Length(2), Constraint::Length(4)]).margin(1); - let horizontal = Layout::horizontal([Constraint::Percentage(20), Constraint::Percentage(80)]); + let horizontal = Layout::horizontal([Constraint::Percentage(40), Constraint::Percentage(60)]); let [progress_area, main] = vertical.areas(area); let [list_area, gauge_area] = horizontal.areas(main); let chunks = ratatui::layout::Layout::default() @@ -55,7 +58,9 @@ fn ui(f: &mut Frame, state: &UiState, throbber_state: &mut ThrobberState) { let steps_done = match state { UiState::Idle => 0, UiState::Downloading(_, _, _) => 0, + UiState::DownloadEnded => 1, UiState::Contributing(_) => 1, + UiState::ContributionEnded => 2, UiState::Uploading(_, _, _) => 2, UiState::Successful => 3, UiState::Failed(_) => 3, @@ -72,8 +77,8 @@ fn ui(f: &mut Frame, state: &UiState, throbber_state: &mut ThrobberState) { UiState::Idle => { // Set full with state let full = throbber_widgets_tui::Throbber::default() - .label("Awaiting...") - .style(ratatui::style::Style::default().fg(ratatui::style::Color::Cyan)) + .label("Awaiting orders...") + .style(ratatui::style::Style::default().fg(ratatui::style::Color::White)) .throbber_style( ratatui::style::Style::default().add_modifier(ratatui::style::Modifier::BOLD), ) @@ -86,9 +91,9 @@ fn ui(f: &mut Frame, state: &UiState, throbber_state: &mut ThrobberState) { let item = ListItem::new(Line::from(vec![ Span::raw(symbols::DOT), Span::styled( - format!(" download {:>2}", name), + format!(" downloading 
{:>2}", name), Style::default() - .fg(Color::LightGreen) + .fg(Color::White) .add_modifier(Modifier::BOLD), ), Span::raw(format!(" ({}s)", started_at.elapsed().as_secs())), @@ -98,7 +103,7 @@ fn ui(f: &mut Frame, state: &UiState, throbber_state: &mut ThrobberState) { f.render_widget(list, list_area); let gauge = Gauge::default() - .gauge_style(Style::default().fg(Color::Yellow)) + .gauge_style(Style::default().fg(Color::Cyan)) .ratio(*progress as f64 / 100.0); if gauge_area.top().saturating_add(0 as u16) > area.bottom() { return; @@ -115,8 +120,8 @@ fn ui(f: &mut Frame, state: &UiState, throbber_state: &mut ThrobberState) { } UiState::Contributing(_) => { let full = throbber_widgets_tui::Throbber::default() - .label("Your contribution is being computed, please be patient.") - .style(ratatui::style::Style::default().fg(ratatui::style::Color::Cyan)) + .label("Your contribution is being computed, please be patient...") + .style(ratatui::style::Style::default().fg(ratatui::style::Color::White)) .throbber_style( ratatui::style::Style::default().add_modifier(ratatui::style::Modifier::BOLD), ) @@ -124,36 +129,18 @@ fn ui(f: &mut Frame, state: &UiState, throbber_state: &mut ThrobberState) { .use_type(throbber_widgets_tui::WhichUse::Spin); f.render_stateful_widget(full, chunks[0], throbber_state); } - UiState::Uploading(name, progress, started_at) => { + UiState::Uploading(name, _, started_at) => { let item = ListItem::new(Line::from(vec![ Span::raw(symbols::DOT), Span::styled( - format!(" upload {:>2}", name), + format!(" uploading {:>2}", name), Style::default() - .fg(Color::LightGreen) + .fg(Color::White) .add_modifier(Modifier::BOLD), ), Span::raw(format!(" ({}s)", started_at.elapsed().as_secs())), ])); - - let list = List::new(vec![item]); - f.render_widget(list, list_area); - - let gauge = Gauge::default() - .gauge_style(Style::default().fg(Color::Yellow)) - .ratio(*progress as f64 / 100.0); - if gauge_area.top().saturating_add(0 as u16) > area.bottom() { - return; - } - f.render_widget( - gauge, - Rect { - x: gauge_area.left(), - y: gauge_area.top().saturating_add(0 as u16), - width: gauge_area.width, - height: 1, - }, - ); + f.render_widget(List::new(vec![item]), list_area); } UiState::Successful => { // Set full with state @@ -179,6 +166,30 @@ fn ui(f: &mut Frame, state: &UiState, throbber_state: &mut ThrobberState) { .use_type(throbber_widgets_tui::WhichUse::Spin); f.render_stateful_widget(full, chunks[0], throbber_state); } + UiState::DownloadEnded => { + // Set full with state + let full = throbber_widgets_tui::Throbber::default() + .label("Initializing contribution...") + .style(ratatui::style::Style::default().fg(ratatui::style::Color::White)) + .throbber_style( + ratatui::style::Style::default().add_modifier(ratatui::style::Modifier::BOLD), + ) + .throbber_set(throbber_widgets_tui::CLOCK) + .use_type(throbber_widgets_tui::WhichUse::Spin); + f.render_stateful_widget(full, chunks[0], throbber_state); + } + UiState::ContributionEnded => { + // Set full with state + let full = throbber_widgets_tui::Throbber::default() + .label("Initializing upload...") + .style(ratatui::style::Style::default().fg(ratatui::style::Color::White)) + .throbber_style( + ratatui::style::Style::default().add_modifier(ratatui::style::Modifier::BOLD), + ) + .throbber_set(throbber_widgets_tui::CLOCK) + .use_type(throbber_widgets_tui::WhichUse::Spin); + f.render_stateful_widget(full, chunks[0], throbber_state); + } } } @@ -193,6 +204,7 @@ pub async fn run_ui( // let mut upload_started_at = Instant::now(); let mut 
throbber_state = ThrobberState::default(); let mut redraw = false; + let mut start_time = Instant::now(); loop { if redraw { throbber_state.calc_next(); @@ -216,11 +228,12 @@ pub async fn run_ui( state = match (new_status, state) { (Status::Idle, _) => UiState::Idle, (Status::DownloadStarted(name), _) => { + start_time = Instant::now(); terminal.insert_before(1, |buf| { Paragraph::new(Line::from(vec![ - Span::from("Started "), + Span::from("Started downloading"), Span::styled( - format!("downloading checkpoint {}", &name), + format!("checkpoint {}", &name), Style::default().add_modifier(Modifier::BOLD), ), ])) @@ -232,10 +245,7 @@ pub async fn run_ui( Status::Downloading(name, progress), UiState::Downloading(_, _, started_at), ) => UiState::Downloading(name, progress, started_at), - ( - Status::DownloadEnded(_), - UiState::Downloading(name, progress, started_at), - ) => { + (Status::DownloadEnded(_), UiState::Downloading(name, _, started_at)) => { terminal.insert_before(1, |buf| { Paragraph::new(Line::from(vec![ Span::from("Finished "), @@ -247,14 +257,14 @@ pub async fn run_ui( ])) .render(buf.area, buf); })?; - UiState::Downloading(name, progress, started_at) + UiState::DownloadEnded } (Status::ContributionStarted, _) => { terminal.insert_before(1, |buf| { Paragraph::new(Line::from(vec![ Span::from("Started "), Span::styled( - "contribution computation", + "contribution computation...", Style::default().add_modifier(Modifier::BOLD), ), ])) @@ -274,14 +284,14 @@ pub async fn run_ui( ])) .render(buf.area, buf); })?; - UiState::Contributing(started_at) + UiState::ContributionEnded } (Status::UploadStarted(name), _) => { terminal.insert_before(1, |buf| { Paragraph::new(Line::from(vec![ - Span::from("Started "), + Span::from("Started uploading"), Span::styled( - format!("uploading contribution {}", &name), + format!("contribution {}", &name), Style::default().add_modifier(Modifier::BOLD), ), ])) @@ -306,8 +316,38 @@ pub async fn run_ui( })?; UiState::Uploading(name, progress, started_at) } - (Status::Successful, _) => UiState::Successful, - (Status::Failed(err), _) => UiState::Failed(err), + (Status::Successful, _) => { + terminal.insert_before(1, |buf| { + Paragraph::new(Line::from(vec![ + Span::from("Done, "), + Span::styled( + "successfully contributed", + Style::default() + .add_modifier(Modifier::BOLD) + .fg(Color::Green), + ), + Span::from(format!(" in {}s", start_time.elapsed().as_secs())), + ])) + .render(buf.area, buf); + })?; + UiState::Successful + } + (Status::Failed(err), _) => { + terminal.insert_before(1, |buf| { + Paragraph::new(Line::from(vec![ + Span::from("Done "), + Span::styled( + format!("contribution failed: {}", err), + Style::default() + .add_modifier(Modifier::BOLD) + .fg(Color::Red), + ), + Span::from(format!(" in {}s", start_time.elapsed().as_secs())), + ])) + .render(buf.area, buf); + })?; + UiState::Failed(err) + } (_, s) => s, }; } diff --git a/mpc/coordinator/database.sql b/mpc/coordinator/database.sql index 15c10b54f3..67e0163f28 100644 --- a/mpc/coordinator/database.sql +++ b/mpc/coordinator/database.sql @@ -87,7 +87,7 @@ CREATE TABLE contribution_submitted( ); ALTER TABLE contribution_submitted ENABLE ROW LEVEL SECURITY; -ALTER TABLE contribution_submitted ADD FOREIGN KEY (id) REFERENCES queue(id); +ALTER TABLE contribution_submitted ADD FOREIGN KEY (id) REFERENCES contribution_status(id); CREATE POLICY view_all ON contribution_submitted @@ -108,7 +108,7 @@ CREATE TABLE contribution( ); ALTER TABLE contribution ENABLE ROW LEVEL SECURITY; -ALTER TABLE 
contribution ADD FOREIGN KEY (id) REFERENCES queue(id); +ALTER TABLE contribution ADD FOREIGN KEY (id) REFERENCES contribution_status(id); CREATE UNIQUE INDEX idx_contribution_seq ON contribution(seq); CREATE UNIQUE INDEX idx_contribution_seq_success ON contribution(success, seq); From 27911f6ad9c6b57e7ebf6983a346b160f28982e8 Mon Sep 17 00:00:00 2001 From: Hussein Ait Lahcen Date: Wed, 31 Jul 2024 15:49:56 +0200 Subject: [PATCH 11/52] fix(mpc): final sql --- mpc/coordinator/database.sql | 82 +++++++++++++++++++++++------------- 1 file changed, 52 insertions(+), 30 deletions(-) diff --git a/mpc/coordinator/database.sql b/mpc/coordinator/database.sql index 67e0163f28..6f57c89231 100644 --- a/mpc/coordinator/database.sql +++ b/mpc/coordinator/database.sql @@ -1,3 +1,8 @@ +BEGIN; + +-- Default bucket for contributions upload +INSERT INTO storage.buckets(id, name, public) VALUES('contributions', 'contributions', false); + ----------- -- Queue -- ----------- @@ -22,26 +27,6 @@ CREATE POLICY view_all true ); --- Materialized ? -CREATE OR REPLACE VIEW current_queue_position AS - SELECT - CASE WHEN (SELECT cci.id FROM current_contributor_id cci) = auth.uid() THEN - 0 - ELSE - ( - SELECT COUNT(*) + 1 - FROM queue q - WHERE - -- Better score - q.score > (SELECT qq.score FROM queue qq WHERE qq.id = auth.uid()) - AND - -- Contribution round not started - NOT EXISTS (SELECT cs.id FROM contribution_status cs WHERE cs.id = q.id) - ) - END AS position; - -ALTER VIEW current_queue_position SET (security_invoker = on); - CREATE OR REPLACE FUNCTION min_score() RETURNS INTEGER AS $$ BEGIN RETURN (SELECT COALESCE(MIN(score) - 1, 1000000) FROM queue); @@ -55,7 +40,11 @@ BEGIN END $$ LANGUAGE plpgsql; -CREATE TRIGGER queue_set_initial_score BEFORE INSERT ON queue FOR EACH ROW EXECUTE FUNCTION set_initial_score_trigger(); +CREATE TRIGGER queue_set_initial_score +BEFORE INSERT +ON queue +FOR EACH ROW +EXECUTE FUNCTION set_initial_score_trigger(); ------------------------- -- Contribution Status -- @@ -66,7 +55,7 @@ CREATE TABLE contribution_status( expire timestamptz NOT NULL DEFAULT(now() + INTERVAL '30 minutes') ); -ALTER TABLE contribution ENABLE ROW LEVEL SECURITY; +ALTER TABLE contribution_status ENABLE ROW LEVEL SECURITY; ALTER TABLE contribution_status ADD FOREIGN KEY (id) REFERENCES queue(id); CREATE UNIQUE INDEX idx_contribution_status_id_expire ON contribution_status(id, expire); @@ -83,11 +72,13 @@ CREATE POLICY view_all ---------------------------- CREATE TABLE contribution_submitted( id uuid PRIMARY KEY, + object_id uuid NOT NULL, created_at timestamptz NOT NULL DEFAULT(now()) ); ALTER TABLE contribution_submitted ENABLE ROW LEVEL SECURITY; ALTER TABLE contribution_submitted ADD FOREIGN KEY (id) REFERENCES contribution_status(id); +ALTER TABLE contribution_submitted ADD FOREIGN KEY (object_id) REFERENCES storage.objects(id); CREATE POLICY view_all ON contribution_submitted @@ -129,7 +120,11 @@ END $$ LANGUAGE plpgsql; -- Rotate the current contributor whenever a contribution is done. -CREATE TRIGGER contribution_added AFTER INSERT ON contribution FOR EACH ROW EXECUTE FUNCTION set_next_contributor_trigger(); +CREATE TRIGGER contribution_added +AFTER INSERT +ON contribution +FOR EACH ROW +EXECUTE FUNCTION set_next_contributor_trigger(); -- Current contributor is the highest score in the queue with the contribution -- not done yet and it's status expired without payload submitted. 
@@ -150,6 +145,26 @@ CREATE OR REPLACE VIEW current_contributor_id AS ALTER VIEW current_contributor_id SET (security_invoker = on); +-- Materialized ? +CREATE OR REPLACE VIEW current_queue_position AS + SELECT + CASE WHEN (SELECT cci.id FROM current_contributor_id cci) = auth.uid() THEN + 0 + ELSE + ( + SELECT COUNT(*) + 1 + FROM queue q + WHERE + -- Better score + q.score > (SELECT qq.score FROM queue qq WHERE qq.id = auth.uid()) + AND + -- Contribution round not started + NOT EXISTS (SELECT cs.id FROM contribution_status cs WHERE cs.id = q.id) + ) + END AS position; + +ALTER VIEW current_queue_position SET (security_invoker = on); + -- The current payload is from the latest successfull contribution CREATE OR REPLACE VIEW current_payload_id AS SELECT COALESCE( @@ -181,9 +196,6 @@ BEGIN END $$ LANGUAGE plpgsql; --- On expiry, rotate the contributor -SELECT cron.schedule('update-contributor', '10 seconds', 'CALL set_next_contributor()'); - CREATE OR REPLACE FUNCTION can_upload(name varchar) RETURNS BOOLEAN AS $$ BEGIN RETURN ( @@ -234,9 +246,9 @@ CREATE POLICY allow_authenticated_contributor_download can_download(name) ); -CREATE OR REPLACE PROCEDURE set_contribution_submitted(queue_id uuid) AS $$ +CREATE OR REPLACE PROCEDURE set_contribution_submitted(queue_id uuid, object_id uuid) AS $$ BEGIN - INSERT INTO contribution_submitted(id) VALUES(queue_id); + INSERT INTO contribution_submitted(id, object_id) VALUES(queue_id, object_id); END $$ LANGUAGE plpgsql; @@ -261,11 +273,12 @@ CREATE OR REPLACE FUNCTION set_contribution_submitted_trigger() RETURNS TRIGGER DECLARE file_size integer; BEGIN + -- For some reason, supa pushes placeholder files. IF (NEW.metadata IS NOT NULL) THEN file_size := (NEW.metadata->>'size')::integer; CASE WHEN file_size = expected_payload_size() - THEN CALL set_contribution_submitted(uuid(NEW.owner_id)); + THEN CALL set_contribution_submitted(uuid(NEW.owner_id), NEW.id); ELSE RAISE EXCEPTION 'invalid file size, name: %, got: %, expected: %, meta: %', NEW.name, file_size, expected_payload_size(), NEW.metadata; END CASE; @@ -275,4 +288,13 @@ END $$ LANGUAGE plpgsql; -- Rotate the current contributor whenever a contribution is done. 
-CREATE TRIGGER contribution_payload_uploaded AFTER INSERT OR UPDATE ON storage.objects FOR EACH ROW EXECUTE FUNCTION set_contribution_submitted_trigger(); +CREATE TRIGGER contribution_payload_uploaded +AFTER INSERT OR UPDATE +ON storage.objects +FOR EACH ROW +EXECUTE FUNCTION set_contribution_submitted_trigger(); + +-- Will rotate the current contributor if the slot expired without any contribution submitted +SELECT cron.schedule('update-contributor', '10 seconds', 'CALL set_next_contributor()'); + +COMMIT; From f824329fefe661a9f2a9d6f3a1bd2a3ff456e716 Mon Sep 17 00:00:00 2001 From: Hussein Ait Lahcen Date: Wed, 31 Jul 2024 18:00:12 +0200 Subject: [PATCH 12/52] fix(mpc): allow exiting cli at any point --- app/src/routes/ceremony/+page.svelte | 4 ++-- mpc/client/Cargo.toml | 1 + mpc/client/src/main.rs | 22 ++++++++++++---------- mpc/coordinator/src/main.rs | 2 +- 4 files changed, 16 insertions(+), 13 deletions(-) diff --git a/app/src/routes/ceremony/+page.svelte b/app/src/routes/ceremony/+page.svelte index e30a462c79..b1cb587985 100644 --- a/app/src/routes/ceremony/+page.svelte +++ b/app/src/routes/ceremony/+page.svelte @@ -3,9 +3,9 @@ import { createClient, type Provider } from "@supabase/supabase-js"; const apiKey = - "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZSIsInJlZiI6ImJmZmNvbHdjYWtxcmhsem55am5zIiwicm9sZSI6ImFub24iLCJpYXQiOjE3MjIwOTc5OTgsImV4cCI6MjAzNzY3Mzk5OH0.9dVeafP8atsYqwdtPVYmzIhqMr_DEkHKdfoN3eqxjC0"; + "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZSIsInJlZiI6Ind3cXB5bGJyY3ByaXlhcXVnenNpIiwicm9sZSI6ImFub24iLCJpYXQiOjE3MjI0MzMyMjQsImV4cCI6MjAzODAwOTIyNH0.UQOmQ-wE63O32lyrLDO7ryowrM5LNA2UILHDA7hTH8E"; const supabase = createClient( - "https://bffcolwcakqrhlznyjns.supabase.co", + "https://wwqpylbrcpriyaqugzsi.supabase.co", apiKey, ); diff --git a/mpc/client/Cargo.toml b/mpc/client/Cargo.toml index dafe51e673..4e20e5aa8a 100644 --- a/mpc/client/Cargo.toml +++ b/mpc/client/Cargo.toml @@ -22,3 +22,4 @@ serde = { workspace = true, features = ["derive"] } serde_json = { workspace = true } thiserror = { workspace = true } tokio = { workspace = true, features = ["full"] } +tokio-util = "0.7" \ No newline at end of file diff --git a/mpc/client/src/main.rs b/mpc/client/src/main.rs index f9adfefc33..696663763f 100644 --- a/mpc/client/src/main.rs +++ b/mpc/client/src/main.rs @@ -35,9 +35,10 @@ use tokio::{ mpsc, oneshot, RwLock, }, }; +use tokio_util::sync::CancellationToken; use types::Status; -const SUPABASE_PROJECT: &str = "https://bffcolwcakqrhlznyjns.supabase.co"; +const SUPABASE_PROJECT: &str = "https://wwqpylbrcpriyaqugzsi.supabase.co"; const ENDPOINT: &str = "/contribute"; #[derive(PartialEq, Eq, Debug, Clone, Deserialize)] @@ -376,9 +377,9 @@ async fn input_and_status_handling( tokio::spawn(async move { while let Ok(status) = rx_status.recv().await { *latest_status.write().await = status.clone(); - tx_ui_clone - .send(ui::Event::NewStatus(status)) - .expect("impossible"); + if let Err(_) = tx_ui_clone.send(ui::Event::NewStatus(status)) { + break; + } } }); tokio::spawn(async move { @@ -410,8 +411,9 @@ async fn main() -> Result<(), DynError> { let lock = Arc::new(AtomicBool::new(false)); let (tx_status, rx_status) = broadcast::channel(64); let graceful = GracefulShutdown::new(); - let (tx_shutdown, mut rx_shutdown) = oneshot::channel::<()>(); let status_clone = status.clone(); + let token = CancellationToken::new(); + let token_clone = token.clone(); let handle = tokio::spawn(async move { let addr = SocketAddr::from(([127, 0, 0, 1], 0x1337)); let listener = 
TcpListener::bind(addr).await.unwrap(); @@ -440,12 +442,12 @@ async fn main() -> Result<(), DynError> { } }); } - _ = &mut rx_shutdown => { - graceful.shutdown().await; - break + _ = token_clone.cancelled() => { + break; } } } + graceful.shutdown().await; }); // Dispatch terminal let (tx_ui, rx_ui) = mpsc::unbounded_channel(); @@ -462,7 +464,7 @@ async fn main() -> Result<(), DynError> { ui::run_ui(&mut terminal, rx_ui).await?; crossterm::terminal::disable_raw_mode()?; terminal.clear()?; - tx_shutdown.send(()).expect("impossible"); + token.cancel(); handle.await.expect("impossible"); - Ok(()) + std::process::exit(0); } diff --git a/mpc/coordinator/src/main.rs b/mpc/coordinator/src/main.rs index 69e652feaf..87f9de45d8 100644 --- a/mpc/coordinator/src/main.rs +++ b/mpc/coordinator/src/main.rs @@ -1,7 +1,7 @@ use clap::{Parser, Subcommand}; use mpc_shared::{phase2_verify, supabase::SupabaseMPCApi}; -const SUPABASE_PROJECT: &str = "https://bffcolwcakqrhlznyjns.supabase.co"; +const SUPABASE_PROJECT: &str = "https://wwqpylbrcpriyaqugzsi.supabase.co"; #[derive(Parser, Debug)] #[command(version, about, long_about = None)] From 2cade4a164fca8aee0eede72316fbf129b4f26dd Mon Sep 17 00:00:00 2001 From: Hussein Ait Lahcen Date: Wed, 31 Jul 2024 18:43:24 +0200 Subject: [PATCH 13/52] fix(mpc): better ui --- mpc/client/src/ui.rs | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/mpc/client/src/ui.rs b/mpc/client/src/ui.rs index c8de9e57b4..b674b43a3c 100644 --- a/mpc/client/src/ui.rs +++ b/mpc/client/src/ui.rs @@ -118,9 +118,12 @@ fn ui(f: &mut Frame, state: &UiState, throbber_state: &mut ThrobberState) { }, ); } - UiState::Contributing(_) => { + UiState::Contributing(started_at) => { let full = throbber_widgets_tui::Throbber::default() - .label("Your contribution is being computed, please be patient...") + .label(format!( + "Your contribution is being computed, please be patient (may take a while)... 
({}s)", + started_at.elapsed().as_secs() + )) .style(ratatui::style::Style::default().fg(ratatui::style::Color::White)) .throbber_style( ratatui::style::Style::default().add_modifier(ratatui::style::Modifier::BOLD), @@ -145,7 +148,7 @@ fn ui(f: &mut Frame, state: &UiState, throbber_state: &mut ThrobberState) { UiState::Successful => { // Set full with state let full = throbber_widgets_tui::Throbber::default() - .label("Contribution successfully upload...") + .label("Contribution successfully uploaded...") .style(ratatui::style::Style::default().fg(ratatui::style::Color::Cyan)) .throbber_style( ratatui::style::Style::default().add_modifier(ratatui::style::Modifier::BOLD), @@ -231,7 +234,7 @@ pub async fn run_ui( start_time = Instant::now(); terminal.insert_before(1, |buf| { Paragraph::new(Line::from(vec![ - Span::from("Started downloading"), + Span::from("Started downloading "), Span::styled( format!("checkpoint {}", &name), Style::default().add_modifier(Modifier::BOLD), @@ -248,9 +251,9 @@ pub async fn run_ui( (Status::DownloadEnded(_), UiState::Downloading(name, _, started_at)) => { terminal.insert_before(1, |buf| { Paragraph::new(Line::from(vec![ - Span::from("Finished "), + Span::from("Finished downloading "), Span::styled( - format!("downloading checkpoint {}", &name), + format!("checkpoint {}", &name), Style::default().add_modifier(Modifier::BOLD), ), Span::from(format!(" in {}s", started_at.elapsed().as_secs())), @@ -289,7 +292,7 @@ pub async fn run_ui( (Status::UploadStarted(name), _) => { terminal.insert_before(1, |buf| { Paragraph::new(Line::from(vec![ - Span::from("Started uploading"), + Span::from("Started uploading "), Span::styled( format!("contribution {}", &name), Style::default().add_modifier(Modifier::BOLD), @@ -305,9 +308,9 @@ pub async fn run_ui( ) => { terminal.insert_before(1, |buf| { Paragraph::new(Line::from(vec![ - Span::from("Finished "), + Span::from("Finished uploading "), Span::styled( - format!("uploading contribution {}", &name), + format!("contribution {}", &name), Style::default().add_modifier(Modifier::BOLD), ), Span::from(format!(" in {}s", started_at.elapsed().as_secs())), From ce83eaa21e40079795667d80c5f68b1ef8abc419 Mon Sep 17 00:00:00 2001 From: Hussein Ait Lahcen Date: Mon, 2 Sep 2024 20:54:28 +0200 Subject: [PATCH 14/52] feat(mpc): add readme/fixup sql and code --- mpc/README.md | 58 ++++++++++++++++++++++++++++++++++++ mpc/client/src/main.rs | 9 +++--- mpc/coordinator/database.sql | 2 +- mpc/mpc.nix | 6 ++-- 4 files changed, 67 insertions(+), 8 deletions(-) create mode 100644 mpc/README.md diff --git a/mpc/README.md b/mpc/README.md new file mode 100644 index 0000000000..e4c6af1371 --- /dev/null +++ b/mpc/README.md @@ -0,0 +1,58 @@ +# Introduction + +This project contains the client and coordinator to conduct Groth16 multi-party computation for the circuit SRS. +Three components are in play: +- Supabase : host the state machine in postgresql and exposes api and storage services to upload contributions. +- Coordinator: contact Supabase and verify contribution to step the state machine. +- Client: pure function that accepts the current contributor id and generate then upload a contribution payload. + +## Supabase + +Hosts the database, storage services and state machine of the MPC round. Provides instant API on top of them. + +## Coordinator + +The coordinator is in charge of verifying contributions. When a contribution is deemed valid, it dispatches the value to Supabase (insert an entry), effectively stepping the MPC state machine. 
+ +## Client + +Exposes an API to contribute at `localhost:4919`: +- `OPTIONS /contribute` +- `POST /contribute` with a `Contribute` object in the body. Returns: + - a `202 Accepted` if the contribution started. + - a `503 Unavailable` if the client is busy (likely already contributing). +- `GET /contribute` returns: + - a `200 Ok` if everything is OK, with the body containing an encoded `Status` representing the client status (idle, contributing, etc.). + - a `500 InternalServerError` if the contribution failed unexpectedly; the body contains the error message. + +### Structures + +#### Contribute +```json +{ + "supabase_project": "", + "bucket": "", + "jwt": "", + "api_key": "", + "contributor_id": "", + "payload_id": "" +} +``` + +#### Status +```rust +#[serde(rename_all = "camelCase")] +pub enum Status { + Idle, + Initializing, + DownloadStarted(String), + Downloading(String, u8), + DownloadEnded(String), + ContributionStarted, + ContributionEnded, + UploadStarted(String), + UploadEnded(String), + Failed(String), + Successful, +} +``` diff --git a/mpc/client/src/main.rs b/mpc/client/src/main.rs index 696663763f..cac88e0800 100644 --- a/mpc/client/src/main.rs +++ b/mpc/client/src/main.rs @@ -38,12 +38,12 @@ use tokio::{ use tokio_util::sync::CancellationToken; use types::Status; -const SUPABASE_PROJECT: &str = "https://wwqpylbrcpriyaqugzsi.supabase.co"; const ENDPOINT: &str = "/contribute"; #[derive(PartialEq, Eq, Debug, Clone, Deserialize)] #[serde(rename_all = "camelCase")] struct Contribute { + supabase_project: String, bucket: String, jwt: String, api_key: String, @@ -74,6 +74,7 @@ type DynError = Box; async fn contribute( tx_status: Sender, Contribute { + supabase_project, bucket, jwt, api_key, @@ -81,7 +82,7 @@ async fn contribute( payload_id, }: Contribute, ) -> Result<(), DynError> { - let client = SupabaseMPCApi::new(SUPABASE_PROJECT.into(), api_key, jwt); + let client = SupabaseMPCApi::new(supabase_project.clone(), api_key, jwt); let current_contributor = client .current_contributor() .await? @@ -183,7 +184,7 @@ async fn contribute( // https://tus.io/protocols/resumable-upload#creation == // ===================================================== let response = upload_client - .post(format!("{SUPABASE_PROJECT}/storage/v1/upload/resumable")) + .post(format!("{supabase_project}/storage/v1/upload/resumable")) .header("Tus-Resumable", "1.0.0") .header("Upload-Length", CONTRIBUTION_SIZE.to_string()) .header( @@ -415,7 +416,7 @@ async fn main() -> Result<(), DynError> { let token = CancellationToken::new(); let token_clone = token.clone(); let handle = tokio::spawn(async move { - let addr = SocketAddr::from(([127, 0, 0, 1], 0x1337)); + let addr = SocketAddr::from(([0, 0, 0, 0], 0x1337)); let listener = TcpListener::bind(addr).await.unwrap(); loop { tokio::select!
{ diff --git a/mpc/coordinator/database.sql b/mpc/coordinator/database.sql index 6f57c89231..7d759a62b2 100644 --- a/mpc/coordinator/database.sql +++ b/mpc/coordinator/database.sql @@ -16,8 +16,8 @@ CREATE TABLE queue ( ALTER TABLE queue ENABLE ROW LEVEL SECURITY; ALTER TABLE queue ADD FOREIGN KEY (id) REFERENCES auth.users(id); CREATE UNIQUE INDEX idx_queue_score_id ON queue(score, id); -CREATE UNIQUE INDEX idx_queue_score ON queue(score); CREATE UNIQUE INDEX idx_queue_id_payload ON queue(id, payload_id); +CREATE INDEX idx_queue_score ON queue(score); CREATE POLICY view_all ON queue diff --git a/mpc/mpc.nix b/mpc/mpc.nix index 206acf4665..91e8c31d2d 100644 --- a/mpc/mpc.nix +++ b/mpc/mpc.nix @@ -13,15 +13,15 @@ in { packages = mpc-coordinator.packages // mpc-client.packages // { - mpc-image = pkgs.dockerTools.buildImage { + mpc-client-image = pkgs.dockerTools.buildImage { name = "${self'.packages.mpc-client.name}-image"; copyToRoot = pkgs.buildEnv { name = "image-root"; - paths = [ pkgs.coreutils-full pkgs.cacert ]; + paths = [ pkgs.coreutils-full pkgs.cacert pkgs.ncurses ]; pathsToLink = [ "/bin" ]; }; config = { - Entrypoint = [ (pkgs.lib.getExe self'.packages.mpc) ]; + Entrypoint = [ (pkgs.lib.getExe self'.packages.mpc-client) ]; Env = [ "SSL_CERT_FILE=${pkgs.cacert}/etc/ssl/certs/ca-bundle.crt" ]; }; }; From 7aeb2e975dd14c770a580ade505c22184200b3bd Mon Sep 17 00:00:00 2001 From: Hussein Ait Lahcen Date: Mon, 9 Sep 2024 14:12:21 +0200 Subject: [PATCH 15/52] feat(mpc): parametric project url for coordinator --- mpc/coordinator/src/main.rs | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/mpc/coordinator/src/main.rs b/mpc/coordinator/src/main.rs index 87f9de45d8..e5d4ad2466 100644 --- a/mpc/coordinator/src/main.rs +++ b/mpc/coordinator/src/main.rs @@ -1,8 +1,6 @@ use clap::{Parser, Subcommand}; use mpc_shared::{phase2_verify, supabase::SupabaseMPCApi}; -const SUPABASE_PROJECT: &str = "https://wwqpylbrcpriyaqugzsi.supabase.co"; - #[derive(Parser, Debug)] #[command(version, about, long_about = None)] struct Args { @@ -14,9 +12,9 @@ struct Args { enum Command { Start { #[arg(short, long)] - jwt: String, + url: String, #[arg(short, long)] - api_key: String, + jwt: String, }, } @@ -34,8 +32,8 @@ enum Error { async fn main() -> Result<(), Box> { let args = Args::parse(); match args.command { - Command::Start { jwt, api_key } => { - let client = SupabaseMPCApi::new(SUPABASE_PROJECT.into(), api_key, jwt); + Command::Start { url, jwt } => { + let client = SupabaseMPCApi::new(url, jwt.clone(), jwt); let progress = |percent| async move { println!("downloaded: {:.2}%", percent) }; loop { println!("downloading current payload..."); From 59c0accbcc156df4d0e5b3dd0e23f241c5883ea1 Mon Sep 17 00:00:00 2001 From: Hussein Ait Lahcen Date: Mon, 9 Sep 2024 14:12:49 +0200 Subject: [PATCH 16/52] feat(mpc): add current queue view and auto queue join --- mpc/coordinator/database.sql | 45 ++++++++++++++++++++++++++++++++++-- 1 file changed, 43 insertions(+), 2 deletions(-) diff --git a/mpc/coordinator/database.sql b/mpc/coordinator/database.sql index 7d759a62b2..528d68d6fe 100644 --- a/mpc/coordinator/database.sql +++ b/mpc/coordinator/database.sql @@ -27,6 +27,22 @@ CREATE POLICY view_all true ); + +CREATE OR REPLACE VIEW current_queue AS + ( + SELECT *, (SELECT COUNT(*) FROM queue qq + WHERE + NOT EXISTS (SELECT cs.id FROM contribution_status cs WHERE cs.id = qq.id) + AND qq.score > q.score + ) + 1 AS position FROM queue q + WHERE + -- Contribution round not started + NOT EXISTS 
(SELECT cs.id FROM contribution_status cs WHERE cs.id = q.id) + ORDER BY q.score DESC + ); + +ALTER VIEW current_queue SET (security_invoker = on); + CREATE OR REPLACE FUNCTION min_score() RETURNS INTEGER AS $$ BEGIN RETURN (SELECT COALESCE(MIN(score) - 1, 1000000) FROM queue); @@ -80,6 +96,9 @@ ALTER TABLE contribution_submitted ENABLE ROW LEVEL SECURITY; ALTER TABLE contribution_submitted ADD FOREIGN KEY (id) REFERENCES contribution_status(id); ALTER TABLE contribution_submitted ADD FOREIGN KEY (object_id) REFERENCES storage.objects(id); +CREATE INDEX idx_contribution_submitted_object ON contribution_submitted(object_id); +CREATE INDEX idx_contribution_submitted_id_created_at ON contribution_submitted(id, created_at); + CREATE POLICY view_all ON contribution_submitted FOR SELECT @@ -95,11 +114,11 @@ CREATE TABLE contribution( id uuid PRIMARY KEY, seq smallserial NOT NULL, created_at timestamptz NOT NULL DEFAULT(now()), - success boolean + success boolean NOT NULL ); ALTER TABLE contribution ENABLE ROW LEVEL SECURITY; -ALTER TABLE contribution ADD FOREIGN KEY (id) REFERENCES contribution_status(id); +ALTER TABLE contribution ADD FOREIGN KEY (id) REFERENCES contribution_submitted(id); CREATE UNIQUE INDEX idx_contribution_seq ON contribution(seq); CREATE UNIQUE INDEX idx_contribution_seq_success ON contribution(success, seq); @@ -221,6 +240,14 @@ CREATE POLICY allow_authenticated_contributor_upload_insert can_upload(name) ); +CREATE POLICY allow_service_insert + ON storage.objects + FOR INSERT + TO service_role + WITH CHECK ( + true + ); + CREATE OR REPLACE FUNCTION can_download(name varchar) RETURNS BOOLEAN AS $$ BEGIN RETURN ( @@ -297,4 +324,18 @@ EXECUTE FUNCTION set_contribution_submitted_trigger(); -- Will rotate the current contributor if the slot expired without any contribution submitted SELECT cron.schedule('update-contributor', '10 seconds', 'CALL set_next_contributor()'); +-- Automatically join the queue +CREATE OR REPLACE FUNCTION user_join_queue() RETURNS TRIGGER AS $$ +BEGIN + INSERT INTO public.queue(id) VALUES(NEW.id); + RETURN NEW; +END +$$ LANGUAGE plpgsql SECURITY DEFINER SET search_path = ''; + +CREATE TRIGGER user_join_queue + AFTER INSERT + ON auth.users + FOR EACH ROW + EXECUTE FUNCTION user_join_queue(); + COMMIT; From a2670a020b69203bcf6dbc550de182f0789a5832 Mon Sep 17 00:00:00 2001 From: Hussein Ait Lahcen Date: Mon, 9 Sep 2024 14:17:57 +0200 Subject: [PATCH 17/52] feat(mpc): parametric expiration --- mpc/coordinator/database.sql | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/mpc/coordinator/database.sql b/mpc/coordinator/database.sql index 528d68d6fe..039a7c076a 100644 --- a/mpc/coordinator/database.sql +++ b/mpc/coordinator/database.sql @@ -65,10 +65,16 @@ EXECUTE FUNCTION set_initial_score_trigger(); ------------------------- -- Contribution Status -- ------------------------- +CREATE OR REPLACE FUNCTION expiration_delay() RETURNS INTERVAL AS $$ +BEGIN + RETURN INTERVAL '30 minutes'; +END +$$ LANGUAGE plpgsql SET search_path = ''; + CREATE TABLE contribution_status( id uuid PRIMARY KEY, started timestamptz NOT NULL DEFAULT(now()), - expire timestamptz NOT NULL DEFAULT(now() + INTERVAL '30 minutes') + expire timestamptz NOT NULL DEFAULT(now() + expiration_delay()) ); ALTER TABLE contribution_status ENABLE ROW LEVEL SECURITY; @@ -330,7 +336,7 @@ BEGIN INSERT INTO public.queue(id) VALUES(NEW.id); RETURN NEW; END -$$ LANGUAGE plpgsql SECURITY DEFINER SET search_path = ''; +$$ LANGUAGE plpgsql SET search_path = ''; CREATE TRIGGER 
user_join_queue AFTER INSERT From 71a9a53a78f61ade6d422feef30ae28eee99d4fb Mon Sep 17 00:00:00 2001 From: Hussein Ait Lahcen Date: Wed, 11 Sep 2024 11:54:27 +0200 Subject: [PATCH 18/52] feat(mpc): more views for averages and string search_path --- mpc/coordinator/database.sql | 75 ++++++++++++++++++++++-------------- 1 file changed, 47 insertions(+), 28 deletions(-) diff --git a/mpc/coordinator/database.sql b/mpc/coordinator/database.sql index 039a7c076a..55f116f573 100644 --- a/mpc/coordinator/database.sql +++ b/mpc/coordinator/database.sql @@ -45,16 +45,16 @@ ALTER VIEW current_queue SET (security_invoker = on); CREATE OR REPLACE FUNCTION min_score() RETURNS INTEGER AS $$ BEGIN - RETURN (SELECT COALESCE(MIN(score) - 1, 1000000) FROM queue); + RETURN (SELECT COALESCE(MIN(score) - 1, 1000000) FROM public.queue); END -$$ LANGUAGE plpgsql; +$$ LANGUAGE plpgsql SET search_path = ''; CREATE OR REPLACE FUNCTION set_initial_score_trigger() RETURNS TRIGGER AS $$ BEGIN - NEW.score := min_score(); + NEW.score := public.min_score(); RETURN NEW; END -$$ LANGUAGE plpgsql; +$$ LANGUAGE plpgsql SET search_path = ''; CREATE TRIGGER queue_set_initial_score BEFORE INSERT @@ -80,6 +80,7 @@ CREATE TABLE contribution_status( ALTER TABLE contribution_status ENABLE ROW LEVEL SECURITY; ALTER TABLE contribution_status ADD FOREIGN KEY (id) REFERENCES queue(id); CREATE UNIQUE INDEX idx_contribution_status_id_expire ON contribution_status(id, expire); +CREATE UNIQUE INDEX idx_contribution_status_id_started ON contribution_status(id, started); CREATE POLICY view_all ON contribution_status @@ -89,6 +90,7 @@ CREATE POLICY view_all true ); + ---------------------------- -- Contribution Submitted -- ---------------------------- @@ -103,7 +105,7 @@ ALTER TABLE contribution_submitted ADD FOREIGN KEY (id) REFERENCES contribution_ ALTER TABLE contribution_submitted ADD FOREIGN KEY (object_id) REFERENCES storage.objects(id); CREATE INDEX idx_contribution_submitted_object ON contribution_submitted(object_id); -CREATE INDEX idx_contribution_submitted_id_created_at ON contribution_submitted(id, created_at); +CREATE UNIQUE INDEX idx_contribution_submitted_id_created_at ON contribution_submitted(id, created_at); CREATE POLICY view_all ON contribution_submitted @@ -127,6 +129,7 @@ ALTER TABLE contribution ENABLE ROW LEVEL SECURITY; ALTER TABLE contribution ADD FOREIGN KEY (id) REFERENCES contribution_submitted(id); CREATE UNIQUE INDEX idx_contribution_seq ON contribution(seq); CREATE UNIQUE INDEX idx_contribution_seq_success ON contribution(success, seq); +CREATE UNIQUE INDEX idx_contribution_id_created_at ON contribution(id, created_at); CREATE POLICY view_all ON contribution @@ -139,10 +142,10 @@ CREATE POLICY view_all -- The next contributor is the one with the highest score that didn't contribute yet. CREATE OR REPLACE FUNCTION set_next_contributor_trigger() RETURNS TRIGGER AS $$ BEGIN - CALL set_next_contributor(); + CALL public.set_next_contributor(); RETURN NEW; END -$$ LANGUAGE plpgsql; +$$ LANGUAGE plpgsql SET search_path = ''; -- Rotate the current contributor whenever a contribution is done.
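-- For example (an illustrative sketch, not part of the schema): once the
-- coordinator has verified a payload, it records the result with
--   INSERT INTO contribution(id, success) VALUES ('<contributor uuid>', true);
-- which fires the contribution_added trigger below and rotates the slot to
-- the highest-score contributor that has not contributed yet.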
CREATE TRIGGER contribution_added @@ -151,6 +154,22 @@ ON contribution FOR EACH ROW EXECUTE FUNCTION set_next_contributor_trigger(); +CREATE OR REPLACE VIEW current_verification_average AS ( + SELECT AVG(c.created_at - cs.created_at) AS verification_average + FROM contribution c + INNER JOIN contribution_submitted cs ON (c.id = cs.id) +); + +ALTER VIEW current_verification_average SET (security_invoker = on); + +CREATE OR REPLACE VIEW current_contribution_average AS ( + SELECT AVG(cs.created_at - c.started) AS contribution_average + FROM contribution_status c + INNER JOIN contribution_submitted cs ON (c.id = cs.id) +); + +ALTER VIEW current_contribution_average SET (security_invoker = on); + -- Current contributor is the highest score in the queue with the contribution -- not done yet and its status expired without payload submitted. CREATE OR REPLACE VIEW current_contributor_id AS @@ -207,34 +226,34 @@ ALTER VIEW current_payload_id SET (security_invoker = on); CREATE OR REPLACE PROCEDURE set_next_contributor() AS $$ BEGIN - IF (SELECT COUNT(*) FROM current_contributor_id) = 0 THEN - INSERT INTO contribution_status(id) SELECT q.id FROM queue q WHERE q.score = ( + IF (SELECT COUNT(*) FROM public.current_contributor_id) = 0 THEN + INSERT INTO public.contribution_status(id) SELECT q.id FROM public.queue q WHERE q.score = ( SELECT MAX(qq.score) - FROM queue qq + FROM public.queue qq WHERE NOT EXISTS ( - SELECT c.id FROM contribution c WHERE c.id = qq.id + SELECT c.id FROM public.contribution c WHERE c.id = qq.id ) AND NOT EXISTS ( - SELECT cs.expire FROM contribution_status cs WHERE cs.id = qq.id AND cs.expire < now() + SELECT cs.expire FROM public.contribution_status cs WHERE cs.id = qq.id AND cs.expire < now() ) ); END IF; END -$$ LANGUAGE plpgsql; +$$ LANGUAGE plpgsql SET search_path = ''; CREATE OR REPLACE FUNCTION can_upload(name varchar) RETURNS BOOLEAN AS $$ BEGIN RETURN ( -- User must be the current contributor. - (SELECT cci.id FROM current_contributor_id cci) = auth.uid() + (SELECT cci.id FROM public.current_contributor_id cci) = auth.uid() AND -- User is only allowed to submit the expected payload. - storage.filename(name) = (SELECT q.payload_id::text FROM queue q WHERE q.id = auth.uid()) + storage.filename(name) = (SELECT q.payload_id::text FROM public.queue q WHERE q.id = auth.uid()) AND -- Do not allow the user to interact with the file after it's been submitted. - NOT EXISTS (SELECT * FROM contribution_submitted cs WHERE cs.id = auth.uid()) + NOT EXISTS (SELECT * FROM public.contribution_submitted cs WHERE cs.id = auth.uid()) ); END -$$ LANGUAGE plpgsql; +$$ LANGUAGE plpgsql SET search_path = ''; CREATE POLICY allow_authenticated_contributor_upload_insert ON storage.objects @@ -258,16 +277,16 @@ CREATE OR REPLACE FUNCTION can_download(name varchar) RETURNS BOOLEAN AS $$ BEGIN RETURN ( -- User must be the current contributor. - (SELECT cci.id FROM current_contributor_id cci) = auth.uid() + (SELECT cci.id FROM public.current_contributor_id cci) = auth.uid() AND -- User is only allowed to download the last verified contribution. - storage.filename(name) = (SELECT cpi.payload_id::text FROM current_payload_id cpi) + storage.filename(name) = (SELECT cpi.payload_id::text FROM public.current_payload_id cpi) AND -- Do not allow the user to interact with the file after its contribution has been submitted.
- NOT EXISTS (SELECT * FROM contribution_submitted cs WHERE cs.id = auth.uid()) + NOT EXISTS (SELECT * FROM public.contribution_submitted cs WHERE cs.id = auth.uid()) ); END -$$ LANGUAGE plpgsql; +$$ LANGUAGE plpgsql SET search_path = ''; CREATE POLICY allow_authenticated_contributor_download ON storage.objects @@ -281,16 +300,16 @@ CREATE POLICY allow_authenticated_contributor_download CREATE OR REPLACE PROCEDURE set_contribution_submitted(queue_id uuid, object_id uuid) AS $$ BEGIN - INSERT INTO contribution_submitted(id, object_id) VALUES(queue_id, object_id); + INSERT INTO public.contribution_submitted(id, object_id) VALUES(queue_id, object_id); END -$$ LANGUAGE plpgsql; +$$ LANGUAGE plpgsql SET search_path = ''; -- Phase 2 contribution payload is constant size CREATE OR REPLACE FUNCTION expected_payload_size() RETURNS INTEGER AS $$ BEGIN RETURN 306032532; END -$$ LANGUAGE plpgsql; +$$ LANGUAGE plpgsql SET search_path = ''; -- Metadata pushed on upload. -- { @@ -310,15 +329,15 @@ BEGIN IF (NEW.metadata IS NOT NULL) THEN file_size := (NEW.metadata->>'size')::integer; CASE - WHEN file_size = expected_payload_size() - THEN CALL set_contribution_submitted(uuid(NEW.owner_id), NEW.id); + WHEN file_size = public.expected_payload_size() + THEN CALL public.set_contribution_submitted(uuid(NEW.owner_id), NEW.id); ELSE RAISE EXCEPTION 'invalid file size, name: %, got: %, expected: %, meta: %', NEW.name, file_size, expected_payload_size(), NEW.metadata; END CASE; END IF; RETURN NEW; END -$$ LANGUAGE plpgsql; +$$ LANGUAGE plpgsql SET search_path = ''; -- Rotate the current contributor whenever a contribution is done. CREATE TRIGGER contribution_payload_uploaded @@ -336,7 +355,7 @@ BEGIN INSERT INTO public.queue(id) VALUES(NEW.id); RETURN NEW; END -$$ LANGUAGE plpgsql SET search_path = ''; +$$ LANGUAGE plpgsql SECURITY DEFINER SET search_path = ''; CREATE TRIGGER user_join_queue AFTER INSERT From e4ef2bac4af796f0f6df3773c5b9fd1ef6e427dc Mon Sep 17 00:00:00 2001 From: Hussein Ait Lahcen Date: Thu, 12 Sep 2024 14:10:34 +0200 Subject: [PATCH 19/52] feat(mpc): waitlist & redeemable code --- mpc/coordinator/database.sql | 71 ++++++++++++++++++++++++++++-------- 1 file changed, 56 insertions(+), 15 deletions(-) diff --git a/mpc/coordinator/database.sql b/mpc/coordinator/database.sql index 55f116f573..d54cae79f0 100644 --- a/mpc/coordinator/database.sql +++ b/mpc/coordinator/database.sql @@ -3,14 +3,35 @@ BEGIN; -- Default bucket for contributions upload INSERT INTO storage.buckets(id, name, public) VALUES('contributions', 'contributions', false); +CREATE TABLE waitlist( + id UUID PRIMARY KEY +); + +ALTER TABLE waitlist ENABLE ROW LEVEL SECURITY; +ALTER TABLE waitlist ADD FOREIGN KEY (id) REFERENCES auth.users(id); + +CREATE POLICY allow_insert_self + ON waitlist + FOR INSERT + TO authenticated + WITH CHECK ( + (SELECT auth.uid()) = id + ); + ----------- -- Queue -- ----------- +CREATE OR REPLACE FUNCTION open_to_public() RETURNS boolean AS $$ +BEGIN + RETURN false; +END +$$ LANGUAGE plpgsql SET search_path = ''; + CREATE TABLE queue ( id uuid PRIMARY KEY, payload_id uuid NOT NULL DEFAULT(gen_random_uuid()), joined timestamptz NOT NULL DEFAULT (now()), - score integer NOT NULL + score INTEGER NOT NULL ); ALTER TABLE queue ENABLE ROW LEVEL SECURITY; @@ -27,6 +48,13 @@ CREATE POLICY view_all true ); +CREATE POLICY allow_insert_self_if_open + ON queue + FOR INSERT + TO authenticated + WITH CHECK ( + (SELECT auth.uid()) = id AND open_to_public() + ); CREATE OR REPLACE VIEW current_queue AS ( @@ -62,6 +90,33 @@ 
ON queue FOR EACH ROW EXECUTE FUNCTION set_initial_score_trigger(); +----------- +-- Code -- +----------- +CREATE TABLE code ( + id text PRIMARY KEY, + user_id uuid DEFAULT NULL +); + +ALTER TABLE code ENABLE ROW LEVEL SECURITY; +ALTER TABLE code ADD FOREIGN KEY (user_id) REFERENCES auth.users(id); +CREATE UNIQUE INDEX idx_code_user_id ON code(user_id); + +CREATE OR REPLACE FUNCTION redeem(code_id text) RETURNS void AS $$ + DECLARE + redeemed_code public.code%ROWTYPE := NULL; +BEGIN + UPDATE public.code c + SET id = (SELECT auth.uid()) + WHERE c.id = code_id + RETURNING * INTO redeemed_code; + IF (redeemed_code IS NULL) THEN + RAISE EXCEPTION 'redeem_code_invalid'; + END IF; + INSERT INTO public.queue(id) VALUES ((SELECT auth.uid())); +END +$$ LANGUAGE plpgsql SECURITY DEFINER SET search_path = ''; + ------------------------- -- Contribution Status -- ------------------------- @@ -349,18 +404,4 @@ EXECUTE FUNCTION set_contribution_submitted_trigger(); -- Will rotate the current contributor if the slot expired without any contribution submitted SELECT cron.schedule('update-contributor', '10 seconds', 'CALL set_next_contributor()'); --- Automatically join the queue -CREATE OR REPLACE FUNCTION user_join_queue() RETURNS TRIGGER AS $$ -BEGIN - INSERT INTO public.queue(id) VALUES(NEW.id); - RETURN NEW; -END -$$ LANGUAGE plpgsql SECURITY DEFINER SET search_path = ''; - -CREATE TRIGGER user_join_queue - AFTER INSERT - ON auth.users - FOR EACH ROW - EXECUTE FUNCTION user_join_queue(); - COMMIT; From 55bf85d06a19a5605360aaccc1730cee2f83e528 Mon Sep 17 00:00:00 2001 From: Hussein Ait Lahcen Date: Thu, 12 Sep 2024 20:38:10 +0200 Subject: [PATCH 20/52] feat(mpc): join queue facility --- mpc/coordinator/database.sql | 27 +++++++++++++++++---------- 1 file changed, 17 insertions(+), 10 deletions(-) diff --git a/mpc/coordinator/database.sql b/mpc/coordinator/database.sql index d54cae79f0..68bce68692 100644 --- a/mpc/coordinator/database.sql +++ b/mpc/coordinator/database.sql @@ -48,14 +48,6 @@ CREATE POLICY view_all true ); -CREATE POLICY allow_insert_self_if_open - ON queue - FOR INSERT - TO authenticated - WITH CHECK ( - (SELECT auth.uid()) = id AND open_to_public() - ); - CREATE OR REPLACE VIEW current_queue AS ( SELECT *, (SELECT COUNT(*) FROM queue qq @@ -107,8 +99,9 @@ CREATE OR REPLACE FUNCTION redeem(code_id text) RETURNS void AS $$ redeemed_code public.code%ROWTYPE := NULL; BEGIN UPDATE public.code c - SET id = (SELECT auth.uid()) - WHERE c.id = code_id + SET user_id = (SELECT auth.uid()) + WHERE c.id = encode(sha256(code_id::bytea), 'hex') + AND c.user_id IS NULL RETURNING * INTO redeemed_code; IF (redeemed_code IS NULL) THEN RAISE EXCEPTION 'redeem_code_invalid'; @@ -117,6 +110,20 @@ BEGIN END $$ LANGUAGE plpgsql SECURITY DEFINER SET search_path = ''; +CREATE OR REPLACE FUNCTION join_queue(code_id text) RETURNS void AS $$ +BEGIN + IF (code_id IS NULL) THEN + IF (public.open_to_public()) THEN + INSERT INTO public.queue(id) VALUES ((SELECT auth.uid())); + ELSE + RAISE EXCEPTION 'not_open_yet'; + END IF; + ELSE + PERFORM public.redeem(code_id); + END IF; +END +$$ LANGUAGE plpgsql SECURITY DEFINER SET search_path = ''; + ------------------------- -- Contribution Status -- ------------------------- From 4377f5317fe4930eee22f24b7bc5a579b993c694 Mon Sep 17 00:00:00 2001 From: Hussein Ait Lahcen Date: Thu, 12 Sep 2024 22:09:51 +0200 Subject: [PATCH 21/52] feat(mpc): open with mutex --- mpc/client/src/main.rs | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git 
a/mpc/client/src/main.rs b/mpc/client/src/main.rs index cac88e0800..0b31cd66ab 100644 --- a/mpc/client/src/main.rs +++ b/mpc/client/src/main.rs @@ -13,7 +13,7 @@ use std::{ time::{Duration, Instant, UNIX_EPOCH}, }; -use async_sqlite::{JournalMode, PoolBuilder}; +use async_sqlite::{rusqlite::OpenFlags, JournalMode, PoolBuilder}; use base64::{prelude::BASE64_STANDARD, Engine}; use crossterm::event; use http_body_util::{BodyExt, Full}; @@ -136,6 +136,12 @@ async fn contribute( }; let pool = PoolBuilder::new() .path("db.sqlite3") + .flags( + OpenFlags::SQLITE_OPEN_READ_WRITE + | OpenFlags::SQLITE_OPEN_CREATE + | OpenFlags::SQLITE_OPEN_FULL_MUTEX + | OpenFlags::SQLITE_OPEN_URI, + ) .journal_mode(JournalMode::Wal) .open() .await?; @@ -153,7 +159,7 @@ async fn contribute( .await?; let mut upload_location = pool .conn(move |conn| { - let mut stmt = conn.prepare_cached( + let mut stmt = conn.prepare( "SELECT location FROM resumable_upload WHERE expire > unixepoch() LIMIT 1", )?; let mut rows = stmt.query(())?; @@ -213,7 +219,7 @@ async fn contribute( let expire_timestamp = expire.duration_since(UNIX_EPOCH)?.as_secs(); let location_clone = location.clone(); pool.conn(move |conn| { - let mut stmt = conn.prepare_cached( + let mut stmt = conn.prepare( "INSERT INTO resumable_upload (location, expire) VALUES (?, ?)", )?; let r = stmt.execute((location_clone, expire_timestamp))?; @@ -439,7 +445,6 @@ async fn main() -> Result<(), DynError> { let fut = graceful.watch(conn); tokio::task::spawn(async move { if let Err(err) = fut.await { - eprintln!("error serving connection: {:?}", err); } }); } From 7a287039ab7c37031da9802404f7137a8ecc40b3 Mon Sep 17 00:00:00 2001 From: Hussein Ait Lahcen Date: Thu, 12 Sep 2024 23:07:43 +0200 Subject: [PATCH 22/52] feat(mpc): upgrade goPkgs --- mpc/client/src/main.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/mpc/client/src/main.rs b/mpc/client/src/main.rs index 0b31cd66ab..61944d0708 100644 --- a/mpc/client/src/main.rs +++ b/mpc/client/src/main.rs @@ -444,8 +444,7 @@ async fn main() -> Result<(), DynError> { ); let fut = graceful.watch(conn); tokio::task::spawn(async move { - if let Err(err) = fut.await { - } + let _ = fut.await; }); } _ = token_clone.cancelled() => { From 718288596441ad1a39fa134162ebd2ce0ba7d83d Mon Sep 17 00:00:00 2001 From: Hussein Ait Lahcen Date: Fri, 13 Sep 2024 22:15:26 +0200 Subject: [PATCH 23/52] feat(mpc): attest contrib using gpg --- mpc/client/Cargo.toml | 38 ++++++------ mpc/client/src/main.rs | 84 +++++++++++++++++++++++++-- mpc/client/src/types.rs | 24 +++++++- mpc/coordinator/Cargo.toml | 2 + mpc/coordinator/database.sql | 109 ++++++++++++++++++++++++++++------- mpc/coordinator/src/main.rs | 56 +++++++++++++++++- mpc/shared/Cargo.toml | 13 +++-- mpc/shared/src/lib.rs | 6 +- mpc/shared/src/supabase.rs | 50 +++++++++++++++- mpc/shared/src/types.rs | 7 +++ 10 files changed, 329 insertions(+), 60 deletions(-) diff --git a/mpc/client/Cargo.toml b/mpc/client/Cargo.toml index 4e20e5aa8a..ccb9d7127d 100644 --- a/mpc/client/Cargo.toml +++ b/mpc/client/Cargo.toml @@ -4,22 +4,24 @@ name = "mpc-client" version = "0.1.0" [dependencies] -async-sqlite = "0.2.2" -base64 = { workspace = true } -clap = { version = "4.5", features = ["derive"] } -crossterm = "0.27.0" -http-body-util = "0.1" -httpdate = "1.0" -hyper = { version = "1", features = ["full"] } -hyper-util = { version = "0.1", features = ["full"] } -mpc-shared = { workspace = true } -postgrest = "1.0" -rand = "0.8.5" -ratatui = "0.27.0" +async-sqlite = "0.2.2" +base64 = { 
workspace = true } +clap = { version = "4.5", features = ["derive"] } +crossterm = "0.27.0" +hex = { workspace = true } +http-body-util = "0.1" +httpdate = "1.0" +hyper = { version = "1", features = ["full"] } +hyper-util = { version = "0.1", features = ["full"] } +mpc-shared = { workspace = true } +pgp = "0.13" +postgrest = "1.0" +rand = "0.8.5" +ratatui = "0.27.0" +reqwest = { workspace = true, features = ["json"] } +serde = { workspace = true, features = ["derive"] } +serde_json = { workspace = true } +thiserror = { workspace = true } throbber-widgets-tui = "0.6" -reqwest = { workspace = true, features = ["json"] } -serde = { workspace = true, features = ["derive"] } -serde_json = { workspace = true } -thiserror = { workspace = true } -tokio = { workspace = true, features = ["full"] } -tokio-util = "0.7" \ No newline at end of file +tokio = { workspace = true, features = ["full"] } +tokio-util = "0.7" diff --git a/mpc/client/src/main.rs b/mpc/client/src/main.rs index 61944d0708..91eef09b9f 100644 --- a/mpc/client/src/main.rs +++ b/mpc/client/src/main.rs @@ -15,7 +15,7 @@ use std::{ use async_sqlite::{rusqlite::OpenFlags, JournalMode, PoolBuilder}; use base64::{prelude::BASE64_STANDARD, Engine}; -use crossterm::event; +use crossterm::{cursor::Show, event, execute}; use http_body_util::{BodyExt, Full}; use httpdate::parse_http_date; use hyper::{ @@ -24,7 +24,13 @@ use hyper::{ Method, }; use hyper_util::{rt::TokioIo, server::graceful::GracefulShutdown}; -use mpc_shared::{phase2_contribute, supabase::SupabaseMPCApi, CONTRIBUTION_SIZE}; +use mpc_shared::{phase2_contribute, signed_message, supabase::SupabaseMPCApi, CONTRIBUTION_SIZE}; +use pgp::{ + cleartext::CleartextSignedMessage, + crypto::{hash::HashAlgorithm, sym::SymmetricKeyAlgorithm}, + types::SecretKeyTrait, + ArmorOptions, Deserializable, KeyType, SecretKeyParamsBuilder, SignedSecretKey, +}; use ratatui::{backend::CrosstermBackend, Terminal, Viewport}; use reqwest::header::LOCATION; use serde::Deserialize; @@ -40,6 +46,8 @@ use types::Status; const ENDPOINT: &str = "/contribute"; +const CONTRIB_SK_PATH: &str = "contrib_key.sk.asc"; + #[derive(PartialEq, Eq, Debug, Clone, Deserialize)] #[serde(rename_all = "camelCase")] struct Contribute { @@ -49,6 +57,7 @@ struct Contribute { api_key: String, contributor_id: String, payload_id: String, + user_email: Option, } #[derive(thiserror::Error, Debug, Clone)] @@ -71,6 +80,25 @@ type BoxBody = http_body_util::combinators::BoxBody; type DynError = Box; +fn generate_pgp_key(email: String) -> SignedSecretKey { + let mut key_params = SecretKeyParamsBuilder::default(); + key_params + .key_type(KeyType::EdDSA) + .can_certify(false) + .can_sign(true) + .can_encrypt(false) + .primary_user_id(email) + .preferred_symmetric_algorithms( + [SymmetricKeyAlgorithm::AES256].to_vec().try_into().unwrap(), + ) + .preferred_hash_algorithms([HashAlgorithm::None].to_vec().try_into().unwrap()); + let secret_key_params = key_params.build().expect("impossible"); + let secret_key = secret_key_params.generate().expect("impossible"); + let passwd_fn = || String::new(); + let signed_secret_key = secret_key.sign(passwd_fn).expect("impossible"); + signed_secret_key +} + async fn contribute( tx_status: Sender, Contribute { @@ -80,8 +108,26 @@ async fn contribute( api_key, contributor_id, payload_id, + user_email, }: Contribute, ) -> Result<(), DynError> { + let mut secret_key = if let Ok(_) = tokio::fs::metadata(CONTRIB_SK_PATH).await { + SignedSecretKey::from_armor_single::<&[u8]>( + 
tokio::fs::read(CONTRIB_SK_PATH).await?.as_ref(), + ) + .expect("impossible") + .0 + } else { + let secret_key = generate_pgp_key(user_email.unwrap_or("placeholder@test.com".into())); + tokio::fs::write( + CONTRIB_SK_PATH, + secret_key + .to_armored_bytes(ArmorOptions::default()) + .expect("impossible"), + ) + .await?; + secret_key + }; let client = SupabaseMPCApi::new(supabase_project.clone(), api_key, jwt); let current_contributor = client .current_contributor() @@ -134,6 +180,32 @@ async fn contribute( tokio::fs::write(&payload_id, &phase2_contribution).await?; phase2_contribution }; + + // ------------------------ + // Sign and submits the sig + // Gnark phase2 contribution appends the sha256 hash at the end + let phase2_contribution_hash = &phase2_contribution[phase2_contribution.len() - 32..]; + let signature = CleartextSignedMessage::sign( + &signed_message(&payload_id, &hex::encode(phase2_contribution_hash)), + &mut secret_key, + || String::new(), + ) + .expect("impossible"); + let public_key = secret_key + .public_key() + .sign(&secret_key, || String::new()) + .expect("impossible") + .to_armored_bytes(ArmorOptions::default()) + .expect("impossible"); + client + .insert_contribution_signature( + current_contributor.id, + public_key, + signature + .to_armored_bytes(ArmorOptions::default()) + .expect("impossible"), + ) + .await?; let pool = PoolBuilder::new() .path("db.sqlite3") .flags( @@ -219,9 +291,8 @@ async fn contribute( let expire_timestamp = expire.duration_since(UNIX_EPOCH)?.as_secs(); let location_clone = location.clone(); pool.conn(move |conn| { - let mut stmt = conn.prepare( - "INSERT INTO resumable_upload (location, expire) VALUES (?, ?)", - )?; + let mut stmt = + conn.prepare("INSERT INTO resumable_upload (location, expire) VALUES (?, ?)")?; let r = stmt.execute((location_clone, expire_timestamp))?; assert!(r == 1); Ok(()) @@ -467,8 +538,9 @@ async fn main() -> Result<(), DynError> { )?; input_and_status_handling(status, rx_status, tx_ui).await; ui::run_ui(&mut terminal, rx_ui).await?; - crossterm::terminal::disable_raw_mode()?; terminal.clear()?; + crossterm::terminal::disable_raw_mode()?; + let _ = execute!(io::stdout(), Show); token.cancel(); handle.await.expect("impossible"); std::process::exit(0); diff --git a/mpc/client/src/types.rs b/mpc/client/src/types.rs index c224eb7452..cb7d3073ed 100644 --- a/mpc/client/src/types.rs +++ b/mpc/client/src/types.rs @@ -1,7 +1,6 @@ use serde::Serialize; -#[derive(PartialEq, Eq, Debug, Clone, Serialize)] -#[serde(rename_all = "camelCase")] +#[derive(PartialEq, Eq, Debug, Clone)] pub enum Status { Idle, Initializing, @@ -15,3 +14,24 @@ pub enum Status { Failed(String), Successful, } + +impl Serialize for Status { + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + serializer.serialize_str(match self { + Status::Idle => "idle", + Status::Initializing => "initializing", + Status::DownloadStarted(_) => "downloadStarted", + Status::Downloading(_, _) => "downloading", + Status::DownloadEnded(_) => "downloadEnded", + Status::ContributionStarted => "contributionStarted", + Status::ContributionEnded => "contributionEnded", + Status::UploadStarted(_) => "uploadStarted", + Status::UploadEnded(_) => "uploadEnded", + Status::Failed(_) => "failed", + Status::Successful => "successful", + }) + } +} diff --git a/mpc/coordinator/Cargo.toml b/mpc/coordinator/Cargo.toml index 55eb53abe0..50c8f5cd51 100644 --- a/mpc/coordinator/Cargo.toml +++ b/mpc/coordinator/Cargo.toml @@ -7,7 +7,9 @@ version = "0.1.0" 
async-sqlite = "0.2.2" clap = { version = "4.5", features = ["derive"] } futures.workspace = true +hex = { workspace = true } mpc-shared = { workspace = true } +pgp = "0.13" postgrest = "1.0" reqwest = { workspace = true, features = ["json"] } serde_json = { workspace = true } diff --git a/mpc/coordinator/database.sql b/mpc/coordinator/database.sql index 68bce68692..78a82cd97b 100644 --- a/mpc/coordinator/database.sql +++ b/mpc/coordinator/database.sql @@ -3,6 +3,30 @@ BEGIN; -- Default bucket for contributions upload INSERT INTO storage.buckets(id, name, public) VALUES('contributions', 'contributions', false); +CREATE TABLE wallet_address( + id UUID PRIMARY KEY, + wallet TEXT NOT NULL +); + +ALTER TABLE wallet_address ENABLE ROW LEVEL SECURITY; +ALTER TABLE wallet_address ADD FOREIGN KEY (id) REFERENCES auth.users(id); + +CREATE POLICY view_self + ON wallet_address + FOR SELECT + TO authenticated + USING ( + (SELECT auth.uid()) = id + ); + +CREATE POLICY allow_insert_self + ON wallet_address + FOR INSERT + TO authenticated + WITH CHECK ( + (SELECT auth.uid()) = id + ); + CREATE TABLE waitlist( id UUID PRIMARY KEY ); @@ -10,6 +34,14 @@ CREATE TABLE waitlist( ALTER TABLE waitlist ENABLE ROW LEVEL SECURITY; ALTER TABLE waitlist ADD FOREIGN KEY (id) REFERENCES auth.users(id); +CREATE POLICY view_self + ON waitlist + FOR SELECT + TO authenticated + USING ( + (SELECT auth.uid()) = id + ); + CREATE POLICY allow_insert_self ON waitlist FOR INSERT @@ -235,19 +267,17 @@ ALTER VIEW current_contribution_average SET (security_invoker = on); -- Current contributor is the highest score in the queue with the contribution -- not done yet and it's status expired without payload submitted. CREATE OR REPLACE VIEW current_contributor_id AS - SELECT q.id - FROM queue q - WHERE q.score = ( - SELECT MAX(qq.score) - FROM queue qq - WHERE NOT EXISTS ( - SELECT c.id FROM contribution c WHERE c.id = qq.id - ) AND ( - EXISTS (SELECT cs.expire FROM contribution_status cs WHERE cs.id = qq.id AND cs.expire > now()) - OR - EXISTS (SELECT cs.id FROM contribution_submitted cs WHERE cs.id = qq.id) - ) - ); + SELECT qq.id + FROM queue qq + WHERE NOT EXISTS ( + SELECT c.id FROM contribution c WHERE c.id = qq.id + ) AND ( + EXISTS (SELECT cs.expire FROM contribution_status cs WHERE cs.id = qq.id AND cs.expire > now()) + OR + EXISTS (SELECT cs.id FROM contribution_submitted cs WHERE cs.id = qq.id) + ) + ORDER BY qq.score DESC + LIMIT 1; ALTER VIEW current_contributor_id SET (security_invoker = on); @@ -289,15 +319,9 @@ ALTER VIEW current_payload_id SET (security_invoker = on); CREATE OR REPLACE PROCEDURE set_next_contributor() AS $$ BEGIN IF (SELECT COUNT(*) FROM public.current_contributor_id) = 0 THEN - INSERT INTO public.contribution_status(id) SELECT q.id FROM public.queue q WHERE q.score = ( - SELECT MAX(qq.score) - FROM public.queue qq - WHERE NOT EXISTS ( - SELECT c.id FROM public.contribution c WHERE c.id = qq.id - ) AND NOT EXISTS ( - SELECT cs.expire FROM public.contribution_status cs WHERE cs.id = qq.id AND cs.expire < now() - ) - ); + INSERT INTO public.contribution_status(id) + SELECT cq.id + FROM public.current_queue cq LIMIT 1; END IF; END $$ LANGUAGE plpgsql SET search_path = ''; @@ -408,6 +432,47 @@ ON storage.objects FOR EACH ROW EXECUTE FUNCTION set_contribution_submitted_trigger(); +----------------- +-- Attestation -- +----------------- +CREATE TABLE contribution_signature( + id uuid PRIMARY KEY, + public_key text NOT NULL, + signature text NOT NULL +); + +ALTER TABLE contribution_signature ENABLE ROW LEVEL 
SECURITY; +ALTER TABLE contribution_signature ADD FOREIGN KEY (id) REFERENCES contribution_status(id); + +CREATE POLICY view_self + ON contribution_signature + FOR SELECT + TO authenticated + USING ( + (SELECT auth.uid()) = id + ); + +CREATE POLICY allow_insert_self + ON contribution_signature + FOR INSERT + TO authenticated + WITH CHECK ( + (SELECT auth.uid()) = id + ); + + + +CREATE OR REPLACE VIEW current_user_state AS ( + SELECT (EXISTS (SELECT * FROM waitlist WHERE id = (SELECT auth.id)) AS in_waitlist +); + +ALTER VIEW current_user_state SET (security_invoker = off); + + +---------- +-- CRON -- +---------- + -- Will rotate the current contributor if the slot expired without any contribution submitted SELECT cron.schedule('update-contributor', '10 seconds', 'CALL set_next_contributor()'); diff --git a/mpc/coordinator/src/main.rs b/mpc/coordinator/src/main.rs index e5d4ad2466..c0c24ca251 100644 --- a/mpc/coordinator/src/main.rs +++ b/mpc/coordinator/src/main.rs @@ -1,5 +1,6 @@ use clap::{Parser, Subcommand}; -use mpc_shared::{phase2_verify, supabase::SupabaseMPCApi}; +use mpc_shared::{phase2_verify, signed_message, supabase::SupabaseMPCApi}; +use pgp::{cleartext::CleartextSignedMessage, Deserializable, SignedPublicKey}; #[derive(Parser, Debug)] #[command(version, about, long_about = None)] @@ -26,6 +27,8 @@ enum Error { CurrentPayloadNotFound, #[error("next payload not found.")] NextPayloadNotFound, + #[error("contributor signature not found")] + ContributorSignatureNotFound, } #[tokio::main] @@ -71,11 +74,58 @@ async fn main() -> Result<(), Box> { .contributor_payload(¤t_contributor.id) .await? .ok_or(Error::NextPayloadNotFound)?; - let payload_next = client + let next_payload = client .download_payload(&next_payload.id, &next_payload.id, progress) .await?; + println!("verifying signature..."); + let contribution_signature = client + .contributor_signature(¤t_contributor.id) + .await? 
+ .ok_or(Error::ContributorSignatureNotFound)?; + let signed_public_key = SignedPublicKey::from_armor_single::<&[u8]>( + hex::decode(contribution_signature.public_key) + .expect("impossible") + .as_ref(), + ) + .expect("impossible") + .0; + + // Last bytes are the sh256 of the whole contrib + let next_payload_hash = &next_payload[&next_payload.len() - 32..]; + + let public_key_is_valid = signed_public_key.verify().is_ok(); + if !public_key_is_valid { + println!("public key is invalid"); + } + + let raw_signature = + hex::decode(&contribution_signature.signature).expect("impossible"); + let signature = CleartextSignedMessage::from_armor::<&[u8]>(raw_signature.as_ref()) + .expect("impossible") + .0; + + let signature_matches = signature.signed_text() + == signed_message(¤t_payload.id, &hex::encode(next_payload_hash)); + if !signature_matches { + println!("signature signed text mismatch"); + } + + let signature_is_valid = signature.verify(&signed_public_key).is_ok(); + if !signature_is_valid { + println!("contribution signature is invalid"); + } + println!("verifying payload..."); - if phase2_verify(&payload_current, &payload_next).is_ok() { + let contribution_is_valid = phase2_verify(&payload_current, &next_payload).is_ok(); + if !contribution_is_valid { + println!("contribution is invalid"); + } + + if public_key_is_valid + && signature_matches + && signature_is_valid + && contribution_is_valid + { println!("verification succeeded."); client .insert_contribution(current_contributor.id.clone(), true) diff --git a/mpc/shared/Cargo.toml b/mpc/shared/Cargo.toml index c642afe909..c3da4337b0 100644 --- a/mpc/shared/Cargo.toml +++ b/mpc/shared/Cargo.toml @@ -4,9 +4,10 @@ name = "mpc-shared" version = "0.1.0" [dependencies] -serde = { workspace = true, features = ["derive"] } -thiserror = { workspace = true } -postgrest = "1.0" -reqwest = { workspace = true, features = ["json"] } -serde_json = { workspace = true } -tokio = { workspace = true, features = ["full"] } \ No newline at end of file +hex = { workspace = true, features = ["alloc"] } +postgrest = "1.0" +reqwest = { workspace = true, features = ["json"] } +serde = { workspace = true, features = ["derive"] } +serde_json = { workspace = true } +thiserror = { workspace = true } +tokio = { workspace = true, features = ["full"] } diff --git a/mpc/shared/src/lib.rs b/mpc/shared/src/lib.rs index c3618fb265..f7fa9f19db 100644 --- a/mpc/shared/src/lib.rs +++ b/mpc/shared/src/lib.rs @@ -1,5 +1,5 @@ -pub mod types; pub mod supabase; +pub mod types; use std::ffi::{c_char, c_int}; @@ -83,3 +83,7 @@ pub fn phase2_verify( } } } + +pub fn signed_message(payload_id: &str, payload_hash: &str) -> String { + format!("0______0 - {} - {}", payload_id, payload_hash) +} diff --git a/mpc/shared/src/supabase.rs b/mpc/shared/src/supabase.rs index 4996a7353a..d898d1aa3a 100644 --- a/mpc/shared/src/supabase.rs +++ b/mpc/shared/src/supabase.rs @@ -3,12 +3,12 @@ use std::{future::Future, io::SeekFrom, str::FromStr}; use postgrest::Postgrest; use reqwest::{ header::{HeaderMap, HeaderValue, AUTHORIZATION, CONTENT_LENGTH, RANGE}, - ClientBuilder, + ClientBuilder, StatusCode, }; use tokio::io::{AsyncSeekExt, AsyncWriteExt}; use crate::{ - types::{Contribution, ContributorId, PayloadId}, + types::{Contribution, ContributionSignature, ContributorId, PayloadId}, CONTRIBUTION_SIZE, }; @@ -133,6 +133,52 @@ impl SupabaseMPCApi { Ok(()) } + pub async fn insert_contribution_signature( + &self, + contributor_id: String, + public_key: Vec, + signature: Vec, + ) -> Result<(), DynError> { 
+ if let Err(e) = self + .client + .from("contribution_signature") + .insert(serde_json::to_string(&ContributionSignature { + id: contributor_id, + public_key: hex::encode(&public_key), + signature: hex::encode(&signature), + })?) + .execute() + .await? + .error_for_status() + { + // Conflict means we already have an entry. + // If network drops or something we must allow this to happen. + if e.status() == Some(StatusCode::CONFLICT) { + return Ok(()); + } else { + return Err(e.into()); + } + } + Ok(()) + } + + pub async fn contributor_signature( + &self, + contributor_id: &str, + ) -> Result, DynError> { + Ok(self + .client + .from("contribution_signature") + .eq("id", &contributor_id) + .select("*") + .execute() + .await? + .json::>() + .await? + .first() + .cloned()) + } + pub async fn download_payload( &self, payload_id: &str, diff --git a/mpc/shared/src/types.rs b/mpc/shared/src/types.rs index 72bdf1ba1f..67806140a8 100644 --- a/mpc/shared/src/types.rs +++ b/mpc/shared/src/types.rs @@ -16,3 +16,10 @@ pub struct Contribution { pub id: String, pub success: bool, } + +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct ContributionSignature { + pub id: String, + pub public_key: String, + pub signature: String, +} From 1bf5a26c3a420c2a1e3dfc45f4b188e48d896cf8 Mon Sep 17 00:00:00 2001 From: Hussein Ait Lahcen Date: Fri, 13 Sep 2024 22:16:11 +0200 Subject: [PATCH 24/52] fix(galois): non static after go upgrade --- galoisd/cmd/galoisd/cmd/phase1_init.go | 2 +- galoisd/cmd/galoisd/cmd/phase2_extract.go | 10 +++++----- galoisd/galoisd.nix | 1 - 3 files changed, 6 insertions(+), 7 deletions(-) diff --git a/galoisd/cmd/galoisd/cmd/phase1_init.go b/galoisd/cmd/galoisd/cmd/phase1_init.go index bb20d0719f..386a78b3e6 100644 --- a/galoisd/cmd/galoisd/cmd/phase1_init.go +++ b/galoisd/cmd/galoisd/cmd/phase1_init.go @@ -852,7 +852,7 @@ func convertPtauToPhase1(ptau Ptau) (phase1 mpc.Phase1, err error) { } } - phase1 = mpc.InitPhase1(int(ptau.Header.Power)); + phase1 = mpc.InitPhase1(int(ptau.Header.Power)) phase1.Parameters.G1.Tau = tauG1 phase1.Parameters.G1.AlphaTau = alphaTauG1 diff --git a/galoisd/cmd/galoisd/cmd/phase2_extract.go b/galoisd/cmd/galoisd/cmd/phase2_extract.go index d58438b9cf..6366d20f5a 100644 --- a/galoisd/cmd/galoisd/cmd/phase2_extract.go +++ b/galoisd/cmd/galoisd/cmd/phase2_extract.go @@ -18,31 +18,31 @@ func Phase2ExtractCmd() *cobra.Command { var r1cs bn254.R1CS err := readFrom(r1csPath, &r1cs) if err != nil { - return fmt.Errorf("failed to read r1cs: %v", err); + return fmt.Errorf("failed to read r1cs: %v", err) } phase1Path := args[1] var srs1 mpc.Phase1 err = readFrom(phase1Path, &srs1) if err != nil { - return fmt.Errorf("failed to read phase1: %v", err); + return fmt.Errorf("failed to read phase1: %v", err) } phase2Path := args[2] var srs2 mpc.Phase2 err = readFrom(phase2Path, &srs2) if err != nil { - return fmt.Errorf("failed to read phase2: %v", err); + return fmt.Errorf("failed to read phase2: %v", err) } phase2EvalsPath := args[3] var evals mpc.Phase2Evaluations err = readFrom(phase2EvalsPath, &evals) if err != nil { - return fmt.Errorf("failed to read phase2 evals: %v", err); + return fmt.Errorf("failed to read phase2 evals: %v", err) } pk, vk := mpc.ExtractKeys(&r1cs, &srs1, &srs2, &evals) pkOutput := args[4] err = saveTo(pkOutput, &pk) if err != nil { - return fmt.Errorf("failed to write pk: %v", err); + return fmt.Errorf("failed to write pk: %v", err) } vkOutput := args[5] return saveTo(vkOutput, &vk) diff --git a/galoisd/galoisd.nix 
b/galoisd/galoisd.nix index 0e05f67a6f..bd2ff9300f 100644 --- a/galoisd/galoisd.nix +++ b/galoisd/galoisd.nix @@ -60,7 +60,6 @@ tags = [ "binary" ]; doCheck = true; } // (if pkgs.stdenv.isLinux then { - nativeBuildInputs = [ pkgs.musl ]; CGO_ENABLED = 0; ldflags = [ "-extldflags '-static -L${pkgs.musl}/lib -s -w'" From 3643de0a520473941e88efe212afbbd8fcc4b6f9 Mon Sep 17 00:00:00 2001 From: Hussein Ait Lahcen Date: Sun, 15 Sep 2024 17:16:23 +0200 Subject: [PATCH 25/52] feat(mpc): join_queue for waitlist and current_user_state --- mpc/coordinator/database.sql | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/mpc/coordinator/database.sql b/mpc/coordinator/database.sql index 78a82cd97b..ea70a52683 100644 --- a/mpc/coordinator/database.sql +++ b/mpc/coordinator/database.sql @@ -148,7 +148,7 @@ BEGIN IF (public.open_to_public()) THEN INSERT INTO public.queue(id) VALUES ((SELECT auth.uid())); ELSE - RAISE EXCEPTION 'not_open_yet'; + INSERT INTO public.waitlist(id) VALUES ((SELECT auth.uid())); END IF; ELSE PERFORM public.redeem(code_id); @@ -460,15 +460,15 @@ CREATE POLICY allow_insert_self (SELECT auth.uid()) = id ); - - CREATE OR REPLACE VIEW current_user_state AS ( - SELECT (EXISTS (SELECT * FROM waitlist WHERE id = (SELECT auth.id)) AS in_waitlist + SELECT + (EXISTS (SELECT * FROM waitlist WHERE id = (SELECT auth.uid()))) AS in_waitlist, + (EXISTS (SELECT * FROM code WHERE user_id = (SELECT auth.uid()))) AS has_redeemed, + (EXISTS (SELECT * FROM queue WHERE id = (SELECT auth.uid()))) AS in_queue ); ALTER VIEW current_user_state SET (security_invoker = off); - ---------- -- CRON -- ---------- From a2984d6f9e8fd839052d35a14cbe01057263d925 Mon Sep 17 00:00:00 2001 From: Hussein Ait Lahcen Date: Wed, 18 Sep 2024 15:42:04 +0200 Subject: [PATCH 26/52] feat(mpc): better handling of sqlite and tracing on coordinator --- mpc/client/src/main.rs | 4 +- mpc/coordinator/Cargo.toml | 2 + mpc/coordinator/database.sql | 28 ++++++-- mpc/coordinator/src/main.rs | 130 ++++++++++++++++++++++++++++++----- mpc/shared/src/lib.rs | 4 +- mpc/shared/src/types.rs | 14 ++++ 6 files changed, 154 insertions(+), 28 deletions(-) diff --git a/mpc/client/src/main.rs b/mpc/client/src/main.rs index 91eef09b9f..5da327c1d5 100644 --- a/mpc/client/src/main.rs +++ b/mpc/client/src/main.rs @@ -186,7 +186,7 @@ async fn contribute( // Gnark phase2 contribution appends the sha256 hash at the end let phase2_contribution_hash = &phase2_contribution[phase2_contribution.len() - 32..]; let signature = CleartextSignedMessage::sign( - &signed_message(&payload_id, &hex::encode(phase2_contribution_hash)), + &signed_message(¤t_payload.id, &payload_id, &hex::encode(phase2_contribution_hash)), &mut secret_key, || String::new(), ) @@ -215,6 +215,7 @@ async fn contribute( | OpenFlags::SQLITE_OPEN_URI, ) .journal_mode(JournalMode::Wal) + .num_conns(1) .open() .await?; pool.conn(|conn| { @@ -350,6 +351,7 @@ async fn contribute( .send(Status::UploadEnded(payload_id.clone())) .expect("impossible"); } + pool.close().await?; Ok(()) } diff --git a/mpc/coordinator/Cargo.toml b/mpc/coordinator/Cargo.toml index 50c8f5cd51..cf5368fd7b 100644 --- a/mpc/coordinator/Cargo.toml +++ b/mpc/coordinator/Cargo.toml @@ -15,3 +15,5 @@ reqwest = { workspace = true, features = ["json"] } serde_json = { workspace = true } thiserror = { workspace = true } tokio = { workspace = true, features = ["full"] } +tracing = { workspace = true } +tracing-subscriber = { workspace = true, features = ["env-filter", "json", "tracing-log"] } \ No newline at end 
of file diff --git a/mpc/coordinator/database.sql b/mpc/coordinator/database.sql index ea70a52683..dc76233ef7 100644 --- a/mpc/coordinator/database.sql +++ b/mpc/coordinator/database.sql @@ -119,7 +119,8 @@ EXECUTE FUNCTION set_initial_score_trigger(); ----------- CREATE TABLE code ( id text PRIMARY KEY, - user_id uuid DEFAULT NULL + user_id uuid DEFAULT NULL, + display_name text NOT NULL DEFAULT ('John Doe') ); ALTER TABLE code ENABLE ROW LEVEL SECURITY; @@ -127,7 +128,7 @@ ALTER TABLE code ADD FOREIGN KEY (user_id) REFERENCES auth.users(id); CREATE UNIQUE INDEX idx_code_user_id ON code(user_id); CREATE OR REPLACE FUNCTION redeem(code_id text) RETURNS void AS $$ - DECLARE +DECLARE redeemed_code public.code%ROWTYPE := NULL; BEGIN UPDATE public.code c @@ -444,12 +445,12 @@ CREATE TABLE contribution_signature( ALTER TABLE contribution_signature ENABLE ROW LEVEL SECURITY; ALTER TABLE contribution_signature ADD FOREIGN KEY (id) REFERENCES contribution_status(id); -CREATE POLICY view_self +CREATE POLICY view_all ON contribution_signature FOR SELECT TO authenticated USING ( - (SELECT auth.uid()) = id + true ); CREATE POLICY allow_insert_self @@ -462,13 +463,26 @@ CREATE POLICY allow_insert_self CREATE OR REPLACE VIEW current_user_state AS ( SELECT - (EXISTS (SELECT * FROM waitlist WHERE id = (SELECT auth.uid()))) AS in_waitlist, - (EXISTS (SELECT * FROM code WHERE user_id = (SELECT auth.uid()))) AS has_redeemed, - (EXISTS (SELECT * FROM queue WHERE id = (SELECT auth.uid()))) AS in_queue + (EXISTS (SELECT * FROM public.waitlist WHERE id = (SELECT auth.uid()))) AS in_waitlist, + (EXISTS (SELECT * FROM public.code WHERE user_id = (SELECT auth.uid()))) AS has_redeemed, + (EXISTS (SELECT * FROM public.queue WHERE id = (SELECT auth.uid()))) AS in_queue, + (COALESCE((SELECT display_name FROM public.code WHERE user_id = (SELECT auth.uid())), (SELECT raw_user_meta_data->>'name' FROM auth.users u WHERE u.id = (SELECT auth.uid())))) AS display_name ); ALTER VIEW current_user_state SET (security_invoker = off); +CREATE OR REPLACE VIEW users_contribution AS ( + SELECT c.id, u.raw_user_meta_data->>'user_name' AS user_name, u.raw_user_meta_data->>'avatar_url' AS avatar_url, c.seq, q.payload_id, cs.public_key, cs.signature + FROM public.contribution c + INNER JOIN public.queue q ON (c.id = q.id) + INNER JOIN public.contribution_signature cs ON (c.id = cs.id) + INNER JOIN auth.users u ON (c.id = u.id) + WHERE c.success + ORDER BY c.seq ASC +); + +ALTER VIEW users_contribution SET (security_invoker = on); + ---------- -- CRON -- ---------- diff --git a/mpc/coordinator/src/main.rs b/mpc/coordinator/src/main.rs index c0c24ca251..ee3d6e7ec2 100644 --- a/mpc/coordinator/src/main.rs +++ b/mpc/coordinator/src/main.rs @@ -1,14 +1,47 @@ +use std::str::FromStr; + use clap::{Parser, Subcommand}; use mpc_shared::{phase2_verify, signed_message, supabase::SupabaseMPCApi}; use pgp::{cleartext::CleartextSignedMessage, Deserializable, SignedPublicKey}; +use tracing::{debug, error, info, warn}; +use tracing_subscriber::EnvFilter; #[derive(Parser, Debug)] #[command(version, about, long_about = None)] struct Args { + #[arg( + global = true, + short = 'f', + long, + default_value = "json" + )] + log_format: LogFormat, #[command(subcommand)] command: Command, } +#[derive(Debug, Copy, Clone)] +pub enum LogFormat { + Text, + Json, +} + +#[derive(Debug, thiserror::Error)] +#[error("unknown log format {0}")] +pub struct UnknownLogFormatError(String); + +impl FromStr for LogFormat { + type Err = UnknownLogFormatError; + + fn from_str(s: 
&str) -> std::result::Result { + match s { + "text" => Ok(Self::Text), + "json" => Ok(Self::Json), + s => Err(UnknownLogFormatError(s.to_owned())), + } + } +} + #[derive(Subcommand, Debug, Clone)] enum Command { Start { @@ -34,12 +67,25 @@ enum Error { #[tokio::main] async fn main() -> Result<(), Box> { let args = Args::parse(); + match args.log_format { + LogFormat::Text => { + tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .init(); + } + LogFormat::Json => { + tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .json() + .init(); + } + }; match args.command { Command::Start { url, jwt } => { let client = SupabaseMPCApi::new(url, jwt.clone(), jwt); - let progress = |percent| async move { println!("downloaded: {:.2}%", percent) }; + let progress = |percent| async move { debug!("downloaded: {:.2}%", percent) }; loop { - println!("downloading current payload..."); + info!("downloading current payload..."); let current_payload = client .current_payload() .await? @@ -47,6 +93,7 @@ async fn main() -> Result<(), Box> { let payload_current = client .download_payload(¤t_payload.id, ¤t_payload.id, progress) .await?; + info!(%current_payload, "download current payload."); let current_contributor = { match client .current_contributor() @@ -54,7 +101,7 @@ async fn main() -> Result<(), Box> { .ok_or(Error::ContributorNotFound) { Ok(contributor) => { - println!("awaiting contribution of {}...", &contributor.id); + info!(%contributor, %current_payload, "awaiting contribution of {}...", &contributor.id); if client.contribution_submitted(&contributor.id).await? { contributor } else { @@ -63,27 +110,27 @@ async fn main() -> Result<(), Box> { } } Err(_) => { - println!("awaiting contributor to join queue..."); + warn!("awaiting contributor to join queue..."); tokio::time::sleep(std::time::Duration::from_secs(10)).await; continue; } } }; - println!("detected contribution submission, downloading..."); let next_payload = client .contributor_payload(¤t_contributor.id) .await? .ok_or(Error::NextPayloadNotFound)?; - let next_payload = client + info!(%current_contributor, %current_payload, %next_payload, "dected contribution, downloading..."); + let next_payload_content = client .download_payload(&next_payload.id, &next_payload.id, progress) .await?; - println!("verifying signature..."); + info!(%current_contributor, %current_payload, %next_payload, "verifying signature..."); let contribution_signature = client .contributor_signature(¤t_contributor.id) .await? 
.ok_or(Error::ContributorSignatureNotFound)?; let signed_public_key = SignedPublicKey::from_armor_single::<&[u8]>( - hex::decode(contribution_signature.public_key) + hex::decode(&contribution_signature.public_key) .expect("impossible") .as_ref(), ) @@ -91,11 +138,18 @@ async fn main() -> Result<(), Box> { .0; // Last bytes are the sh256 of the whole contrib - let next_payload_hash = &next_payload[&next_payload.len() - 32..]; + let next_payload_hash = &next_payload_content[&next_payload_content.len() - 32..]; let public_key_is_valid = signed_public_key.verify().is_ok(); if !public_key_is_valid { - println!("public key is invalid"); + error!( + %current_contributor, + %current_payload, + %next_payload, + %contribution_signature.public_key, + %contribution_signature.signature, + "public key is invalid" + ); } let raw_signature = @@ -105,20 +159,46 @@ async fn main() -> Result<(), Box> { .0; let signature_matches = signature.signed_text() - == signed_message(¤t_payload.id, &hex::encode(next_payload_hash)); + == signed_message( + ¤t_payload.id, + &next_payload.id, + &hex::encode(next_payload_hash), + ); if !signature_matches { - println!("signature signed text mismatch"); + error!( + %current_contributor, + %current_payload, + %next_payload, + %contribution_signature.public_key, + %contribution_signature.signature, + "signature signed text mismatch" + ); } let signature_is_valid = signature.verify(&signed_public_key).is_ok(); if !signature_is_valid { - println!("contribution signature is invalid"); + error!( + %current_contributor, + %current_payload, + %next_payload, + %contribution_signature.public_key, + %contribution_signature.signature, + "contribution signature is invalid" + ); } - println!("verifying payload..."); - let contribution_is_valid = phase2_verify(&payload_current, &next_payload).is_ok(); + info!("verifying payload..."); + let contribution_is_valid = + phase2_verify(&payload_current, &next_payload_content).is_ok(); if !contribution_is_valid { - println!("contribution is invalid"); + error!( + %current_contributor, + %current_payload, + %next_payload, + %contribution_signature.public_key, + %contribution_signature.signature, + "contribution is invalid" + ); } if public_key_is_valid @@ -126,13 +206,27 @@ async fn main() -> Result<(), Box> { && signature_is_valid && contribution_is_valid { - println!("verification succeeded."); + info!( + %current_contributor, + %current_payload, + %next_payload, + %contribution_signature.public_key, + %contribution_signature.signature, + "verification succeeded." + ); client .insert_contribution(current_contributor.id.clone(), true) .await?; tokio::fs::remove_file(¤t_payload.id).await?; } else { - println!("verification failed."); + error!( + %current_contributor, + %current_payload, + %next_payload, + %contribution_signature.public_key, + %contribution_signature.signature, + "verification failed." 
+ ); client .insert_contribution(current_contributor.id.clone(), false) .await?; diff --git a/mpc/shared/src/lib.rs b/mpc/shared/src/lib.rs index f7fa9f19db..b69a463891 100644 --- a/mpc/shared/src/lib.rs +++ b/mpc/shared/src/lib.rs @@ -84,6 +84,6 @@ pub fn phase2_verify( } } -pub fn signed_message(payload_id: &str, payload_hash: &str) -> String { - format!("0______0 - {} - {}", payload_id, payload_hash) +pub fn signed_message(previous_payload_id: &str, next_payload_id: &str, payload_hash: &str) -> String { + format!("0______0 - {} - {} - {}", previous_payload_id, next_payload_id, payload_hash) } diff --git a/mpc/shared/src/types.rs b/mpc/shared/src/types.rs index 67806140a8..ddb899590c 100644 --- a/mpc/shared/src/types.rs +++ b/mpc/shared/src/types.rs @@ -1,3 +1,5 @@ +use std::fmt::Display; + use serde::{Deserialize, Serialize}; #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] @@ -5,12 +7,24 @@ pub struct ContributorId { pub id: String, } +impl Display for ContributorId { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + self.id.fmt(f) + } +} + #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] pub struct PayloadId { #[serde(rename = "payload_id")] pub id: String, } +impl Display for PayloadId { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + self.id.fmt(f) + } +} + #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] pub struct Contribution { pub id: String, From b79a156a06267029cb1c98d118c4f5129d0c4ff6 Mon Sep 17 00:00:00 2001 From: Hussein Ait Lahcen Date: Wed, 18 Sep 2024 16:01:39 +0200 Subject: [PATCH 27/52] fix: revert changes to app --- app/src/routes/ceremony/+page.svelte | 195 --------------------------- 1 file changed, 195 deletions(-) delete mode 100644 app/src/routes/ceremony/+page.svelte diff --git a/app/src/routes/ceremony/+page.svelte b/app/src/routes/ceremony/+page.svelte deleted file mode 100644 index b1cb587985..0000000000 --- a/app/src/routes/ceremony/+page.svelte +++ /dev/null @@ -1,195 +0,0 @@ - - - - Union | Ceremony - - - -
- {#if queuePosition != null} - Queue position: {queuePosition} - {:else} -
- Queue position: -
- {/if} - - {#each messages as message} -
- {@html message} -
- {/each} -
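A note on the verification flow the preceding patches settle on: the contributor's cleartext PGP signature now covers the previous payload id, the next payload id, and the sha256 that gnark appends as the last 32 bytes of the contribution, so a signature cannot be replayed against a different transcript transition. Below is a minimal sketch of the text the coordinator compares against `signature.signed_text()`, assuming the `signed_message` helper from mpc/shared above; `expected_signed_text` is an illustrative name, not code from this series:

    use mpc_shared::signed_message;

    // Sketch only: reconstruct the exact cleartext the contributor must have
    // signed for the transition previous -> next.
    fn expected_signed_text(
        previous_payload_id: &str,
        next_payload_id: &str,
        next_payload: &[u8],
    ) -> String {
        // The phase2 contribution ends with the sha256 of the whole payload,
        // so the last 32 bytes are the hash that gets signed.
        let payload_hash = &next_payload[next_payload.len() - 32..];
        signed_message(previous_payload_id, next_payload_id, &hex::encode(payload_hash))
    }

Binding both ids means a signature produced for one slot verifies only against that exact pair, which is precisely what the coordinator rejects on with "signature signed text mismatch".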
From 3a08b42e5c0bf12477e777e2ff32efb99e780278 Mon Sep 17 00:00:00 2001 From: Hussein Ait Lahcen Date: Wed, 18 Sep 2024 16:01:53 +0200 Subject: [PATCH 28/52] fix(mpc): spelling and better naming --- mpc/coordinator/src/main.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/mpc/coordinator/src/main.rs b/mpc/coordinator/src/main.rs index ee3d6e7ec2..d6d672c6b3 100644 --- a/mpc/coordinator/src/main.rs +++ b/mpc/coordinator/src/main.rs @@ -120,7 +120,7 @@ async fn main() -> Result<(), Box> { .contributor_payload(¤t_contributor.id) .await? .ok_or(Error::NextPayloadNotFound)?; - info!(%current_contributor, %current_payload, %next_payload, "dected contribution, downloading..."); + info!(%current_contributor, %current_payload, %next_payload, "detected contribution, downloading..."); let next_payload_content = client .download_payload(&next_payload.id, &next_payload.id, progress) .await?; @@ -158,13 +158,13 @@ async fn main() -> Result<(), Box> { .expect("impossible") .0; - let signature_matches = signature.signed_text() + let signed_text_matches = signature.signed_text() == signed_message( ¤t_payload.id, &next_payload.id, &hex::encode(next_payload_hash), ); - if !signature_matches { + if !signed_text_matches { error!( %current_contributor, %current_payload, @@ -202,7 +202,7 @@ async fn main() -> Result<(), Box> { } if public_key_is_valid - && signature_matches + && signed_text_matches && signature_is_valid && contribution_is_valid { From 32304d1a43729f528cebda2032d5315aabf8d35f Mon Sep 17 00:00:00 2001 From: Hussein Ait Lahcen Date: Wed, 18 Sep 2024 16:39:34 +0200 Subject: [PATCH 29/52] chore: spelling & fmt --- Cargo.toml | 2 +- mpc/client/src/main.rs | 6 +++++- mpc/coordinator/Cargo.toml | 24 ++++++++++++------------ mpc/coordinator/database.sql | 4 ++-- mpc/coordinator/src/main.rs | 7 +------ mpc/shared/src/lib.rs | 11 +++++++++-- 6 files changed, 30 insertions(+), 24 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 4b522d323d..18dd5b3bfa 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -226,9 +226,9 @@ ibc-vm-rs = { path = "lib/ibc-vm-rs", default-features = fa ics008-wasm-client = { path = "cosmwasm/ics08-light-clients/interface", default-features = false } ics23 = { path = "lib/ics23", default-features = false } macros = { path = "lib/macros", default-features = false } -mpc-shared = { path = "mpc/shared", default-features = false } move-bindgen = { path = "tools/move-bindgen", default-features = false } move-bindgen-derive = { path = "lib/move-bindgen-derive", default-features = false } +mpc-shared = { path = "mpc/shared", default-features = false } pg-queue = { path = "lib/pg-queue", default-features = false } poseidon-rs = { path = "lib/poseidon-rs", default-features = false } protos = { path = "generated/rust/protos", default-features = false } diff --git a/mpc/client/src/main.rs b/mpc/client/src/main.rs index 5da327c1d5..cd17778a6b 100644 --- a/mpc/client/src/main.rs +++ b/mpc/client/src/main.rs @@ -186,7 +186,11 @@ async fn contribute( // Gnark phase2 contribution appends the sha256 hash at the end let phase2_contribution_hash = &phase2_contribution[phase2_contribution.len() - 32..]; let signature = CleartextSignedMessage::sign( - &signed_message(¤t_payload.id, &payload_id, &hex::encode(phase2_contribution_hash)), + &signed_message( + ¤t_payload.id, + &payload_id, + &hex::encode(phase2_contribution_hash), + ), &mut secret_key, || String::new(), ) diff --git a/mpc/coordinator/Cargo.toml b/mpc/coordinator/Cargo.toml index cf5368fd7b..acbfa65c2b 100644 --- 
a/mpc/coordinator/Cargo.toml +++ b/mpc/coordinator/Cargo.toml @@ -4,16 +4,16 @@ name = "mpc-coordinator" version = "0.1.0" [dependencies] -async-sqlite = "0.2.2" -clap = { version = "4.5", features = ["derive"] } -futures.workspace = true -hex = { workspace = true } -mpc-shared = { workspace = true } -pgp = "0.13" -postgrest = "1.0" -reqwest = { workspace = true, features = ["json"] } -serde_json = { workspace = true } -thiserror = { workspace = true } -tokio = { workspace = true, features = ["full"] } +async-sqlite = "0.2.2" +clap = { version = "4.5", features = ["derive"] } +futures.workspace = true +hex = { workspace = true } +mpc-shared = { workspace = true } +pgp = "0.13" +postgrest = "1.0" +reqwest = { workspace = true, features = ["json"] } +serde_json = { workspace = true } +thiserror = { workspace = true } +tokio = { workspace = true, features = ["full"] } tracing = { workspace = true } -tracing-subscriber = { workspace = true, features = ["env-filter", "json", "tracing-log"] } \ No newline at end of file +tracing-subscriber = { workspace = true, features = ["env-filter", "json", "tracing-log"] } diff --git a/mpc/coordinator/database.sql b/mpc/coordinator/database.sql index dc76233ef7..ac91a23c4e 100644 --- a/mpc/coordinator/database.sql +++ b/mpc/coordinator/database.sql @@ -234,7 +234,7 @@ CREATE POLICY view_all true ); --- The next contributor is the one with the higest score that didn't contribute yet. +-- The next contributor is the one with the highest score that didn't contribute yet. CREATE OR REPLACE FUNCTION set_next_contributor_trigger() RETURNS TRIGGER AS $$ BEGIN CALL public.set_next_contributor(); @@ -302,7 +302,7 @@ CREATE OR REPLACE VIEW current_queue_position AS ALTER VIEW current_queue_position SET (security_invoker = on); --- The current payload is from the latest successfull contribution +-- The current payload is from the latest successful contribution CREATE OR REPLACE VIEW current_payload_id AS SELECT COALESCE( (SELECT q.payload_id diff --git a/mpc/coordinator/src/main.rs b/mpc/coordinator/src/main.rs index d6d672c6b3..c21f4be55a 100644 --- a/mpc/coordinator/src/main.rs +++ b/mpc/coordinator/src/main.rs @@ -9,12 +9,7 @@ use tracing_subscriber::EnvFilter; #[derive(Parser, Debug)] #[command(version, about, long_about = None)] struct Args { - #[arg( - global = true, - short = 'f', - long, - default_value = "json" - )] + #[arg(global = true, short = 'f', long, default_value = "json")] log_format: LogFormat, #[command(subcommand)] command: Command, diff --git a/mpc/shared/src/lib.rs b/mpc/shared/src/lib.rs index b69a463891..dfd32ca68f 100644 --- a/mpc/shared/src/lib.rs +++ b/mpc/shared/src/lib.rs @@ -84,6 +84,13 @@ pub fn phase2_verify( } } -pub fn signed_message(previous_payload_id: &str, next_payload_id: &str, payload_hash: &str) -> String { - format!("0______0 - {} - {} - {}", previous_payload_id, next_payload_id, payload_hash) +pub fn signed_message( + previous_payload_id: &str, + next_payload_id: &str, + payload_hash: &str, +) -> String { + format!( + "0______0 - {} - {} - {}", + previous_payload_id, next_payload_id, payload_hash + ) } From 3c97ac021ea28a5a7fd497b9dcbdb0dd4ec1328a Mon Sep 17 00:00:00 2001 From: Hussein Ait Lahcen Date: Thu, 19 Sep 2024 14:59:41 +0200 Subject: [PATCH 30/52] feat(mpc): chunked upload --- mpc/client/Cargo.toml | 5 +++-- mpc/client/src/main.rs | 20 ++++++++++++-------- 2 files changed, 15 insertions(+), 10 deletions(-) diff --git a/mpc/client/Cargo.toml b/mpc/client/Cargo.toml index ccb9d7127d..b5c07cbc19 100644 --- 
a/mpc/client/Cargo.toml +++ b/mpc/client/Cargo.toml @@ -8,6 +8,7 @@ async-sqlite = "0.2.2" base64 = { workspace = true } clap = { version = "4.5", features = ["derive"] } crossterm = "0.27.0" +futures-util = "0.3" hex = { workspace = true } http-body-util = "0.1" httpdate = "1.0" @@ -18,10 +19,10 @@ pgp = "0.13" postgrest = "1.0" rand = "0.8.5" ratatui = "0.27.0" -reqwest = { workspace = true, features = ["json"] } +reqwest = { workspace = true, features = ["json", "stream", "multipart"] } serde = { workspace = true, features = ["derive"] } serde_json = { workspace = true } thiserror = { workspace = true } throbber-widgets-tui = "0.6" tokio = { workspace = true, features = ["full"] } -tokio-util = "0.7" +tokio-util = { version = "0.7", features = ["codec"] } diff --git a/mpc/client/src/main.rs b/mpc/client/src/main.rs index cd17778a6b..0ab02d90df 100644 --- a/mpc/client/src/main.rs +++ b/mpc/client/src/main.rs @@ -32,7 +32,7 @@ use pgp::{ ArmorOptions, Deserializable, KeyType, SecretKeyParamsBuilder, SignedSecretKey, }; use ratatui::{backend::CrosstermBackend, Terminal, Viewport}; -use reqwest::header::LOCATION; +use reqwest::{header::LOCATION, Body}; use serde::Deserialize; use tokio::{ net::TcpListener, @@ -279,7 +279,8 @@ async fn contribute( ), ) .send() - .await?; + .await? + .error_for_status()?; let location = response .headers() .get(LOCATION) @@ -337,17 +338,20 @@ async fn contribute( // ================================================== // https://tus.io/protocols/resumable-upload#patch == // ================================================== + let chunks = phase2_contribution + .into_iter() + .skip(upload_offset) + .collect::>() + // 1mb + .chunks(1024 * 1024) + .map(|x| Ok::<_, std::io::Error>(x.to_vec())) + .collect::>(); upload_client .patch(&upload_location) .header("Tus-Resumable", "1.0.0") .header("Content-Type", "application/offset+octet-stream") .header("Upload-Offset", upload_offset.to_string()) - .body( - phase2_contribution - .into_iter() - .skip(upload_offset) - .collect::>(), - ) + .body(Body::wrap_stream(futures_util::stream::iter(chunks))) .send() .await? 
.error_for_status()?; From d3cf0c1d9f9b4935dd8cc4e16ade58a865b974eb Mon Sep 17 00:00:00 2001 From: Hussein Ait Lahcen Date: Thu, 19 Sep 2024 14:59:59 +0200 Subject: [PATCH 31/52] feat(mpc): waitlist timestamp hardening and materialized contribs --- mpc/coordinator/database.sql | 35 ++++++++++++++++++++++++++++------- 1 file changed, 28 insertions(+), 7 deletions(-) diff --git a/mpc/coordinator/database.sql b/mpc/coordinator/database.sql index ac91a23c4e..7a0300fe8d 100644 --- a/mpc/coordinator/database.sql +++ b/mpc/coordinator/database.sql @@ -28,7 +28,8 @@ CREATE POLICY allow_insert_self ); CREATE TABLE waitlist( - id UUID PRIMARY KEY + id UUID PRIMARY KEY, + created_at TIMESTAMPTZ NOT NULL DEFAULT(now()) ); ALTER TABLE waitlist ENABLE ROW LEVEL SECURITY; @@ -47,9 +48,22 @@ CREATE POLICY allow_insert_self FOR INSERT TO authenticated WITH CHECK ( - (SELECT auth.uid()) = id + (SELECT auth.uid()) = id AND open_to_public() = false ); +CREATE OR REPLACE FUNCTION waitlist_overwrite_timestamp() RETURNS TRIGGER AS $$ +BEGIN + NEW.created_at = now(); + RETURN NEW; +END +$$ LANGUAGE plpgsql SET search_path = ''; + +CREATE TRIGGER waitlist_overwrite_timestamp +BEFORE INSERT +ON waitlist +FOR EACH ROW +EXECUTE FUNCTION waitlist_overwrite_timestamp(); + ----------- -- Queue -- ----------- @@ -162,7 +176,7 @@ $$ LANGUAGE plpgsql SECURITY DEFINER SET search_path = ''; ------------------------- CREATE OR REPLACE FUNCTION expiration_delay() RETURNS INTERVAL AS $$ BEGIN - RETURN INTERVAL '30 minutes'; + RETURN INTERVAL '1 hour'; END $$ LANGUAGE plpgsql SET search_path = ''; @@ -439,11 +453,13 @@ EXECUTE FUNCTION set_contribution_submitted_trigger(); CREATE TABLE contribution_signature( id uuid PRIMARY KEY, public_key text NOT NULL, - signature text NOT NULL + signature text NOT NULL, + public_key_hash text GENERATED ALWAYS AS (encode(sha256(decode(public_key, 'hex')), 'hex')) STORED ); ALTER TABLE contribution_signature ENABLE ROW LEVEL SECURITY; ALTER TABLE contribution_signature ADD FOREIGN KEY (id) REFERENCES contribution_status(id); +CREATE UNIQUE INDEX idx_contribution_signature_pkh ON contribution_signature(public_key_hash); CREATE POLICY view_all ON contribution_signature @@ -471,17 +487,20 @@ CREATE OR REPLACE VIEW current_user_state AS ( ALTER VIEW current_user_state SET (security_invoker = off); -CREATE OR REPLACE VIEW users_contribution AS ( - SELECT c.id, u.raw_user_meta_data->>'user_name' AS user_name, u.raw_user_meta_data->>'avatar_url' AS avatar_url, c.seq, q.payload_id, cs.public_key, cs.signature +CREATE MATERIALIZED VIEW IF NOT EXISTS users_contribution AS ( + SELECT c.id, u.raw_user_meta_data->>'name' AS user_name, u.raw_user_meta_data->>'avatar_url' AS avatar_url, c.seq, q.payload_id, cs.public_key, cs.signature, cs.public_key_hash, s.started AS time_started, su.created_at AS time_submitted, c.created_at AS time_verified FROM public.contribution c INNER JOIN public.queue q ON (c.id = q.id) + INNER JOIN public.contribution_status s ON (c.id = s.id) + INNER JOIN public.contribution_submitted su ON (c.id = su.id) INNER JOIN public.contribution_signature cs ON (c.id = cs.id) INNER JOIN auth.users u ON (c.id = u.id) WHERE c.success ORDER BY c.seq ASC ); -ALTER VIEW users_contribution SET (security_invoker = on); +CREATE UNIQUE INDEX idx_users_contribution_user_id ON users_contribution(id); +CREATE UNIQUE INDEX idx_users_contribution_pkh ON users_contribution(public_key_hash); ---------- -- CRON -- @@ -490,4 +509,6 @@ ALTER VIEW users_contribution SET (security_invoker = on); -- Will rotate 
the current contributor if the slot expired without any contribution submitted SELECT cron.schedule('update-contributor', '10 seconds', 'CALL set_next_contributor()'); +SELECT cron.schedule('update-users-contribution', '30 seconds', 'REFRESH MATERIALIZED VIEW CONCURRENTLY public.users_contribution'); + COMMIT; From eea4275bae2297a93f8f348e618fd64ddb9d83f8 Mon Sep 17 00:00:00 2001 From: Hussein Ait Lahcen Date: Thu, 19 Sep 2024 15:53:33 +0200 Subject: [PATCH 32/52] feat(mpc): add `/secret_key` and `/clear` endpoints --- mpc/client/src/main.rs | 74 +++++++++++++++++++++++++++++------------- 1 file changed, 52 insertions(+), 22 deletions(-) diff --git a/mpc/client/src/main.rs b/mpc/client/src/main.rs index 0ab02d90df..49dc0c8984 100644 --- a/mpc/client/src/main.rs +++ b/mpc/client/src/main.rs @@ -44,7 +44,9 @@ use tokio::{ use tokio_util::sync::CancellationToken; use types::Status; -const ENDPOINT: &str = "/contribute"; +const CONTRIBUTE_ENDPOINT: &str = "/contribute"; +const SK_ENDPOINT: &str = "/secret_key"; +const CLEAR_ENDPOINT: &str = "/clear"; const CONTRIB_SK_PATH: &str = "contrib_key.sk.asc"; @@ -383,9 +385,33 @@ async fn handle( .body(body) .unwrap()) }; + let raw_response = |status, body| { + Ok(hyper::Response::builder() + .header(hyper::header::ACCESS_CONTROL_ALLOW_ORIGIN, "*") + .header(hyper::header::CONTENT_TYPE, "application/octet-stream") + .header( + hyper::header::CONTENT_DISPOSITION, + format!("attachment; filename={CONTRIB_SK_PATH}"), + ) + .status(status) + .body(body) + .unwrap()) + }; let response_empty = |status| response(status, BoxBody::default()); match (req.method(), req.uri().path()) { - (&Method::POST, ENDPOINT) + (&Method::POST, CLEAR_ENDPOINT) => { + let _ = tokio::fs::remove_file(CONTRIB_SK_PATH).await; + response_empty(hyper::StatusCode::OK) + } + (&Method::GET, SK_ENDPOINT) => { + if let Ok(_) = tokio::fs::metadata(CONTRIB_SK_PATH).await { + let content = tokio::fs::read(CONTRIB_SK_PATH).await?; + raw_response(hyper::StatusCode::OK, full(content)) + } else { + response_empty(hyper::StatusCode::NOT_FOUND) + } + } + (&Method::POST, CONTRIBUTE_ENDPOINT) if lock .compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst) .is_ok() => @@ -416,8 +442,10 @@ async fn handle( response_empty(hyper::StatusCode::ACCEPTED) } // FE must poll GET and dispatch accordingly. - (&Method::POST, ENDPOINT) => response_empty(hyper::StatusCode::SERVICE_UNAVAILABLE), - (&Method::GET, ENDPOINT) => match latest_status.read().await.clone() { + (&Method::POST, CONTRIBUTE_ENDPOINT) => { + response_empty(hyper::StatusCode::SERVICE_UNAVAILABLE) + } + (&Method::GET, CONTRIBUTE_ENDPOINT) => match latest_status.read().await.clone() { Status::Failed(e) => { lock.compare_exchange(true, false, Ordering::SeqCst, Ordering::SeqCst) .expect("impossible"); @@ -434,24 +462,26 @@ async fn handle( ), }, // CORS preflight request. 
- (&Method::OPTIONS, ENDPOINT) => Ok(hyper::Response::builder() - .header(hyper::header::ACCESS_CONTROL_ALLOW_ORIGIN, "*") - .header( - hyper::header::ACCESS_CONTROL_ALLOW_HEADERS, - hyper::header::CONTENT_TYPE, - ) - .header( - hyper::header::ACCESS_CONTROL_ALLOW_METHODS, - format!( - "{}, {}, {}", - Method::OPTIONS.as_str(), - Method::GET.as_str(), - Method::POST.as_str() - ), - ) - .status(hyper::StatusCode::OK) - .body(BoxBody::default()) - .unwrap()), + (&Method::OPTIONS, CONTRIBUTE_ENDPOINT | SK_ENDPOINT | CLEAR_ENDPOINT) => { + Ok(hyper::Response::builder() + .header(hyper::header::ACCESS_CONTROL_ALLOW_ORIGIN, "*") + .header( + hyper::header::ACCESS_CONTROL_ALLOW_HEADERS, + hyper::header::CONTENT_DISPOSITION, + ) + .header( + hyper::header::ACCESS_CONTROL_ALLOW_METHODS, + format!( + "{}, {}, {}", + Method::OPTIONS.as_str(), + Method::GET.as_str(), + Method::POST.as_str() + ), + ) + .status(hyper::StatusCode::OK) + .body(BoxBody::default()) + .unwrap()) + } _ => response_empty(hyper::StatusCode::NOT_FOUND), } } From 6febf2085743b7529173eeeb2eff33624947f776 Mon Sep 17 00:00:00 2001 From: Hussein Ait Lahcen Date: Thu, 19 Sep 2024 23:27:44 +0200 Subject: [PATCH 33/52] feat(mpc): improve username selection --- mpc/coordinator/database.sql | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/mpc/coordinator/database.sql b/mpc/coordinator/database.sql index 7a0300fe8d..86db3f733d 100644 --- a/mpc/coordinator/database.sql +++ b/mpc/coordinator/database.sql @@ -1,5 +1,13 @@ BEGIN; +----------- +-- Erase -- +----------- + -- TRUNCATE TABLE auth.users CASCADE; + -- DELETE FROM storage.objects o + -- WHERE o.bucket_id = 'contributions' + -- AND o.name <> '00000000-0000-0000-0000-000000000000'; + -- Default bucket for contributions upload INSERT INTO storage.buckets(id, name, public) VALUES('contributions', 'contributions', false); @@ -488,7 +496,7 @@ CREATE OR REPLACE VIEW current_user_state AS ( ALTER VIEW current_user_state SET (security_invoker = off); CREATE MATERIALIZED VIEW IF NOT EXISTS users_contribution AS ( - SELECT c.id, u.raw_user_meta_data->>'name' AS user_name, u.raw_user_meta_data->>'avatar_url' AS avatar_url, c.seq, q.payload_id, cs.public_key, cs.signature, cs.public_key_hash, s.started AS time_started, su.created_at AS time_submitted, c.created_at AS time_verified + SELECT c.id, COALESCE(u.raw_user_meta_data->>'user_name', u.raw_user_meta_data->>'name') AS user_name, u.raw_user_meta_data->>'avatar_url' AS avatar_url, c.seq, q.payload_id, cs.public_key, cs.signature, cs.public_key_hash, s.started AS time_started, su.created_at AS time_submitted, c.created_at AS time_verified FROM public.contribution c INNER JOIN public.queue q ON (c.id = q.id) INNER JOIN public.contribution_status s ON (c.id = s.id) From 6a0f301fb40b0410cbac05daba13f80ddde8a1c1 Mon Sep 17 00:00:00 2001 From: Hussein Ait Lahcen Date: Fri, 20 Sep 2024 14:57:17 +0200 Subject: [PATCH 34/52] feat(mpc): add timeline logs and more infors on contribution --- mpc/coordinator/database.sql | 132 +++++++++++++++++++++++++++++++---- 1 file changed, 117 insertions(+), 15 deletions(-) diff --git a/mpc/coordinator/database.sql b/mpc/coordinator/database.sql index 86db3f733d..f526d5139a 100644 --- a/mpc/coordinator/database.sql +++ b/mpc/coordinator/database.sql @@ -12,8 +12,8 @@ BEGIN; INSERT INTO storage.buckets(id, name, public) VALUES('contributions', 'contributions', false); CREATE TABLE wallet_address( - id UUID PRIMARY KEY, - wallet TEXT NOT NULL + id uuid PRIMARY KEY, + wallet text NOT NULL ); 
 ALTER TABLE wallet_address ENABLE ROW LEVEL SECURITY;
@@ -36,12 +36,14 @@ CREATE POLICY allow_insert_self
     );
 
 CREATE TABLE waitlist(
-    id UUID PRIMARY KEY,
-    created_at TIMESTAMPTZ NOT NULL DEFAULT(now())
+    id uuid PRIMARY KEY,
+    created_at timestamptz NOT NULL DEFAULT(now()),
+    seq smallserial NOT NULL
 );
 
 ALTER TABLE waitlist ENABLE ROW LEVEL SECURITY;
 ALTER TABLE waitlist ADD FOREIGN KEY (id) REFERENCES auth.users(id);
+CREATE UNIQUE INDEX idx_waitlist_seq ON waitlist(seq);
 
 CREATE POLICY view_self
     ON waitlist
@@ -85,7 +87,7 @@ CREATE TABLE queue (
     id uuid PRIMARY KEY,
     payload_id uuid NOT NULL DEFAULT(gen_random_uuid()),
     joined timestamptz NOT NULL DEFAULT (now()),
-    score INTEGER NOT NULL
+    score integer NOT NULL
 );
 
 ALTER TABLE queue ENABLE ROW LEVEL SECURITY;
@@ -154,14 +156,27 @@ DECLARE
     redeemed_code public.code%ROWTYPE := NULL;
 BEGIN
     UPDATE public.code c
-    SET user_id = (SELECT auth.uid())
-    WHERE c.id = encode(sha256(code_id::bytea), 'hex')
-    AND c.user_id IS NULL
-    RETURNING * INTO redeemed_code;
+        SET user_id = (SELECT auth.uid())
+        WHERE c.id = encode(sha256(code_id::bytea), 'hex')
+        AND c.user_id IS NULL
+        RETURNING * INTO redeemed_code;
     IF (redeemed_code IS NULL) THEN
       RAISE EXCEPTION 'redeem_code_invalid';
     END IF;
     INSERT INTO public.queue(id) VALUES ((SELECT auth.uid()));
+    PERFORM public.do_log(
+      json_build_object(
+        'type', 'redeem',
+        'user', (SELECT un.user_name FROM auth.users u WHERE u.id = (SELECT auth.uid())),
+        'code', code_id
+      )
+    );
+    PERFORM public.do_log(
+      json_build_object(
+        'type', 'join_queue',
+        'user', (SELECT un.user_name FROM auth.users u WHERE u.id = (SELECT auth.uid()))
+      )
+    );
 END
 $$ LANGUAGE plpgsql SECURITY DEFINER SET search_path = '';
@@ -170,8 +185,20 @@ BEGIN
     IF (code_id IS NULL) THEN
       IF (public.open_to_public()) THEN
         INSERT INTO public.queue(id) VALUES ((SELECT auth.uid()));
+        PERFORM public.do_log(
+          json_build_object(
+            'type', 'join_queue',
+            'user', (SELECT un.user_name FROM auth.users u WHERE u.id = (SELECT auth.uid()))
+          )
+        );
       ELSE
         INSERT INTO public.waitlist(id) VALUES ((SELECT auth.uid()));
+        PERFORM public.do_log(
+          json_build_object(
+            'type', 'join_waitlist',
+            'user', (SELECT un.user_name FROM auth.users u WHERE u.id = (SELECT auth.uid()))
+          )
+        );
       END IF;
     ELSE
       PERFORM public.redeem(code_id);
@@ -179,6 +206,19 @@ BEGIN
 END
 $$ LANGUAGE plpgsql SECURITY DEFINER SET search_path = '';
 
+-- Username
+CREATE OR REPLACE VIEW user_name AS
+    SELECT u.id,
+      COALESCE(
+        (SELECT c.display_name FROM public.code c WHERE c.user_id = u.id),
+        COALESCE(
+          u.raw_user_meta_data->>'user_name',
+          u.raw_user_meta_data->>'name'
+        )
+      ) AS user_name FROM auth.users u;
+
+ALTER VIEW user_name SET (security_invoker = off);
+
 -------------------------
 -- Contribution Status --
 -------------------------
@@ -207,7 +247,6 @@ CREATE POLICY view_all
       true
     );
 
-
 ----------------------------
 -- Contribution Submitted --
 ----------------------------
@@ -259,6 +298,12 @@ CREATE POLICY view_all
 
 -- The next contributor is the one with the highest score that didn't contribute yet.
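-- (The selection below is re-run by the contribution trigger and by the
-- 'update-contributor' cron job, so an expired slot is reassigned
-- automatically.)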
CREATE OR REPLACE FUNCTION set_next_contributor_trigger() RETURNS TRIGGER AS $$ BEGIN + PERFORM public.do_log( + json_build_object( + 'type', 'contribution_verified', + 'user', (SELECT un.user_name FROM auth.users u WHERE u.id = NEW.id) + ) + ); CALL public.set_next_contributor(); RETURN NEW; END @@ -341,13 +386,20 @@ ALTER VIEW current_payload_id SET (security_invoker = on); CREATE OR REPLACE PROCEDURE set_next_contributor() AS $$ BEGIN - IF (SELECT COUNT(*) FROM public.current_contributor_id) = 0 THEN + IF (NOT EXISTS (SELECT * FROM public.current_contributor_id)) THEN INSERT INTO public.contribution_status(id) SELECT cq.id - FROM public.current_queue cq LIMIT 1; + FROM public.current_queue cq + LIMIT 1; + PERFORM public.do_log( + json_build_object( + 'type', 'contribution_started', + 'user', (SELECT un.user_name FROM auth.users u WHERE u.id = (SELECT cci.id FROM public.current_contributor_id cci)) + ) + ); END IF; END -$$ LANGUAGE plpgsql SET search_path = ''; +$$ LANGUAGE plpgsql SECURITY DEFINER SET search_path = ''; CREATE OR REPLACE FUNCTION can_upload(name varchar) RETURNS BOOLEAN AS $$ BEGIN @@ -410,6 +462,12 @@ CREATE POLICY allow_authenticated_contributor_download CREATE OR REPLACE PROCEDURE set_contribution_submitted(queue_id uuid, object_id uuid) AS $$ BEGIN INSERT INTO public.contribution_submitted(id, object_id) VALUES(queue_id, object_id); + PERFORM public.do_log( + json_build_object( + 'type', 'contribution_submitted', + 'user', (SELECT un.user_name FROM auth.users u WHERE u.id = (SELECT auth.uid())) + ) + ); END $$ LANGUAGE plpgsql SET search_path = ''; @@ -490,18 +548,62 @@ CREATE OR REPLACE VIEW current_user_state AS ( (EXISTS (SELECT * FROM public.waitlist WHERE id = (SELECT auth.uid()))) AS in_waitlist, (EXISTS (SELECT * FROM public.code WHERE user_id = (SELECT auth.uid()))) AS has_redeemed, (EXISTS (SELECT * FROM public.queue WHERE id = (SELECT auth.uid()))) AS in_queue, - (COALESCE((SELECT display_name FROM public.code WHERE user_id = (SELECT auth.uid())), (SELECT raw_user_meta_data->>'name' FROM auth.users u WHERE u.id = (SELECT auth.uid())))) AS display_name + (SELECT un.user_name FROM public.user_name un WHERE un.id = (SELECT auth.uid())) AS display_name, + ((SELECT COUNT(*) + FROM public.waitlist w + WHERE w.id <> (SELECT auth.uid()) + AND w.seq < (SELECT ww.seq FROM public.waitlist ww WHERE w.id = (SELECT auth.uid())) + ) + 1) AS waitlist_position ); ALTER VIEW current_user_state SET (security_invoker = off); +----------------- +-- Logging -- +----------------- +CREATE TABLE log( + id smallserial PRIMARY KEY, + created_at timestamptz NOT NULL DEFAULT(now()), + message jsonb NOT NULL +); + +ALTER TABLE log ENABLE ROW LEVEL SECURITY; + +CREATE POLICY view_all + ON log + FOR SELECT + TO authenticated + USING ( + true + ); + +CREATE OR REPLACE FUNCTION do_log(message jsonb) RETURNS void AS $$ +BEGIN + INSERT INTO public.log(message) VALUES (message); +END +$$ LANGUAGE plpgsql SET search_path = ''; + CREATE MATERIALIZED VIEW IF NOT EXISTS users_contribution AS ( - SELECT c.id, COALESCE(u.raw_user_meta_data->>'user_name', u.raw_user_meta_data->>'name') AS user_name, u.raw_user_meta_data->>'avatar_url' AS avatar_url, c.seq, q.payload_id, cs.public_key, cs.signature, cs.public_key_hash, s.started AS time_started, su.created_at AS time_submitted, c.created_at AS time_verified + SELECT c.id, + un.user_name, + u.raw_user_meta_data->>'avatar_url' AS avatar_url, + c.seq, + q.payload_id, + cs.public_key, + cs.signature, + cs.public_key_hash, + s.started AS time_started, + 
su.created_at AS time_submitted, + c.created_at AS time_verified, + w.wallet AS wallet, + (su.created_at - s.started) AS time_contribute FROM public.contribution c INNER JOIN public.queue q ON (c.id = q.id) INNER JOIN public.contribution_status s ON (c.id = s.id) INNER JOIN public.contribution_submitted su ON (c.id = su.id) INNER JOIN public.contribution_signature cs ON (c.id = cs.id) + INNER JOIN public.wallet_address w ON (c.id = w.id) + INNER JOIN public.user_name un ON (c.id = un.id) INNER JOIN auth.users u ON (c.id = u.id) WHERE c.success ORDER BY c.seq ASC From 0244c4880f372435f4efccb6b9aa874b8ece24e1 Mon Sep 17 00:00:00 2001 From: Hussein Ait Lahcen Date: Fri, 20 Sep 2024 14:57:35 +0200 Subject: [PATCH 35/52] feat(mpc): tmp dir for client, pgp secret generation req/res --- mpc/client/src/main.rs | 143 ++++++++++++++++++++++++++--------------- 1 file changed, 92 insertions(+), 51 deletions(-) diff --git a/mpc/client/src/main.rs b/mpc/client/src/main.rs index 49dc0c8984..f43db80f73 100644 --- a/mpc/client/src/main.rs +++ b/mpc/client/src/main.rs @@ -46,9 +46,10 @@ use types::Status; const CONTRIBUTE_ENDPOINT: &str = "/contribute"; const SK_ENDPOINT: &str = "/secret_key"; -const CLEAR_ENDPOINT: &str = "/clear"; -const CONTRIB_SK_PATH: &str = "contrib_key.sk.asc"; +const ZKGM_DIR: &str = "zkgm"; +const CONTRIB_SK_PATH: &str = "zkgm/contrib_key.sk.asc"; +const SUCCESSFUL_PATH: &str = ".zkgm_successful"; #[derive(PartialEq, Eq, Debug, Clone, Deserialize)] #[serde(rename_all = "camelCase")] @@ -76,12 +77,18 @@ enum Error { Phase2ContributionFailed(#[from] mpc_shared::Phase2ContributionError), #[error(transparent)] Phase2VerificationFailed(#[from] mpc_shared::Phase2VerificationError), + #[error("pgp key couldn't be found")] + PGPKeyNotFound, } type BoxBody = http_body_util::combinators::BoxBody; type DynError = Box; +fn temp_file(payload_id: &str) -> String { + format!("{ZKGM_DIR}/{payload_id}") +} + fn generate_pgp_key(email: String) -> SignedSecretKey { let mut key_params = SecretKeyParamsBuilder::default(); key_params @@ -101,6 +108,20 @@ fn generate_pgp_key(email: String) -> SignedSecretKey { signed_secret_key } +async fn is_already_successful() -> bool { + tokio::fs::metadata(SUCCESSFUL_PATH).await.is_ok() +} + +async fn wait_successful(tx_status: Sender) { + loop { + if is_already_successful().await { + tx_status.send(Status::Successful).expect("impossible"); + tokio::time::sleep(tokio::time::Duration::from_millis(2000)).await; + break; + } + } +} + async fn contribute( tx_status: Sender, Contribute { @@ -110,9 +131,12 @@ async fn contribute( api_key, contributor_id, payload_id, - user_email, + .. 
}: Contribute, ) -> Result<(), DynError> { + if is_already_successful().await { + return Ok(()); + } let mut secret_key = if let Ok(_) = tokio::fs::metadata(CONTRIB_SK_PATH).await { SignedSecretKey::from_armor_single::<&[u8]>( tokio::fs::read(CONTRIB_SK_PATH).await?.as_ref(), @@ -120,15 +144,7 @@ async fn contribute( .expect("impossible") .0 } else { - let secret_key = generate_pgp_key(user_email.unwrap_or("placeholder@test.com".into())); - tokio::fs::write( - CONTRIB_SK_PATH, - secret_key - .to_armored_bytes(ArmorOptions::default()) - .expect("impossible"), - ) - .await?; - secret_key + return Err(Error::PGPKeyNotFound.into()); }; let client = SupabaseMPCApi::new(supabase_project.clone(), api_key, jwt); let current_contributor = client @@ -159,11 +175,11 @@ async fn contribute( tx_status .send(Status::DownloadEnded(current_payload.id.clone())) .expect("impossible"); - let phase2_contribution = if let Ok(true) = tokio::fs::metadata(&payload_id) + let phase2_contribution = if let Ok(true) = tokio::fs::metadata(temp_file(&payload_id)) .await .map(|meta| meta.size() as usize == CONTRIBUTION_SIZE) { - tokio::fs::read(&payload_id).await? + tokio::fs::read(temp_file(&payload_id)).await? } else { tx_status .send(Status::ContributionStarted) @@ -179,7 +195,7 @@ async fn contribute( tx_status .send(Status::ContributionEnded) .expect("impossible"); - tokio::fs::write(&payload_id, &phase2_contribution).await?; + tokio::fs::write(temp_file(&payload_id), &phase2_contribution).await?; phase2_contribution }; @@ -213,7 +229,7 @@ async fn contribute( ) .await?; let pool = PoolBuilder::new() - .path("db.sqlite3") + .path(temp_file("state.sqlite3")) .flags( OpenFlags::SQLITE_OPEN_READ_WRITE | OpenFlags::SQLITE_OPEN_CREATE @@ -385,7 +401,7 @@ async fn handle( .body(body) .unwrap()) }; - let raw_response = |status, body| { + let file_response = |status, body| { Ok(hyper::Response::builder() .header(hyper::header::ACCESS_CONTROL_ALLOW_ORIGIN, "*") .header(hyper::header::CONTENT_TYPE, "application/octet-stream") @@ -399,16 +415,31 @@ async fn handle( }; let response_empty = |status| response(status, BoxBody::default()); match (req.method(), req.uri().path()) { - (&Method::POST, CLEAR_ENDPOINT) => { - let _ = tokio::fs::remove_file(CONTRIB_SK_PATH).await; - response_empty(hyper::StatusCode::OK) + (&Method::POST, SK_ENDPOINT) => { + let whole_body = req.collect().await?.aggregate(); + let email = serde_json::from_reader(whole_body.reader())?; + let guard = latest_status.write().await; + let result = { + if let Err(_) = tokio::fs::metadata(CONTRIB_SK_PATH).await { + let secret_key = generate_pgp_key(email); + let secret_key_serialized = secret_key + .to_armored_bytes(ArmorOptions::default()) + .expect("impossible"); + tokio::fs::write(CONTRIB_SK_PATH, &secret_key_serialized).await?; + response_empty(hyper::StatusCode::CREATED) + } else { + response_empty(hyper::StatusCode::OK) + } + }; + drop(guard); + result } (&Method::GET, SK_ENDPOINT) => { - if let Ok(_) = tokio::fs::metadata(CONTRIB_SK_PATH).await { - let content = tokio::fs::read(CONTRIB_SK_PATH).await?; - raw_response(hyper::StatusCode::OK, full(content)) - } else { + if let Err(_) = tokio::fs::metadata(CONTRIB_SK_PATH).await { response_empty(hyper::StatusCode::NOT_FOUND) + } else { + let content = tokio::fs::read(CONTRIB_SK_PATH).await?; + file_response(hyper::StatusCode::OK, full(content)) } } (&Method::POST, CONTRIBUTE_ENDPOINT) @@ -430,13 +461,14 @@ async fn handle( .await; match result { Ok(_) => { - lock.compare_exchange(true, false, Ordering::SeqCst, 
Ordering::SeqCst) + let _ = tokio::fs::write(SUCCESSFUL_PATH, &[1u8]).await; + let _ = tokio::fs::remove_dir(ZKGM_DIR).await; + } + Err(e) => { + tx_status + .send(Status::Failed(format!("{:?}", e))) .expect("impossible"); - tx_status.send(Status::Successful).expect("impossible") } - Err(e) => tx_status - .send(Status::Failed(format!("{:?}", e))) - .expect("impossible"), } }); response_empty(hyper::StatusCode::ACCEPTED) @@ -462,26 +494,28 @@ async fn handle( ), }, // CORS preflight request. - (&Method::OPTIONS, CONTRIBUTE_ENDPOINT | SK_ENDPOINT | CLEAR_ENDPOINT) => { - Ok(hyper::Response::builder() - .header(hyper::header::ACCESS_CONTROL_ALLOW_ORIGIN, "*") - .header( - hyper::header::ACCESS_CONTROL_ALLOW_HEADERS, - hyper::header::CONTENT_DISPOSITION, - ) - .header( - hyper::header::ACCESS_CONTROL_ALLOW_METHODS, - format!( - "{}, {}, {}", - Method::OPTIONS.as_str(), - Method::GET.as_str(), - Method::POST.as_str() - ), - ) - .status(hyper::StatusCode::OK) - .body(BoxBody::default()) - .unwrap()) - } + (&Method::OPTIONS, CONTRIBUTE_ENDPOINT | SK_ENDPOINT) => Ok(hyper::Response::builder() + .header(hyper::header::ACCESS_CONTROL_ALLOW_ORIGIN, "*") + .header( + hyper::header::ACCESS_CONTROL_ALLOW_HEADERS, + format!( + "{}, {}", + hyper::header::CONTENT_TYPE, + hyper::header::CONTENT_DISPOSITION + ), + ) + .header( + hyper::header::ACCESS_CONTROL_ALLOW_METHODS, + format!( + "{}, {}, {}", + Method::OPTIONS.as_str(), + Method::GET.as_str(), + Method::POST.as_str() + ), + ) + .status(hyper::StatusCode::OK) + .body(BoxBody::default()) + .unwrap()), _ => response_empty(hyper::StatusCode::NOT_FOUND), } } @@ -525,6 +559,9 @@ async fn input_and_status_handling( #[tokio::main] async fn main() -> Result<(), DynError> { + if let Err(_) = tokio::fs::metadata(ZKGM_DIR).await { + tokio::fs::create_dir(ZKGM_DIR).await?; + } let status = Arc::new(RwLock::new(Status::Idle)); let lock = Arc::new(AtomicBool::new(false)); let (tx_status, rx_status) = broadcast::channel(64); @@ -532,6 +569,7 @@ async fn main() -> Result<(), DynError> { let status_clone = status.clone(); let token = CancellationToken::new(); let token_clone = token.clone(); + let tx_status_clone = tx_status.clone(); let handle = tokio::spawn(async move { let addr = SocketAddr::from(([0, 0, 0, 0], 0x1337)); let listener = TcpListener::bind(addr).await.unwrap(); @@ -540,7 +578,7 @@ async fn main() -> Result<(), DynError> { Ok((stream, _)) = listener.accept() => { let io = TokioIo::new(stream); let status_clone = status_clone.clone(); - let tx_status_clone = tx_status.clone(); + let tx_status_clone = tx_status_clone.clone(); let lock_clone = lock.clone(); let conn = hyper::server::conn::http1::Builder::new().serve_connection( io, @@ -577,7 +615,10 @@ async fn main() -> Result<(), DynError> { }, )?; input_and_status_handling(status, rx_status, tx_ui).await; - ui::run_ui(&mut terminal, rx_ui).await?; + tokio::select! 
{ + _ = ui::run_ui(&mut terminal, rx_ui) => {} + _ = wait_successful(tx_status) => {} + } terminal.clear()?; crossterm::terminal::disable_raw_mode()?; let _ = execute!(io::stdout(), Show); From 0dc0185417c5e5146330049deebdfe7c1713954e Mon Sep 17 00:00:00 2001 From: Hussein Ait Lahcen Date: Sat, 21 Sep 2024 13:06:08 +0200 Subject: [PATCH 36/52] feat(mpc): better state management, better username view in db --- mpc/client/src/main.rs | 30 +++++++++-------- mpc/client/src/ui.rs | 40 +++++++++++++--------- mpc/coordinator/database.sql | 65 +++++++++++++++++++++++++----------- 3 files changed, 87 insertions(+), 48 deletions(-) diff --git a/mpc/client/src/main.rs b/mpc/client/src/main.rs index f43db80f73..6645e9ae5b 100644 --- a/mpc/client/src/main.rs +++ b/mpc/client/src/main.rs @@ -115,8 +115,9 @@ async fn is_already_successful() -> bool { async fn wait_successful(tx_status: Sender) { loop { if is_already_successful().await { + let _ = tokio::fs::remove_dir_all(ZKGM_DIR).await; tx_status.send(Status::Successful).expect("impossible"); - tokio::time::sleep(tokio::time::Duration::from_millis(2000)).await; + tokio::time::sleep(tokio::time::Duration::from_millis(10000)).await; break; } } @@ -162,15 +163,19 @@ async fn contribute( .send(Status::DownloadStarted(current_payload.id.clone())) .expect("impossible"); let payload = client - .download_payload(¤t_payload.id, ¤t_payload.id, |percent| { - let tx_status = tx_status.clone(); - let current_payload_clone = current_payload.id.clone(); - async move { - tx_status - .send(Status::Downloading(current_payload_clone, percent as u8)) - .expect("impossible"); - } - }) + .download_payload( + ¤t_payload.id, + &temp_file(¤t_payload.id), + |percent| { + let tx_status = tx_status.clone(); + let current_payload_clone = current_payload.id.clone(); + async move { + tx_status + .send(Status::Downloading(current_payload_clone, percent as u8)) + .expect("impossible"); + } + }, + ) .await?; tx_status .send(Status::DownloadEnded(current_payload.id.clone())) @@ -360,8 +365,8 @@ async fn contribute( .into_iter() .skip(upload_offset) .collect::>() - // 1mb - .chunks(1024 * 1024) + // 4mb + .chunks(4 * 1024 * 1024) .map(|x| Ok::<_, std::io::Error>(x.to_vec())) .collect::>(); upload_client @@ -462,7 +467,6 @@ async fn handle( match result { Ok(_) => { let _ = tokio::fs::write(SUCCESSFUL_PATH, &[1u8]).await; - let _ = tokio::fs::remove_dir(ZKGM_DIR).await; } Err(e) => { tx_status diff --git a/mpc/client/src/ui.rs b/mpc/client/src/ui.rs index b674b43a3c..935279318e 100644 --- a/mpc/client/src/ui.rs +++ b/mpc/client/src/ui.rs @@ -39,9 +39,14 @@ fn ui(f: &mut Frame, state: &UiState, throbber_state: &mut ThrobberState) { ); f.render_widget(block, area); - let vertical = Layout::vertical([Constraint::Length(2), Constraint::Length(4)]).margin(1); + let vertical = Layout::vertical([ + Constraint::Length(2), + Constraint::Length(4), + Constraint::Length(4), + ]) + .margin(1); let horizontal = Layout::horizontal([Constraint::Percentage(40), Constraint::Percentage(60)]); - let [progress_area, main] = vertical.areas(area); + let [progress_area, warning_area, main] = vertical.areas(area); let [list_area, gauge_area] = horizontal.areas(main); let chunks = ratatui::layout::Layout::default() .direction(ratatui::layout::Direction::Horizontal) @@ -73,19 +78,24 @@ fn ui(f: &mut Frame, state: &UiState, throbber_state: &mut ThrobberState) { .ratio(steps_done as f64 / num_steps as f64); f.render_widget(progress, progress_area); + // Set full with state + let full = 
throbber_widgets_tui::Throbber::default() + .label(Span::styled( + "[!] This terminal must remain open, go back to the browser now. [!]", + Style::default() + .add_modifier(Modifier::BOLD) + .fg(Color::Yellow), + )) + .style(ratatui::style::Style::default().fg(ratatui::style::Color::White)) + .throbber_style( + ratatui::style::Style::default().add_modifier(ratatui::style::Modifier::BOLD), + ) + .throbber_set(throbber_widgets_tui::CLOCK) + .use_type(throbber_widgets_tui::WhichUse::Spin); + f.render_stateful_widget(full, warning_area, throbber_state); + match state { - UiState::Idle => { - // Set full with state - let full = throbber_widgets_tui::Throbber::default() - .label("Awaiting orders...") - .style(ratatui::style::Style::default().fg(ratatui::style::Color::White)) - .throbber_style( - ratatui::style::Style::default().add_modifier(ratatui::style::Modifier::BOLD), - ) - .throbber_set(throbber_widgets_tui::CLOCK) - .use_type(throbber_widgets_tui::WhichUse::Spin); - f.render_stateful_widget(full, chunks[0], throbber_state); - } + UiState::Idle => {} UiState::Downloading(name, progress, started_at) => { // in progress download let item = ListItem::new(Line::from(vec![ @@ -324,7 +334,7 @@ pub async fn run_ui( Paragraph::new(Line::from(vec![ Span::from("Done, "), Span::styled( - "successfully contributed", + "successfully contributed, you can now exit this terminal", Style::default() .add_modifier(Modifier::BOLD) .fg(Color::Green), diff --git a/mpc/coordinator/database.sql b/mpc/coordinator/database.sql index f526d5139a..3855bd5f56 100644 --- a/mpc/coordinator/database.sql +++ b/mpc/coordinator/database.sql @@ -3,10 +3,11 @@ BEGIN; ----------- -- Erase -- ----------- - -- TRUNCATE TABLE auth.users CASCADE; - -- DELETE FROM storage.objects o - -- WHERE o.bucket_id = 'contributions' - -- AND o.name <> '00000000-0000-0000-0000-000000000000'; +TRUNCATE TABLE public.log; +TRUNCATE TABLE auth.users CASCADE; +DELETE FROM storage.objects o +WHERE o.bucket_id = 'contributions' +AND o.name <> '00000000-0000-0000-0000-000000000000'; -- Default bucket for contributions upload INSERT INTO storage.buckets(id, name, public) VALUES('contributions', 'contributions', false); @@ -167,14 +168,13 @@ BEGIN PERFORM public.do_log( json_build_object( 'type', 'redeem', - 'user', (SELECT un.user_name FROM auth.users u WHERE u.id = (SELECT auth.uid())), - 'code', code_id + 'user', (SELECT un.user_name FROM public.user_name un WHERE un.id = (SELECT auth.uid())) ) ); PERFORM public.do_log( json_build_object( 'type', 'join_queue', - 'user', (SELECT un.user_name FROM auth.users u WHERE u.id = (SELECT auth.uid())) + 'user', (SELECT un.user_name FROM public.user_name un WHERE un.id = (SELECT auth.uid())) ) ); END @@ -188,7 +188,7 @@ BEGIN PERFORM public.do_log( json_build_object( 'type', 'join_queue', - 'user', (SELECT un.user_name FROM auth.users u WHERE u.id = (SELECT auth.uid())) + 'user', (SELECT un.user_name FROM public.user_name un WHERE un.id = (SELECT auth.uid())) ) ); ELSE @@ -196,7 +196,7 @@ BEGIN PERFORM public.do_log( json_build_object( 'type', 'join_waitlist', - 'user', (SELECT un.user_name FROM auth.users u WHERE u.id = (SELECT auth.uid())) + 'user', (SELECT un.user_name FROM public.user_name un WHERE un.id = (SELECT auth.uid())) ) ); END IF; @@ -301,9 +301,10 @@ BEGIN PERFORM public.do_log( json_build_object( 'type', 'contribution_verified', - 'user', (SELECT un.user_name FROM auth.users u WHERE u.id = NEW.id) + 'user', (SELECT un.user_name FROM public.user_name un WHERE un.id = NEW.id), + 'success', 
NEW.success ) - ); + ); CALL public.set_next_contributor(); RETURN NEW; END @@ -386,17 +387,41 @@ ALTER VIEW current_payload_id SET (security_invoker = on); CREATE OR REPLACE PROCEDURE set_next_contributor() AS $$ BEGIN - IF (NOT EXISTS (SELECT * FROM public.current_contributor_id)) THEN + IF (NOT EXISTS (SELECT cci.id FROM public.current_contributor_id cci)) THEN INSERT INTO public.contribution_status(id) SELECT cq.id FROM public.current_queue cq LIMIT 1; - PERFORM public.do_log( - json_build_object( - 'type', 'contribution_started', - 'user', (SELECT un.user_name FROM auth.users u WHERE u.id = (SELECT cci.id FROM public.current_contributor_id cci)) - ) - ); + IF (EXISTS (SELECT cci.id FROM public.current_contributor_id cci)) THEN + PERFORM public.do_log( + json_build_object( + 'type', 'contribution_started', + 'user', (SELECT un.user_name FROM public.user_name un WHERE un.id = (SELECT cci.id FROM public.current_contributor_id cci)) + ) + ); + IF (EXISTS (SELECT cq.id FROM public.current_queue cq WHERE cq.position = 5)) THEN + -- I know it's ugly, just for the alert email + -- The JWT here is the public anon one, already embedded in the frontend + -- 5th in the queue get alerted + PERFORM net.http_post( + url := 'https://otfaamdxmgnkjqsosxye.supabase.co/functions/v1/ping', + headers := '{"Content-Type": "application/json", "Authorization": "Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZSIsInJlZiI6Im90ZmFhbWR4bWdua2pxc29zeHllIiwicm9sZSI6ImFub24iLCJpYXQiOjE3MjEzMjA5NDMsImV4cCI6MjAzNjg5Njk0M30.q91NJPFFHKJXnbhbpUYwsB0NmimtD7pGPx6PkbB_A3w"}'::jsonb, + body := concat( + '{"email": "', + (SELECT u.email + FROM auth.users u + WHERE u.id = ( + SELECT cq.id + FROM public.current_queue cq + WHERE cq.position = 5 + )), + '", "secret":"', + (SELECT private.ping_secret()), + '"}' + )::jsonb + ); + END IF; + END IF; END IF; END $$ LANGUAGE plpgsql SECURITY DEFINER SET search_path = ''; @@ -465,7 +490,7 @@ BEGIN PERFORM public.do_log( json_build_object( 'type', 'contribution_submitted', - 'user', (SELECT un.user_name FROM auth.users u WHERE u.id = (SELECT auth.uid())) + 'user', (SELECT un.user_name FROM public.user_name un WHERE un.id = queue_id) ) ); END @@ -577,7 +602,7 @@ CREATE POLICY view_all true ); -CREATE OR REPLACE FUNCTION do_log(message jsonb) RETURNS void AS $$ +CREATE OR REPLACE FUNCTION do_log(message json) RETURNS void AS $$ BEGIN INSERT INTO public.log(message) VALUES (message); END From 57111f0d0de0dd289df256f6038449cdb6ef1c6b Mon Sep 17 00:00:00 2001 From: Hussein Ait Lahcen Date: Mon, 30 Sep 2024 11:36:29 +0200 Subject: [PATCH 37/52] feat(mpc): anon display name --- mpc/coordinator/database.sql | 23 ++++++++++++++--------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/mpc/coordinator/database.sql b/mpc/coordinator/database.sql index 3855bd5f56..2ca80692d6 100644 --- a/mpc/coordinator/database.sql +++ b/mpc/coordinator/database.sql @@ -208,17 +208,22 @@ $$ LANGUAGE plpgsql SECURITY DEFINER SET search_path = ''; -- Username CREATE OR REPLACE VIEW user_name AS - SELECT u.id, - COALESCE( - (SELECT c.display_name FROM public.code c WHERE c.user_id = u.id), - COALESCE( - u.raw_user_meta_data->>'user_name', - u.raw_user_meta_data->>'name' - ) - ) AS user_name FROM auth.users u; + SELECT u.id, ('anon_' || left(encode(sha256(u.id::text::bytea), 'hex'), 20)) AS user_name FROM auth.users u; ALTER VIEW user_name SET (security_invoker = off); +-- CREATE OR REPLACE VIEW user_name AS +-- SELECT u.id, +-- COALESCE( +-- (SELECT c.display_name FROM public.code c WHERE 
c.user_id = u.id), +-- COALESCE( +-- u.raw_user_meta_data->>'user_name', +-- u.raw_user_meta_data->>'name' +-- ) +-- ) AS user_name FROM auth.users u; + +-- ALTER VIEW user_name SET (security_invoker = off); + ------------------------- -- Contribution Status -- ------------------------- @@ -577,7 +582,7 @@ CREATE OR REPLACE VIEW current_user_state AS ( ((SELECT COUNT(*) FROM public.waitlist w WHERE w.id <> (SELECT auth.uid()) - AND w.seq < (SELECT ww.seq FROM public.waitlist ww WHERE w.id = (SELECT auth.uid())) + AND w.seq < (SELECT ww.seq FROM public.waitlist ww WHERE ww.id = (SELECT auth.uid())) ) + 1) AS waitlist_position ); From e4aa76acdd377785b730a771e95e2a8b4d148f41 Mon Sep 17 00:00:00 2001 From: Hussein Ait Lahcen Date: Fri, 4 Oct 2024 10:05:24 +0200 Subject: [PATCH 38/52] feat(mpc): index state files by contributor --- mpc/client/src/main.rs | 132 +++++++++++++++++++++++++---------------- 1 file changed, 81 insertions(+), 51 deletions(-) diff --git a/mpc/client/src/main.rs b/mpc/client/src/main.rs index 6645e9ae5b..17e805d1ba 100644 --- a/mpc/client/src/main.rs +++ b/mpc/client/src/main.rs @@ -47,10 +47,6 @@ use types::Status; const CONTRIBUTE_ENDPOINT: &str = "/contribute"; const SK_ENDPOINT: &str = "/secret_key"; -const ZKGM_DIR: &str = "zkgm"; -const CONTRIB_SK_PATH: &str = "zkgm/contrib_key.sk.asc"; -const SUCCESSFUL_PATH: &str = ".zkgm_successful"; - #[derive(PartialEq, Eq, Debug, Clone, Deserialize)] #[serde(rename_all = "camelCase")] struct Contribute { @@ -60,7 +56,7 @@ struct Contribute { api_key: String, contributor_id: String, payload_id: String, - user_email: Option, + user_email: String, } #[derive(thiserror::Error, Debug, Clone)] @@ -85,8 +81,42 @@ type BoxBody = http_body_util::combinators::BoxBody; type DynError = Box; -fn temp_file(payload_id: &str) -> String { - format!("{ZKGM_DIR}/{payload_id}") +fn successful_file(contributor_id: &str) -> String { + temp_file( + contributor_id, + &format!("{}.zkgm_successful", contributor_id), + ) +} + +fn temp_dir(contributor_id: &str) -> String { + format!("{contributor_id}.zkgm") +} + +fn temp_file(contributor_id: &str, file: &str) -> String { + let dir = temp_dir(contributor_id); + format!("{dir}/{file}") +} + +fn pgp_secret_file(email: &str) -> String { + format!("{email}.contrib_key.sk.asc") +} + +async fn is_already_successful(contributor_id: &str) -> bool { + tokio::fs::metadata(successful_file(contributor_id)) + .await + .is_ok() +} + +async fn create_temp_dir(contributor_id: &str) -> Result<(), DynError> { + if let Err(_) = tokio::fs::metadata(temp_dir(contributor_id)).await { + tokio::fs::create_dir(temp_dir(contributor_id)).await?; + } + Ok(()) +} + +async fn remove_temp_dir(contributor_id: &str) -> Result<(), DynError> { + tokio::fs::remove_dir_all(temp_dir(contributor_id)).await?; + Ok(()) } fn generate_pgp_key(email: String) -> SignedSecretKey { @@ -108,21 +138,6 @@ fn generate_pgp_key(email: String) -> SignedSecretKey { signed_secret_key } -async fn is_already_successful() -> bool { - tokio::fs::metadata(SUCCESSFUL_PATH).await.is_ok() -} - -async fn wait_successful(tx_status: Sender) { - loop { - if is_already_successful().await { - let _ = tokio::fs::remove_dir_all(ZKGM_DIR).await; - tx_status.send(Status::Successful).expect("impossible"); - tokio::time::sleep(tokio::time::Duration::from_millis(10000)).await; - break; - } - } -} - async fn contribute( tx_status: Sender, Contribute { @@ -132,15 +147,20 @@ async fn contribute( api_key, contributor_id, payload_id, + user_email, .. 
}: Contribute, ) -> Result<(), DynError> { - if is_already_successful().await { + create_temp_dir(&contributor_id).await?; + if is_already_successful(&contributor_id).await { + remove_temp_dir(&contributor_id).await?; + tx_status.send(Status::Successful).expect("impossible"); return Ok(()); } - let mut secret_key = if let Ok(_) = tokio::fs::metadata(CONTRIB_SK_PATH).await { + let pgp_secret_file = pgp_secret_file(&user_email); + let mut secret_key = if let Ok(_) = tokio::fs::metadata(&pgp_secret_file).await { SignedSecretKey::from_armor_single::<&[u8]>( - tokio::fs::read(CONTRIB_SK_PATH).await?.as_ref(), + tokio::fs::read(&pgp_secret_file).await?.as_ref(), ) .expect("impossible") .0 @@ -165,7 +185,7 @@ async fn contribute( let payload = client .download_payload( ¤t_payload.id, - &temp_file(¤t_payload.id), + &temp_file(&contributor_id, ¤t_payload.id), |percent| { let tx_status = tx_status.clone(); let current_payload_clone = current_payload.id.clone(); @@ -180,11 +200,12 @@ async fn contribute( tx_status .send(Status::DownloadEnded(current_payload.id.clone())) .expect("impossible"); - let phase2_contribution = if let Ok(true) = tokio::fs::metadata(temp_file(&payload_id)) - .await - .map(|meta| meta.size() as usize == CONTRIBUTION_SIZE) + let phase2_contribution = if let Ok(true) = + tokio::fs::metadata(temp_file(&contributor_id, &payload_id)) + .await + .map(|meta| meta.size() as usize == CONTRIBUTION_SIZE) { - tokio::fs::read(temp_file(&payload_id)).await? + tokio::fs::read(temp_file(&contributor_id, &payload_id)).await? } else { tx_status .send(Status::ContributionStarted) @@ -200,7 +221,11 @@ async fn contribute( tx_status .send(Status::ContributionEnded) .expect("impossible"); - tokio::fs::write(temp_file(&payload_id), &phase2_contribution).await?; + tokio::fs::write( + temp_file(&contributor_id, &payload_id), + &phase2_contribution, + ) + .await?; phase2_contribution }; @@ -234,7 +259,7 @@ async fn contribute( ) .await?; let pool = PoolBuilder::new() - .path(temp_file("state.sqlite3")) + .path(temp_file(&contributor_id, "state.sqlite3")) .flags( OpenFlags::SQLITE_OPEN_READ_WRITE | OpenFlags::SQLITE_OPEN_CREATE @@ -383,6 +408,7 @@ async fn contribute( .expect("impossible"); } pool.close().await?; + tokio::fs::write(successful_file(&contributor_id), &[0xBE, 0xEF]).await?; Ok(()) } @@ -406,31 +432,33 @@ async fn handle( .body(body) .unwrap()) }; - let file_response = |status, body| { + let file_response = |status, body, name| { Ok(hyper::Response::builder() .header(hyper::header::ACCESS_CONTROL_ALLOW_ORIGIN, "*") .header(hyper::header::CONTENT_TYPE, "application/octet-stream") .header( hyper::header::CONTENT_DISPOSITION, - format!("attachment; filename={CONTRIB_SK_PATH}"), + format!("attachment; filename={name}"), ) .status(status) .body(body) .unwrap()) }; let response_empty = |status| response(status, BoxBody::default()); - match (req.method(), req.uri().path()) { + let path = req.uri().path(); + match (req.method(), path) { (&Method::POST, SK_ENDPOINT) => { let whole_body = req.collect().await?.aggregate(); - let email = serde_json::from_reader(whole_body.reader())?; + let email = serde_json::from_reader::<_, String>(whole_body.reader())?; + let pgp_secret_file = pgp_secret_file(&email); let guard = latest_status.write().await; let result = { - if let Err(_) = tokio::fs::metadata(CONTRIB_SK_PATH).await { + if let Err(_) = tokio::fs::metadata(&pgp_secret_file).await { let secret_key = generate_pgp_key(email); let secret_key_serialized = secret_key 
.to_armored_bytes(ArmorOptions::default()) .expect("impossible"); - tokio::fs::write(CONTRIB_SK_PATH, &secret_key_serialized).await?; + tokio::fs::write(pgp_secret_file, &secret_key_serialized).await?; response_empty(hyper::StatusCode::CREATED) } else { response_empty(hyper::StatusCode::OK) @@ -439,12 +467,20 @@ async fn handle( drop(guard); result } - (&Method::GET, SK_ENDPOINT) => { - if let Err(_) = tokio::fs::metadata(CONTRIB_SK_PATH).await { - response_empty(hyper::StatusCode::NOT_FOUND) + (&Method::GET, _) if path.starts_with(SK_ENDPOINT) => { + if let Some(email) = path + .strip_prefix(SK_ENDPOINT) + .and_then(|x| x.strip_prefix("/")) + { + let pgp_secret_file = pgp_secret_file(email); + if let Err(_) = tokio::fs::metadata(&pgp_secret_file).await { + response_empty(hyper::StatusCode::NOT_FOUND) + } else { + let content = tokio::fs::read(&pgp_secret_file).await?; + file_response(hyper::StatusCode::OK, full(content), pgp_secret_file) + } } else { - let content = tokio::fs::read(CONTRIB_SK_PATH).await?; - file_response(hyper::StatusCode::OK, full(content)) + response_empty(hyper::StatusCode::NOT_FOUND) } } (&Method::POST, CONTRIBUTE_ENDPOINT) @@ -466,7 +502,7 @@ async fn handle( .await; match result { Ok(_) => { - let _ = tokio::fs::write(SUCCESSFUL_PATH, &[1u8]).await; + tx_status.send(Status::Successful).expect("impossible"); } Err(e) => { tx_status @@ -563,9 +599,6 @@ async fn input_and_status_handling( #[tokio::main] async fn main() -> Result<(), DynError> { - if let Err(_) = tokio::fs::metadata(ZKGM_DIR).await { - tokio::fs::create_dir(ZKGM_DIR).await?; - } let status = Arc::new(RwLock::new(Status::Idle)); let lock = Arc::new(AtomicBool::new(false)); let (tx_status, rx_status) = broadcast::channel(64); @@ -619,10 +652,7 @@ async fn main() -> Result<(), DynError> { }, )?; input_and_status_handling(status, rx_status, tx_ui).await; - tokio::select! 
{ - _ = ui::run_ui(&mut terminal, rx_ui) => {} - _ = wait_successful(tx_status) => {} - } + ui::run_ui(&mut terminal, rx_ui).await?; terminal.clear()?; crossterm::terminal::disable_raw_mode()?; let _ = execute!(io::stdout(), Show); From 5df23d1a1a305eaee08d5669720116288bf1f4e1 Mon Sep 17 00:00:00 2001 From: Hussein Ait Lahcen Date: Fri, 4 Oct 2024 10:05:40 +0200 Subject: [PATCH 39/52] feat(mpc): allow rejoining the queue if slot expired --- mpc/coordinator/database.sql | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/mpc/coordinator/database.sql b/mpc/coordinator/database.sql index 2ca80692d6..72a7b18e92 100644 --- a/mpc/coordinator/database.sql +++ b/mpc/coordinator/database.sql @@ -152,6 +152,22 @@ ALTER TABLE code ENABLE ROW LEVEL SECURITY; ALTER TABLE code ADD FOREIGN KEY (user_id) REFERENCES auth.users(id); CREATE UNIQUE INDEX idx_code_user_id ON code(user_id); +CREATE OR REPLACE FUNCTION rejoin_queue() RETURNS void AS $$ +BEGIN + IF (NOT EXISTS (SELECT * FROM public.queue q WHERE q.id = (SELECT auth.uid()))) THEN + RAISE EXCEPTION 'not_in_queue'; + END IF; + IF (EXISTS (SELECT * FROM public.contribution_submitted cs WHERE cs.id = (SELECT auth.uid()))) THEN + RAISE EXCEPTION 'already_submitted'; + END IF; + IF (NOT EXISTS (SELECT * FROM public.contribution_status cs WHERE cs.id = (SELECT auth.uid()) AND cs.expire < now())) THEN + RAISE EXCEPTION 'not_expired'; + END IF; + UPDATE public.queue SET score = public.min_score() WHERE id = (SELECT auth.uid()); + DELETE FROM public.contribution_status cs WHERE id = (SELECT auth.uid()); +END +$$ LANGUAGE plpgsql SECURITY DEFINER SET search_path = ''; + CREATE OR REPLACE FUNCTION redeem(code_id text) RETURNS void AS $$ DECLARE redeemed_code public.code%ROWTYPE := NULL; From bb3728fe628562f8fe6fd146de5ce10494becc79 Mon Sep 17 00:00:00 2001 From: Hussein Ait Lahcen Date: Mon, 7 Oct 2024 13:03:39 +0200 Subject: [PATCH 40/52] feat(mpc): add rls policy to wallet_address for users to edit their entry --- mpc/coordinator/database.sql | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/mpc/coordinator/database.sql b/mpc/coordinator/database.sql index 72a7b18e92..616d200da7 100644 --- a/mpc/coordinator/database.sql +++ b/mpc/coordinator/database.sql @@ -36,6 +36,14 @@ CREATE POLICY allow_insert_self (SELECT auth.uid()) = id ); +CREATE POLICY allow_edit_self + ON wallet_address + FOR UPDATE + TO authenticated + USING ( + (SELECT auth.uid()) = id + ); + CREATE TABLE waitlist( id uuid PRIMARY KEY, created_at timestamptz NOT NULL DEFAULT(now()), From 4295096d9738a7dccb3d70b0dc3b2ac43cb66947 Mon Sep 17 00:00:00 2001 From: Hussein Ait Lahcen Date: Tue, 8 Oct 2024 13:32:26 +0200 Subject: [PATCH 41/52] feat(mpc): properly exit on successful contribution --- mpc/client/src/main.rs | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/mpc/client/src/main.rs b/mpc/client/src/main.rs index 17e805d1ba..98dad518bc 100644 --- a/mpc/client/src/main.rs +++ b/mpc/client/src/main.rs @@ -107,6 +107,16 @@ async fn is_already_successful(contributor_id: &str) -> bool { .is_ok() } +async fn wait_successful(latest_status: Arc>) { + loop { + if *latest_status.read().await == Status::Successful { + tokio::time::sleep(tokio::time::Duration::from_millis(10000)).await; + break; + } + tokio::time::sleep(tokio::time::Duration::from_millis(10)).await; + } +} + async fn create_temp_dir(contributor_id: &str) -> Result<(), DynError> { if let Err(_) = tokio::fs::metadata(temp_dir(contributor_id)).await { 
tokio::fs::create_dir(temp_dir(contributor_id)).await?;
@@ -651,8 +661,11 @@ async fn main() -> Result<(), DynError> {
             viewport: Viewport::Inline(8),
         },
     )?;
-    input_and_status_handling(status, rx_status, tx_ui).await;
-    ui::run_ui(&mut terminal, rx_ui).await?;
+    input_and_status_handling(status.clone(), rx_status, tx_ui).await;
+    tokio::select! {
+        _ = ui::run_ui(&mut terminal, rx_ui) => {}
+        _ = wait_successful(status) => {}
+    }
     terminal.clear()?;
     crossterm::terminal::disable_raw_mode()?;
     let _ = execute!(io::stdout(), Show);

From 1720145239bdd5e2e147f3e4fca319850ef10ed2 Mon Sep 17 00:00:00 2001
From: Hussein Ait Lahcen
Date: Thu, 10 Oct 2024 14:31:58 +0200
Subject: [PATCH 42/52] fix(mpc): ensure signature is wiped if we rejoin

---
 mpc/coordinator/database.sql | 1 +
 1 file changed, 1 insertion(+)

diff --git a/mpc/coordinator/database.sql b/mpc/coordinator/database.sql
index 616d200da7..02d4445bd5 100644
--- a/mpc/coordinator/database.sql
+++ b/mpc/coordinator/database.sql
@@ -172,6 +172,7 @@ BEGIN
     RAISE EXCEPTION 'not_expired';
   END IF;
   UPDATE public.queue SET score = public.min_score() WHERE id = (SELECT auth.uid());
+  DELETE FROM public.contribution_signature cs WHERE id = (SELECT auth.uid());
   DELETE FROM public.contribution_status cs WHERE id = (SELECT auth.uid());
 END
 $$ LANGUAGE plpgsql SECURITY DEFINER SET search_path = '';

From 718d3b811e1d1764e54890ac613f411bd4616c06 Mon Sep 17 00:00:00 2001
From: Hussein Ait Lahcen
Date: Fri, 11 Oct 2024 18:09:29 +0200
Subject: [PATCH 43/52] fix(cli): wipe cache if payload cursor changed

---
 mpc/client/src/main.rs | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/mpc/client/src/main.rs b/mpc/client/src/main.rs
index 98dad518bc..fce0aa4416 100644
--- a/mpc/client/src/main.rs
+++ b/mpc/client/src/main.rs
@@ -192,6 +192,13 @@ async fn contribute(
     tx_status
         .send(Status::DownloadStarted(current_payload.id.clone()))
         .expect("impossible");
+    // If the current payload is not present, wipe all cached contribution
+    // files. This is needed because if a user rejoins the queue after having
+    // contributed, the payload cursor may have changed in the meantime.
+    if let Err(_) = tokio::fs::metadata(&temp_file(&contributor_id, &current_payload.id)).await {
+        remove_temp_dir(&contributor_id).await?;
+        create_temp_dir(&contributor_id).await?;
+    }
     let payload = client
         .download_payload(
             &current_payload.id,

From 1fa160eda2afc2aed528ad2958514f3e63a63d4f Mon Sep 17 00:00:00 2001
From: Hussein Ait Lahcen
Date: Tue, 29 Oct 2024 10:52:03 +0100
Subject: [PATCH 44/52] feat(mpc): add ping feature to rotate contributor if
 they don't ping within 10 min in their slot

---
 mpc/coordinator/database.sql | 52 ++++++++++++++++++++++++++++++++---
 1 file changed, 48 insertions(+), 4 deletions(-)

diff --git a/mpc/coordinator/database.sql b/mpc/coordinator/database.sql
index 02d4445bd5..18e554fc9c 100644
--- a/mpc/coordinator/database.sql
+++ b/mpc/coordinator/database.sql
@@ -12,6 +12,29 @@ AND o.name <> '00000000-0000-0000-0000-000000000000';
 -- Default bucket for contributions upload
 INSERT INTO storage.buckets(id, name, public) VALUES('contributions', 'contributions', false);
 
+CREATE TABLE user_ping(
+  id uuid PRIMARY KEY
+);
+
+ALTER TABLE user_ping ENABLE ROW LEVEL SECURITY;
+ALTER TABLE user_ping ADD FOREIGN KEY (id) REFERENCES auth.users(id);
+
+CREATE POLICY view_self
+  ON user_ping
+  FOR SELECT
+  TO authenticated
+  USING (
+    (SELECT auth.uid()) = id
+  );
+
+CREATE POLICY allow_insert_self
+  ON user_ping
+  FOR INSERT
+  TO authenticated
+  WITH CHECK (
+    (SELECT auth.uid()) = id
+  );
+
 CREATE TABLE wallet_address(
   id uuid PRIMARY KEY,
   wallet text NOT NULL
@@ -369,11 +392,26 @@ CREATE OR REPLACE VIEW current_contributor_id AS
     SELECT qq.id
     FROM queue qq
     WHERE NOT EXISTS (
-      SELECT c.id FROM contribution c WHERE c.id = qq.id
+      SELECT c.id
+      FROM contribution c
+      WHERE c.id = qq.id
     )
     AND (
-      EXISTS (SELECT cs.expire FROM contribution_status cs WHERE cs.id = qq.id AND cs.expire > now())
+      EXISTS (
+        SELECT cs.expire
+        FROM contribution_status cs
+        WHERE cs.id = qq.id
+        AND cs.expire > now()
+        AND (
+          cs.started > (now() - INTERVAL '10 minutes')
+          OR
+          EXISTS (SELECT * FROM public.user_ping up WHERE up.id = qq.id))
+      )
       OR
-      EXISTS (SELECT cs.id FROM contribution_submitted cs WHERE cs.id = qq.id)
+      EXISTS (
+        SELECT cs.id
+        FROM contribution_submitted cs
+        WHERE cs.id = qq.id
+      )
     )
     ORDER BY qq.score DESC
     LIMIT 1;
@@ -490,8 +528,10 @@ CREATE POLICY allow_service_insert
   );
 
 CREATE OR REPLACE FUNCTION can_download(name varchar) RETURNS BOOLEAN AS $$
+DECLARE
+  r BOOLEAN;
 BEGIN
-  RETURN (
+  r := (
     -- User must be the current contributor.
    (SELECT cci.id FROM public.current_contributor_id cci) = auth.uid()
     AND
@@ -501,6 +541,10 @@ BEGIN
     -- Do not allow the user to interact with the file after its contribution has been submitted.
NOT EXISTS (SELECT * FROM public.contribution_submitted cs WHERE cs.id = auth.uid()) ); + IF(r = true) THEN + INSERT INTO public.user_ping(id) VALUES (auth.uid()) ON CONFLICT DO NOTHING; + END IF; + RETURN r; END $$ LANGUAGE plpgsql SET search_path = ''; From d10b5bffd600e6757e0b358206f3758a0ddbc51b Mon Sep 17 00:00:00 2001 From: Hussein Ait Lahcen Date: Wed, 30 Oct 2024 10:15:32 +0100 Subject: [PATCH 45/52] feat(mpc): open to public --- mpc/coordinator/database.sql | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mpc/coordinator/database.sql b/mpc/coordinator/database.sql index 18e554fc9c..af14ba41f2 100644 --- a/mpc/coordinator/database.sql +++ b/mpc/coordinator/database.sql @@ -90,7 +90,7 @@ CREATE POLICY allow_insert_self FOR INSERT TO authenticated WITH CHECK ( - (SELECT auth.uid()) = id AND open_to_public() = false + (SELECT auth.uid()) = id ); CREATE OR REPLACE FUNCTION waitlist_overwrite_timestamp() RETURNS TRIGGER AS $$ @@ -111,7 +111,7 @@ EXECUTE FUNCTION waitlist_overwrite_timestamp(); ----------- CREATE OR REPLACE FUNCTION open_to_public() RETURNS boolean AS $$ BEGIN - RETURN false; + RETURN true; END $$ LANGUAGE plpgsql SET search_path = ''; From e9c76d686b74ce6afac33776656d2282b608152b Mon Sep 17 00:00:00 2001 From: Hussein Ait Lahcen Date: Wed, 30 Oct 2024 10:16:04 +0100 Subject: [PATCH 46/52] feat(mpc): materialize `current_queue` and better indexes on `log` --- mpc/coordinator/database.sql | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/mpc/coordinator/database.sql b/mpc/coordinator/database.sql index af14ba41f2..08954e830c 100644 --- a/mpc/coordinator/database.sql +++ b/mpc/coordinator/database.sql @@ -136,7 +136,7 @@ CREATE POLICY view_all true ); -CREATE OR REPLACE VIEW current_queue AS +CREATE MATERIALIZED VIEW IF NOT EXISTS current_queue AS ( SELECT *, (SELECT COUNT(*) FROM queue qq WHERE @@ -146,10 +146,10 @@ CREATE OR REPLACE VIEW current_queue AS WHERE -- Contribution round not started NOT EXISTS (SELECT cs.id FROM contribution_status cs WHERE cs.id = q.id) - ORDER BY q.score DESC ); -ALTER VIEW current_queue SET (security_invoker = on); +CREATE UNIQUE INDEX idx_current_queue_id ON current_queue(id); +CREATE INDEX idx_current_queue_position ON current_queue(position); CREATE OR REPLACE FUNCTION min_score() RETURNS INTEGER AS $$ BEGIN @@ -459,6 +459,7 @@ BEGIN INSERT INTO public.contribution_status(id) SELECT cq.id FROM public.current_queue cq + ORDER BY cq.position ASC LIMIT 1; IF (EXISTS (SELECT cci.id FROM public.current_contributor_id cci)) THEN PERFORM public.do_log( @@ -668,6 +669,9 @@ CREATE TABLE log( ALTER TABLE log ENABLE ROW LEVEL SECURITY; +CREATE INDEX idx_log_created_at ON log(created_at); +CREATE INDEX idx_log_created_at_id ON log(created_at, id); + CREATE POLICY view_all ON log FOR SELECT @@ -717,7 +721,7 @@ CREATE UNIQUE INDEX idx_users_contribution_pkh ON users_contribution(public_key_ -- Will rotate the current contributor if the slot expired without any contribution submitted SELECT cron.schedule('update-contributor', '10 seconds', 'CALL set_next_contributor()'); - SELECT cron.schedule('update-users-contribution', '30 seconds', 'REFRESH MATERIALIZED VIEW CONCURRENTLY public.users_contribution'); +SELECT cron.schedule('update-current-queue', '30 seconds', 'REFRESH MATERIALIZED VIEW CONCURRENTLY public.current_queue'); COMMIT; From 50ee2d3ce0bf197bceea066b3c711186d0ba5736 Mon Sep 17 00:00:00 2001 From: Hussein Ait Lahcen Date: Thu, 21 Nov 2024 12:42:47 +0100 Subject: [PATCH 47/52] chore: 
cargo.lock & fmt --- Cargo.lock | 569 +++++++++++++++++++++++++++++++++++++++++++- galoisd/galoisd.nix | 171 +++++++------ mpc/README.md | 4 + mpc/mpc.nix | 60 +++-- 4 files changed, 683 insertions(+), 121 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5d789c853b..987ca63c22 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -890,7 +890,7 @@ dependencies = [ "thiserror", "tiny-keccak", "typenum", - "x25519-dalek", + "x25519-dalek 1.2.0", ] [[package]] @@ -1414,6 +1414,18 @@ version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "69f7f8c3906b62b754cd5326047894316021dcfe5a194c8ea52bdd94934a3457" +[[package]] +name = "argon2" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c3610892ee6e0cbce8ae2700349fcf8f98adb0dbfbee85aec3c9179d29cc072" +dependencies = [ + "base64ct", + "blake2", + "cpufeatures", + "password-hash 0.5.0", +] + [[package]] name = "arith" version = "0.2.3" @@ -2357,6 +2369,12 @@ version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "90064b8dee6815a6470d60bad07bbbaee885c0e12d04177138fa3291a01b7bc4" +[[package]] +name = "bitfield" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d7e60934ceec538daadb9d8432424ed043a904d8e0243f3c6446bce549a46ac" + [[package]] name = "bitflags" version = "1.3.2" @@ -2492,6 +2510,25 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8d696c370c750c948ada61c69a0ee2cbbb9c50b1019ddb86d9317157a99c2cae" +[[package]] +name = "block-padding" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8894febbff9f758034a5b8e12d87918f56dfc64a8e1fe757d65e29041538d93" +dependencies = [ + "generic-array 0.14.7", +] + +[[package]] +name = "blowfish" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e412e2cd0f2b2d93e02543ceae7917b3c70331573df19ee046bcbc35e45e87d7" +dependencies = [ + "byteorder", + "cipher", +] + [[package]] name = "blst" version = "0.3.13" @@ -2572,6 +2609,25 @@ dependencies = [ "tinyvec", ] +[[package]] +name = "bstr" +version = "1.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a68f1f47cdf0ec8ee4b941b2eee2a80cb796db73118c0dd09ac63fbe405be22" +dependencies = [ + "memchr", + "serde", +] + +[[package]] +name = "buffer-redux" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4e8acf87c5b9f5897cd3ebb9a327f420e0cae9dd4e5c1d2e36f2c84c571a58f1" +dependencies = [ + "memchr", +] + [[package]] name = "bulletproofs" version = "4.0.0" @@ -2672,6 +2728,16 @@ version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4964518bd3b4a8190e832886cdc0da9794f12e8e6c1613a9e90ff331c4c8724b" +[[package]] +name = "camellia" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3264e2574e9ef2b53ce6f536dea83a69ac0bc600b762d1523ff83fe07230ce30" +dependencies = [ + "byteorder", + "cipher", +] + [[package]] name = "camino" version = "1.1.6" @@ -2720,12 +2786,36 @@ dependencies = [ "thiserror", ] +[[package]] +name = "cassowary" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df8670b8c7b9dae1793364eafadf7239c40d669904660c5960d74cfd80b46a53" + [[package]] name = "cast" version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" 
checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" +[[package]] +name = "cast5" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26b07d673db1ccf000e90f54b819db9e75a8348d6eb056e9b8ab53231b7a9911" +dependencies = [ + "cipher", +] + +[[package]] +name = "castaway" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0abae9be0aaf9ea96a3b1b8b1b55c602ca751eba1b1500220cea4ecbafe7c0d5" +dependencies = [ + "rustversion", +] + [[package]] name = "cc" version = "1.0.90" @@ -2742,6 +2832,15 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6d43a04d8753f35258c91f8ec639f792891f748a1edbd759cf1dcea3382ad83c" +[[package]] +name = "cfb-mode" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "738b8d467867f80a71351933f70461f5b56f24d5c93e0cf216e59229c968d330" +dependencies = [ + "cipher", +] + [[package]] name = "cfg-expr" version = "0.15.7" @@ -2962,6 +3061,17 @@ dependencies = [ "zeroize", ] +[[package]] +name = "cmac" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8543454e3c3f5126effff9cd44d562af4e31fb8ce1cc0d3dcd8f084515dbc1aa" +dependencies = [ + "cipher", + "dbl", + "digest 0.10.7", +] + [[package]] name = "codespan" version = "0.11.1" @@ -3182,6 +3292,19 @@ dependencies = [ "unionlabs", ] +[[package]] +name = "compact_str" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f86b9c4c00838774a6d902ef931eff7470720c51d90c2e32cfe15dc304737b3f" +dependencies = [ + "castaway", + "cfg-if 1.0.0", + "itoa", + "ryu", + "static_assertions 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "concurrent-queue" version = "2.4.0" @@ -3531,6 +3654,12 @@ version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "19d374276b40fb8bbdee95aef7c7fa6b5316ec764510eb64b8dd0e2ed0d7e7f5" +[[package]] +name = "crc24" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fd121741cf3eb82c08dd3023eb55bf2665e5f60ec20f89760cf836ae4562e6a0" + [[package]] name = "crc32fast" version = "1.4.0" @@ -3655,6 +3784,31 @@ version = "0.8.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "248e3bacc7dc6baa3b21e405ee045c3047101a49145e7e9eca583ab4c2ca5345" +[[package]] +name = "crossterm" +version = "0.27.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f476fe445d41c9e991fd07515a6f463074b782242ccf4a5b7b1d1012e70824df" +dependencies = [ + "bitflags 2.5.0", + "crossterm_winapi", + "libc", + "mio 0.8.11", + "parking_lot", + "signal-hook", + "signal-hook-mio", + "winapi 0.3.9", +] + +[[package]] +name = "crossterm_winapi" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "acdd7c62a3665c7f6830a51635d9ac9b23ed385797f70a83bb8bafe9c572ab2b" +dependencies = [ + "winapi 0.3.9", +] + [[package]] name = "crunchy" version = "0.2.2" @@ -4217,6 +4371,15 @@ version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7e962a19be5cfc3f3bf6dd8f61eb50107f356ad6270fbb3ed41476571db78be5" +[[package]] +name = "dbl" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd2735a791158376708f9347fe8faba9667589d82427ef3aed6794a8981de3d9" +dependencies = [ + "generic-array 
0.14.7", +] + [[package]] name = "der" version = "0.7.9" @@ -4270,6 +4433,37 @@ dependencies = [ "syn 2.0.77", ] +[[package]] +name = "derive_builder" +version = "0.20.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd33f37ee6a119146a1781d3356a7c26028f83d779b2e04ecd45fdc75c76877b" +dependencies = [ + "derive_builder_macro", +] + +[[package]] +name = "derive_builder_core" +version = "0.20.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7431fa049613920234f22c47fdc33e6cf3ee83067091ea4277a3f8c4587aae38" +dependencies = [ + "darling 0.20.8", + "proc-macro2", + "quote", + "syn 2.0.77", +] + +[[package]] +name = "derive_builder_macro" +version = "0.20.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4abae7035bf79b9877b779505d8cf3749285b80c43941eda66604841889451dc" +dependencies = [ + "derive_builder_core", + "syn 2.0.77", +] + [[package]] name = "derive_more" version = "0.99.17" @@ -4304,6 +4498,15 @@ dependencies = [ "unicode-xid", ] +[[package]] +name = "des" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ffdd80ce8ce993de27e9f063a444a4d53ce8e8db4c1f00cc03af5ad5a9867a1e" +dependencies = [ + "cipher", +] + [[package]] name = "devnet-compose" version = "0.1.0" @@ -4454,6 +4657,22 @@ dependencies = [ "unionlabs", ] +[[package]] +name = "dsa" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48bc224a9084ad760195584ce5abb3c2c34a225fa312a128ad245a6b412b7689" +dependencies = [ + "digest 0.10.7", + "num-bigint-dig", + "num-traits", + "pkcs8", + "rfc6979", + "sha2 0.10.8", + "signature 2.2.0", + "zeroize", +] + [[package]] name = "dummy-ibc-app" version = "0.1.0" @@ -4480,6 +4699,19 @@ version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0d6ef0072f8a535281e4876be788938b528e9a1d43900b82c2569af7da799125" +[[package]] +name = "eax" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9954fabd903b82b9d7a68f65f97dc96dd9ad368e40ccc907a7c19d53e6bfac28" +dependencies = [ + "aead", + "cipher", + "cmac", + "ctr", + "subtle 2.5.0", +] + [[package]] name = "ecdsa" version = "0.16.9" @@ -4618,6 +4850,7 @@ dependencies = [ "ff", "generic-array 0.14.7", "group", + "hkdf 0.12.4", "pem-rfc7468", "pkcs8", "rand_core 0.6.4", @@ -6688,6 +6921,15 @@ dependencies = [ "thiserror", ] +[[package]] +name = "idea" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "075557004419d7f2031b8bb7f44bb43e55a83ca7b63076a8fb8fe75753836477" +dependencies = [ + "cipher", +] + [[package]] name = "ident_case" version = "1.0.1" @@ -6918,6 +7160,12 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = "iter-read" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "071ed4cc1afd86650602c7b11aa2e1ce30762a1c27193201cb5cee9c6ebb1294" + [[package]] name = "itertools" version = "0.10.5" @@ -7671,6 +7919,18 @@ dependencies = [ "adler", ] +[[package]] +name = "mio" +version = "0.8.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c" +dependencies = [ + "libc", + "log", + "wasi 0.11.0+wasi-snapshot-preview1", + "windows-sys 0.48.0", +] + [[package]] name = "mio" version = "1.0.2" @@ -8193,6 +8453,66 @@ dependencies = [ "unionlabs", ] +[[package]] +name = "mpc-client" +version = 
"0.1.0" +dependencies = [ + "async-sqlite", + "base64 0.21.7", + "clap 4.5.4", + "crossterm", + "futures-util", + "hex", + "http-body-util", + "httpdate", + "hyper 1.3.1", + "hyper-util", + "mpc-shared", + "pgp", + "postgrest", + "rand 0.8.5", + "ratatui", + "reqwest 0.11.27", + "serde", + "serde_json", + "thiserror", + "throbber-widgets-tui", + "tokio", + "tokio-util", +] + +[[package]] +name = "mpc-coordinator" +version = "0.1.0" +dependencies = [ + "async-sqlite", + "clap 4.5.4", + "futures", + "hex", + "mpc-shared", + "pgp", + "postgrest", + "reqwest 0.11.27", + "serde_json", + "thiserror", + "tokio", + "tracing", + "tracing-subscriber 0.3.18", +] + +[[package]] +name = "mpc-shared" +version = "0.1.0" +dependencies = [ + "hex", + "postgrest", + "reqwest 0.11.27", + "serde", + "serde_json", + "thiserror", + "tokio", +] + [[package]] name = "multer" version = "3.1.0" @@ -8391,7 +8711,7 @@ dependencies = [ "proc-macro2", "quote", "strum 0.26.2", - "strum_macros 0.26.2", + "strum_macros 0.26.4", "syn 2.0.77", ] @@ -8408,7 +8728,7 @@ dependencies = [ "serde", "serde_json", "strum 0.26.2", - "strum_macros 0.26.2", + "strum_macros 0.26.4", "syn 2.0.77", ] @@ -8582,6 +8902,7 @@ dependencies = [ "num-iter", "num-traits", "rand 0.8.5", + "serde", "smallvec", "zeroize", ] @@ -8722,6 +9043,18 @@ dependencies = [ "memchr", ] +[[package]] +name = "ocb3" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c196e0276c471c843dd5777e7543a36a298a4be942a2a688d8111cd43390dedb" +dependencies = [ + "aead", + "cipher", + "ctr", + "subtle 2.5.0", +] + [[package]] name = "once_cell" version = "1.19.0" @@ -8877,6 +9210,32 @@ dependencies = [ "sha2 0.10.8", ] +[[package]] +name = "p384" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70786f51bcc69f6a4c0360e063a4cac5419ef7c5cd5b3c99ad70f3be5ba79209" +dependencies = [ + "ecdsa", + "elliptic-curve", + "primeorder", + "sha2 0.10.8", +] + +[[package]] +name = "p521" +version = "0.13.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fc9e2161f1f215afdfce23677034ae137bbd45016a880c2eb3ba8eb95f085b2" +dependencies = [ + "base16ct", + "ecdsa", + "elliptic-curve", + "primeorder", + "rand_core 0.6.4", + "sha2 0.10.8", +] + [[package]] name = "pairing" version = "0.23.0" @@ -9005,6 +9364,17 @@ dependencies = [ "subtle 2.5.0", ] +[[package]] +name = "password-hash" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "346f04948ba92c43e8469c1ee6736c7563d71012b17d40745260fe106aac2166" +dependencies = [ + "base64ct", + "rand_core 0.6.4", + "subtle 2.5.0", +] + [[package]] name = "pasta_curves" version = "0.5.1" @@ -9076,7 +9446,7 @@ checksum = "83a0692ec44e4cf1ef28ca317f14f8f07da2d95ec3fa01f86e4467b725e60917" dependencies = [ "digest 0.10.7", "hmac 0.12.1", - "password-hash", + "password-hash 0.4.2", "sha2 0.10.8", ] @@ -9224,6 +9594,70 @@ dependencies = [ "voyager-vm", ] +[[package]] +name = "pgp" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a6c842436d5fa2b59eac1e9b3d142b50bfff99c1744c816b1f4c2ac55a20754" +dependencies = [ + "aes", + "aes-gcm", + "argon2", + "base64 0.22.1", + "bitfield", + "block-padding 0.3.3", + "blowfish", + "bstr", + "buffer-redux", + "byteorder", + "camellia", + "cast5", + "cfb-mode", + "chrono", + "cipher", + "const-oid", + "crc24", + "curve25519-dalek 4.1.3", + "derive_builder", + "des", + "digest 0.10.7", + "dsa", + "eax", + 
"ecdsa", + "ed25519-dalek 2.1.1", + "elliptic-curve", + "flate2", + "generic-array 0.14.7", + "hex", + "hkdf 0.12.4", + "idea", + "iter-read", + "k256", + "log", + "md-5", + "nom", + "num-bigint-dig", + "num-traits", + "num_enum", + "ocb3", + "p256", + "p384", + "p521", + "rand 0.8.5", + "ripemd", + "rsa", + "sha1", + "sha1-checked", + "sha2 0.10.8", + "sha3 0.10.8", + "signature 2.2.0", + "smallvec", + "thiserror", + "twofish", + "x25519-dalek 2.0.1", + "zeroize", +] + [[package]] name = "pharos" version = "0.5.3" @@ -9540,6 +9974,15 @@ dependencies = [ "serde_json", ] +[[package]] +name = "postgrest" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a966c650b47a064e7082170b4be74fca08c088d893244fc4b70123e3c1f3ee7" +dependencies = [ + "reqwest 0.11.27", +] + [[package]] name = "powerfmt" version = "0.2.0" @@ -10130,6 +10573,27 @@ dependencies = [ "rand_core 0.6.4", ] +[[package]] +name = "ratatui" +version = "0.27.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d16546c5b5962abf8ce6e2881e722b4e0ae3b6f1a08a26ae3573c55853ca68d3" +dependencies = [ + "bitflags 2.5.0", + "cassowary", + "compact_str", + "crossterm", + "itertools 0.13.0", + "lru 0.12.5", + "paste", + "stability", + "strum 0.26.2", + "strum_macros 0.26.4", + "unicode-segmentation", + "unicode-truncate", + "unicode-width", +] + [[package]] name = "raw-cpuid" version = "11.1.0" @@ -11360,6 +11824,16 @@ dependencies = [ "digest 0.10.7", ] +[[package]] +name = "sha1-checked" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "89f599ac0c323ebb1c6082821a54962b839832b03984598375bff3975b804423" +dependencies = [ + "digest 0.10.7", + "sha1", +] + [[package]] name = "sha2" version = "0.8.2" @@ -11437,6 +11911,27 @@ dependencies = [ "lazy_static", ] +[[package]] +name = "signal-hook" +version = "0.3.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8621587d4798caf8eb44879d42e56b9a93ea5dcd315a6487c357130095b62801" +dependencies = [ + "libc", + "signal-hook-registry", +] + +[[package]] +name = "signal-hook-mio" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34db1a06d485c9142248b7a054f034b349b212551f3dfd19c94d45a754a217cd" +dependencies = [ + "libc", + "mio 0.8.11", + "signal-hook", +] + [[package]] name = "signal-hook-registry" version = "1.4.1" @@ -11864,6 +12359,16 @@ dependencies = [ "unionlabs", ] +[[package]] +name = "stability" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d904e7009df136af5297832a3ace3370cd14ff1546a232f4f185036c2736fcac" +dependencies = [ + "quote", + "syn 2.0.77", +] + [[package]] name = "stable_deref_trait" version = "1.2.0" @@ -11977,7 +12482,7 @@ version = "0.26.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5d8cec3501a5194c432b2b7976db6b7d10ec95c253208b45f83f7136aa985e29" dependencies = [ - "strum_macros 0.26.2", + "strum_macros 0.26.4", ] [[package]] @@ -12008,11 +12513,11 @@ dependencies = [ [[package]] name = "strum_macros" -version = "0.26.2" +version = "0.26.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6cf59daf282c0a494ba14fd21610a0325f9f90ec9d1231dea26bcb1d696c946" +checksum = "4c6bee85a5a24955dc440386795aa378cd9cf82acd5f764469152d2270e581be" dependencies = [ - "heck 0.4.1", + "heck 0.5.0", "proc-macro2", "quote", "rustversion", @@ -12459,6 +12964,16 @@ dependencies = [ "num_cpus", 
] +[[package]] +name = "throbber-widgets-tui" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "685391f5e78b08989c1014f5a0edddf6751e0726b6a8dd1bdcc98d05921b19b6" +dependencies = [ + "rand 0.8.5", + "ratatui", +] + [[package]] name = "tidy" version = "0.1.0" @@ -12586,7 +13101,7 @@ dependencies = [ "backtrace", "bytes", "libc", - "mio", + "mio 1.0.2", "parking_lot", "pin-project-lite", "signal-hook-registry", @@ -13158,6 +13673,15 @@ dependencies = [ "utf-8", ] +[[package]] +name = "twofish" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a78e83a30223c757c3947cd144a31014ff04298d8719ae10d03c31c0448c8013" +dependencies = [ + "cipher", +] + [[package]] name = "twox-hash" version = "1.6.3" @@ -13359,11 +13883,22 @@ version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d4c87d22b6e3f4a18d4d40ef354e97c90fcb14dd91d7dc0aa9d8a1172ebf7202" +[[package]] +name = "unicode-truncate" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b3644627a5af5fa321c95b9b235a72fd24cd29c648c2c379431e6628655627bf" +dependencies = [ + "itertools 0.13.0", + "unicode-segmentation", + "unicode-width", +] + [[package]] name = "unicode-width" -version = "0.1.11" +version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e51733f11c9c4f72aa0c160008246859e340b00807569a0da0e7a1079b27ba85" +checksum = "7dd6e30e90baa6f72411720665d41d89b9a3d039dc45b8faea1ddd07f617f6af" [[package]] name = "unicode-xid" @@ -14848,6 +15383,18 @@ dependencies = [ "zeroize", ] +[[package]] +name = "x25519-dalek" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7e468321c81fb07fa7f4c636c3972b9100f0346e5b6a9f2bd0603a52f7ed277" +dependencies = [ + "curve25519-dalek 4.1.3", + "rand_core 0.6.4", + "serde", + "zeroize", +] + [[package]] name = "yaml-rust" version = "0.4.5" diff --git a/galoisd/galoisd.nix b/galoisd/galoisd.nix index bd2ff9300f..e72e53e51e 100644 --- a/galoisd/galoisd.nix +++ b/galoisd/galoisd.nix @@ -12,6 +12,61 @@ }: { packages = { + galoisd = goPkgs.pkgsStatic.build123GoModule ( + { + name = "galoisd"; + src = ./.; + vendorHash = "sha256-wZSsLqnNi38rZL2oJ+GpnMWuo/5ydTJ80ebHQ/SXtis="; + meta = { + mainProgram = "galoisd"; + }; + tags = [ "binary" ]; + doCheck = true; + } + // ( + if pkgs.stdenv.isLinux then + { + CGO_ENABLED = 0; + ldflags = [ + "-extldflags '-static -L${pkgs.musl}/lib -s -w'" + ]; + } + else + { } + ) + ); + + galoisd-library = goPkgs.pkgsStatic.build123GoModule ( + { + name = "libgalois"; + src = ./.; + vendorHash = "sha256-wZSsLqnNi38rZL2oJ+GpnMWuo/5ydTJ80ebHQ/SXtis="; + tags = [ "library" ]; + } + // ( + if pkgs.stdenv.isLinux then + { + nativeBuildInputs = [ + goPkgs.musl + goPkgs.pkgsStatic.binutils + ]; + doCheck = false; + CGO_ENABLED = 1; + GOBIN = "${placeholder "out"}/lib"; + postInstall = '' + mv $out/lib/galoisd $out/lib/libgalois.a + ''; + ldflags = [ + "-s" + "-w" + "-buildmode c-archive" + ]; + } + else + { } + ) + ); + galoisd-image = pkgs.dockerTools.buildImage { name = "${self'.packages.galoisd.name}-image"; copyToRoot = pkgs.buildEnv { @@ -52,97 +107,33 @@ } ); - galoisd = goPkgs.pkgsStatic.build123GoModule ({ - name = "galoisd"; - src = ./.; - vendorHash = "sha256-wZSsLqnNi38rZL2oJ+GpnMWuo/5ydTJ80ebHQ/SXtis="; - meta = { mainProgram = "galoisd"; }; - tags = [ "binary" ]; - doCheck = true; - } // (if pkgs.stdenv.isLinux then { - 
CGO_ENABLED = 0; - ldflags = [ - "-extldflags '-static -L${pkgs.musl}/lib -s -w'" - ]; - } else - { })); - - galoisd-library = goPkgs.pkgsStatic.build123GoModule ({ - name = "libgalois"; - src = ./.; - vendorHash = "sha256-wZSsLqnNi38rZL2oJ+GpnMWuo/5ydTJ80ebHQ/SXtis="; - tags = [ "library" ]; - } // (if pkgs.stdenv.isLinux then { - nativeBuildInputs = [ goPkgs.musl goPkgs.pkgsStatic.binutils ]; - doCheck = false; - CGO_ENABLED = 1; - GOBIN = "${placeholder "out"}/lib"; - postInstall = '' - mv $out/lib/galoisd $out/lib/libgalois.a - ''; - ldflags = [ - "-s" - "-w" - "-buildmode c-archive" - ]; - } else - { })); - - - galoisd-image = pkgs.dockerTools.buildImage { - name = "${self'.packages.galoisd.name}-image"; - copyToRoot = pkgs.buildEnv { - name = "image-root"; - paths = [ pkgs.coreutils-full pkgs.cacert ]; - pathsToLink = [ "/bin" ]; - }; - config = { - Entrypoint = [ (pkgs.lib.getExe self'.packages.galoisd) ]; - Env = [ "SSL_CERT_FILE=${pkgs.cacert}/etc/ssl/certs/ca-bundle.crt" ]; - }; - }; - - generate-prover-proto = mkCi false (pkgs.writeShellApplication { - name = "generate-prover-proto"; - runtimeInputs = - [ pkgs.protobuf pkgs.protoc-gen-go pkgs.protoc-gen-go-grpc ]; - text = '' - find ${proto.galoisd} -type f -regex ".*proto" |\ - while read -r file; do - echo "Generating $file" - protoc \ - -I"${proto.cometbls}/proto" \ - -I"${proto.gogoproto}" \ - -I"${proto.galoisd}" \ - --go_out=./grpc --go_opt=paths=source_relative \ - --go-grpc_out=./grpc --go-grpc_opt=paths=source_relative \ - "$file" - done - ''; - }); - - download-circuit = - let - files = pkgs.writeText "files.txt" '' - /circuit.zip - ''; - in - mkCi false (pkgs.writeShellApplication { - name = "download-circuit"; - runtimeInputs = [ pkgs.rclone pkgs.zip pkgs.unzip ]; - text = '' - if [[ "$#" -ne 1 ]]; then - echo "Invalid arguments, must be: download-circuit [path]" - exit 1 - fi - rclone --progress --no-traverse --http-url "https://circuit.cryptware.io" copy :http:/ "$1" --files-from=${files} - unzip "$1"/circuit.zip - rm "$1"/circuit.zip - ''; - }); + download-circuit = + let + files = pkgs.writeText "files.txt" '' + /circuit.zip + ''; + in + mkCi false ( + pkgs.writeShellApplication { + name = "download-circuit"; + runtimeInputs = [ + pkgs.rclone + pkgs.zip + pkgs.unzip + ]; + text = '' + if [[ "$#" -ne 1 ]]; then + echo "Invalid arguments, must be: download-circuit [path]" + exit 1 + fi + rclone --progress --no-traverse --http-url "https://circuit.cryptware.io" copy :http:/ "$1" --files-from=${files} + unzip "$1"/circuit.zip + rm "$1"/circuit.zip + ''; + } + ); - download-circuit-devnet = - pkgs.writeShellApplication { + download-circuit-devnet = pkgs.writeShellApplication { name = "download-circuit-devnet"; runtimeInputs = [ pkgs.coreutils diff --git a/mpc/README.md b/mpc/README.md index e4c6af1371..f7e5ee609c 100644 --- a/mpc/README.md +++ b/mpc/README.md @@ -2,6 +2,7 @@ This project contains the client and coordinator to conduct Groth16 multi-party computation for the circuit SRS. Three components are in play: + - Supabase : host the state machine in postgresql and exposes api and storage services to upload contributions. - Coordinator: contact Supabase and verify contribution to step the state machine. - Client: pure function that accepts the current contributor id and generate then upload a contribution payload. @@ -17,6 +18,7 @@ The coordinator is in charge of verifying contributions. 
When a contribution is ## Client Exposes an API to contribute at `localhost:4919`: + - `OPTIONS /contribute` - `POST /contribute` a `Contribute` object in body. Returns : - a `202 Accepted` if the contribution started. @@ -28,6 +30,7 @@ Exposes an API to contribute at `localhost:4919`: ### Structures #### Contribute + ```json { "supabase_project": "", @@ -40,6 +43,7 @@ Exposes an API to contribute at `localhost:4919`: ``` #### Status + ```rust #[serde(rename_all = "camelCase")] pub enum Status { diff --git a/mpc/mpc.nix b/mpc/mpc.nix index 91e8c31d2d..b360b77251 100644 --- a/mpc/mpc.nix +++ b/mpc/mpc.nix @@ -1,31 +1,51 @@ -{ self, ... }: { - perSystem = { self', pkgs, crane, ... }: +{ self, ... }: +{ + perSystem = + { + self', + pkgs, + crane, + ... + }: let attrs = { rustflags = "-L${self'.packages.galoisd-library}/lib"; }; - mpc-client = crane.buildWorkspaceMember (attrs // { - crateDirFromRoot = "mpc/client"; - }); - mpc-coordinator = crane.buildWorkspaceMember (attrs // { - crateDirFromRoot = "mpc/coordinator"; - }); + mpc-client = crane.buildWorkspaceMember ( + attrs + // { + crateDirFromRoot = "mpc/client"; + } + ); + mpc-coordinator = crane.buildWorkspaceMember ( + attrs + // { + crateDirFromRoot = "mpc/coordinator"; + } + ); in { - packages = mpc-coordinator.packages // mpc-client.packages // { - mpc-client-image = pkgs.dockerTools.buildImage { - name = "${self'.packages.mpc-client.name}-image"; - copyToRoot = pkgs.buildEnv { - name = "image-root"; - paths = [ pkgs.coreutils-full pkgs.cacert pkgs.ncurses ]; - pathsToLink = [ "/bin" ]; - }; - config = { - Entrypoint = [ (pkgs.lib.getExe self'.packages.mpc-client) ]; - Env = [ "SSL_CERT_FILE=${pkgs.cacert}/etc/ssl/certs/ca-bundle.crt" ]; + packages = + mpc-coordinator.packages + // mpc-client.packages + // { + mpc-client-image = pkgs.dockerTools.buildImage { + name = "${self'.packages.mpc-client.name}-image"; + copyToRoot = pkgs.buildEnv { + name = "image-root"; + paths = [ + pkgs.coreutils-full + pkgs.cacert + pkgs.ncurses + ]; + pathsToLink = [ "/bin" ]; + }; + config = { + Entrypoint = [ (pkgs.lib.getExe self'.packages.mpc-client) ]; + Env = [ "SSL_CERT_FILE=${pkgs.cacert}/etc/ssl/certs/ca-bundle.crt" ]; + }; }; }; - }; checks = mpc-coordinator.checks // mpc-client.checks; }; } From 054782a0fdf2eab9bf5ed93b363dcaae5972ae8f Mon Sep 17 00:00:00 2001 From: Hussein Ait Lahcen Date: Thu, 21 Nov 2024 14:16:05 +0100 Subject: [PATCH 48/52] fix(galoisd): vendoring and upstream changes --- galoisd/galoisd.nix | 14 +- galoisd/go.mod | 1 + galoisd/go.sum | 10 +- .../lightclient/nonadjacent/circuit_test.go | 369 +----------------- 4 files changed, 23 insertions(+), 371 deletions(-) diff --git a/galoisd/galoisd.nix b/galoisd/galoisd.nix index e72e53e51e..f0023ee977 100644 --- a/galoisd/galoisd.nix +++ b/galoisd/galoisd.nix @@ -12,11 +12,11 @@ }: { packages = { - galoisd = goPkgs.pkgsStatic.build123GoModule ( + galoisd = goPkgs.pkgsStatic.buildGo123Module ( { name = "galoisd"; src = ./.; - vendorHash = "sha256-wZSsLqnNi38rZL2oJ+GpnMWuo/5ydTJ80ebHQ/SXtis="; + vendorHash = "sha256-lGqoOkJnTvCdIonLwDDqz9ozDDJwB4wyJXlCgvt4arE="; meta = { mainProgram = "galoisd"; }; @@ -26,9 +26,10 @@ // ( if pkgs.stdenv.isLinux then { - CGO_ENABLED = 0; + CGO_ENABLED = 1; ldflags = [ - "-extldflags '-static -L${pkgs.musl}/lib -s -w'" + "-linkmode external" + "-extldflags '-z noexecstack -static -L${goPkgs.musl}/lib -s -w'" ]; } else @@ -36,12 +37,13 @@ ) ); - galoisd-library = goPkgs.pkgsStatic.build123GoModule ( + galoisd-library = 
goPkgs.pkgsStatic.buildGo123Module ( { name = "libgalois"; src = ./.; - vendorHash = "sha256-wZSsLqnNi38rZL2oJ+GpnMWuo/5ydTJ80ebHQ/SXtis="; + vendorHash = "sha256-lGqoOkJnTvCdIonLwDDqz9ozDDJwB4wyJXlCgvt4arE="; tags = [ "library" ]; + doCheck = false; } // ( if pkgs.stdenv.isLinux then diff --git a/galoisd/go.mod b/galoisd/go.mod index 8029b4b9e9..b68b0346bd 100644 --- a/galoisd/go.mod +++ b/galoisd/go.mod @@ -94,6 +94,7 @@ require ( github.com/prometheus/common v0.59.1 // indirect github.com/prometheus/procfs v0.15.1 // indirect github.com/rogpeppe/go-internal v1.13.1 // indirect + github.com/ronanh/intcomp v1.1.0 // indirect github.com/sasha-s/go-deadlock v0.3.5 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/supranational/blst v0.3.13 // indirect diff --git a/galoisd/go.sum b/galoisd/go.sum index 13cea13912..1d6444dfbc 100644 --- a/galoisd/go.sum +++ b/galoisd/go.sum @@ -90,8 +90,6 @@ github.com/cometbft/cometbft-db v1.0.1 h1:SylKuLseMLQKw3+i8y8KozZyJcQSL98qEe2CGM github.com/cometbft/cometbft-db v1.0.1/go.mod h1:EBrFs1GDRiTqrWXYi4v90Awf/gcdD5ExzdPbg4X8+mk= github.com/consensys/bavard v0.1.13 h1:oLhMLOFGTLdlda/kma4VOJazblc7IM5y5QPd2A/YjhQ= github.com/consensys/bavard v0.1.13/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH223nGHukI= -github.com/consensys/gnark v0.9.2-0.20240312175655-ce0186ef32c1 h1:HQLV1mfE1lGAbJqHqv0rpPZyWv9ieoBvHZ6lJWfXDTQ= -github.com/consensys/gnark v0.9.2-0.20240312175655-ce0186ef32c1/go.mod h1:0dnRvl8EDbPsSZsIg8xOP1Au8cf43xOlT7/BhwMV98g= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/cosmos/btcutil v1.0.5 h1:t+ZFcX77LpKtDBhjucvnOH8C2l2ioGsBNEQ3jef8xFk= github.com/cosmos/btcutil v1.0.5/go.mod h1:IyB7iuqZMJlthe2tkIFL33xPyzbFYP0XVdS8P5lUPis= @@ -383,6 +381,8 @@ github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqn github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= +github.com/ronanh/intcomp v1.1.0 h1:i54kxmpmSoOZFcWPMWryuakN0vLxLswASsGa07zkvLU= +github.com/ronanh/intcomp v1.1.0/go.mod h1:7FOLy3P3Zj3er/kVrU/pl+Ql7JFZj7bwliMGketo0IU= github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= github.com/rs/zerolog v1.33.0 h1:1cU2KZkvPxNyfgEmhHAz/1A9Bz+llsdYzklWFzgp0r8= github.com/rs/zerolog v1.33.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss= @@ -438,8 +438,10 @@ github.com/unionlabs/cometbls/api v0.0.0-20241021101406-df0586cc2041 h1:UkvGSPE4 github.com/unionlabs/cometbls/api v0.0.0-20241021101406-df0586cc2041/go.mod h1:NDFKiBBD8HJC6QQLAoUI99YhsiRZtg2+FJWfk6A6m6o= github.com/unionlabs/cosmos-sdk v0.0.0-20241018173625-c2982236c557 h1:fUFvszqpOwv7sGU4feBqC8nOImEFrO7gba/Jo14NvYs= github.com/unionlabs/cosmos-sdk v0.0.0-20241018173625-c2982236c557/go.mod h1:RkgOx8ysunO3teUnkvxOGDsaIZX2BW6uU2LCeUqsU9A= -github.com/unionlabs/gnark-crypto v0.0.0-20240112093739-635c1b6963c6 h1:wRt6Yt29bWvwCSeRmRJ/Wm1sRev1GjJGXn4MzSrMbv4= -github.com/unionlabs/gnark-crypto v0.0.0-20240112093739-635c1b6963c6/go.mod h1:wKqwsieaKPThcFkHe0d0zMsbHEUWFmZcG7KBCse210o= +github.com/unionlabs/gnark v0.0.0-20240723153903-9d859afe4c14 h1:qI5Bjy9cLI62v5ZOb+1YThbuUHoS9Jd8yJtBqo7Vqzo= +github.com/unionlabs/gnark v0.0.0-20240723153903-9d859afe4c14/go.mod h1:S+QS+G9ZclYU8cukF+fi8+CoWIXy/HUcmcIkc8gj4Q8= +github.com/unionlabs/gnark-crypto 
v0.0.0-20240720201413-c0383b2a80e9 h1:23VxTNlW0gIcUOW/WKMF7kZITEOictR+hi54g6wkRHs= +github.com/unionlabs/gnark-crypto v0.0.0-20240720201413-c0383b2a80e9/go.mod h1:wKqwsieaKPThcFkHe0d0zMsbHEUWFmZcG7KBCse210o= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= diff --git a/galoisd/pkg/lightclient/nonadjacent/circuit_test.go b/galoisd/pkg/lightclient/nonadjacent/circuit_test.go index 2e16e4da83..d48ddb8a64 100644 --- a/galoisd/pkg/lightclient/nonadjacent/circuit_test.go +++ b/galoisd/pkg/lightclient/nonadjacent/circuit_test.go @@ -1,25 +1,27 @@ package nonadjacent import ( - "cosmossdk.io/math" "crypto/sha256" "encoding/hex" "fmt" "galois/pkg/lightclient" + "math/big" + "math/rand" + "time" + + tmtypes "github.com/cometbft/cometbft/api/cometbft/types/v1" + version "github.com/cometbft/cometbft/api/cometbft/version/v1" cometbn254 "github.com/cometbft/cometbft/crypto/bn254" ce "github.com/cometbft/cometbft/crypto/encoding" "github.com/cometbft/cometbft/crypto/merkle" - tmtypes "github.com/cometbft/cometbft/proto/tendermint/types" - "github.com/cometbft/cometbft/proto/tendermint/version" "github.com/cometbft/cometbft/types" comettypes "github.com/cometbft/cometbft/types" + "github.com/consensys/gnark-crypto/ecc" curve "github.com/consensys/gnark-crypto/ecc/bn254" "github.com/consensys/gnark-crypto/ecc/bn254/fr" - "github.com/consensys/gnark/backend/witness" - bn254 "github.com/consensys/gnark/constraint/bn254" + "github.com/consensys/gnark/frontend" - "github.com/consensys/gnark/frontend/cs/r1cs" gadget "github.com/consensys/gnark/std/algebra/emulated/sw_bn254" "github.com/consensys/gnark/test" @@ -625,358 +627,3 @@ func TestCantSelectPaddedValidator(t *testing.T) { func TestCantSelectPaddedPower(t *testing.T) { AttackFailing(t, AttackSelectPaddedPower) } - -func buildCircuit(t *testing.T) (*bn254.R1CS, *witness.Witness, error) { - r := rand.New(rand.NewSource(0xCAFEBABE)) - - nbOfValidators := 1 + r.Uint32()%lightclient.MaxVal - - privKeys := make([]cometbn254.PrivKey, nbOfValidators) - validators := make([]*tmtypes.SimpleValidator, nbOfValidators) - totalPower := int64(0) - for i := 0; i < len(validators); i++ { - privKeys[i] = cometbn254.GenPrivKey() - val, err := toValidator(privKeys[i].PubKey().Bytes(), 100000000+r.Int63n(100000000)) - if err != nil { - t.Fatal(err) - } - totalPower += val.VotingPower - validators[i] = val - } - - trustedValidators := validators - untrustedValidators := validators - - trustedValidatorsInput, trustedValidatorsRoot, err := marshalValidators(trustedValidators) - if err != nil { - t.Fatal(err) - } - - untrustedValidatorsInput, untrustedValidatorsRoot, err := marshalValidators(untrustedValidators) - if err != nil { - t.Fatal(err) - } - - header, vote, cometblsHeader, cometblsVote := getBlockHeader(r, trustedValidatorsRoot, untrustedValidatorsRoot) - - signedBytes := comettypes.VoteSignBytes(cometblsHeader.ChainID, cometblsVote) - - var signatures [][]byte - var bitmap big.Int - votingPower := 0 - - for true { - if votingPower > int(totalPower)/3*2+1 { - break - } - index := uint32(rand.Int31n(int32(nbOfValidators) - 1)) - i := index - for bitmap.Bit(int(i)) == 1 { - i = (i + 1) % nbOfValidators - } - votingPower += int(validators[i].VotingPower) - bitmap.SetBit(&bitmap, int(i), 1) - sig, err := privKeys[i].Sign(signedBytes) - if err != nil { - t.Fatal(err) - } - 
signatures = append(signatures, sig) - } - - trustedSignatures := signatures - untrustedSignatures := signatures - - trustedAggregatedSignature, err := aggregateSignatures(trustedSignatures) - if err != nil { - t.Fatal(err) - } - - untrustedAggregatedSignature, err := aggregateSignatures(untrustedSignatures) - if err != nil { - t.Fatal(err) - } - - trustedBitmap := bitmap - untrustedBitmap := bitmap - - trustedInput := TendermintNonAdjacentLightClientInput{ - Sig: gadget.NewG2Affine(trustedAggregatedSignature), - Validators: trustedValidatorsInput, - NbOfVal: nbOfValidators, - NbOfSignature: len(trustedSignatures), - Bitmap: trustedBitmap, - } - - untrustedInput := TendermintNonAdjacentLightClientInput{ - Sig: gadget.NewG2Affine(untrustedAggregatedSignature), - Validators: untrustedValidatorsInput, - NbOfVal: nbOfValidators, - NbOfSignature: len(untrustedSignatures), - Bitmap: untrustedBitmap, - } - - circuit := Circuit{ - DomainSeparationTag: []byte(cometbn254.CometblsSigDST), - TrustedInput: trustedInput, - TrustedValRoot: trustedValidatorsRoot, - UntrustedInput: untrustedInput, - Vote: *vote, - Header: *header, - InputsHash: inputsHash(cometblsHeader), - } - - err = test.IsSolved( - &Circuit{}, - &circuit, - ecc.BN254.ScalarField(), - ) - assert.NoError(t, err) - - witness, err := frontend.NewWitness(&circuit, ecc.BN254.ScalarField()) - if err != nil { - return nil, nil, err - } - - r1cs, err := frontend.Compile(ecc.BN254.ScalarField(), r1cs.NewBuilder, &circuit) - if err != nil { - return nil, nil, err - } - - return r1cs.(*bn254.R1CS), &witness, err -} - -// func TestNonAdjacentSetup(t *testing.T) { -// const ( -// nContributionsPhase1 = 1 -// nContributionsPhase2 = 1 -// power = 22 -// ) - -// assert := require.New(t) - -// t.Log("InitPhase1") -// start := time.Now() -// var srs1 mpc.Phase1 -// err := readFrom("final.ph1", &srs1) -// assert.NoError(err) -// t.Logf("InitPhase1: %v", time.Since(start)) - -// t.Log("Building circuit...") -// start = time.Now() -// r1cs, witness, err := buildCircuit(t) -// assert.NoError(err) -// t.Logf("Built in: %v", time.Since(start)) - -// t.Log("InitPhase2") -// start = time.Now() -// var srs2 mpc.Phase2 -// err = readFrom("init.ph2", &srs2) -// assert.NoError(err) -// var evals mpc.Phase2Evaluations -// err = readFrom("evals.ph2", &evals) -// assert.NoError(err) -// t.Logf("InitPhase2: %v", time.Since(start)) - -// err = saveTo("init.ph2", &srs2) -// assert.NoError(err) -// err = saveTo("evals.ph2", &evals) -// assert.NoError(err) - -// err = saveTo("contrib.ph2", &srs2) -// assert.NoError(err) - -// // Make and verify contributions for phase1 -// for i := 0; i < nContributionsPhase2; i++ { -// // we clone for test purposes; but in practice, participant will receive a []byte, deserialize it, -// // add his contribution and send back to coordinator. 
-// t.Log("ContributePhase2") -// assert.NoError(err) -// start = time.Now() -// srs2.Contribute() -// t.Logf("ContributePhase2: %v", time.Since(start)) -// t.Log("VerifyPhase2") -// var prev mpc.Phase2 -// start = time.Now() -// err = readFrom("contrib.ph2", &prev) -// t.Logf("VerifyPhase2Read: %v", time.Since(start)) -// start = time.Now() -// assert.NoError(mpc.VerifyPhase2(&prev, &srs2)) -// err = saveTo("contrib.ph2", &srs2) -// assert.NoError(err) -// t.Logf("VerifyPhase2: %v", time.Since(start)) -// } - -// t.Log("ExtractKeys") -// // Extract the proving and verifying keys -// pk, vk := mpc.ExtractKeys(r1cs, &srs1, &srs2, &evals) - -// t.Log("Save") -// err = saveTo("r1cs.bin", r1cs) -// assert.NoError(err) -// err = saveTo("pk.bin", &pk) -// assert.NoError(err) -// err = saveTo("vk.bin", &vk) -// assert.NoError(err) - -// t.Log("Prove...") -// proof, err := groth16.Prove(r1cs, &pk, *witness) -// assert.NoError(err) - -// pubWitness, err := (*witness).Public() -// assert.NoError(err) - -// t.Log("Verify...") -// err = groth16.Verify(proof, &vk, pubWitness) -// assert.NoError(err) -// } - -// func readFrom(file string, obj io.ReaderFrom) error { -// f, err := os.OpenFile(file, os.O_RDONLY, os.ModePerm) -// if err != nil { -// return err -// } -// defer f.Close() -// obj.ReadFrom(f) -// return nil -// } - -// func saveTo(file string, x io.WriterTo) error { -// f, err := os.Create(file) -// if err != nil { -// return err -// } -// defer f.Close() -// w := bufio.NewWriter(f) -// _, err = x.WriteTo(w) -// if err != nil { -// return err -// } -// w.Flush() -// return nil -// } - -// func clonePhase1(phase1 *mpc.Phase1) mpc.Phase1 { -// r := mpc.Phase1{} -// r.Parameters.G1.Tau = append(r.Parameters.G1.Tau, phase1.Parameters.G1.Tau...) -// r.Parameters.G1.AlphaTau = append(r.Parameters.G1.AlphaTau, phase1.Parameters.G1.AlphaTau...) -// r.Parameters.G1.BetaTau = append(r.Parameters.G1.BetaTau, phase1.Parameters.G1.BetaTau...) - -// r.Parameters.G2.Tau = append(r.Parameters.G2.Tau, phase1.Parameters.G2.Tau...) -// r.Parameters.G2.Beta = phase1.Parameters.G2.Beta - -// r.PublicKeys = phase1.PublicKeys -// r.Hash = append(r.Hash, phase1.Hash...) - -// return r -// } - -// func clonePhase2(phase2 *mpc.Phase2) mpc.Phase2 { -// r := mpc.Phase2{} -// r.Parameters.G1.BasisExpSigma = make([][]curve.G1Affine, len(r.Parameters.G1.BasisExpSigma)) -// for i := 0; i < len(r.Parameters.G1.BasisExpSigma); i++ { -// r.Parameters.G1.BasisExpSigma[i] = append( -// r.Parameters.G1.BasisExpSigma[i], -// phase2.Parameters.G1.BasisExpSigma[i]..., -// ) -// } -// r.Parameters.G1.Delta = phase2.Parameters.G1.Delta -// r.Parameters.G1.L = append(r.Parameters.G1.L, phase2.Parameters.G1.L...) -// r.Parameters.G1.Z = append(r.Parameters.G1.Z, phase2.Parameters.G1.Z...) -// r.Parameters.G2.Delta = phase2.Parameters.G2.Delta -// r.Parameters.G2.GRootSigmaNeg = phase2.Parameters.G2.GRootSigmaNeg -// r.PublicKey = phase2.PublicKey -// r.Hash = append(r.Hash, phase2.Hash...) 
- -// return r -// } - -// func convertPtauToPhase1(ptau Ptau) (phase1 mpc.Phase1, err error) { -// tauG1 := make([]curve.G1Affine, len(ptau.PTauPubKey.TauG1)) -// for i, g1 := range ptau.PTauPubKey.TauG1 { -// g1Affine := curve.G1Affine{} -// x := bytesToElement(g1[0].Bytes()) -// g1Affine.X = x -// y := bytesToElement(g1[1].Bytes()) -// g1Affine.Y = y -// if !g1Affine.IsOnCurve() { -// fmt.Printf("tauG1: \n index: %v g1Affine.X: %v \n g1Affine.Y: %v \n", i, g1Affine.X.String(), g1Affine.Y.String()) -// panic("g1Affine is not on curve") -// } -// tauG1[i] = g1Affine -// } - -// alphaTauG1 := make([]curve.G1Affine, len(ptau.PTauPubKey.AlphaTauG1)) -// for i, g1 := range ptau.PTauPubKey.AlphaTauG1 { -// g1Affine := curve.G1Affine{} -// x := bytesToElement(g1[0].Bytes()) -// g1Affine.X = x -// y := bytesToElement(g1[1].Bytes()) -// g1Affine.Y = y -// if !g1Affine.IsOnCurve() { -// fmt.Printf("alphaTauG1: \n index: %v g1Affine.X: %v \n g1Affine.Y: %v \n", i, g1Affine.X.String(), g1Affine.Y.String()) -// panic("g1Affine is not on curve") -// } -// alphaTauG1[i] = g1Affine -// } -// // fmt.Printf("alphaTauG1: %v \n", alphaTauG1) - -// betaTauG1 := make([]curve.G1Affine, len(ptau.PTauPubKey.BetaTauG1)) - -// for i, g1 := range ptau.PTauPubKey.BetaTauG1 { -// g1Affine := curve.G1Affine{} -// x := bytesToElement(g1[0].Bytes()) -// g1Affine.X = x -// y := bytesToElement(g1[1].Bytes()) -// g1Affine.Y = y -// if !g1Affine.IsOnCurve() { -// fmt.Printf("betaTauG1: \n index: %v, g1Affine.X: %v \n g1Affine.Y: %v \n", i, g1Affine.X.String(), g1Affine.Y.String()) -// panic("g1Affine is not on curve") -// } -// betaTauG1[i] = g1Affine -// } -// tauG2 := make([]curve.G2Affine, len(ptau.PTauPubKey.TauG2)) -// for i, g2 := range ptau.PTauPubKey.TauG2 { -// g2Affine := curve.G2Affine{} -// x0 := bytesToElement(g2[0].Bytes()) -// x1 := bytesToElement(g2[1].Bytes()) -// g2Affine.X.A0 = x0 -// g2Affine.X.A1 = x1 -// y0 := bytesToElement(g2[2].Bytes()) -// y1 := bytesToElement(g2[3].Bytes()) -// g2Affine.Y.A0 = y0 -// g2Affine.Y.A1 = y1 -// if !g2Affine.IsOnCurve() { -// fmt.Printf("tauG2: \n index: %v, g2Affine.X.A0: %v \n g2Affine.X.A1: %v \n g2Affine.Y.A0: %v \n g2Affine.Y.A1 %v \n", i, g2Affine.X.A0.String(), g2Affine.X.A1.String(), g2Affine.Y.A0.String(), g2Affine.Y.A1.String()) -// panic("g2Affine is not on curve") -// } -// tauG2[i] = g2Affine -// } - -// betaG2 := curve.G2Affine{} -// { -// g2 := ptau.PTauPubKey.BetaG2 - -// x0 := bytesToElement(g2[0].Bytes()) -// x1 := bytesToElement(g2[1].Bytes()) -// betaG2.X.A0 = x0 -// betaG2.X.A1 = x1 -// y0 := bytesToElement(g2[2].Bytes()) -// y1 := bytesToElement(g2[3].Bytes()) -// betaG2.Y.A0 = y0 -// betaG2.Y.A1 = y1 - -// if !betaG2.IsOnCurve() { -// fmt.Printf("g2Affine.X.A0: %v \n g2Affine.X.A1: %v \n g2Affine.Y.A0: %v \n g2Affine.Y.A1 %v \n", betaG2.X.A0.String(), betaG2.X.String(), betaG2.Y.A0.String(), betaG2.Y.A1.String()) -// panic("g2Affine is not on curve") -// } -// } - -// phase1.Parameters.G1.Tau = tauG1 -// phase1.Parameters.G1.AlphaTau = alphaTauG1 -// phase1.Parameters.G1.BetaTau = betaTauG1 - -// phase1.Parameters.G2.Tau = tauG2 -// phase1.Parameters.G2.Beta = betaG2 - -// return phase1, nil -// } From 5374b3e4447a82d420d31cd771227cedd516c063 Mon Sep 17 00:00:00 2001 From: Hussein Ait Lahcen Date: Thu, 21 Nov 2024 14:39:11 +0100 Subject: [PATCH 49/52] feat(mpc): add ping edge function --- mpc/edge/supabase/.gitignore | 4 ++ mpc/edge/supabase/functions/ping/index.ts | 49 +++++++++++++++++++++++ 2 files changed, 53 insertions(+) create mode 100644 
mpc/edge/supabase/.gitignore
 create mode 100644 mpc/edge/supabase/functions/ping/index.ts

diff --git a/mpc/edge/supabase/.gitignore b/mpc/edge/supabase/.gitignore
new file mode 100644
index 0000000000..a3ad88055b
--- /dev/null
+++ b/mpc/edge/supabase/.gitignore
@@ -0,0 +1,4 @@
+# Supabase
+.branches
+.temp
+.env
diff --git a/mpc/edge/supabase/functions/ping/index.ts b/mpc/edge/supabase/functions/ping/index.ts
new file mode 100644
index 0000000000..3f077e7ac4
--- /dev/null
+++ b/mpc/edge/supabase/functions/ping/index.ts
@@ -0,0 +1,49 @@
+const resendSecret = Deno.env.get("RESEND_SECRET")
+const resendApiKey = Deno.env.get("RESEND_API_KEY")
+
+const handler = async (request: Request): Promise<Response> => {
+  const { secret, email } = await request.json()
+  if (secret !== resendSecret) {
+    return new Response(JSON.stringify("too bad"), {
+      status: 403,
+      headers: {
+        "Content-Type": "application/json"
+      }
+    })
+  }
+  const res = await fetch("https://api.resend.com/emails", {
+    method: "POST",
+    headers: {
+      "Content-Type": "application/json",
+      Authorization: `Bearer ${resendApiKey}`
+    },
+    body: JSON.stringify({
+      from: "Union Ceremony <ceremony@union.build>",
+      to: [email],
+      reply_to: "ceremony@union.build",
+      subject: "Your Turn Is Almost Here - Log Into Union Ceremony",
+      html: `
+<p>
+  Your contribution slot for the Union Trusted Setup Ceremony is almost here.
+</p>
+<p>
+  Your place in queue: 5<br/>
+  Estimated time until your slot: between 1 hour and 5 hours
+</p>
+<p>
+  Please go to ceremony.union.build, log in, and follow all steps on the page.<br/>
+  If you do not follow all steps by the time your contribution slot arrives, you will lose your slot.
+</p>
+      `
+    })
+  })
+  const data = await res.json()
+  return new Response(JSON.stringify(data), {
+    status: 200,
+    headers: {
+      "Content-Type": "application/json"
+    }
+  })
+}
+
+Deno.serve(handler)

From fc775c24f73be1fabee9558da79f1c774a0fa4a3 Mon Sep 17 00:00:00 2001
From: Hussein Ait Lahcen
Date: Thu, 21 Nov 2024 14:39:30 +0100
Subject: [PATCH 50/52] chore: typos and fmt

---
 mpc/README.md                |  6 +++---
 mpc/client/Cargo.toml        |  2 +-
 mpc/coordinator/Cargo.toml   |  2 +-
 mpc/coordinator/database.sql | 10 +++++-----
 mpc/mpc.nix                  |  4 +---
 typos.toml                   |  1 +
 6 files changed, 12 insertions(+), 13 deletions(-)

diff --git a/mpc/README.md b/mpc/README.md
index f7e5ee609c..b07aa95410 100644
--- a/mpc/README.md
+++ b/mpc/README.md
@@ -3,7 +3,7 @@ This project contains the client and coordinator to conduct Groth16 multi-party
 computation for the circuit SRS. Three components are in play:
 
-- Supabase : host the state machine in postgresql and exposes api and storage services to upload contributions.
+- Supabase: hosts the state machine in PostgreSQL and exposes API and storage services to upload contributions.
 - Coordinator: contact Supabase and verify contribution to step the state machine.
 - Client: pure function that accepts the current contributor id and generate then upload a contribution payload.
 
@@ -20,10 +20,10 @@ The coordinator is in charge of verifying contributions. When a contribution is
 Exposes an API to contribute at `localhost:4919`:
 
 - `OPTIONS /contribute`
-- `POST /contribute` a `Contribute` object in body. Returns :
+- `POST /contribute` a `Contribute` object in body. Returns:
   - a `202 Accepted` if the contribution started.
   - a `503 Unavailable` if the client is busy (likely already contributing).
-- `GET /contribute` returns :
+- `GET /contribute` returns:
   - a `200 Ok` if everything is ok with the body containing an encoded `Status` representing the client status (idle, contributing etc...).
   - a `500 InternalServerError` if the contribution failed unexpectedly, the body contains the error message.
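For context on the API described above — and outside the patch itself — a minimal sketch of an external poller for the client's `GET /contribute` endpoint. It assumes `tokio`, `serde_json`, and `reqwest` (with the `json` feature) as dependencies; none of these crate choices are mandated by this patch.

```rust
use std::time::Duration;

// Minimal status poller for the local mpc-client API described in the README.
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = reqwest::Client::new();
    loop {
        let resp = client.get("http://localhost:4919/contribute").send().await?;
        match resp.status() {
            // 200 Ok: the body is the JSON-encoded `Status` (camelCase variants).
            reqwest::StatusCode::OK => {
                let status: serde_json::Value = resp.json().await?;
                println!("client status: {status}");
            }
            // 500 InternalServerError: the body carries the error message.
            reqwest::StatusCode::INTERNAL_SERVER_ERROR => {
                eprintln!("contribution failed: {}", resp.text().await?);
                break;
            }
            other => eprintln!("unexpected response: {other}"),
        }
        tokio::time::sleep(Duration::from_secs(5)).await;
    }
    Ok(())
}
```

Decoding into `serde_json::Value` is a shortcut; a real integration would deserialize into the camelCase-encoded `Status` enum shown in the README.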
diff --git a/mpc/client/Cargo.toml b/mpc/client/Cargo.toml index b5c07cbc19..5bd69927b4 100644 --- a/mpc/client/Cargo.toml +++ b/mpc/client/Cargo.toml @@ -6,7 +6,7 @@ version = "0.1.0" [dependencies] async-sqlite = "0.2.2" base64 = { workspace = true } -clap = { version = "4.5", features = ["derive"] } +clap = { workspace = true, features = ["derive"] } crossterm = "0.27.0" futures-util = "0.3" hex = { workspace = true } diff --git a/mpc/coordinator/Cargo.toml b/mpc/coordinator/Cargo.toml index acbfa65c2b..08734c65a1 100644 --- a/mpc/coordinator/Cargo.toml +++ b/mpc/coordinator/Cargo.toml @@ -5,7 +5,7 @@ version = "0.1.0" [dependencies] async-sqlite = "0.2.2" -clap = { version = "4.5", features = ["derive"] } +clap = { workspace = true, features = ["derive"] } futures.workspace = true hex = { workspace = true } mpc-shared = { workspace = true } diff --git a/mpc/coordinator/database.sql b/mpc/coordinator/database.sql index 08954e830c..7638ca7627 100644 --- a/mpc/coordinator/database.sql +++ b/mpc/coordinator/database.sql @@ -3,11 +3,11 @@ BEGIN; ----------- -- Erase -- ----------- -TRUNCATE TABLE public.log; -TRUNCATE TABLE auth.users CASCADE; -DELETE FROM storage.objects o -WHERE o.bucket_id = 'contributions' -AND o.name <> '00000000-0000-0000-0000-000000000000'; +-- TRUNCATE TABLE public.log; +-- TRUNCATE TABLE auth.users CASCADE; +-- DELETE FROM storage.objects o +-- WHERE o.bucket_id = 'contributions' +-- AND o.name <> '00000000-0000-0000-0000-000000000000'; -- Default bucket for contributions upload INSERT INTO storage.buckets(id, name, public) VALUES('contributions', 'contributions', false); diff --git a/mpc/mpc.nix b/mpc/mpc.nix index b360b77251..c1f3802246 100644 --- a/mpc/mpc.nix +++ b/mpc/mpc.nix @@ -1,5 +1,4 @@ -{ self, ... }: -{ +_: { perSystem = { self', @@ -46,6 +45,5 @@ }; }; }; - checks = mpc-coordinator.checks // mpc-client.checks; }; } diff --git a/typos.toml b/typos.toml index 0493c094ea..4564a7efb5 100644 --- a/typos.toml +++ b/typos.toml @@ -1141,6 +1141,7 @@ files.extend-exclude = [ "rabby" = "rabby" "randao" = "randao" "rapide" = "rapide" +"ratatui" = "ratatui" "ratelimit" = "ratelimit" "ratelimited" = "ratelimited" "rawfile" = "rawfile" From eb62b71bc60668da0e602eaa3d6aceec183fb5ca Mon Sep 17 00:00:00 2001 From: Hussein Ait Lahcen Date: Thu, 21 Nov 2024 17:45:40 +0100 Subject: [PATCH 51/52] feat(galois): checkpoint vk/pk from mpc --- evm/contracts/clients/CometblsClient.sol | 2 +- evm/contracts/clients/Verifier.sol | 85 +++---- evm/tests/src/Verifier.t.sol | 72 ++++++ lib/cometbls-groth16-verifier/src/lib.rs | 128 ++-------- .../verifying_key.bin | Bin 524 -> 524 bytes lib/gnark-key-parser/src/lib.rs | 233 ++++++++++++++++++ 6 files changed, 359 insertions(+), 161 deletions(-) create mode 100644 evm/tests/src/Verifier.t.sol diff --git a/evm/contracts/clients/CometblsClient.sol b/evm/contracts/clients/CometblsClient.sol index d25038e635..d058943c09 100644 --- a/evm/contracts/clients/CometblsClient.sol +++ b/evm/contracts/clients/CometblsClient.sol @@ -531,7 +531,7 @@ contract CometblsClient is // Drop the most significant byte to fit in F_r bytes32 inputsHash = sha256( abi.encodePacked( - bytes32(chainId), + bytes32(uint256(uint248(chainId))), bytes32(uint256(header.height)), bytes32(uint256(header.secs)), bytes32(uint256(header.nanos)), diff --git a/evm/contracts/clients/Verifier.sol b/evm/contracts/clients/Verifier.sol index fcf8ebf26c..067a6b7c09 100644 --- a/evm/contracts/clients/Verifier.sol +++ b/evm/contracts/clients/Verifier.sol @@ -42,74 +42,63 @@ library 
Verifier { uint256 constant EXP_SQRT_FP = 0xC19139CB84C680A6E14116DA060561765E05AA45A1C72A34F082305B61F3F52; // (P + 1) / 4; - // Groth16 alpha point in G1 + // Verifying key uint256 constant ALPHA_X = - 4252850302693242182654534639730627324742305503909561446344356971523664816281; + 0x245229d9b076b3c0e8a4d70bde8c1cccffa08a9fae7557b165b3b0dbd653e2c7; uint256 constant ALPHA_Y = - 3971530409048238023625806606514600982127202826003358538821613170737831313919; - - // Groth16 beta point in G2 in powers of i + 0x253ec85988dbb84e46e94b5efa3373b47a000b4ac6c86b2d4b798d274a182302; uint256 constant BETA_NEG_X_0 = - 9609903744775525881338738176064678545439912439219033822736570321349357348980; + 0x2424bcc1f60a5472685fd50705b2809626e170120acaf441e133a2bd5e61d244; uint256 constant BETA_NEG_X_1 = - 11402125448377072234752634956069960846261435348550776006069399216352815312229; + 0x07090a82e8fabbd39299be24705b92cf208ee8b3487f6f2b39ff27978a29a1db; uint256 constant BETA_NEG_Y_0 = - 18012228678282290194170129154972180638950912669850573130308339510071981008545; + 0x04ddc8d30d5c438ca34091c5d2c6ded571382cba2b3c4fdc4222df2938b4e51e; uint256 constant BETA_NEG_Y_1 = - 15756550515454626729445647420198526257176992371703002957323861385095544414838; - - // Groth16 gamma point in G2 in powers of i + 0x25833b15e156ae01f2741f4f4120ddb466c52eb83a959f79eb99b23caa7fbf1d; uint256 constant GAMMA_NEG_X_0 = - 15418804173338388766896385877623893969695670309009587476846726795628238714393; + 0x1800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed; uint256 constant GAMMA_NEG_X_1 = - 14882897597913405382982164467298010752166363844685258881581520272046793702095; + 0x198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c2; uint256 constant GAMMA_NEG_Y_0 = - 17722217720691050164784298688157009907556422267906762591449788940639280738106; + 0x1d9befcd05a5323e6da4d435f3b617cdb3af83285c2df711ef39c01571827f9d; uint256 constant GAMMA_NEG_Y_1 = - 21681514378991397271958143575996358636110810782474567203218670880519258244465; - - // Groth16 delta point in G2 in powers of i + 0x275dc4a288d1afb3cbb1ac09187524c7db36395df7be3b99e673b13a075a65ec; uint256 constant DELTA_NEG_X_0 = - 2636161939055419322743684458857549714230849256995406138405588958157843793131; + 0x02aca5d2a73f8d34e4b26eee3932365e6526c8d5e2f3347d679c2cb1867104dc; uint256 constant DELTA_NEG_X_1 = - 18711435617866698040659011365354165232283248284733617156044102129651710736892; + 0x07b8dbefa90bde075a26318e5066db729155514e3c06b888d4e03c56d82c97e6; uint256 constant DELTA_NEG_Y_0 = - 2647887006311232967132848950859794223811860619760715975180654346594734512903; + 0x1696ccafaefe49a5d8bad8e79630e19b25e5392a203aff0042d0216f254806f5; uint256 constant DELTA_NEG_Y_1 = - 9638871602237154557801043117594638698760262947775166324439744310655148732994; - - // Constant and public input points + 0x2edb19cbb2b6ad0c98fdd7d1845500c26e497dc35e4cdc1cb02cc65dc4ba1bf2; uint256 constant CONSTANT_X = - 17683074019270049519594214298171697666582975915064153618004061598086681825921; + 0x2f5d8a3817f21d3e453573c90c3cc47b7ff235fad7bdfbd59bbd6ae5d153273e; uint256 constant CONSTANT_Y = - 16826145467743906176166100307225491106961753217491843100452871479833450456070; + 0x147fa22142b1fd86ce75fc87230a0feac8765d02938784dcfc828d17d7e7c432; uint256 constant PUB_0_X = - 4999724750322169039879775285047941133298355297928988655266615607529011563466; + 0x2a81b98e1c997bd01a20893a08a46c6804493e838c1a0ff6c8c069ef5ab66b9a; uint256 constant PUB_0_Y = - 
8614448667589143428827059805500251818303043966026074735628377626634208993292; + 0x276938ada8075cec20d4d6a1f157ec94cc7ba6207c98576e98c1ad9d6378fb6f; uint256 constant PUB_1_X = - 1184807858330365651919114999096473332175166887333719856514157833289677967559; + 0x179496ce140df89ce35c5ee7fb496efdffda5e5d3b95ff9116e2e5df96b36ab7; uint256 constant PUB_1_Y = - 20327610427697660249999185524229068956160879388632193295649998184224119517657; - - // Commitment key + 0x0326e7d44688ce5903676b7d646e46a5938c8e5fd8cd54e4d5aa3300649f3cfc; uint256 constant PEDERSEN_G_X_0 = - 0x257DF6F8132CB0037F7DFDF1A29B04C1FF92BA082EDA513996BA2BFA9FBD1987; + 0x1800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed; uint256 constant PEDERSEN_G_X_1 = - 0x13F0D8D8879885CA567EF99298C30C397E6FBA584658F4127713A814C06DE55A; + 0x198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c2; uint256 constant PEDERSEN_G_Y_0 = - 0x1660EBCC60C7A3AC560EFCEA5993F528EE13685D3A39694ACD74FE67C80D798A; + 0x12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa; uint256 constant PEDERSEN_G_Y_1 = - 0x15E80642C58DB4DBE0A87F92CE3C65E962F231278353783A691FD64078BA7F34; - + 0x090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b; uint256 constant PEDERSEN_G_ROOT_SIGMA_NEG_X_0 = - 0x2FBFE141A7555CF7E3E86B092660B81CFB68A025AD817E45CEC0B0F2E2CA6368; + 0x02aca5d2a73f8d34e4b26eee3932365e6526c8d5e2f3347d679c2cb1867104dc; uint256 constant PEDERSEN_G_ROOT_SIGMA_NEG_X_1 = - 0x02A104DF1C015F2307FA2859627098CDF9FDB521D61D323943343A12304E5BAF; + 0x07b8dbefa90bde075a26318e5066db729155514e3c06b888d4e03c56d82c97e6; uint256 constant PEDERSEN_G_ROOT_SIGMA_NEG_Y_0 = - 0x27DA3F93ECF3BFD0B3A3354AE2162A6C230C0E539B6D9F82C0826E2B006A5922; + 0x1696ccafaefe49a5d8bad8e79630e19b25e5392a203aff0042d0216f254806f5; uint256 constant PEDERSEN_G_ROOT_SIGMA_NEG_Y_1 = - 0x2C0838551CB9E5CF67DB57DE7E2250BB97807F6687F135A6EB910359BA7BDB8D; + 0x2edb19cbb2b6ad0c98fdd7d1845500c26e497dc35e4cdc1cb02cc65dc4ba1bf2; /// Compute the public input linear combination. /// @notice Reverts with PublicInputNotInField if the input is not in the field. 
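The hunk above swaps in the Groth16 verifying key checkpointed from the MPC ceremony: alpha in G1, the negated beta/gamma/delta points in G2, the public-input bases, and the Pedersen commitment key, all now spelled as hex words. As a rough sketch of where constants of this shape come from, the following Go snippet uses gnark-crypto (already a dependency of galoisd) to negate a G2 point and print its four field limbs; the generator stands in for the real key material, which is an assumption for illustration only. The contract stores the negated points so verification collapses into a single pairing-product check, and the EVM pairing precompile (EIP-197) consumes Fp2 coordinates imaginary part first, which is consistent with the swapped `mstore` order in the next hunk.

```go
package main

import (
	"fmt"

	"github.com/consensys/gnark-crypto/ecc/bn254"
)

func main() {
	// The G2 generator stands in for real key material (e.g. the vk's beta).
	_, _, _, g2 := bn254.Generators()

	// Negate the point; the verifier hardcodes -beta, -gamma, -delta.
	var neg bn254.G2Affine
	neg.Neg(&g2)

	// One 256-bit word per fp limb, matching the Solidity constants above.
	// Note the precompile wants A1 (imaginary) before A0 (real) at call time.
	xa0 := neg.X.A0.Bytes()
	xa1 := neg.X.A1.Bytes()
	ya0 := neg.Y.A0.Bytes()
	ya1 := neg.Y.A1.Bytes()
	fmt.Printf("NEG_X_0 = 0x%x\n", xa0[:])
	fmt.Printf("NEG_X_1 = 0x%x\n", xa1[:])
	fmt.Printf("NEG_Y_0 = 0x%x\n", ya0[:])
	fmt.Printf("NEG_Y_1 = 0x%x\n", ya1[:])
}
```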
@@ -174,15 +163,15 @@ library Verifier { assembly ("memory-safe") { let f := mload(0x40) calldatacopy(f, proofCommitment, 0x40) - mstore(add(f, 0x40), PEDERSEN_G_X_0) - mstore(add(f, 0x60), PEDERSEN_G_X_1) - mstore(add(f, 0x80), PEDERSEN_G_Y_0) - mstore(add(f, 0xA0), PEDERSEN_G_Y_1) + mstore(add(f, 0x40), PEDERSEN_G_X_1) + mstore(add(f, 0x60), PEDERSEN_G_X_0) + mstore(add(f, 0x80), PEDERSEN_G_Y_1) + mstore(add(f, 0xA0), PEDERSEN_G_Y_0) calldatacopy(add(f, 0xC0), proofCommitmentPOK, 0x40) - mstore(add(f, 0x100), PEDERSEN_G_ROOT_SIGMA_NEG_X_0) - mstore(add(f, 0x120), PEDERSEN_G_ROOT_SIGMA_NEG_X_1) - mstore(add(f, 0x140), PEDERSEN_G_ROOT_SIGMA_NEG_Y_0) - mstore(add(f, 0x160), PEDERSEN_G_ROOT_SIGMA_NEG_Y_1) + mstore(add(f, 0x100), PEDERSEN_G_ROOT_SIGMA_NEG_X_1) + mstore(add(f, 0x120), PEDERSEN_G_ROOT_SIGMA_NEG_X_0) + mstore(add(f, 0x140), PEDERSEN_G_ROOT_SIGMA_NEG_Y_1) + mstore(add(f, 0x160), PEDERSEN_G_ROOT_SIGMA_NEG_Y_0) success := staticcall(gas(), PRECOMPILE_VERIFY, f, 0x180, f, 0x20) success := and(success, mload(f)) } diff --git a/evm/tests/src/Verifier.t.sol b/evm/tests/src/Verifier.t.sol new file mode 100644 index 0000000000..5b8d2aa273 --- /dev/null +++ b/evm/tests/src/Verifier.t.sol @@ -0,0 +1,72 @@ +pragma solidity ^0.8.27; + +import "forge-std/Test.sol"; + +import "../../contracts/clients/Verifier.sol"; +import { + CometblsClient, + SignedHeader +} from "../../contracts/clients/CometblsClient.sol"; + +contract VerifierProxy { + CometblsClient client; + + constructor() { + client = new CometblsClient(); + } + + function verifyZKP( + bytes calldata zkp, + bytes31 chainId, + bytes32 trustedValidatorsHash, + SignedHeader calldata header + ) public returns (bool) { + return client.verifyZKP(zkp, chainId, trustedValidatorsHash, header); + } +} + +contract VerifierTests is Test { + VerifierProxy proxy; + + function setUp() public { + proxy = new VerifierProxy(); + } + + // {"version":{"block":11},"chain_id":"union-devnet-1337","height":3405691582,"time":"2024-11-21T17:07:31.998131342+01:00","last_block_id":{"hash":"009B31782C017EDEED99404DBC37EFC7B7B3689C8EF777E53E7A27DBE2C41DD6","parts":{"total":1,"hash":"F0146290F522303FF76B81FF839E2E2A09CA16F233AB6B5496A6DF5F9819BA45"}},"last_commit_hash":"6917693413C4013690A0D2A033EB27066F34D391239F601E0F47FEEC7B055595","data_hash":"256A8F28318D65FE2193C68D834329310186F05D2289FFE03D3AB22F6A84D170","validators_hash":"20DDFE7A0F75C65D876316091ECCD494A54A2BB324C872015F73E528D53CB9C4","next_validators_hash":"20DDFE7A0F75C65D876316091ECCD494A54A2BB324C872015F73E528D53CB9C4","consensus_hash":"7D07D08BD42C08956B2DB813CCC357FC919ABD89401878C9A5AA4F132E1829EE","app_hash":"EE7E3E58F98AC95D63CE93B270981DF3EE54CA367F8D521ED1F444717595CD36","last_results_hash":"4D266CFC3FA42C3F3DB4C5B105F54CEBCBBC3A5BFDB6AC25BA9CB032EC9A7BDB","evidence_hash":"40D6137CE7FDA8009295029D1D43EEC56F35F01597C0B257A087BB32074DB626","proposer_address":"853EFB9CB0F1E4D82E0D61683C4569C8D52CBF785A571E9DEA232D3E449FCF21"} + + function test_verifyZKP_ok() public { + assertTrue( + proxy.verifyZKP( + 
hex"03CF56142A1E03D2445A82100FEAF70C1CD95A731ED85792AFFF5792EC0BDD2108991BB56F9043A269F88903DE616A9AB99A3C5AB778E566744B060456C5616C06BCE7F1930421768C2CBD79F88D08EC3A52D7C9A867064E973064385E9C945E02951190DD7CE1662546733DD540188C96E608CA750FEF36B39E2577833634C70AE6F1A6D00DC6C21446AAF285EF35D944E8782B131300574F9A889C7E708A2325E9A78013BBE869D38B19C602DAF69644C77D177E99ED76398BCEE13C61FDBF2E178A5BA028A36033E54D1D9A0071E82E04079A5305347EBAC6D66F6EBFA48B1DA1BF9DC5A51EFA292E1DC7B85D26F18422EB386C48CA75434039764448BB96268DDC2CF683DDCA4BD83DF21C5631CF784375EEBE77EABC2DE77886BF1D48392C9C52E063B4A7131EAB9ABBA12A9F26888BC37366D41AC7D4BAC0BF6755ACB009BF9F36F380B6D0EEAABF066503A1B6E01DCC965D968D7694E01B1755E6BDD21C7A80B41682748F9B7151714BE34AA79AAD48BBB2A84525F6CDF812658C6E4F", + bytes31(uint248(uint136(bytes17("union-devnet-1337")))), + 0x20DDFE7A0F75C65D876316091ECCD494A54A2BB324C872015F73E528D53CB9C4, + SignedHeader({ + height: 3405691582, + secs: 1732205251, + nanos: 998131342, + validatorsHash: hex"20DDFE7A0F75C65D876316091ECCD494A54A2BB324C872015F73E528D53CB9C4", + nextValidatorsHash: hex"20DDFE7A0F75C65D876316091ECCD494A54A2BB324C872015F73E528D53CB9C4", + appHash: hex"EE7E3E58F98AC95D63CE93B270981DF3EE54CA367F8D521ED1F444717595CD36" + }) + ) + ); + } + + function test_verifyZKP_tamperedBlock() public { + assertFalse( + proxy.verifyZKP( + hex"03CF56142A1E03D2445A82100FEAF70C1CD95A731ED85792AFFF5792EC0BDD2108991BB56F9043A269F88903DE616A9AB99A3C5AB778E566744B060456C5616C06BCE7F1930421768C2CBD79F88D08EC3A52D7C9A867064E973064385E9C945E02951190DD7CE1662546733DD540188C96E608CA750FEF36B39E2577833634C70AE6F1A6D00DC6C21446AAF285EF35D944E8782B131300574F9A889C7E708A2325E9A78013BBE869D38B19C602DAF69644C77D177E99ED76398BCEE13C61FDBF2E178A5BA028A36033E54D1D9A0071E82E04079A5305347EBAC6D66F6EBFA48B1DA1BF9DC5A51EFA292E1DC7B85D26F18422EB386C48CA75434039764448BB96268DDC2CF683DDCA4BD83DF21C5631CF784375EEBE77EABC2DE77886BF1D48392C9C52E063B4A7131EAB9ABBA12A9F26888BC37366D41AC7D4BAC0BF6755ACB009BF9F36F380B6D0EEAABF066503A1B6E01DCC965D968D7694E01B1755E6BDD21C7A80B41682748F9B7151714BE34AA79AAD48BBB2A84525F6CDF812658C6E4F", + bytes31(uint248(uint136(bytes17("union-devnet-1337")))), + 0x20DDFE7A0F75C65D876316091ECCD494A54A2BB324C872015F73E528D53CB9C4, + SignedHeader({ + height: 3405691581, + secs: 1732205251, + nanos: 998131342, + validatorsHash: hex"20DDFE7A0F75C65D876316091ECCD494A54A2BB324C872015F73E528D53CB9C4", + nextValidatorsHash: hex"20DDFE7A0F75C65D876316091ECCD494A54A2BB324C872015F73E528D53CB9C4", + appHash: hex"EE7E3E58F98AC95D63CE93B270981DF3EE54CA367F8D521ED1F444717595CD36" + }) + ) + ); + } +} diff --git a/lib/cometbls-groth16-verifier/src/lib.rs b/lib/cometbls-groth16-verifier/src/lib.rs index bf1f69e616..928f7b59f1 100644 --- a/lib/cometbls-groth16-verifier/src/lib.rs +++ b/lib/cometbls-groth16-verifier/src/lib.rs @@ -287,8 +287,6 @@ fn verify_generic_zkp_2( #[cfg(test)] mod tests { - use core::str::FromStr; - use unionlabs::google::protobuf::timestamp::Timestamp; use super::*; @@ -298,18 +296,18 @@ mod tests { assert_eq!( verify_zkp( &ChainId::from_string("union-devnet-1337").unwrap(), - hex!("1B7EA0F1B3E574F8D50A12827CCEA43CFF858C2716AE05370CC40AE8EC521FD8").into(), + hex!("20DDFE7A0F75C65D876316091ECCD494A54A2BB324C872015F73E528D53CB9C4").into(), &LightHeader { height: 3405691582.try_into().unwrap(), time: Timestamp { - seconds: 1710783278.try_into().unwrap(), - nanos: 499600406.try_into().unwrap() + seconds: 1732205251.try_into().unwrap(), + nanos: 998131342.try_into().unwrap() }, - validators_hash: 
hex!("1B7EA0F1B3E574F8D50A12827CCEA43CFF858C2716AE05370CC40AE8EC521FD8").into(), - next_validators_hash: hex!("1B7EA0F1B3E574F8D50A12827CCEA43CFF858C2716AE05370CC40AE8EC521FD8").into(), - app_hash: hex!("3A34FC963EEFAAE9B7C0D3DFF89180D91F3E31073E654F732340CEEDD77DD25B").into(), + validators_hash: hex!("20DDFE7A0F75C65D876316091ECCD494A54A2BB324C872015F73E528D53CB9C4").into(), + next_validators_hash: hex!("20DDFE7A0F75C65D876316091ECCD494A54A2BB324C872015F73E528D53CB9C4").into(), + app_hash: hex!("EE7E3E58F98AC95D63CE93B270981DF3EE54CA367F8D521ED1F444717595CD36").into(), }, - hex!("294A48A750D5C2CF926516752FF484EEBE55FF26CF8A8A7536D98794CF062DB6214D0C9E5C6B164111927A1630889619DBBB40149D8E2D32898E7ACB765542CD0EB8A8E04CCC254C3BFDC2FCE627D59C3C05E2AC76E03977855DD889C1C9BA432FF7FF4DEFCB5286555D36D22DD073A859140508AF9B977F38EB9A604E99A5F6109D43A4AFA0AB161DA2B261DED80FBC0C36E57DE2001338941C834E3262CF751BC1BFC6EC27BB8E106BAAB976285BAC1D4AC38D1B759C8A2852D65CE239974F1275CC6765B3D174FD1122EFDE86137D19F07483FEF5244B1D74B2D9DC598AC32A5CA10E8837FBC89703F4D0D46912CF4AF82341C30C2A1F3941849CC011A56E18AD2162EEB71289B8821CC01875BC1E35E5FC1EBD9114C0B2C0F0D9A96C394001468C70A1716CA98EBE82B1E614D4D9B07292EBAD5B60E0C76FD1D58B485E7D1FB1E07F51A0C68E4CA59A399FCF0634D9585BE478E37480423681B984E96C0A1698D8FCB1DF51CAE023B045E114EED9CB233A5742D9E60E1097206EB20A5058") + hex!("03CF56142A1E03D2445A82100FEAF70C1CD95A731ED85792AFFF5792EC0BDD2108991BB56F9043A269F88903DE616A9AB99A3C5AB778E566744B060456C5616C06BCE7F1930421768C2CBD79F88D08EC3A52D7C9A867064E973064385E9C945E02951190DD7CE1662546733DD540188C96E608CA750FEF36B39E2577833634C70AE6F1A6D00DC6C21446AAF285EF35D944E8782B131300574F9A889C7E708A2325E9A78013BBE869D38B19C602DAF69644C77D177E99ED76398BCEE13C61FDBF2E178A5BA028A36033E54D1D9A0071E82E04079A5305347EBAC6D66F6EBFA48B1DA1BF9DC5A51EFA292E1DC7B85D26F18422EB386C48CA75434039764448BB96268DDC2CF683DDCA4BD83DF21C5631CF784375EEBE77EABC2DE77886BF1D48392C9C52E063B4A7131EAB9ABBA12A9F26888BC37366D41AC7D4BAC0BF6755ACB009BF9F36F380B6D0EEAABF066503A1B6E01DCC965D968D7694E01B1755E6BDD21C7A80B41682748F9B7151714BE34AA79AAD48BBB2A84525F6CDF812658C6E4F") ), Ok(()) ); @@ -320,120 +318,26 @@ mod tests { ZKP::try_from(hex!("1c9bc15a0c4541aff1d12780d6cf4ae2bdc6e3afafceae9d4fa36209fa323b68002e9c77c223d830e5df6a80cdd683f0986353933ee3179970fccc5d893219d30726f3b8c0dbe630b815b01b5557228a0dfeb0e0435bb0d15d1ccff7f6133fc110937d9fceee2f9052468c198fafeca89d524142a0efa9dc4df445853ce617302059018fef03dc34456ad201d2a5420a7d1c8fac57cb48cbe6709ac4da27d1eb250f73eab007d26cbff41ceb4564ab1cdfa83e9ee88be4f816dc841bbf2e90c80186ad9437fce7655c71b54addae1ccea429da3edba3232d073cb7e89ff2d27218556f1af0c446962ace932f637279dd0ad3ef1501fb6da39d5f68282f54bcf6094999672f3d8cbbf0409aef1048175ffff50b03a5154016d307a2ef425ffee509cd447b22ce6331c7a3473b2c6da1f9d550e8c3ab19bde65e699e07f4f2886c03ec4ff2faa0e342de7ac5daf32025acd6070c19ed8b007c121db0d955472c7d2e38d5a943d15bc902613029e4baa8c26034ff280e3a4d5468fcd6745afe53b5").as_slice()).unwrap(); } - #[test] - fn test_err_969001_969006() { - assert_eq!( - verify_zkp( - &ChainId::from_string("union-testnet-8").unwrap(), - hex!("01a84dca649aa2df8de2f65a84c9092bbd5296b4bc54d818f844b28573d8e0be").into(), - &LightHeader { - height: 969006.try_into().unwrap(), - time: Timestamp::from_str("2024-06-18T13:21:28.026113925Z").unwrap(), - validators_hash: hex!("01a84dca649aa2df8de2f65a84c9092bbd5296b4bc54d818f844b28573d8e0be").into(), - next_validators_hash: hex!("01a84dca649aa2df8de2f65a84c9092bbd5296b4bc54d818f844b28573d8e0be").into(), - app_hash: 
hex!("87822b2b2affeed1c7a67b15f73d9b4ad128d0984a7f0dca910d033092dac828").into(), - }, - hex!("07c6767f0ec80904244a735ed6b0dba033fdcfaf92697438c560673c255331550201e520aa2202d20f2f4a264f03fde7faf072f053ff0c11630fc951e5956e9b0ac9b8301e712a57be7bd624659d937c3c42880b629d910743297fa444f626af2823ac2e190c3a8a78dbb2e8cc8c431c8536ce8d8fd8a2c192591e1559b5cbea04f330ec60397f60363457c00884d797cd7ff3a0e58fa27d2fed6eef6840e85a270f44f1ecdac406385b6b1ba933b21b5e5c390e09230d6710b30940434a39ab13db654f0bad779cce75d84f5cd302aa0feb83de879bd2ac96830bcacba82e9f0d00f4b0611ed108fe7e63217c3b058dd33d4a4de0307506eb20799f3c9db9f52fd45d1875ab9a5cdeecd0757d7d20a3af34f3182c6b53adc6076d914d2281c20cf63a90841b4f9aefc896a4e8defc01e76509a9b779b7e580a19143ff9af2591691b825825167febe9b5d762a17df13a0a4c547efa12c10b33d67886f505836213e3df47da08b551e92cb3db933ab06fd7dfbe84950bfba30e4f481ae35c3bc") - ), - Err(Error::InvalidProof) - ); - } - - #[test] - fn test_ok_969001_969002() { - assert_eq!( - verify_zkp( - &ChainId::from_string("union-testnet-8").unwrap(), - hex!("01a84dca649aa2df8de2f65a84c9092bbd5296b4bc54d818f844b28573d8e0be").into(), - &LightHeader { - height: 969002.try_into().unwrap(), - time: Timestamp::from_str("2024-06-18T13:21:02.868708953Z").unwrap(), - validators_hash: hex!("01a84dca649aa2df8de2f65a84c9092bbd5296b4bc54d818f844b28573d8e0be").into(), - next_validators_hash: hex!("01a84dca649aa2df8de2f65a84c9092bbd5296b4bc54d818f844b28573d8e0be").into(), - app_hash: hex!("333f81c038816f109413eac1dc1cb8cef8facca1e9a49f21763f5dc84a375e14").into(), - }, - hex!("02344d05cbb4f42548eadc621c46a3ae37f2ce23c12df83d1b490414bc20749a1fd5d4bd3b62a5b2cfae9f29686bfe1bc7a7c4bde72df168bdc1c1b0a3da1deb2a3f92896f5c37b4e3269aa84b47a67cad8b072350f794a15bac37608a5d549315e3850f18ddfa58ff9cfd5b2d133c3ac08d9f76e64611e6df4b6ba3d752e6f9054ec040028d1fd50d0f39eb60cb16326ba8876f5a47eea0c8b9c61461612bd518532a44ed88602a6e81177d08018fefadb2fedeac17ec26dae578532efb8a7905e1aca9429d9b8bfd7fb04e419c034258bc2d367e1c1a63936c67aca6767d5c1ba16ebb1dfccd919fa28d12255e6f9fcb98964682ca733bc591a25bd5a7993226daae60fea7d697b714916f9a6093f40a7a0e2a2a40b41b8741a98d5337b91f21a20866c16d94855c50593175e6d61481d56d08569ca55f8aa9f73277b3782a179b1bb01a269ae4eeacf273379099c641503f20830d6ef399867024b4f3c191120c8f0c1091387705c314ee6c5d8d23bf200649fe7b8dc2857db55f7bc5968c") - ), - Ok(()) - ); - } - - #[test] - fn test_err_969001_969002() { - assert_eq!( - verify_zkp( - &ChainId::from_string("union-testnet-8").unwrap(), - hex!("01a84dca649aa2df8de2f65a84c9092bbd5296b4bc54d818f844b28573d8e0be").into(), - &LightHeader { - height: 969002.try_into().unwrap(), - time: Timestamp::from_str("2024-06-18T13:21:02.868708953Z").unwrap(), - validators_hash: hex!("01a84dca649aa2df8de2f65a84c9092bbd5296b4bc54d818f844b28573d8e0be").into(), - next_validators_hash: hex!("01a84dca649aa2df8de2f65a84c9092bbd5296b4bc54d818f844b28573d8e0be").into(), - app_hash: hex!("333f81c038816f109413eac1dc1cb8cef8facca1e9a49f21763f5dc84a375e14").into(), - }, - 
hex!("13b9571349f3624ca8027ceb742ac0582a3d27847b794f567c0e35dc551a8e3e1c791e8efdd146de4319a39089755754a3a3b08a4ab1d343576ed085b5c924f825f284dad24cddb3614e663b3b407af8d3ec55edad709dace9266996aa91466126eb14026de607692bb70f8f6750c6245a9491bba466245f49ee08fbdc57ed12096bcc416908750ce28317609680ca01b5731237d600162f790d0c7085a6b721022f966ae2f087062644fcd20024ac0641ca732388cf360ce8cc61ac0480c7cc26a09e5a8c2e1b728fd0a37e5532fcc44dcd389314a80e0fb191d148740e436a1e4b916c9862c7ccf9073bfcb3b5dd09a3903f619e79a7c04f89cc42619fe35a074ad1bbd03821f2622c67a1ab95486896592703a846dda6e6e3c2b6213aa4791fc58b6834c89cbea52b43c31ca8c4a44378f38d06d2baa04672f7006651c2431ed56b4cc18b0b0082d919813a0f0433942b8691ec70c6305705faef970ceef00ca817ffdf6c5bfa0eaf33951e6695bc537f8345cc8f03d9f234d44dec3ff8b4") - ), - Err(Error::InvalidProof) - ); - } - - #[test] - fn test_ok_968996_969001() { - assert_eq!( - verify_zkp( - &ChainId::from_string("union-testnet-8").unwrap(), - hex!("1deda64b1cc1319718f168b5aa8ed904b7d5b0ab932acdf6deae0ad9bd565a53").into(), - &LightHeader { - height: 969001.try_into().unwrap(), - time: Timestamp::from_str("2024-06-18T13:20:56.784169335Z").unwrap(), - validators_hash: hex!("1deda64b1cc1319718f168b5aa8ed904b7d5b0ab932acdf6deae0ad9bd565a53").into(), - next_validators_hash: hex!("01a84dca649aa2df8de2f65a84c9092bbd5296b4bc54d818f844b28573d8e0be").into(), - app_hash: hex!("1818da4a8b1c430557a3018adc2bf9a06e56c3b530e5cce7709232e0f03bd9ab").into(), - }, - hex!("086541c22b53d509d8369492d32683188f0b379950ea3c5da84aca2b331d911c163bc6e30c7610b6903832184d284399d140b316134202cfa53b695ed17db64e271a8ab10b015cc4562730180cc7af7d7509b64de00b5864ccef3ab6b5c187da1511c4af3392d5e4465cebeb3c92cad546ab6b5b7de08923ae756d4a49d972920ed4f1b33bde26016e753fe00e9ee8b37873e4df4696cce84baa34e444d6f9dc0021b25644dc22fd9414197dd9e094180eac33a5e6fc6d2e04e12df5baaae92815173080dedcafeb2789245e75f1c38ddaa4611273fa5eed1cb77f75aabace770186385a3a373190a9091147de95b3f11050152bc4376573ed454cfd703f1e7106edb33921b12717708fe03861534c812a5ea6c7e0ec428c02292f1e7dafb45901e8b29e0b18ba7cbfad2a7aef7db558f3eb49a943a379a03b1b976df912a0c329b66224da89f94e29c49b3c5070b86b23d9d23424246235088ea858a21340cc2d1120ac3dc25febd188abf16774ea49564f34bc769b6abd9295128c391dad18") - ), - Ok(()) - ); - } - #[test] fn test_tampered_block() { assert_eq!( verify_zkp( &ChainId::from_string("union-devnet-1337").unwrap(), - hex!("1B7EA0F1B3E574F8D50A12827CCEA43CFF858C2716AE05370CC40AE8EC521FD8").into(), + hex!("20DDFE7A0F75C65D876316091ECCD494A54A2BB324C872015F73E528D53CB9C4").into(), &LightHeader { - height: 3405691583.try_into().unwrap(), + height: 3405691582.try_into().unwrap(), time: Timestamp { - seconds: 1710783278.try_into().unwrap(), - nanos: 499600406.try_into().unwrap() + // tampered seconds + seconds: 1732205252.try_into().unwrap(), + nanos: 998131342.try_into().unwrap() }, - validators_hash: hex!("1B7EA0F1B3E574F8D50A12827CCEA43CFF858C2716AE05370CC40AE8EC521FD8").into(), - next_validators_hash: hex!("1B7EA0F1B3E574F8D50A12827CCEA43CFF858C2716AE05370CC40AE8EC521FD8").into(), - app_hash: hex!("3A34FC963EEFAAE9B7C0D3DFF89180D91F3E31073E654F732340CEEDD77DD25B").into(), + validators_hash: hex!("20DDFE7A0F75C65D876316091ECCD494A54A2BB324C872015F73E528D53CB9C4").into(), + next_validators_hash: hex!("20DDFE7A0F75C65D876316091ECCD494A54A2BB324C872015F73E528D53CB9C4").into(), + app_hash: hex!("EE7E3E58F98AC95D63CE93B270981DF3EE54CA367F8D521ED1F444717595CD36").into(), }, - 
hex!("294A48A750D5C2CF926516752FF484EEBE55FF26CF8A8A7536D98794CF062DB6214D0C9E5C6B164111927A1630889619DBBB40149D8E2D32898E7ACB765542CD0EB8A8E04CCC254C3BFDC2FCE627D59C3C05E2AC76E03977855DD889C1C9BA432FF7FF4DEFCB5286555D36D22DD073A859140508AF9B977F38EB9A604E99A5F6109D43A4AFA0AB161DA2B261DED80FBC0C36E57DE2001338941C834E3262CF751BC1BFC6EC27BB8E106BAAB976285BAC1D4AC38D1B759C8A2852D65CE239974F1275CC6765B3D174FD1122EFDE86137D19F07483FEF5244B1D74B2D9DC598AC32A5CA10E8837FBC89703F4D0D46912CF4AF82341C30C2A1F3941849CC011A56E18AD2162EEB71289B8821CC01875BC1E35E5FC1EBD9114C0B2C0F0D9A96C394001468C70A1716CA98EBE82B1E614D4D9B07292EBAD5B60E0C76FD1D58B485E7D1FB1E07F51A0C68E4CA59A399FCF0634D9585BE478E37480423681B984E96C0A1698D8FCB1DF51CAE023B045E114EED9CB233A5742D9E60E1097206EB20A5058") + hex!("03CF56142A1E03D2445A82100FEAF70C1CD95A731ED85792AFFF5792EC0BDD2108991BB56F9043A269F88903DE616A9AB99A3C5AB778E566744B060456C5616C06BCE7F1930421768C2CBD79F88D08EC3A52D7C9A867064E973064385E9C945E02951190DD7CE1662546733DD540188C96E608CA750FEF36B39E2577833634C70AE6F1A6D00DC6C21446AAF285EF35D944E8782B131300574F9A889C7E708A2325E9A78013BBE869D38B19C602DAF69644C77D177E99ED76398BCEE13C61FDBF2E178A5BA028A36033E54D1D9A0071E82E04079A5305347EBAC6D66F6EBFA48B1DA1BF9DC5A51EFA292E1DC7B85D26F18422EB386C48CA75434039764448BB96268DDC2CF683DDCA4BD83DF21C5631CF784375EEBE77EABC2DE77886BF1D48392C9C52E063B4A7131EAB9ABBA12A9F26888BC37366D41AC7D4BAC0BF6755ACB009BF9F36F380B6D0EEAABF066503A1B6E01DCC965D968D7694E01B1755E6BDD21C7A80B41682748F9B7151714BE34AA79AAD48BBB2A84525F6CDF812658C6E4F") ), Err(Error::InvalidProof) ); } - - #[test] - fn invalid_vk() { - assert_eq!( - verify_zkp( - &ChainId::from_string("union-devnet-1").unwrap(), - hex!("2f4975ab7e75a677f43efebf53e0ec05460d2cf55506ad08d6b05254f96a500d").into(), - &LightHeader { - height: 905.try_into().unwrap(), - time: Timestamp::from_str("2024-09-23T20:48:00.739712762Z").unwrap(), - validators_hash: hex!("2f4975ab7e75a677f43efebf53e0ec05460d2cf55506ad08d6b05254f96a500d").into(), - next_validators_hash: hex!("2f4975ab7e75a677f43efebf53e0ec05460d2cf55506ad08d6b05254f96a500d").into(), - app_hash: hex!("eddaa32275fbbf44c6a21e32b59b097bed5374be715eab22f093399a9700a1e4").into(), - }, - hex!("1d530ee22263bc9e7008e3bd982c966b226d1018814e5b4d07597b4d35aea56b2ef63fdddb29fe06ef99cf645201a12e8b98b9ff7a7cec0819f696e17413294b0c638c4f946f4d4af4da8dd0815de2f5af8fd8612d1c98e9846846ea1ec78aac046df852b916de3fd8b3332bc3d23073e11b252b023711c18b19952507428da12e2baf74a03ca7bdc37edd0123e47f0a3a029f6da43a32dc6830e126b4ddf8712f2a0e021ac0f6414f171156f6a9019d6ea53cd30762c1e60d6a0e029778586c0cc1e2e13f7c45347a2a3ba82e43eccdc468fc8a05ba0a95fef26777872c27e42317f2c76c0a5f41e63088b8b394c5a7a3066809952f489718142107bd7b24572074be60bdb7611f1c916061a5ab3dc75a62b953a19650d839027a885801252a1e1cd84f8ba570047c2f1d220f26f7b11e69b7519f092d31ff954e92fd012a931ea2b4d20942376502043ba98e69f351f60b12e5a7ff180e5a1a966697d80696066694fa833420f5db7e3ae1b91dbce06fe2ffa1ea0a503af6a93f61ad7aa4f4") - ), - Err(Error::InvalidPok) - ); - } } diff --git a/lib/cometbls-groth16-verifier/verifying_key.bin b/lib/cometbls-groth16-verifier/verifying_key.bin index 40a313493731857ad7a1a79266166b57c680bd55..81a73b87985e7bb22f2d5542968b6abc4d3fc771 100644 GIT binary patch literal 524 zcmaD7qq^5nrf%MF`&#g$#k@5j5>}pSUeQ_~6ZUPOhdGzjj}qG;^OyLG+~a3Vkm&d(`J^TmDy{-le(l zwu;K0gWtG9iZbG_va@b#n5OoyK#1$q7srRji}uDPUUHe)H+k|TUJt9Hg#FtMf5)nR zT;*wOy7IZu^e3&`*L@Z`B*Ad+y^y?InNt=+T3n1vWxBJ{mG5!4?z-LE8*_8gx3`D4 
z@TaLf>8g@-?vC$gF{!*VK;Q9KVw?6^wM(sUXv%A^4?h*nZ>25ow)kwvq^3G9G#7Ag)Da%7#Nt> z$97qWf0DIxH7!2LV{@ds{*&pi>wAA+oxL~f>BV4myH|}n`($QTUyxGhwBlHjlfmL? z*W4q;|Lw$q%=b~-vS&@7GVPoQ?~gf;W8$9w_RRbH|5jYA_0<0p#U4GqKW%f?cAy=M NK+HgCSRAL&uK?1`25tZV literal 524 zcmV+n0`vWeX9p<(%zCynSMlA%8EkwX?WF}k*aB^TZvkh)7^s=qBi`{}bt6wIZBw;o zqEduG>3~qPhy!Onk$Jc@{5hnVHDA(jw_qJzh(L>eo>S6bQ7$2X%_aaB+>{?L$!%p7 zKLbR@zb%T18<(5L9XY9wu@P1L)pQv14xkw7geY{N=WFR2F-YS0?_@$Ev(8(SnsdD_ z3P6ERf0C!1NIK6V7Q5iWiQsuQwAp1lky08Y#c;?7`bOW;F2q8fakd$m{QD$SS=Ks9 zBe?j)h|%V+e4|)~dx+}{-u{VfA|DCqUQ8I#;748LRGTG7VI6}3;jjM?N2c|nh{-WZ z@VWd2)C0)L7$BpCrz{xOq?Ro2L8Gga+NbR(n;w Date: Thu, 21 Nov 2024 18:31:03 +0100 Subject: [PATCH 52/52] feat(galois): update checkpoint circuit download --- galoisd/galoisd.nix | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/galoisd/galoisd.nix b/galoisd/galoisd.nix index f0023ee977..a172e8e7c3 100644 --- a/galoisd/galoisd.nix +++ b/galoisd/galoisd.nix @@ -111,8 +111,9 @@ download-circuit = let + circuit-name = "circuit-eb62b71bc60668da0e602eaa3d6aceec183fb5ca-26eae4b9-bd55-4ce7-8446-ad829ab7b3ed.zip"; files = pkgs.writeText "files.txt" '' - /circuit.zip + /${circuit-name} ''; in mkCi false ( @@ -129,8 +130,8 @@ exit 1 fi rclone --progress --no-traverse --http-url "https://circuit.cryptware.io" copy :http:/ "$1" --files-from=${files} - unzip "$1"/circuit.zip - rm "$1"/circuit.zip + unzip "$1"/${circuit-name} + rm "$1"/${circuit-name} ''; } ); @@ -169,8 +170,8 @@ ''; unpacked-circuit = unpackCircuit ( pkgs.fetchurl { - url = "https://circuit.cryptware.io/testnet.zip"; - hash = "sha256-ImDwglgLdRjd9pxg5B7w2KNSPm1+kTu2k20yw8Rjtzc="; + url = "https://circuit.cryptware.io/circuit-eb62b71bc60668da0e602eaa3d6aceec183fb5ca-26eae4b9-bd55-4ce7-8446-ad829ab7b3ed.zip"; + hash = "sha256-4cExiem1lKrQlDIsrArfQPTuTvpABzi/rNra17R/md4="; } ); in
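The circuit archive is now pinned twice: the checkpoint commit hash is embedded in the file name, and the archive content is fixed by the SRI `sha256-...` hash passed to `fetchurl`. An SRI sha256 is simply the base64 encoding of the raw sha256 digest, so a manual download can be sanity-checked outside Nix. A small Go sketch, with the file name and hash copied from the diff above and everything else an assumption for illustration:

```go
package main

import (
	"crypto/sha256"
	"encoding/base64"
	"fmt"
	"io"
	"os"
)

func main() {
	// Pinned values, copied from the galoisd.nix diff above.
	const name = "circuit-eb62b71bc60668da0e602eaa3d6aceec183fb5ca-26eae4b9-bd55-4ce7-8446-ad829ab7b3ed.zip"
	const sri = "sha256-4cExiem1lKrQlDIsrArfQPTuTvpABzi/rNra17R/md4="

	f, err := os.Open(name)
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// An SRI sha256 is "sha256-" + base64(raw digest).
	h := sha256.New()
	if _, err := io.Copy(h, f); err != nil {
		panic(err)
	}
	got := "sha256-" + base64.StdEncoding.EncodeToString(h.Sum(nil))

	if got != sri {
		fmt.Printf("hash mismatch:\n  got  %s\n  want %s\n", got, sri)
		os.Exit(1)
	}
	fmt.Println("circuit archive matches the pinned hash")
}
```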