diff --git a/.gitmodules b/.gitmodules index 5cc2bfab6..f45d463b2 100644 --- a/.gitmodules +++ b/.gitmodules @@ -231,3 +231,16 @@ url = https://github.com/vacp2p/nim-ngtcp2.git ignore = untracked branch = master +[submodule "vendor/nim-groth16"] + path = vendor/nim-groth16 + url = https://github.com/codex-storage/nim-groth16.git + ignore = untracked + branch = master +[submodule "vendor/nim-goldilocks-hash"] + path = vendor/nim-goldilocks-hash + url = https://github.com/codex-storage/nim-goldilocks-hash.git + ignore = untracked + branch = master +[submodule "vendor/circom-witnessgen"] + path = vendor/circom-witnessgen + url = https://github.com/codex-storage/circom-witnessgen.git diff --git a/codex.nim b/codex.nim index 7749bdee2..b534a0b34 100644 --- a/codex.nim +++ b/codex.nim @@ -10,7 +10,6 @@ import pkg/chronos import pkg/questionable import pkg/confutils -import pkg/confutils/defs import pkg/confutils/std/net import pkg/confutils/toml/defs as confTomlDefs import pkg/confutils/toml/std/net as confTomlNet diff --git a/codex/codex.nim b/codex/codex.nim index 813574641..50120cd65 100644 --- a/codex/codex.nim +++ b/codex/codex.nim @@ -214,22 +214,19 @@ proc new*( .withTcpTransport({ServerFlags.ReuseAddr}) .build() - var - cache: CacheStore = nil - taskpool: Taskpool - - try: - if config.numThreads == ThreadCount(0): - taskpool = Taskpool.new(numThreads = min(countProcessors(), 16)) + let numThreads = + if int(config.numThreads) == 0: + countProcessors() else: - taskpool = Taskpool.new(numThreads = int(config.numThreads)) - info "Threadpool started", numThreads = taskpool.numThreads - except CatchableError as exc: - raiseAssert("Failure in taskpool initialization:" & exc.msg) + int(config.numThreads) + + var tp = + try: + Taskpool.new(numThreads) + except CatchableError as exc: + raiseAssert("Failure in tp initialization:" & exc.msg) - if config.cacheSize > 0'nb: - cache = CacheStore.new(cacheSize = config.cacheSize) - ## Is unused? + info "Threadpool started", numThreads = tp.numThreads let discoveryDir = config.dataDir / CodexDhtNamespace @@ -305,9 +302,8 @@ proc new*( store = NetworkStore.new(engine, repoStore) prover = if config.prover: - let backend = - config.initializeBackend().expect("Unable to create prover backend.") - some Prover.new(store, backend, config.numProofSamples) + let prover = config.initializeProver(tp).expect("Unable to create prover.") + some prover else: none Prover @@ -317,7 +313,7 @@ proc new*( engine = engine, discovery = discovery, prover = prover, - taskPool = taskpool, + taskPool = tp, ) restServer = RestServerRef @@ -337,5 +333,5 @@ proc new*( restServer: restServer, repoStore: repoStore, maintenance: maintenance, - taskpool: taskpool, + taskpool: tp, ) diff --git a/codex/conf.nim b/codex/conf.nim index 9f899e60a..fe54b68d3 100644 --- a/codex/conf.nim +++ b/codex/conf.nim @@ -18,6 +18,7 @@ import std/terminal # Is not used in tests import std/options import std/strutils import std/typetraits +import std/cpuinfo import pkg/chronos import pkg/chronicles/helpers @@ -54,9 +55,7 @@ export DefaultQuotaBytes, DefaultBlockTtl, DefaultBlockInterval, DefaultNumBlocksPerInterval, DefaultRequestCacheSize, DefaultMaxPriorityFeePerGas -type ThreadCount* = distinct Natural - -proc `==`*(a, b: ThreadCount): bool {.borrow.} +type ThreadCount* = range[0 .. 
256] proc defaultDataDir*(): string = let dataDir = @@ -76,7 +75,6 @@ const DefaultDataDir* = defaultDataDir() DefaultCircuitDir* = defaultDataDir() / "circuits" - DefaultThreadCount* = ThreadCount(0) type StartUpCmd* {.pure.} = enum @@ -87,6 +85,13 @@ type noCmd prover + ProverBackendCmd* {.pure.} = enum + nimgroth16 + circomcompat + + Curves* {.pure.} = enum + bn128 = "bn128" + LogKind* {.pure.} = enum Auto = "auto" Colors = "colors" @@ -193,7 +198,8 @@ type numThreads* {. desc: "Number of worker threads (\"0\" = use as many threads as there are CPU cores available)", - defaultValue: DefaultThreadCount, + defaultValueDesc: "0", + defaultValue: ThreadCount(0), name: "num-threads" .}: ThreadCount @@ -389,6 +395,22 @@ type name: "circuit-dir" .}: OutDir + proverBackend* {. + desc: + "The backend to use for the prover. " & + "Must be one of: nimgroth16, circomcompat", + defaultValue: ProverBackendCmd.nimgroth16, + defaultValueDesc: "nimgroth16", + name: "prover-backend" + .}: ProverBackendCmd + + curve* {. + desc: "The curve to use for the storage circuit", + defaultValue: Curves.bn128, + defaultValueDesc: $Curves.bn128, + name: "curve" + .}: Curves + circomR1cs* {. desc: "The r1cs file for the storage circuit", defaultValue: $DefaultCircuitDir / "proof_main.r1cs", @@ -396,8 +418,17 @@ type name: "circom-r1cs" .}: InputFile + circomGraph* {. + desc: + "The graph file for the storage circuit (only used with nimgroth16 backend)", + defaultValue: $DefaultCircuitDir / "proof_main.bin", + defaultValueDesc: $DefaultDataDir & "/circuits/proof_main.bin", + name: "circom-graph" + .}: InputFile + circomWasm* {. - desc: "The wasm file for the storage circuit", + desc: + "The wasm file for the storage circuit (only used with circomcompat backend)", defaultValue: $DefaultCircuitDir / "proof_main.wasm", defaultValueDesc: $DefaultDataDir & "/circuits/proof_main.wasm", name: "circom-wasm" @@ -410,11 +441,11 @@ type name: "circom-zkey" .}: InputFile - # TODO: should probably be hidden and behind a feature flag circomNoZkey* {. desc: "Ignore the zkey file - use only for testing!", defaultValue: false, - name: "circom-no-zkey" + name: "circom-no-zkey", + hidden .}: bool numProofSamples* {. 
@@ -505,7 +536,7 @@ const proc parseCmdArg*( T: typedesc[MultiAddress], input: string -): MultiAddress {.upraises: [ValueError].} = +): MultiAddress {.raises: [ValueError].} = var ma: MultiAddress try: let res = MultiAddress.init(input) @@ -519,12 +550,8 @@ proc parseCmdArg*( quit QuitFailure ma -proc parseCmdArg*(T: type ThreadCount, input: string): T {.upraises: [ValueError].} = - let count = parseInt(input) - if count != 0 and count < 2: - warn "Invalid number of threads", input = input - quit QuitFailure - ThreadCount(count) +proc parseCmdArg*(T: type ThreadCount, val: string): T {.raises: [ValueError].} = + ThreadCount(val.parseUInt()) proc parseCmdArg*(T: type SignedPeerRecord, uri: string): T = var res: SignedPeerRecord @@ -586,7 +613,7 @@ proc parseCmdArg*(T: type Duration, val: string): T = proc readValue*( r: var TomlReader, val: var EthAddress -) {.upraises: [SerializationError, IOError].} = +) {.raises: [SerializationError, IOError].} = val = EthAddress.init(r.readValue(string)).get() proc readValue*(r: var TomlReader, val: var SignedPeerRecord) = @@ -614,7 +641,7 @@ proc readValue*(r: var TomlReader, val: var MultiAddress) = proc readValue*( r: var TomlReader, val: var NBytes -) {.upraises: [SerializationError, IOError].} = +) {.raises: [SerializationError, IOError].} = var value = 0'i64 var str = r.readValue(string) let count = parseSize(str, value, alwaysBin = true) @@ -625,7 +652,7 @@ proc readValue*( proc readValue*( r: var TomlReader, val: var ThreadCount -) {.upraises: [SerializationError, IOError].} = +) {.raises: [SerializationError, IOError].} = var str = r.readValue(string) try: val = parseCmdArg(ThreadCount, str) @@ -634,7 +661,7 @@ proc readValue*( proc readValue*( r: var TomlReader, val: var Duration -) {.upraises: [SerializationError, IOError].} = +) {.raises: [SerializationError, IOError].} = var str = r.readValue(string) var dur: Duration let count = parseDuration(str, dur) @@ -701,7 +728,7 @@ proc stripAnsi*(v: string): string = res -proc updateLogLevel*(logLevel: string) {.upraises: [ValueError].} = +proc updateLogLevel*(logLevel: string) {.raises: [ValueError].} = # Updates log levels (without clearing old ones) let directives = logLevel.split(";") try: diff --git a/codex/erasure/erasure.nim b/codex/erasure/erasure.nim index 95516500f..db6450fea 100644 --- a/codex/erasure/erasure.nim +++ b/codex/erasure/erasure.nim @@ -362,7 +362,7 @@ proc asyncEncode*( proc encodeData( self: Erasure, manifest: Manifest, params: EncodingParams -): Future[?!Manifest] {.async.} = +): Future[?!Manifest] {.async: (raises: [CancelledError]).} = ## Encode blocks pointed to by the protected manifest ## ## `manifest` - the manifest to encode @@ -461,7 +461,7 @@ proc encode*( blocks: Natural, parity: Natural, strategy = SteppedStrategy, -): Future[?!Manifest] {.async.} = +): Future[?!Manifest] {.async: (raises: [CancelledError]).} = ## Encode a manifest into one that is erasure protected. 
## ## `manifest` - the original manifest to be encoded @@ -554,7 +554,7 @@ proc asyncDecode*( proc decodeInternal( self: Erasure, encoded: Manifest -): Future[?!(ref seq[Cid], seq[Natural])] {.async.} = +): Future[?!(ref seq[Cid], seq[Natural])] {.async: (raises: [CancelledError]).} = logScope: steps = encoded.steps rounded_blocks = encoded.rounded @@ -638,7 +638,9 @@ proc decodeInternal( return (cids, recoveredIndices).success -proc decode*(self: Erasure, encoded: Manifest): Future[?!Manifest] {.async.} = +proc decode*( + self: Erasure, encoded: Manifest +): Future[?!Manifest] {.async: (raises: [CancelledError]).} = ## Decode a protected manifest into it's original ## manifest ## diff --git a/codex/nat.nim b/codex/nat.nim index d022dad6c..f11f16ea5 100644 --- a/codex/nat.nim +++ b/codex/nat.nim @@ -423,10 +423,12 @@ proc nattedAddress*( it.remapAddr(ip = newIP, port = tcp) else: # NAT mapping failed - use original address - echo "Failed to get external IP, using original address", it + # TODO: `trace` breaks in the mapIt template + # trace "Failed to get external IP, using original address", it discoveryAddrs.add(getMultiAddrWithIPAndUDPPort(ipPart.get, udpPort)) it else: # Invalid multiaddress format - return as is it + (newAddrs, discoveryAddrs) diff --git a/codex/node.nim b/codex/node.nim index e010b0854..d02a39def 100644 --- a/codex/node.nim +++ b/codex/node.nim @@ -46,6 +46,7 @@ import ./errors import ./logutils import ./utils/asynciter import ./utils/trackedfutures +import ./utils/poseidon2digest export logutils @@ -63,17 +64,17 @@ type ] CodexNode* = object - switch: Switch - networkId: PeerId - networkStore: NetworkStore - engine: BlockExcEngine - prover: ?Prover - discovery: Discovery - contracts*: Contracts - clock*: Clock - storage*: Contracts - taskpool: Taskpool - trackedFutures: TrackedFutures + switch: Switch # the libp2p network switch + networkId: PeerId # the peer id of the node + networkStore: NetworkStore # the network store + engine: BlockExcEngine # the block exchange engine + prover: ?Prover # the prover + discovery: Discovery # the discovery service + contracts*: Contracts # the contracts + clock*: Clock # the clock + storage*: Contracts # the storage + taskpool: Taskpool # the taskpool + trackedFutures: TrackedFutures # the tracked futures CodexNodeRef* = ref CodexNode @@ -96,18 +97,12 @@ func discovery*(self: CodexNodeRef): Discovery = proc storeManifest*( self: CodexNodeRef, manifest: Manifest -): Future[?!bt.Block] {.async.} = - without encodedVerifiable =? manifest.encode(), err: - trace "Unable to encode manifest" - return failure(err) - - without blk =? bt.Block.new(data = encodedVerifiable, codec = ManifestCodec), error: - trace "Unable to create block from manifest" - return failure(error) +): Future[?!bt.Block] {.async: (raises: [CancelledError]).} = + let + encodedVerifiable = ?manifest.encode() + blk = ?bt.Block.new(data = encodedVerifiable, codec = ManifestCodec) - if err =? (await self.networkStore.putBlock(blk)).errorOption: - trace "Unable to store manifest block", cid = blk.cid, err = err.msg - return failure(err) + ?await self.networkStore.putBlock(blk) success blk @@ -338,7 +333,9 @@ proc retrieve*( await self.streamEntireDataset(manifest, cid) -proc deleteSingleBlock(self: CodexNodeRef, cid: Cid): Future[?!void] {.async.} = +proc deleteSingleBlock( + self: CodexNodeRef, cid: Cid +): Future[?!void] {.async: (raises: [CancelledError]).} = if err =? 
(await self.networkStore.delBlock(cid)).errorOption: error "Error deleting block", cid, err = err.msg return failure(err) @@ -346,7 +343,9 @@ proc deleteSingleBlock(self: CodexNodeRef, cid: Cid): Future[?!void] {.async.} = trace "Deleted block", cid return success() -proc deleteEntireDataset(self: CodexNodeRef, cid: Cid): Future[?!void] {.async.} = +proc deleteEntireDataset( + self: CodexNodeRef, cid: Cid +): Future[?!void] {.async: (raises: [CancelledError]).} = # Deletion is a strictly local operation var store = self.networkStore.localStore @@ -403,7 +402,7 @@ proc store*( filename: ?string = string.none, mimetype: ?string = string.none, blockSize = DefaultBlockSize, -): Future[?!Cid] {.async.} = +): Future[?!Cid] {.async: (raises: [CancelledError]).} = ## Save stream contents as dataset with given blockSize ## to nodes's BlockStore, and return Cid of its manifest ## @@ -478,7 +477,9 @@ proc store*( return manifestBlk.cid.success -proc iterateManifests*(self: CodexNodeRef, onManifest: OnManifest) {.async.} = +proc iterateManifests*( + self: CodexNodeRef, onManifest: OnManifest +) {.async: (raises: [CancelledError]).} = without cidsIter =? await self.networkStore.listBlocks(BlockType.Manifest): warn "Failed to listBlocks" return @@ -505,7 +506,7 @@ proc setupRequest( pricePerBytePerSecond: UInt256, collateralPerByte: UInt256, expiry: uint64, -): Future[?!StorageRequest] {.async.} = +): Future[?!StorageRequest] {.async: (raises: [CancelledError]).} = ## Setup slots for a given dataset ## @@ -527,32 +528,20 @@ proc setupRequest( trace "Setting up slots" - without manifest =? await self.fetchManifest(cid), error: - trace "Unable to fetch manifest for cid" - return failure error - - # Erasure code the dataset according to provided parameters - let erasure = Erasure.new( - self.networkStore.localStore, leoEncoderProvider, leoDecoderProvider, self.taskpool - ) - - without encoded =? (await erasure.encode(manifest, ecK, ecM)), error: - trace "Unable to erasure code dataset" - return failure(error) - - without builder =? Poseidon2Builder.new(self.networkStore.localStore, encoded), err: - trace "Unable to create slot builder" - return failure(err) + let + manifest = ?await self.fetchManifest(cid) - without verifiable =? (await builder.buildManifest()), err: - trace "Unable to build verifiable manifest" - return failure(err) + # Erasure code the dataset according to provided parameters + erasure = Erasure.new( + self.networkStore.localStore, leoEncoderProvider, leoDecoderProvider, + self.taskpool, + ) - without manifestBlk =? await self.storeManifest(verifiable), err: - trace "Unable to store verifiable manifest" - return failure(err) + encoded = ?await erasure.encode(manifest, ecK, ecM) + builder = ?Poseidon2Builder.new(self.networkStore.localStore, encoded) + verifiable = ?await builder.buildManifest() + manifestBlk = ?await self.storeManifest(verifiable) - let verifyRoot = if builder.verifyRoot.isNone: return failure("No slots root") @@ -586,7 +575,7 @@ proc requestStorage*( pricePerBytePerSecond: UInt256, collateralPerByte: UInt256, expiry: uint64, -): Future[?!PurchaseId] {.async.} = +): Future[?!PurchaseId] {.async: (raises: [CancelledError]).} = ## Initiate a request for storage sequence, this might ## be a multistep procedure. 
## @@ -617,7 +606,17 @@ proc requestStorage*( trace "Unable to setup request" return failure err - let purchase = await contracts.purchasing.purchase(request) + # TODO: remove try/except once state machine has checked exceptions + let purchase = + try: + await contracts.purchasing.purchase(request) + except CancelledError as err: + trace "Purchase cancelled", err = err.msg + raise err + except CatchableError as err: + trace "Unable to purchase storage", err = err.msg + return failure(err) + success purchase.id proc onStore( @@ -739,38 +738,28 @@ proc onProve( if prover =? self.prover: trace "Prover enabled" - without cid =? Cid.init(cidStr).mapFailure, err: - error "Unable to parse Cid", cid, err = err.msg - return failure(err) - - without manifest =? await self.fetchManifest(cid), err: - error "Unable to fetch manifest for cid", err = err.msg - return failure(err) + let + cid = ?Cid.init(cidStr).mapFailure + manifest = ?await self.fetchManifest(cid) + builder = + ?Poseidon2Builder.new(self.networkStore, manifest, manifest.verifiableStrategy) + sampler = ?Poseidon2Sampler.new(slotIdx, self.networkStore, builder) when defined(verify_circuit): - without (inputs, proof) =? await prover.prove(slotIdx.int, manifest, challenge), - err: - error "Unable to generate proof", err = err.msg - return failure(err) + let (proof, checked) = + ?await prover.prove(sampler, manifest, challenge, verify = true) - without checked =? await prover.verify(proof, inputs), err: - error "Unable to verify proof", err = err.msg - return failure(err) - - if not checked: + if checked.isSome and not checked.get: error "Proof verification failed" return failure("Proof verification failed") trace "Proof verified successfully" else: - without (_, proof) =? await prover.prove(slotIdx.int, manifest, challenge), err: - error "Unable to generate proof", err = err.msg - return failure(err) + let (proof, _) = ?await prover.prove(sampler, manifest, challenge, verify = false) - let groth16Proof = proof.toGroth16Proof() - trace "Proof generated successfully", groth16Proof + trace "Proof generated successfully", proof - success groth16Proof + success proof else: warn "Prover not enabled" failure "Prover not enabled" diff --git a/codex/slots/builder.nim b/codex/slots/builder.nim index 25844db63..1857150c8 100644 --- a/codex/slots/builder.nim +++ b/codex/slots/builder.nim @@ -3,6 +3,6 @@ import ./converters import ../merkletree -export builder, converters +export builder, converters, merkletree type Poseidon2Builder* = SlotsBuilder[Poseidon2Tree, Poseidon2Hash] diff --git a/codex/slots/builder/builder.nim b/codex/slots/builder/builder.nim index 5fbb0fe19..3fe1c5191 100644 --- a/codex/slots/builder/builder.nim +++ b/codex/slots/builder/builder.nim @@ -32,109 +32,127 @@ import ../converters export converters, asynciter logScope: - topics = "codex slotsbuilder" + topics = "codex slots builder" -type SlotsBuilder*[T, H] = ref object of RootObj +type SlotsBuilder*[SomeTree, SomeHash] = ref object of RootObj store: BlockStore manifest: Manifest # current manifest strategy: IndexingStrategy # indexing strategy cellSize: NBytes # cell size numSlotBlocks: Natural # number of blocks per slot (should yield a power of two number of cells) - slotRoots: seq[H] # roots of the slots + slotRoots: seq[SomeHash] # roots of the slots emptyBlock: seq[byte] # empty block - verifiableTree: ?T # verification tree (dataset tree) - emptyDigestTree: T # empty digest tree for empty blocks + verifiableTree: ?SomeTree # verification tree (dataset tree) + 
emptyDigestTree: SomeTree # empty digest tree for empty blocks -func verifiable*[T, H](self: SlotsBuilder[T, H]): bool {.inline.} = +func verifiable*[SomeTree, SomeHash]( + self: SlotsBuilder[SomeTree, SomeHash] +): bool {.inline.} = ## Returns true if the slots are verifiable. ## self.manifest.verifiable -func slotRoots*[T, H](self: SlotsBuilder[T, H]): seq[H] {.inline.} = +func slotRoots*[SomeTree, SomeHash]( + self: SlotsBuilder[SomeTree, SomeHash] +): seq[SomeHash] {.inline.} = ## Returns the slot roots. ## self.slotRoots -func verifyTree*[T, H](self: SlotsBuilder[T, H]): ?T {.inline.} = +func verifyTree*[SomeTree, SomeHash]( + self: SlotsBuilder[SomeTree, SomeHash] +): ?SomeTree {.inline.} = ## Returns the slots tree (verification tree). ## self.verifiableTree -func verifyRoot*[T, H](self: SlotsBuilder[T, H]): ?H {.inline.} = +func verifyRoot*[SomeTree, SomeHash]( + self: SlotsBuilder[SomeTree, SomeHash] +): ?SomeHash {.inline.} = ## Returns the slots root (verification root). ## if tree =? self.verifyTree and root =? tree.root: return some root -func numSlots*[T, H](self: SlotsBuilder[T, H]): Natural = +func numSlots*[SomeTree, SomeHash](self: SlotsBuilder[SomeTree, SomeHash]): Natural = ## Number of slots. ## self.manifest.numSlots -func numSlotBlocks*[T, H](self: SlotsBuilder[T, H]): Natural = +func numSlotBlocks*[SomeTree, SomeHash]( + self: SlotsBuilder[SomeTree, SomeHash] +): Natural = ## Number of blocks per slot. ## self.numSlotBlocks -func numBlocks*[T, H](self: SlotsBuilder[T, H]): Natural = +func numBlocks*[SomeTree, SomeHash](self: SlotsBuilder[SomeTree, SomeHash]): Natural = ## Number of blocks. ## self.numSlotBlocks * self.manifest.numSlots -func slotBytes*[T, H](self: SlotsBuilder[T, H]): NBytes = +func slotBytes*[SomeTree, SomeHash](self: SlotsBuilder[SomeTree, SomeHash]): NBytes = ## Number of bytes per slot. ## (self.manifest.blockSize.int * self.numSlotBlocks).NBytes -func numBlockCells*[T, H](self: SlotsBuilder[T, H]): Natural = +func numBlockCells*[SomeTree, SomeHash]( + self: SlotsBuilder[SomeTree, SomeHash] +): Natural = ## Number of cells per block. ## (self.manifest.blockSize div self.cellSize).Natural -func cellSize*[T, H](self: SlotsBuilder[T, H]): NBytes = +func cellSize*[SomeTree, SomeHash](self: SlotsBuilder[SomeTree, SomeHash]): NBytes = ## Cell size. ## self.cellSize -func numSlotCells*[T, H](self: SlotsBuilder[T, H]): Natural = +func numSlotCells*[SomeTree, SomeHash]( + self: SlotsBuilder[SomeTree, SomeHash] +): Natural = ## Number of cells per slot. ## self.numBlockCells * self.numSlotBlocks -func slotIndicesIter*[T, H](self: SlotsBuilder[T, H], slot: Natural): ?!Iter[int] = +func slotIndicesIter*[SomeTree, SomeHash]( + self: SlotsBuilder[SomeTree, SomeHash], slot: Natural +): ?!Iter[int] = ## Returns the slot indices. ## self.strategy.getIndices(slot).catch -func slotIndices*[T, H](self: SlotsBuilder[T, H], slot: Natural): seq[int] = +func slotIndices*[SomeTree, SomeHash]( + self: SlotsBuilder[SomeTree, SomeHash], slot: Natural +): seq[int] = ## Returns the slot indices. ## if iter =? self.strategy.getIndices(slot).catch: return toSeq(iter) -func manifest*[T, H](self: SlotsBuilder[T, H]): Manifest = +func manifest*[SomeTree, SomeHash](self: SlotsBuilder[SomeTree, SomeHash]): Manifest = ## Returns the manifest. 
## self.manifest -proc buildBlockTree*[T, H]( - self: SlotsBuilder[T, H], blkIdx: Natural, slotPos: Natural -): Future[?!(seq[byte], T)] {.async: (raises: [CancelledError]).} = +proc buildBlockTree*[SomeTree, SomeHash]( + self: SlotsBuilder[SomeTree, SomeHash], blkIdx: Natural, slotPos: Natural +): Future[?!(seq[byte], SomeTree)] {.async: (raises: [CancelledError]).} = ## Build the block digest tree and return a tuple with the ## block data and the tree. ## @@ -152,22 +170,17 @@ proc buildBlockTree*[T, H]( trace "Returning empty digest tree for pad block" return success (self.emptyBlock, self.emptyDigestTree) - without blk =? await self.store.getBlock(self.manifest.treeCid, blkIdx), err: - error "Failed to get block CID for tree at index", err = err.msg - return failure(err) + let blk = ?await self.store.getBlock(self.manifest.treeCid, blkIdx) if blk.isEmpty: success (self.emptyBlock, self.emptyDigestTree) else: - without tree =? T.digestTree(blk.data, self.cellSize.int), err: - error "Failed to create digest for block", err = err.msg - return failure(err) - + let tree = ?SomeTree.digestTree(blk.data, self.cellSize.int) success (blk.data, tree) -proc getCellHashes*[T, H]( - self: SlotsBuilder[T, H], slotIndex: Natural -): Future[?!seq[H]] {.async: (raises: [CancelledError, IndexingError]).} = +proc getCellHashes*[SomeTree, SomeHash]( + self: SlotsBuilder[SomeTree, SomeHash], slotIndex: Natural +): Future[?!seq[SomeHash]] {.async: (raises: [CancelledError]).} = ## Collect all the cells from a block and return ## their hashes. ## @@ -184,7 +197,7 @@ proc getCellHashes*[T, H]( slotIndex = slotIndex let hashes = collect(newSeq): - for i, blkIdx in self.strategy.getIndices(slotIndex): + for i, blkIdx in ?self.strategy.getIndices(slotIndex).catch: logScope: blkIdx = blkIdx pos = i @@ -200,25 +213,18 @@ proc getCellHashes*[T, H]( success hashes -proc buildSlotTree*[T, H]( - self: SlotsBuilder[T, H], slotIndex: Natural -): Future[?!T] {.async: (raises: [CancelledError]).} = +proc buildSlotTree*[SomeTree, SomeHash]( + self: SlotsBuilder[SomeTree, SomeHash], slotIndex: Natural +): Future[?!SomeTree] {.async: (raises: [CancelledError]).} = ## Build the slot tree from the block digest hashes ## and return the tree. - try: - without cellHashes =? (await self.getCellHashes(slotIndex)), err: - error "Failed to select slot blocks", err = err.msg - return failure(err) - - T.init(cellHashes) - except IndexingError as err: - error "Failed to build slot tree", err = err.msg - return failure(err) + let cellHashes = ?await self.getCellHashes(slotIndex) + SomeTree.init(cellHashes) -proc buildSlot*[T, H]( - self: SlotsBuilder[T, H], slotIndex: Natural -): Future[?!H] {.async: (raises: [CancelledError]).} = +proc buildSlot*[SomeTree, SomeHash]( + self: SlotsBuilder[SomeTree, SomeHash], slotIndex: Natural +): Future[?!SomeHash] {.async: (raises: [CancelledError]).} = ## Build a slot tree and store the proofs in ## the block store. ## @@ -244,18 +250,17 @@ proc buildSlot*[T, H]( error "Failed to get proof for slot tree", err = err.msg return failure(err) - if err =? 
- (await self.store.putCidAndProof(treeCid, i, cellCid, encodableProof)).errorOption: - error "Failed to store slot tree", err = err.msg - return failure(err) + ?(await self.store.putCidAndProof(treeCid, i, cellCid, encodableProof)) tree.root() -func buildVerifyTree*[T, H](self: SlotsBuilder[T, H], slotRoots: openArray[H]): ?!T = - T.init(@slotRoots) +func buildVerifyTree*[SomeTree, SomeHash]( + self: SlotsBuilder[SomeTree, SomeHash], slotRoots: openArray[SomeHash] +): ?!SomeTree = + SomeTree.init(@slotRoots) -proc buildSlots*[T, H]( - self: SlotsBuilder[T, H] +proc buildSlots*[SomeTree, SomeHash]( + self: SlotsBuilder[SomeTree, SomeHash] ): Future[?!void] {.async: (raises: [CancelledError]).} = ## Build all slot trees and store them in the block store. ## @@ -269,10 +274,7 @@ proc buildSlots*[T, H]( if self.slotRoots.len == 0: self.slotRoots = collect(newSeq): for i in 0 ..< self.manifest.numSlots: - without slotRoot =? (await self.buildSlot(i)), err: - error "Failed to build slot", err = err.msg, index = i - return failure(err) - slotRoot + ?(await self.buildSlot(i)) without tree =? self.buildVerifyTree(self.slotRoots) and root =? tree.root, err: error "Failed to build slot roots tree", err = err.msg @@ -286,17 +288,15 @@ proc buildSlots*[T, H]( success() -proc buildManifest*[T, H]( - self: SlotsBuilder[T, H] +proc buildManifest*[SomeTree, SomeHash]( + self: SlotsBuilder[SomeTree, SomeHash] ): Future[?!Manifest] {.async: (raises: [CancelledError]).} = - if err =? (await self.buildSlots()).errorOption: - error "Failed to build slot roots", err = err.msg - return failure(err) + ## Build the manifest from the slots and return it. + ## - without rootCids =? self.slotRoots.toSlotCids(), err: - error "Failed to map slot roots to CIDs", err = err.msg - return failure(err) + ?(await self.buildSlots()) # build all slots first + let rootCids = ?self.slotRoots.toSlotCids() without rootProvingCidRes =? self.verifyRoot .? toVerifyCid() and rootProvingCid =? rootProvingCidRes, err: error "Failed to map slot roots to CIDs", err = err.msg @@ -306,13 +306,13 @@ proc buildManifest*[T, H]( self.manifest, rootProvingCid, rootCids, self.cellSize, self.strategy.strategyType ) -proc new*[T, H]( - _: type SlotsBuilder[T, H], +proc new*[SomeTree, SomeHash]( + _: type SlotsBuilder[SomeTree, SomeHash], store: BlockStore, manifest: Manifest, strategy = LinearStrategy, cellSize = DefaultCellSize, -): ?!SlotsBuilder[T, H] = +): ?!SlotsBuilder[SomeTree, SomeHash] = if not manifest.protected: trace "Manifest is not protected." 
return failure("Manifest is not protected.") @@ -352,7 +352,7 @@ proc new*[T, H]( numBlocksTotal = numSlotBlocksTotal * manifest.numSlots # number of blocks per slot emptyBlock = newSeq[byte](manifest.blockSize.int) - emptyDigestTree = ?T.digestTree(emptyBlock, cellSize.int) + emptyDigestTree = ?SomeTree.digestTree(emptyBlock, cellSize.int) strategy = ?strategy.init( @@ -375,7 +375,7 @@ proc new*[T, H]( trace "Creating slots builder" - var self = SlotsBuilder[T, H]( + var self = SlotsBuilder[SomeTree, SomeHash]( store: store, manifest: manifest, strategy: strategy, diff --git a/codex/slots/proofs.nim b/codex/slots/proofs.nim index 4f7f01b58..a1f56d9a0 100644 --- a/codex/slots/proofs.nim +++ b/codex/slots/proofs.nim @@ -1,5 +1,5 @@ import ./proofs/backends import ./proofs/prover -import ./proofs/backendfactory +import ./proofs/proverfactory -export circomcompat, prover, backendfactory +export backends, prover, proverfactory diff --git a/codex/slots/proofs/backendfactory.nim b/codex/slots/proofs/backendfactory.nim deleted file mode 100644 index 7aba27d85..000000000 --- a/codex/slots/proofs/backendfactory.nim +++ /dev/null @@ -1,82 +0,0 @@ -import os -import strutils -import pkg/chronos -import pkg/chronicles -import pkg/questionable -import pkg/confutils/defs -import pkg/stew/io2 -import pkg/ethers - -import ../../conf -import ./backends -import ./backendutils - -proc initializeFromConfig(config: CodexConf, utils: BackendUtils): ?!AnyBackend = - if not fileAccessible($config.circomR1cs, {AccessFlags.Read}) or - not endsWith($config.circomR1cs, ".r1cs"): - return failure("Circom R1CS file not accessible") - - if not fileAccessible($config.circomWasm, {AccessFlags.Read}) or - not endsWith($config.circomWasm, ".wasm"): - return failure("Circom wasm file not accessible") - - if not fileAccessible($config.circomZkey, {AccessFlags.Read}) or - not endsWith($config.circomZkey, ".zkey"): - return failure("Circom zkey file not accessible") - - trace "Initialized prover backend from cli config" - success( - utils.initializeCircomBackend( - $config.circomR1cs, $config.circomWasm, $config.circomZkey - ) - ) - -proc r1csFilePath(config: CodexConf): string = - config.circuitDir / "proof_main.r1cs" - -proc wasmFilePath(config: CodexConf): string = - config.circuitDir / "proof_main.wasm" - -proc zkeyFilePath(config: CodexConf): string = - config.circuitDir / "proof_main.zkey" - -proc initializeFromCircuitDirFiles( - config: CodexConf, utils: BackendUtils -): ?!AnyBackend {.gcsafe.} = - if fileExists(config.r1csFilePath) and fileExists(config.wasmFilePath) and - fileExists(config.zkeyFilePath): - trace "Initialized prover backend from local files" - return success( - utils.initializeCircomBackend( - config.r1csFilePath, config.wasmFilePath, config.zkeyFilePath - ) - ) - - failure("Circuit files not found") - -proc suggestDownloadTool(config: CodexConf) = - without address =? config.marketplaceAddress: - raise (ref Defect)( - msg: "Proving backend initializing while marketplace address not set." - ) - - let - tokens = ["cirdl", "\"" & $config.circuitDir & "\"", config.ethProvider, $address] - instructions = "'./" & tokens.join(" ") & "'" - - warn "Proving circuit files are not found. Please run the following to download them:", - instructions - -proc initializeBackend*( - config: CodexConf, utils: BackendUtils = BackendUtils() -): ?!AnyBackend = - without backend =? 
initializeFromConfig(config, utils), cliErr: - info "Could not initialize prover backend from CLI options...", msg = cliErr.msg - without backend =? initializeFromCircuitDirFiles(config, utils), localErr: - info "Could not initialize prover backend from circuit dir files...", - msg = localErr.msg - suggestDownloadTool(config) - return failure("CircuitFilesNotFound") - # Unexpected: value of backend does not survive leaving each scope. (definition does though...) - return success(backend) - return success(backend) diff --git a/codex/slots/proofs/backends.nim b/codex/slots/proofs/backends.nim index 3bd2edb6c..b0b79b370 100644 --- a/codex/slots/proofs/backends.nim +++ b/codex/slots/proofs/backends.nim @@ -1,5 +1,4 @@ import ./backends/circomcompat +import ./backends/nimgroth16 -export circomcompat - -type AnyBackend* = CircomCompat +export circomcompat, nimgroth16 diff --git a/codex/slots/proofs/backends/circomcompat.nim b/codex/slots/proofs/backends/circomcompat.nim index 1d2e3e19a..33de3bc55 100644 --- a/codex/slots/proofs/backends/circomcompat.nim +++ b/codex/slots/proofs/backends/circomcompat.nim @@ -7,6 +7,8 @@ ## This file may not be copied, modified, or distributed except according to ## those terms. +{.deprecated: "use the NimGroth16Backend".} + {.push raises: [].} import std/sugar @@ -23,8 +25,11 @@ import ./converters export circomcompat, converters +logScope: + topics = "codex backend circomcompat" + type - CircomCompat* = object + CircomCompatBackend* = object slotDepth: int # max depth of the slot tree datasetDepth: int # max depth of dataset tree blkDepth: int # depth of the block merkle tree (pow2 for now) @@ -34,13 +39,15 @@ type wasmPath: string # path to the wasm file zkeyPath: string # path to the zkey file backendCfg: ptr CircomBn254Cfg - vkp*: ptr CircomKey + vkp*: ptr CircomCompatKey + + CircomCompatBackendRef* = ref CircomCompatBackend - NormalizedProofInputs*[H] {.borrow: `.`.} = distinct ProofInputs[H] + NormalizedProofInputs*[SomeHash] {.borrow: `.`.} = distinct ProofInputs[SomeHash] -func normalizeInput*[H]( - self: CircomCompat, input: ProofInputs[H] -): NormalizedProofInputs[H] = +func normalizeInput*[SomeHash]( + self: CircomCompatBackendRef, input: ProofInputs[SomeHash] +): NormalizedProofInputs[SomeHash] = ## Parameters in CIRCOM circuits are statically sized and must be properly ## padded before they can be passed onto the circuit. This function takes ## variable length parameters and performs that padding. 
@@ -53,23 +60,25 @@ func normalizeInput*[H]( for sample in input.samples: var merklePaths = sample.merklePaths merklePaths.setLen(self.slotDepth) - Sample[H](cellData: sample.cellData, merklePaths: merklePaths) + Sample[SomeHash](cellData: sample.cellData, merklePaths: merklePaths) var normSlotProof = input.slotProof normSlotProof.setLen(self.datasetDepth) - NormalizedProofInputs[H] ProofInputs[H]( - entropy: input.entropy, - datasetRoot: input.datasetRoot, - slotIndex: input.slotIndex, - slotRoot: input.slotRoot, - nCellsPerSlot: input.nCellsPerSlot, - nSlotsPerDataSet: input.nSlotsPerDataSet, - slotProof: normSlotProof, - samples: normSamples, + NormalizedProofInputs[SomeHash]( + ProofInputs[SomeHash]( + entropy: input.entropy, + datasetRoot: input.datasetRoot, + slotIndex: input.slotIndex, + slotRoot: input.slotRoot, + nCellsPerSlot: input.nCellsPerSlot, + nSlotsPerDataSet: input.nSlotsPerDataSet, + slotProof: normSlotProof, + samples: normSamples, + ) ) -proc release*(self: CircomCompat) = +proc release*(self: CircomCompatBackendRef) = ## Release the ctx ## @@ -79,7 +88,9 @@ proc release*(self: CircomCompat) = if not isNil(self.vkp): self.vkp.unsafeAddr.release_key() -proc prove[H](self: CircomCompat, input: NormalizedProofInputs[H]): ?!CircomProof = +proc prove[SomeHash]( + self: CircomCompatBackendRef, input: NormalizedProofInputs[SomeHash] +): Future[?!CircomCompatProof] {.async: (raises: [CancelledError]).} = doAssert input.samples.len == self.numSamples, "Number of samples does not match" doAssert input.slotProof.len <= self.datasetDepth, @@ -101,7 +112,7 @@ proc prove[H](self: CircomCompat, input: NormalizedProofInputs[H]): ?!CircomProo ctx.addr.release_circom_compat() if init_circom_compat(self.backendCfg, addr ctx) != ERR_OK or ctx == nil: - raiseAssert("failed to initialize CircomCompat ctx") + raiseAssert("failed to initialize CircomCompatBackend ctx") var entropy = input.entropy.toBytes @@ -172,12 +183,16 @@ proc prove[H](self: CircomCompat, input: NormalizedProofInputs[H]): ?!CircomProo success proof -proc prove*[H](self: CircomCompat, input: ProofInputs[H]): ?!CircomProof = +proc prove*[SomeHash]( + self: CircomCompatBackendRef, input: ProofInputs[SomeHash] +): Future[?!CircomCompatProof] {.async: (raises: [CancelledError], raw: true).} = self.prove(self.normalizeInput(input)) -proc verify*[H]( - self: CircomCompat, proof: CircomProof, inputs: ProofInputs[H] -): ?!bool = +proc verify*[SomeHash]( + self: CircomCompatBackendRef, + proof: CircomCompatProof, + inputs: ProofInputs[SomeHash], +): Future[?!bool] {.async: (raises: [CancelledError]).} = ## Verify a proof using a ctx ## @@ -196,8 +211,8 @@ proc verify*[H]( finally: inputs.releaseCircomInputs() -proc init*( - _: type CircomCompat, +proc new*( + _: type CircomCompatBackendRef, r1csPath: string, wasmPath: string, zkeyPath: string = "", @@ -206,7 +221,7 @@ proc init*( blkDepth = DefaultBlockDepth, cellElms = DefaultCellElms, numSamples = DefaultSamplesNum, -): CircomCompat = +): ?!CircomCompatBackendRef = ## Create a new ctx ## @@ -217,16 +232,16 @@ proc init*( cfg == nil: if cfg != nil: cfg.addr.release_cfg() - raiseAssert("failed to initialize circom compat config") + return failure "failed to initialize circom compat config" var vkpPtr: ptr VerifyingKey = nil if cfg.get_verifying_key(vkpPtr.addr) != ERR_OK or vkpPtr == nil: if vkpPtr != nil: vkpPtr.addr.release_key() - raiseAssert("Failed to get verifying key") + return failure "Failed to get verifying key" - CircomCompat( + success CircomCompatBackendRef( r1csPath: 
r1csPath, wasmPath: wasmPath, zkeyPath: zkeyPath, diff --git a/codex/slots/proofs/backends/converters.nim b/codex/slots/proofs/backends/converters.nim index ee771477d..65007d84a 100644 --- a/codex/slots/proofs/backends/converters.nim +++ b/codex/slots/proofs/backends/converters.nim @@ -1,5 +1,5 @@ ## Nim-Codex -## Copyright (c) 2024 Status Research & Development GmbH +## Copyright (c) 2025 Status Research & Development GmbH ## Licensed under either of ## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) ## * MIT license ([LICENSE-MIT](LICENSE-MIT)) @@ -9,21 +9,27 @@ {.push raises: [].} +import pkg/groth16 import pkg/circomcompat +import pkg/constantine/math/io/io_fields import ../../../contracts import ../../types import ../../../merkletree type - CircomG1* = G1 - CircomG2* = G2 + CircomCompatG1* = circomcompat.G1 + CircomCompatG2* = circomcompat.G2 - CircomProof* = Proof - CircomKey* = VerifyingKey - CircomInputs* = Inputs + CircomCompatProof* = circomcompat.Proof + CircomCompatKey* = circomcompat.VerifyingKey + CircomCompatInputs* = circomcompat.Inputs -proc toCircomInputs*(inputs: ProofInputs[Poseidon2Hash]): CircomInputs = + NimGroth16G1* = groth16.G1 + NimGroth16G2* = groth16.G2 + NimGroth16Proof* = groth16.Proof + +proc toCircomInputs*(inputs: ProofInputs[Poseidon2Hash]): CircomCompatInputs = var slotIndex = inputs.slotIndex.toF.toBytes.toArray32 datasetRoot = inputs.datasetRoot.toBytes.toArray32 @@ -34,21 +40,49 @@ proc toCircomInputs*(inputs: ProofInputs[Poseidon2Hash]): CircomInputs = let inputsPtr = allocShared0(32 * elms.len) copyMem(inputsPtr, addr elms[0], elms.len * 32) - CircomInputs(elms: cast[ptr array[32, byte]](inputsPtr), len: elms.len.uint) + CircomCompatInputs(elms: cast[ptr array[32, byte]](inputsPtr), len: elms.len.uint) -proc releaseCircomInputs*(inputs: var CircomInputs) = +proc releaseCircomInputs*(inputs: var CircomCompatInputs) = if not inputs.elms.isNil: deallocShared(inputs.elms) inputs.elms = nil -func toG1*(g: CircomG1): G1Point = +func toG1*(g: CircomCompatG1): G1Point = G1Point(x: UInt256.fromBytesLE(g.x), y: UInt256.fromBytesLE(g.y)) -func toG2*(g: CircomG2): G2Point = +func toG2*(g: CircomCompatG2): G2Point = G2Point( x: Fp2Element(real: UInt256.fromBytesLE(g.x[0]), imag: UInt256.fromBytesLE(g.x[1])), y: Fp2Element(real: UInt256.fromBytesLE(g.y[0]), imag: UInt256.fromBytesLE(g.y[1])), ) -func toGroth16Proof*(proof: CircomProof): Groth16Proof = +func toGroth16Proof*(proof: CircomCompatProof): Groth16Proof = Groth16Proof(a: proof.a.toG1, b: proof.b.toG2, c: proof.c.toG1) + +func toG1*(g: NimGroth16G1): G1Point = + var + x: array[32, byte] + y: array[32, byte] + + assert x.marshal(g.x, Endianness.littleEndian) + assert y.marshal(g.y, Endianness.littleEndian) + + G1Point(x: UInt256.fromBytesLE(x), y: UInt256.fromBytesLE(y)) + +func toG2*(g: NimGroth16G2): G2Point = + var + x: array[2, array[32, byte]] + y: array[2, array[32, byte]] + + assert x[0].marshal(g.x.coords[0], Endianness.littleEndian) + assert x[1].marshal(g.x.coords[1], Endianness.littleEndian) + assert y[0].marshal(g.y.coords[0], Endianness.littleEndian) + assert y[1].marshal(g.y.coords[1], Endianness.littleEndian) + + G2Point( + x: Fp2Element(real: UInt256.fromBytesLE(x[0]), imag: UInt256.fromBytesLE(x[1])), + y: Fp2Element(real: UInt256.fromBytesLE(y[0]), imag: UInt256.fromBytesLE(y[1])), + ) + +func toGroth16Proof*(proof: NimGroth16Proof): Groth16Proof = + Groth16Proof(a: proof.pi_a.toG1, b: proof.pi_b.toG2, c: proof.pi_c.toG1) diff --git 
a/codex/slots/proofs/backends/nimgroth16.nim b/codex/slots/proofs/backends/nimgroth16.nim new file mode 100644 index 000000000..2a3a8759a --- /dev/null +++ b/codex/slots/proofs/backends/nimgroth16.nim @@ -0,0 +1,211 @@ +## Nim-Codex +## Copyright (c) 2025 Status Research & Development GmbH +## Licensed under either of +## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) +## * MIT license ([LICENSE-MIT](LICENSE-MIT)) +## at your option. +## This file may not be copied, modified, or distributed except according to +## those terms. + +{.push raises: [].} + +import std/sugar +import std/isolation +import std/atomics + +import pkg/chronos +import pkg/chronos/threadsync +import pkg/taskpools +import pkg/questionable/results + +import pkg/groth16 +import pkg/nim/circom_witnessgen +import pkg/nim/circom_witnessgen/load +import pkg/nim/circom_witnessgen/witness + +import ../../types +import ../../../stores +import ../../../contracts + +import ./converters + +export converters + +logScope: + topics = "codex backend nimgroth16" + +const DefaultCurve* = "bn128" + +type + NimGroth16Backend* = object + curve: string # curve name + slotDepth: int # max depth of the slot tree + datasetDepth: int # max depth of dataset tree + blkDepth: int # depth of the block merkle tree (pow2 for now) + cellElms: int # number of field elements per cell + numSamples: int # number of samples per slot + r1cs: R1CS # parsed r1cs file + zkey: ZKey # parsed zkey file + graph*: Graph # witness generation graph generated with circom-witnesscalc + tp: Taskpool # taskpool for async operations + + NimGroth16BackendRef* = ref NimGroth16Backend + + ProofTask* = object + proof: Isolated[Proof] + self: ptr NimGroth16Backend + inputs: Inputs + signal: ThreadSignalPtr + ok: Atomic[bool] + +proc release*(self: NimGroth16BackendRef) = + ## Release the ctx + ## + + discard + +proc normalizeInput[SomeHash]( + self: NimGroth16BackendRef, input: ProofInputs[SomeHash] +): Inputs = + ## Map inputs to witnessgen inputs + ## + + var normSlotProof = input.slotProof + normSlotProof.setLen(self.datasetDepth) + + { + "slotDepth": @[self.slotDepth.toF], + "datasetDepth": @[self.datasetDepth.toF], + "blkDepth": @[self.blkDepth.toF], + "cellElms": @[self.cellElms.toF], + "numSamples": @[self.numSamples.toF], + "entropy": @[input.entropy], + "dataSetRoot": @[input.datasetRoot], + "slotIndex": @[input.slotIndex.toF], + "slotRoot": @[input.slotRoot], + "nCellsPerSlot": @[input.nCellsPerSlot.toF], + "nSlotsPerDataSet": @[input.nSlotsPerDataSet.toF], + "slotProof": normSlotProof, + "cellData": input.samples.mapIt(it.cellData).concat, + "merklePaths": input.samples.mapIt( + block: + var merklePaths = it.merklePaths + merklePaths.setLen(self.slotDepth) + merklePaths + ).concat, + }.toTable + +proc generateProofTask(task: ptr ProofTask) = + defer: + if task[].signal != nil: + discard task[].signal.fireSync() + + try: + trace "Generating witness" + let + witnessValues = generateWitness(task[].self[].graph, task[].inputs) + witness = Witness( + curve: task[].self[].curve, + r: task[].self[].r1cs.r, + nvars: task[].self[].r1cs.cfg.nWires, + values: witnessValues, + ) + + trace "Generating nim groth16 proof" + var proof = generateProof(task[].self[].zkey, witness, task[].self[].tp) + trace "Proof generated, copying to main thread" + var isolatedProof = isolate(proof) + task[].proof = move isolatedProof + task[].ok.store true + except CatchableError as e: + error "Failed to generate proof", err = e.msg + task[].ok.store false + +proc 
prove*[SomeHash]( + self: NimGroth16BackendRef, input: ProofInputs[SomeHash] +): Future[?!NimGroth16Proof] {.async: (raises: [CancelledError]).} = + ## Prove a statement using backend. + ## + + var + signalPtr = ?ThreadSignalPtr.new().mapFailure + task = ProofTask( + self: cast[ptr NimGroth16Backend](self), + signal: signalPtr, + inputs: self.normalizeInput(input), + ) + + defer: + if signalPtr != nil: + ?signalPtr.close().mapFailure + signalPtr = nil + + self.tp.spawn generateProofTask(task.addr) + + let taskFut = signalPtr.wait() + if err =? catch(await taskFut.join()).errorOption: + # XXX: we need this because there is no way to cancel a task + # and without waiting for it to finish, we'll be writing to freed + # memory in the task + warn "Error while generating proof, awaiting task to finish", err = err.msg + ?catch(await noCancel taskFut) + if err of CancelledError: # reraise cancelled error + trace "Task was cancelled" + raise (ref CancelledError) err + + trace "Task failed with error", err = err.msg + return failure err + + defer: + task.proof = default(Isolated[Proof]) + + if not task.ok.load: + trace "Task failed, no proof generated" + return failure("Failed to generate proof") + + var proof = task.proof.extract + trace "Task finished successfully, proof generated" + success proof + +proc verify*( + self: NimGroth16BackendRef, proof: NimGroth16Proof +): Future[?!bool] {.async: (raises: [CancelledError]).} = + let + vKey = self.zkey.extractVKey + verified = ?verifyProof(vKey, proof).catch + + success verified + +proc new*( + _: type NimGroth16BackendRef, + graphPath: string, + r1csPath: string, + zkeyPath: string, + curve = DefaultCurve, + slotDepth = DefaultMaxSlotDepth, + datasetDepth = DefaultMaxDatasetDepth, + blkDepth = DefaultBlockDepth, + cellElms = DefaultCellElms, + numSamples = DefaultSamplesNum, + tp: Taskpool, +): ?!NimGroth16BackendRef = + ## Create a new ctx + ## + + let + graph = ?loadGraph(graphPath).catch + r1cs = ?parseR1CS(r1csPath).catch + zkey = ?parseZKey(zkeyPath).catch + + success NimGroth16BackendRef( + graph: graph, + r1cs: r1cs, + zkey: zkey, + slotDepth: slotDepth, + datasetDepth: datasetDepth, + blkDepth: blkDepth, + cellElms: cellElms, + numSamples: numSamples, + curve: curve, + tp: tp, + ) diff --git a/codex/slots/proofs/backendutils.nim b/codex/slots/proofs/backendutils.nim deleted file mode 100644 index 0e334aced..000000000 --- a/codex/slots/proofs/backendutils.nim +++ /dev/null @@ -1,8 +0,0 @@ -import ./backends - -type BackendUtils* = ref object of RootObj - -method initializeCircomBackend*( - self: BackendUtils, r1csFile: string, wasmFile: string, zKeyFile: string -): AnyBackend {.base, gcsafe.} = - CircomCompat.init(r1csFile, wasmFile, zKeyFile) diff --git a/codex/slots/proofs/prover.nim b/codex/slots/proofs/prover.nim index 1afcd0684..b49ab0218 100644 --- a/codex/slots/proofs/prover.nim +++ b/codex/slots/proofs/prover.nim @@ -12,6 +12,7 @@ import pkg/chronos import pkg/chronicles import pkg/circomcompat import pkg/poseidon2 +import pkg/taskpools import pkg/questionable/results import pkg/libp2p/cid @@ -34,60 +35,71 @@ export backends logScope: topics = "codex prover" -type - AnyProof* = CircomProof - - AnySampler* = Poseidon2Sampler - # add any other generic type here, eg. Poseidon2Sampler | ReinforceConcreteSampler - AnyBuilder* = Poseidon2Builder - # add any other generic type here, eg.
Poseidon2Builder | ReinforceConcreteBuilder - - AnyProofInputs* = ProofInputs[Poseidon2Hash] - Prover* = ref object of RootObj - backend: AnyBackend - store: BlockStore - nSamples: int - -proc prove*( - self: Prover, slotIdx: int, manifest: Manifest, challenge: ProofChallenge -): Future[?!(AnyProofInputs, AnyProof)] {.async: (raises: [CancelledError]).} = +type Prover* = ref object + case backendKind: ProverBackendCmd + of ProverBackendCmd.nimgroth16: + groth16Backend*: NimGroth16BackendRef + of ProverBackendCmd.circomcompat: + circomCompatBackend*: CircomCompatBackendRef + nSamples: int + tp: Taskpool + +proc prove*[SomeSampler]( + self: Prover, + sampler: SomeSampler, + manifest: Manifest, + challenge: ProofChallenge, + verify = false, +): Future[?!(Groth16Proof, ?bool)] {.async: (raises: [CancelledError]).} = ## Prove a statement using backend. ## Returns a future that resolves to a proof. logScope: cid = manifest.treeCid - slot = slotIdx challenge = challenge trace "Received proof challenge" - without builder =? AnyBuilder.new(self.store, manifest), err: - error "Unable to create slots builder", err = err.msg - return failure(err) - - without sampler =? AnySampler.new(slotIdx, self.store, builder), err: - error "Unable to create data sampler", err = err.msg - return failure(err) - - without proofInput =? await sampler.getProofInput(challenge, self.nSamples), err: - error "Unable to get proof input for slot", err = err.msg - return failure(err) - - # prove slot - without proof =? self.backend.prove(proofInput), err: - error "Unable to prove slot", err = err.msg - return failure(err) + let + proofInput = ?await sampler.getProofInput(challenge, self.nSamples) + # prove slot + + case self.backendKind + of ProverBackendCmd.nimgroth16: + let + proof = ?await self.groth16Backend.prove(proofInput) + verified = + if verify: + (?await self.groth16Backend.verify(proof)).some + else: + bool.none + return success (proof.toGroth16Proof, verified) + of ProverBackendCmd.circomcompat: + let + proof = ?await self.circomCompatBackend.prove(proofInput) + verified = + if verify: + (?await self.circomCompatBackend.verify(proof, proofInput)).some + else: + bool.none + return success (proof.toGroth16Proof, verified) - success (proofInput, proof) - -proc verify*( - self: Prover, proof: AnyProof, inputs: AnyProofInputs -): Future[?!bool] {.async: (raises: [CancelledError]).} = - ## Prove a statement using backend. - ## Returns a future that resolves to a proof. 
- self.backend.verify(proof, inputs) +proc new*( + _: type Prover, backend: CircomCompatBackendRef, nSamples: int, tp: Taskpool +): Prover = + Prover( + circomCompatBackend: backend, + backendKind: ProverBackendCmd.circomcompat, + nSamples: nSamples, + tp: tp, + ) proc new*( - _: type Prover, store: BlockStore, backend: AnyBackend, nSamples: int + _: type Prover, backend: NimGroth16BackendRef, nSamples: int, tp: Taskpool ): Prover = - Prover(store: store, backend: backend, nSamples: nSamples) + Prover( + groth16Backend: backend, + backendKind: ProverBackendCmd.nimgroth16, + nSamples: nSamples, + tp: tp, + ) diff --git a/codex/slots/proofs/proverfactory.nim b/codex/slots/proofs/proverfactory.nim new file mode 100644 index 000000000..419b0fa50 --- /dev/null +++ b/codex/slots/proofs/proverfactory.nim @@ -0,0 +1,145 @@ +{.push raises: [].} + +import os +import strutils +import pkg/chronos +import pkg/chronicles +import pkg/questionable +import pkg/confutils/defs +import pkg/stew/io2 +import pkg/ethers +import pkg/taskpools + +import ../../conf +import ./backends +import ./prover + +logScope: + topics = "codex slots proverfactory" + +template graphFilePath(config: CodexConf): string = + config.circuitDir / "proof_main.bin" + +template r1csFilePath(config: CodexConf): string = + config.circuitDir / "proof_main.r1cs" + +template wasmFilePath(config: CodexConf): string = + config.circuitDir / "proof_main.wasm" + +template zkeyFilePath(config: CodexConf): string = + config.circuitDir / "proof_main.zkey" + +proc getGraphFile*(config: CodexConf): ?!string = + if fileAccessible($config.circomGraph, {AccessFlags.Read}) and + endsWith($config.circomGraph, ".bin"): + success $config.circomGraph + elif fileAccessible(config.graphFilePath, {AccessFlags.Read}) and + endsWith(config.graphFilePath, ".bin"): + success config.graphFilePath + else: + failure("Graph file not accessible or not found") + +proc getR1csFile*(config: CodexConf): ?!string = + if fileAccessible($config.circomR1cs, {AccessFlags.Read}) and + endsWith($config.circomR1cs, ".r1cs"): + success $config.circomR1cs + elif fileAccessible(config.r1csFilePath, {AccessFlags.Read}) and + endsWith(config.r1csFilePath, ".r1cs"): + success config.r1csFilePath + else: + failure("R1CS file not accessible or not found") + +proc getWasmFile*(config: CodexConf): ?!string = + if fileAccessible($config.circomWasm, {AccessFlags.Read}) and + endsWith($config.circomWasm, ".wasm"): + success $config.circomWasm + elif fileAccessible(config.wasmFilePath, {AccessFlags.Read}) and + endsWith(config.wasmFilePath, ".wasm"): + success config.wasmFilePath + else: + failure("WASM file not accessible or not found") + +proc getZkeyFile*(config: CodexConf): ?!string = + if fileAccessible($config.circomZkey, {AccessFlags.Read}) and + endsWith($config.circomZkey, ".zkey"): + success $config.circomZkey + elif fileAccessible(config.zkeyFilePath, {AccessFlags.Read}) and + endsWith(config.zkeyFilePath, ".zkey"): + success config.zkeyFilePath + else: + failure("ZKey file not accessible or not found") + +proc suggestDownloadTool(config: CodexConf) = + without address =? config.marketplaceAddress: + raiseAssert("Proving backend initializing while marketplace address not set.") + + let + tokens = ["cirdl", "\"" & $config.circuitDir & "\"", config.ethProvider, $address] + instructions = "'./" & tokens.join(" ") & "'" + + warn "Proving circuit files are not found. 
Please run the following to download them:", + instructions + +proc initializeNimGroth16Backend( + config: CodexConf, tp: Taskpool +): ?!NimGroth16BackendRef = + trace "Initializing NimGroth16 backend" + + let + graphFile = ?getGraphFile(config) + r1csFile = ?getR1csFile(config) + zkeyFile = ?getZkeyFile(config) + + return NimGroth16BackendRef.new( + $graphFile, + $r1csFile, + $zkeyFile, + $config.curve, + config.maxSlotDepth, + config.maxDatasetDepth, + config.maxBlockDepth, + config.maxCellElms, + config.numProofSamples, + tp, + ) + +proc initializeCircomCompatBackend( + config: CodexConf, tp: Taskpool +): ?!CircomCompatBackendRef = + trace "Initializing CircomCompat backend" + + let + r1csFile = ?getR1csFile(config) + wasmFile = ?getWasmFile(config) + zkeyFile = ?getZkeyFile(config) + + return CircomCompatBackendRef.new( + $r1csFile, + $wasmFile, + $zkeyFile, + config.maxSlotDepth, + config.maxDatasetDepth, + config.maxBlockDepth, + config.maxCellElms, + config.numProofSamples, + ) + +proc initializeProver*(config: CodexConf, tp: Taskpool): ?!Prover = + let prover = + case config.proverBackend + of ProverBackendCmd.nimgroth16: + without backend =? initializeNimGroth16Backend(config, tp), err: + trace "Unable to initialize NimGroth16 backend: ", err = err.msg + suggestDownloadTool(config) + return failure(err) + + Prover.new(backend, config.numProofSamples, tp) + of ProverBackendCmd.circomcompat: + without backend =? initializeCircomCompatBackend(config, tp), err: + trace "Unable to initialize CircomCompat backend: ", err = err.msg + suggestDownloadTool(config) + return failure(err) + + Prover.new(backend, config.numProofSamples, tp) + + success prover diff --git a/codex/slots/sampler/sampler.nim b/codex/slots/sampler/sampler.nim index d7a36cfdb..403695c77 100644 --- a/codex/slots/sampler/sampler.nim +++ b/codex/slots/sampler/sampler.nim @@ -27,16 +27,16 @@ import ../types import ./utils logScope: - topics = "codex datasampler" + topics = "codex slots sampler" -type DataSampler*[T, H] = ref object of RootObj +type DataSampler*[SomeTree, SomeHash] = ref object of RootObj index: Natural blockStore: BlockStore - builder: SlotsBuilder[T, H] + builder: SlotsBuilder[SomeTree, SomeHash] -func getCell*[T, H]( - self: DataSampler[T, H], blkBytes: seq[byte], blkCellIdx: Natural -): seq[H] = +func getCell*[SomeTree, SomeHash]( + self: DataSampler[SomeTree, SomeHash], blkBytes: seq[byte], blkCellIdx: Natural +): seq[SomeHash] = let cellSize = self.builder.cellSize.uint64 dataStart = cellSize * blkCellIdx.uint64 @@ -44,11 +44,14 @@ func getCell*[T, H]( doAssert (dataEnd - dataStart) == cellSize, "Invalid cell size" - blkBytes[dataStart ..< dataEnd].elements(H).toSeq() + blkBytes[dataStart ..< dataEnd].elements(SomeHash).toSeq() -proc getSample*[T, H]( - self: DataSampler[T, H], cellIdx: int, slotTreeCid: Cid, slotRoot: H -): Future[?!Sample[H]] {.async: (raises: [CancelledError]).} = +proc getSample*[SomeTree, SomeHash]( + self: DataSampler[SomeTree, SomeHash], + cellIdx: int, + slotTreeCid: Cid, + slotRoot: SomeHash, +): Future[?!Sample[SomeHash]] {.async: (raises: [CancelledError]).} = let cellsPerBlock = self.builder.numBlockCells blkCellIdx = cellIdx.toCellInBlk(cellsPerBlock) # block cell index @@ -77,27 +80,22 @@ proc getSample*[T, H]( cellProof = blkTree.getProof(blkCellIdx).valueOr: return failure("Failed to get proof from block tree") - success Sample[H](cellData: cellData, merklePaths: (cellProof.path & slotProof.path)) + success Sample[SomeHash]( + cellData: cellData, merklePaths: 
(cellProof.path & slotProof.path) + ) -proc getProofInput*[T, H]( - self: DataSampler[T, H], entropy: ProofChallenge, nSamples: Natural -): Future[?!ProofInputs[H]] {.async: (raises: [CancelledError]).} = +proc getProofInput*[SomeTree, SomeHash]( + self: DataSampler[SomeTree, SomeHash], entropy: ProofChallenge, nSamples: Natural +): Future[?!ProofInputs[SomeHash]] {.async: (raises: [CancelledError]).} = ## Generate proofs as input to the proving circuit. ## let - entropy = H.fromBytes(array[31, byte].initCopyFrom(entropy[0 .. 30])) - # truncate to 31 bytes, otherwise it _might_ be greater than mod - - verifyTree = self.builder.verifyTree.toFailure.valueOr: - return failure("Failed to get verify tree") - - slotProof = verifyTree.getProof(self.index).valueOr: - return failure("Failed to get slot proof") - - datasetRoot = verifyTree.root().valueOr: - return failure("Failed to get dataset root") - + # truncate to 31 bytes, otherwise it _might_ be greater than mod + entropy = SomeHash.fromBytes(array[31, byte].initCopyFrom(entropy[0 .. 30])) + verifyTree = ?self.builder.verifyTree.toFailure + slotProof = ?verifyTree.getProof(self.index) + datasetRoot = ?verifyTree.root() slotTreeCid = self.builder.manifest.slotRoots[self.index] slotRoot = self.builder.slotRoots[self.index] cellIdxs = entropy.cellIndices(slotRoot, self.builder.numSlotCells, nSamples) @@ -108,10 +106,9 @@ proc getProofInput*[T, H]( trace "Collecting input for proof" let samples = collect(newSeq): for cellIdx in cellIdxs: - (await self.getSample(cellIdx, slotTreeCid, slotRoot)).valueOr: - return failure("Failed to get sample") + ?(await self.getSample(cellIdx, slotTreeCid, slotRoot)) - success ProofInputs[H]( + success ProofInputs[SomeHash]( entropy: entropy, datasetRoot: datasetRoot, slotProof: slotProof.path, @@ -122,12 +119,12 @@ proc getProofInput*[T, H]( samples: samples, ) -proc new*[T, H]( - _: type DataSampler[T, H], +proc new*[SomeTree, SomeHash]( + _: type DataSampler[SomeTree, SomeHash], index: Natural, blockStore: BlockStore, - builder: SlotsBuilder[T, H], -): ?!DataSampler[T, H] = + builder: SlotsBuilder[SomeTree, SomeHash], +): ?!DataSampler[SomeTree, SomeHash] = if index > builder.slotRoots.high: error "Slot index is out of range" return failure("Slot index is out of range") @@ -135,4 +132,6 @@ proc new*[T, H]( if not builder.verifiable: return failure("Cannot instantiate DataSampler for non-verifiable builder") - success DataSampler[T, H](index: index, blockStore: blockStore, builder: builder) + success DataSampler[SomeTree, SomeHash]( + index: index, blockStore: blockStore, builder: builder + ) diff --git a/codex/slots/types.nim b/codex/slots/types.nim index 0cd243261..aabba0caa 100644 --- a/codex/slots/types.nim +++ b/codex/slots/types.nim @@ -8,23 +8,23 @@ ## those terms. 
type - Sample*[H] = object - cellData*: seq[H] - merklePaths*: seq[H] + Sample*[SomeHash] = object + cellData*: seq[SomeHash] + merklePaths*: seq[SomeHash] - PublicInputs*[H] = object + PublicInputs*[SomeHash] = object slotIndex*: int - datasetRoot*: H - entropy*: H + datasetRoot*: SomeHash + entropy*: SomeHash - ProofInputs*[H] = object - entropy*: H - datasetRoot*: H + ProofInputs*[SomeHash] = object + entropy*: SomeHash + datasetRoot*: SomeHash slotIndex*: Natural - slotRoot*: H + slotRoot*: SomeHash nCellsPerSlot*: Natural nSlotsPerDataSet*: Natural - slotProof*: seq[H] + slotProof*: seq[SomeHash] # inclusion proof that shows that the slot root (leaf) is part of the dataset (root) - samples*: seq[Sample[H]] + samples*: seq[Sample[SomeHash]] # inclusion proofs which show that the selected cells (leafs) are part of the slot (roots) diff --git a/codex/stores/treehelper.nim b/codex/stores/treehelper.nim index e1f5d48d8..99f8cde1b 100644 --- a/codex/stores/treehelper.nim +++ b/codex/stores/treehelper.nim @@ -25,7 +25,7 @@ import ../merkletree proc putSomeProofs*( store: BlockStore, tree: CodexTree, iter: Iter[int] -): Future[?!void] {.async.} = +): Future[?!void] {.async: (raises: [CancelledError]).} = without treeCid =? tree.rootCid, err: return failure(err) @@ -51,8 +51,10 @@ proc putSomeProofs*( proc putSomeProofs*( store: BlockStore, tree: CodexTree, iter: Iter[Natural] -): Future[?!void] = +): Future[?!void] {.async: (raises: [CancelledError], raw: true).} = store.putSomeProofs(tree, iter.map((i: Natural) => i.ord)) -proc putAllProofs*(store: BlockStore, tree: CodexTree): Future[?!void] = +proc putAllProofs*( + store: BlockStore, tree: CodexTree +): Future[?!void] {.async: (raises: [CancelledError], raw: true).} = store.putSomeProofs(tree, Iter[int].new(0 ..< tree.leavesCount)) diff --git a/codex/utils/arrayutils.nim b/codex/utils/arrayutils.nim index e36a0cb34..c6721f6bb 100644 --- a/codex/utils/arrayutils.nim +++ b/codex/utils/arrayutils.nim @@ -1,5 +1,3 @@ -import std/sequtils - proc createDoubleArray*( outerLen, innerLen: int ): ptr UncheckedArray[ptr UncheckedArray[byte]] = diff --git a/tests/circuits/fixtures/proof_main.bin b/tests/circuits/fixtures/proof_main.bin new file mode 100644 index 000000000..6820a11b2 Binary files /dev/null and b/tests/circuits/fixtures/proof_main.bin differ diff --git a/tests/codex/slots/backends/helpers.nim b/tests/codex/slots/backends/helpers.nim index e1b6822a9..fe7f5c96d 100644 --- a/tests/codex/slots/backends/helpers.nim +++ b/tests/codex/slots/backends/helpers.nim @@ -19,13 +19,13 @@ func toJsonDecimal*(big: BigInt[254]): string = let s = big.toDecimal.strip(leading = true, trailing = false, chars = {'0'}) if s.len == 0: "0" else: s -func toJson*(g1: CircomG1): JsonNode = +func toJson*(g1: CircomCompatG1): JsonNode = %*{ "x": Bn254Fr.fromBytes(g1.x).get.toBig.toJsonDecimal, "y": Bn254Fr.fromBytes(g1.y).get.toBig.toJsonDecimal, } -func toJson*(g2: CircomG2): JsonNode = +func toJson*(g2: CircomCompatG2): JsonNode = %*{ "x": [ Bn254Fr.fromBytes(g2.x[0]).get.toBig.toJsonDecimal, @@ -38,8 +38,9 @@ func toJson*(g2: CircomG2): JsonNode = } proc toJson*(vpk: VerifyingKey): JsonNode = - let ic = - toSeq(cast[ptr UncheckedArray[CircomG1]](vpk.ic).toOpenArray(0, vpk.icLen.int - 1)) + let ic = toSeq( + cast[ptr UncheckedArray[CircomCompatG1]](vpk.ic).toOpenArray(0, vpk.icLen.int - 1) + ) echo ic.len %*{ diff --git a/tests/codex/slots/backends/testcircomcompat.nim b/tests/codex/slots/backends/testcircomcompat.nim index b61d4f188..91c04a66c 100644 --- 
a/tests/codex/slots/backends/testcircomcompat.nim +++ b/tests/codex/slots/backends/testcircomcompat.nim @@ -24,7 +24,7 @@ suite "Test Circom Compat Backend - control inputs": zkey = "tests/circuits/fixtures/proof_main.zkey" var - circom: CircomCompat + circom: CircomCompatBackendRef proofInputs: ProofInputs[Poseidon2Hash] setup: @@ -33,22 +33,20 @@ suite "Test Circom Compat Backend - control inputs": inputJson = !JsonNode.parse(inputData) proofInputs = Poseidon2Hash.jsonToProofInput(inputJson) - circom = CircomCompat.init(r1cs, wasm, zkey) + circom = CircomCompatBackendRef.new(r1cs, wasm, zkey).tryGet teardown: circom.release() # this comes from the rust FFI test "Should verify with correct inputs": - let proof = circom.prove(proofInputs).tryGet - - check circom.verify(proof, proofInputs).tryGet + let proof = (await circom.prove(proofInputs)).tryGet + check (await circom.verify(proof, proofInputs)).tryGet test "Should not verify with incorrect inputs": proofInputs.slotIndex = 1 # change slot index - let proof = circom.prove(proofInputs).tryGet - - check circom.verify(proof, proofInputs).tryGet == false + let proof = (await circom.prove(proofInputs)).tryGet + check (await circom.verify(proof, proofInputs)).tryGet == false suite "Test Circom Compat Backend": let @@ -72,7 +70,7 @@ suite "Test Circom Compat Backend": manifest: Manifest protected: Manifest verifiable: Manifest - circom: CircomCompat + circom: CircomCompatBackendRef proofInputs: ProofInputs[Poseidon2Hash] challenge: array[32, byte] builder: Poseidon2Builder @@ -92,7 +90,7 @@ suite "Test Circom Compat Backend": builder = Poseidon2Builder.new(store, verifiable).tryGet sampler = Poseidon2Sampler.new(slotId, store, builder).tryGet - circom = CircomCompat.init(r1cs, wasm, zkey) + circom = CircomCompatBackendRef.new(r1cs, wasm, zkey).tryGet challenge = 1234567.toF.toBytes.toArray32 proofInputs = (await sampler.getProofInput(challenge, samples)).tryGet @@ -103,13 +101,11 @@ suite "Test Circom Compat Backend": await metaTmp.destroyDb() test "Should verify with correct input": - var proof = circom.prove(proofInputs).tryGet - - check circom.verify(proof, proofInputs).tryGet + var proof = (await circom.prove(proofInputs)).tryGet + check (await circom.verify(proof, proofInputs)).tryGet test "Should not verify with incorrect input": proofInputs.slotIndex = 1 # change slot index - let proof = circom.prove(proofInputs).tryGet - - check circom.verify(proof, proofInputs).tryGet == false + let proof = (await circom.prove(proofInputs)).tryGet + check (await circom.verify(proof, proofInputs)).tryGet == false diff --git a/tests/codex/slots/backends/testnimgroth16.nim b/tests/codex/slots/backends/testnimgroth16.nim new file mode 100644 index 000000000..a7156fcc8 --- /dev/null +++ b/tests/codex/slots/backends/testnimgroth16.nim @@ -0,0 +1,119 @@ +import std/options +import std/isolation + +import ../../../asynctest + +import pkg/chronos +import pkg/poseidon2 +import pkg/serde/json +import pkg/taskpools + +import pkg/codex/slots {.all.} +import pkg/codex/slots/types {.all.} +import pkg/codex/merkletree +import pkg/codex/merkletree/poseidon2 +import pkg/codex/codextypes +import pkg/codex/manifest +import pkg/codex/stores + +import pkg/groth16 +import pkg/nim/circom_witnessgen +import pkg/nim/circom_witnessgen/load +import pkg/nim/circom_witnessgen/witness + +import ./helpers +import ../helpers +import ../../helpers + +suite "Test NimGoth16 Backend - control inputs": + let + graph = "tests/circuits/fixtures/proof_main.bin" + r1cs = 
"tests/circuits/fixtures/proof_main.r1cs" + zkey = "tests/circuits/fixtures/proof_main.zkey" + + var + nimGroth16: NimGroth16BackendRef + proofInputs: ProofInputs[Poseidon2Hash] + + setup: + let + inputData = readFile("tests/circuits/fixtures/input.json") + inputJson = !JsonNode.parse(inputData) + + proofInputs = Poseidon2Hash.jsonToProofInput(inputJson) + nimGroth16 = NimGroth16BackendRef.new(graph, r1cs, zkey, tp = Taskpool.new()).tryGet + + teardown: + nimGroth16.release() + + test "Should verify with correct inputs": + let proof = (await nimGroth16.prove(proofInputs)).tryGet + check (await nimGroth16.verify(proof)).tryGet + + test "Should not verify with incorrect inputs": + proofInputs.slotIndex = 1 # change slot index + + let proof = (await nimGroth16.prove(proofInputs)).tryGet + check (await nimGroth16.verify(proof)).tryGet == false + +suite "Test NimGoth16 Backend": + let + ecK = 2 + ecM = 2 + slotId = 3 + samples = 5 + numDatasetBlocks = 8 + blockSize = DefaultBlockSize + cellSize = DefaultCellSize + + graph = "tests/circuits/fixtures/proof_main.bin" + r1cs = "tests/circuits/fixtures/proof_main.r1cs" + zkey = "tests/circuits/fixtures/proof_main.zkey" + + repoTmp = TempLevelDb.new() + metaTmp = TempLevelDb.new() + + var + store: BlockStore + manifest: Manifest + protected: Manifest + verifiable: Manifest + nimGroth16: NimGroth16BackendRef + proofInputs: ProofInputs[Poseidon2Hash] + challenge: array[32, byte] + builder: Poseidon2Builder + sampler: Poseidon2Sampler + + setup: + let + repoDs = repoTmp.newDb() + metaDs = metaTmp.newDb() + + store = RepoStore.new(repoDs, metaDs) + + (manifest, protected, verifiable) = await createVerifiableManifest( + store, numDatasetBlocks, ecK, ecM, blockSize, cellSize + ) + + builder = Poseidon2Builder.new(store, verifiable).tryGet + sampler = Poseidon2Sampler.new(slotId, store, builder).tryGet + + nimGroth16 = NimGroth16BackendRef.new(graph, r1cs, zkey, tp = Taskpool.new()).tryGet + challenge = 1234567.toF.toBytes.toArray32 + + proofInputs = (await sampler.getProofInput(challenge, samples)).tryGet + + teardown: + nimGroth16.release() + await repoTmp.destroyDb() + await metaTmp.destroyDb() + + test "Should verify with correct input": + var proof = (await nimGroth16.prove(proofInputs)).tryGet + check (await nimGroth16.verify(proof)).tryGet + + test "Should not verify with incorrect input": + proofInputs.slotIndex = 1 # change slot index + + let proof = (await nimGroth16.prove(proofInputs)).tryGet + check (await nimGroth16.verify(proof)).tryGet == false diff --git a/tests/codex/slots/helpers.nim b/tests/codex/slots/helpers.nim index fced1f1c4..9394fd7c1 100644 --- a/tests/codex/slots/helpers.nim +++ b/tests/codex/slots/helpers.nim @@ -6,6 +6,7 @@ import pkg/libp2p/cid import pkg/codex/codextypes import pkg/codex/stores import pkg/codex/merkletree +import pkg/codex/utils/poseidon2digest import pkg/codex/manifest import pkg/codex/blocktype as bt import pkg/codex/chunker diff --git a/tests/codex/slots/testbackendfactory.nim b/tests/codex/slots/testbackendfactory.nim deleted file mode 100644 index a24bc41a5..000000000 --- a/tests/codex/slots/testbackendfactory.nim +++ /dev/null @@ -1,97 +0,0 @@ -import os -import ../../asynctest - -import pkg/chronos -import pkg/confutils/defs -import pkg/codex/conf -import pkg/codex/slots/proofs/backends -import pkg/codex/slots/proofs/backendfactory -import pkg/codex/slots/proofs/backendutils -import pkg/codex/utils/natutils - -import ../helpers -import ../examples - -type BackendUtilsMock = ref object of BackendUtils - 
argR1csFile: string - argWasmFile: string - argZKeyFile: string - -method initializeCircomBackend*( - self: BackendUtilsMock, r1csFile: string, wasmFile: string, zKeyFile: string -): AnyBackend = - self.argR1csFile = r1csFile - self.argWasmFile = wasmFile - self.argZKeyFile = zKeyFile - # We return a backend with *something* that's not nil that we can check for. - var - key = VerifyingKey(icLen: 123) - vkpPtr: ptr VerifyingKey = key.addr - return CircomCompat(vkp: vkpPtr) - -suite "Test BackendFactory": - let - utilsMock = BackendUtilsMock() - circuitDir = "testecircuitdir" - - setup: - createDir(circuitDir) - - teardown: - removeDir(circuitDir) - - test "Should create backend from cli config": - let - config = CodexConf( - cmd: StartUpCmd.persistence, - nat: NatConfig(hasExtIp: false, nat: NatNone), - metricsAddress: parseIpAddress("127.0.0.1"), - persistenceCmd: PersistenceCmd.prover, - marketplaceAddress: EthAddress.example.some, - circomR1cs: InputFile("tests/circuits/fixtures/proof_main.r1cs"), - circomWasm: InputFile("tests/circuits/fixtures/proof_main.wasm"), - circomZkey: InputFile("tests/circuits/fixtures/proof_main.zkey"), - ) - backend = config.initializeBackend(utilsMock).tryGet - - check: - backend.vkp != nil - utilsMock.argR1csFile == $config.circomR1cs - utilsMock.argWasmFile == $config.circomWasm - utilsMock.argZKeyFile == $config.circomZkey - - test "Should create backend from local files": - let - config = CodexConf( - cmd: StartUpCmd.persistence, - nat: NatConfig(hasExtIp: false, nat: NatNone), - metricsAddress: parseIpAddress("127.0.0.1"), - persistenceCmd: PersistenceCmd.prover, - marketplaceAddress: EthAddress.example.some, - - # Set the circuitDir such that the tests/circuits/fixtures/ files - # will be picked up as local files: - circuitDir: OutDir("tests/circuits/fixtures"), - ) - backend = config.initializeBackend(utilsMock).tryGet - - check: - backend.vkp != nil - utilsMock.argR1csFile == config.circuitDir / "proof_main.r1cs" - utilsMock.argWasmFile == config.circuitDir / "proof_main.wasm" - utilsMock.argZKeyFile == config.circuitDir / "proof_main.zkey" - - test "Should suggest usage of downloader tool when files not available": - let - config = CodexConf( - cmd: StartUpCmd.persistence, - nat: NatConfig(hasExtIp: false, nat: NatNone), - metricsAddress: parseIpAddress("127.0.0.1"), - persistenceCmd: PersistenceCmd.prover, - marketplaceAddress: EthAddress.example.some, - circuitDir: OutDir(circuitDir), - ) - backendResult = config.initializeBackend(utilsMock) - - check: - backendResult.isErr diff --git a/tests/codex/slots/testbackends.nim b/tests/codex/slots/testbackends.nim index b9994fcdf..f8f1b4508 100644 --- a/tests/codex/slots/testbackends.nim +++ b/tests/codex/slots/testbackends.nim @@ -1,3 +1,4 @@ import ./backends/testcircomcompat +import ./backends/testnimgroth16 {.warning[UnusedImport]: off.} diff --git a/tests/codex/slots/testprover.nim b/tests/codex/slots/testprover.nim index c567db55d..4d13a9a44 100644 --- a/tests/codex/slots/testprover.nim +++ b/tests/codex/slots/testprover.nim @@ -13,17 +13,19 @@ import pkg/confutils/defs import pkg/poseidon2/io import pkg/codex/utils/poseidon2digest import pkg/codex/nat +import pkg/taskpools import pkg/codex/utils/natutils import ./helpers import ../helpers -suite "Test Prover": +suite "Test CircomCompat Prover": let samples = 5 blockSize = DefaultBlockSize cellSize = DefaultCellSize repoTmp = TempLevelDb.new() metaTmp = TempLevelDb.new() + tp = Taskpool.new() challenge = 1234567.toF.toBytes.toArray32 var @@ -34,55 
+36,137 @@ suite "Test Prover": let repoDs = repoTmp.newDb() metaDs = metaTmp.newDb() - config = CodexConf( - cmd: StartUpCmd.persistence, - nat: NatConfig(hasExtIp: false, nat: NatNone), - metricsAddress: parseIpAddress("127.0.0.1"), - persistenceCmd: PersistenceCmd.prover, - circomR1cs: InputFile("tests/circuits/fixtures/proof_main.r1cs"), - circomWasm: InputFile("tests/circuits/fixtures/proof_main.wasm"), - circomZkey: InputFile("tests/circuits/fixtures/proof_main.zkey"), - numProofSamples: samples, + backend = CircomCompatBackendRef.new( + r1csPath = "tests/circuits/fixtures/proof_main.r1cs", + wasmPath = "tests/circuits/fixtures/proof_main.wasm", + zkeyPath = "tests/circuits/fixtures/proof_main.zkey", + ).tryGet + tp = Taskpool.new() + + store = RepoStore.new(repoDs, metaDs) + prover = Prover.new(backend, samples, tp) + + teardown: + await repoTmp.destroyDb() + await metaTmp.destroyDb() + + test "Should sample and prove a slot": + let + (_, _, verifiable) = await createVerifiableManifest( + store, + 8, # number of blocks in the original dataset (before EC) + 5, # ecK + 3, # ecM + blockSize, + cellSize, + ) + + builder = + Poseidon2Builder.new(store, verifiable, verifiable.verifiableStrategy).tryGet + sampler = Poseidon2Sampler.new(1, store, builder).tryGet + (_, checked) = + (await prover.prove(sampler, verifiable, challenge, verify = true)).tryGet + + check: + checked.isSome and checked.get == true + + test "Should generate valid proofs when slots consist of single blocks": + # To get single-block slots, we just need to set the number of blocks in + # the original dataset to be the same as ecK. The total number of blocks + # after generating random data for parity will be ecK + ecM, which will + # match the number of slots. + let + (_, _, verifiable) = await createVerifiableManifest( + store, + 2, # number of blocks in the original dataset (before EC) + 2, # ecK + 1, # ecM + blockSize, + cellSize, ) - backend = config.initializeBackend().tryGet() + + builder = + Poseidon2Builder.new(store, verifiable, verifiable.verifiableStrategy).tryGet + sampler = Poseidon2Sampler.new(1, store, builder).tryGet + (_, checked) = + (await prover.prove(sampler, verifiable, challenge, verify = true)).tryGet + + check: + checked.isSome and checked.get == true + +suite "Test NimGroth16 Prover": + let + samples = 5 + blockSize = DefaultBlockSize + cellSize = DefaultCellSize + repoTmp = TempLevelDb.new() + metaTmp = TempLevelDb.new() + tp = Taskpool.new() + challenge = 1234567.toF.toBytes.toArray32 + + var + store: BlockStore + prover: Prover + + setup: + let + tp = Taskpool.new() + repoDs = repoTmp.newDb() + metaDs = metaTmp.newDb() + backend = NimGroth16BackendRef.new( + r1csPath = "tests/circuits/fixtures/proof_main.r1cs", + graphPath = "tests/circuits/fixtures/proof_main.bin", + zkeyPath = "tests/circuits/fixtures/proof_main.zkey", + tp = tp, + ).tryGet store = RepoStore.new(repoDs, metaDs) - prover = Prover.new(store, backend, config.numProofSamples) + prover = Prover.new(backend, samples, tp) teardown: await repoTmp.destroyDb() await metaTmp.destroyDb() test "Should sample and prove a slot": - let (_, _, verifiable) = await createVerifiableManifest( - store, - 8, # number of blocks in the original dataset (before EC) - 5, # ecK - 3, # ecM - blockSize, - cellSize, - ) + let + (_, _, verifiable) = await createVerifiableManifest( + store, + 8, # number of blocks in the original dataset (before EC) + 5, # ecK + 3, # ecM + blockSize, + cellSize, + ) - let (inputs, proof) = (await prover.prove(1, 
verifiable, challenge)).tryGet + builder = + Poseidon2Builder.new(store, verifiable, verifiable.verifiableStrategy).tryGet + sampler = Poseidon2Sampler.new(1, store, builder).tryGet + (_, checked) = + (await prover.prove(sampler, verifiable, challenge, verify = true)).tryGet check: - (await prover.verify(proof, inputs)).tryGet == true + checked.isSome and checked.get == true test "Should generate valid proofs when slots consist of single blocks": # To get single-block slots, we just need to set the number of blocks in # the original dataset to be the same as ecK. The total number of blocks # after generating random data for parity will be ecK + ecM, which will # match the number of slots. - let (_, _, verifiable) = await createVerifiableManifest( - store, - 2, # number of blocks in the original dataset (before EC) - 2, # ecK - 1, # ecM - blockSize, - cellSize, - ) + let + (_, _, verifiable) = await createVerifiableManifest( + store, + 2, # number of blocks in the original dataset (before EC) + 2, # ecK + 1, # ecM + blockSize, + cellSize, + ) - let (inputs, proof) = (await prover.prove(1, verifiable, challenge)).tryGet + builder = + Poseidon2Builder.new(store, verifiable, verifiable.verifiableStrategy).tryGet + sampler = Poseidon2Sampler.new(1, store, builder).tryGet + (_, checked) = + (await prover.prove(sampler, verifiable, challenge, verify = true)).tryGet check: - (await prover.verify(proof, inputs)).tryGet == true + checked.isSome and checked.get == true diff --git a/tests/codex/slots/testproverfactory.nim b/tests/codex/slots/testproverfactory.nim new file mode 100644 index 000000000..e3a3f2113 --- /dev/null +++ b/tests/codex/slots/testproverfactory.nim @@ -0,0 +1,111 @@ +import os +import ../../asynctest + +import pkg/chronos +import pkg/taskpools + +import pkg/confutils/defs +import pkg/codex/conf +import pkg/codex/slots/proofs/backends +import pkg/codex/slots/proofs/proverfactory {.all.} +import pkg/codex/utils/natutils + +import ../helpers +import ../examples + +suite "Test BackendFactory": + let circuitDir = "testecircuitdir" + + setup: + createDir(circuitDir) + + teardown: + removeDir(circuitDir) + + test "Should initialize with correct nimGroth16 config files": + let config = CodexConf( + cmd: StartUpCmd.persistence, + nat: NatConfig(hasExtIp: false, nat: NatNone), + metricsAddress: parseIpAddress("127.0.0.1"), + persistenceCmd: PersistenceCmd.prover, + marketplaceAddress: EthAddress.example.some, + proverBackend: ProverBackendCmd.nimgroth16, + circomGraph: InputFile("tests/circuits/fixtures/proof_main.bin"), + circomR1cs: InputFile("tests/circuits/fixtures/proof_main.r1cs"), + circomZkey: InputFile("tests/circuits/fixtures/proof_main.zkey"), + ) + + check: + getGraphFile(config).tryGet == $config.circomGraph + getR1csFile(config).tryGet == $config.circomR1cs + getZkeyFile(config).tryGet == $config.circomZkey + + test "Should initialize with correct circom compat config files": + let config = CodexConf( + cmd: StartUpCmd.persistence, + nat: NatConfig(hasExtIp: false, nat: NatNone), + metricsAddress: parseIpAddress("127.0.0.1"), + persistenceCmd: PersistenceCmd.prover, + marketplaceAddress: EthAddress.example.some, + proverBackend: ProverBackendCmd.circomcompat, + circomWasm: InputFile("tests/circuits/fixtures/proof_main.wasm"), + circomR1cs: InputFile("tests/circuits/fixtures/proof_main.r1cs"), + circomZkey: InputFile("tests/circuits/fixtures/proof_main.zkey"), + ) + + check: + getWasmFile(config).tryGet == $config.circomWasm + getR1csFile(config).tryGet == $config.circomR1cs + 
getZkeyFile(config).tryGet == $config.circomZkey + + test "Should initialize circom compat from local directory": + let config = CodexConf( + cmd: StartUpCmd.persistence, + nat: NatConfig(hasExtIp: false, nat: NatNone), + metricsAddress: parseIpAddress("127.0.0.1"), + persistenceCmd: PersistenceCmd.prover, + marketplaceAddress: EthAddress.example.some, + proverBackend: ProverBackendCmd.circomcompat, + # Set the circuitDir such that the tests/circuits/fixtures/ files + # will be picked up as local files: + circuitDir: OutDir("tests/circuits/fixtures"), + ) + + check: + getR1csFile(config).tryGet == config.circuitDir / "proof_main.r1cs" + getWasmFile(config).tryGet == config.circuitDir / "proof_main.wasm" + getZkeyFile(config).tryGet == config.circuitDir / "proof_main.zkey" + + test "Should initialize nim groth16 from local directory": + let config = CodexConf( + cmd: StartUpCmd.persistence, + nat: NatConfig(hasExtIp: false, nat: NatNone), + metricsAddress: parseIpAddress("127.0.0.1"), + persistenceCmd: PersistenceCmd.prover, + marketplaceAddress: EthAddress.example.some, + proverBackend: ProverBackendCmd.nimgroth16, + # Set the circuitDir such that the tests/circuits/fixtures/ files + # will be picked up as local files: + circuitDir: OutDir("tests/circuits/fixtures"), + ) + + check: + getGraphFile(config).tryGet == config.circuitDir / "proof_main.bin" + getR1csFile(config).tryGet == config.circuitDir / "proof_main.r1cs" + getZkeyFile(config).tryGet == config.circuitDir / "proof_main.zkey" + + test "Should suggest usage of downloader tool when files not available": + let + config = CodexConf( + cmd: StartUpCmd.persistence, + nat: NatConfig(hasExtIp: false, nat: NatNone), + metricsAddress: parseIpAddress("127.0.0.1"), + persistenceCmd: PersistenceCmd.prover, + proverBackend: ProverBackendCmd.nimgroth16, + marketplaceAddress: EthAddress.example.some, + circuitDir: OutDir(circuitDir), + ) + proverResult = config.initializeProver(Taskpool.new()) + + check: + proverResult.isErr diff --git a/tests/codex/testslots.nim b/tests/codex/testslots.nim index 059de7c2f..9c1c9204f 100644 --- a/tests/codex/testslots.nim +++ b/tests/codex/testslots.nim @@ -3,6 +3,6 @@ import ./slots/testsampler import ./slots/testconverters import ./slots/testbackends import ./slots/testprover -import ./slots/testbackendfactory +import ./slots/testproverfactory {.warning[UnusedImport]: off.} diff --git a/tests/integration/1_minute/testcli.nim b/tests/integration/1_minute/testcli.nim index 778608b89..3cd576a7e 100644 --- a/tests/integration/1_minute/testcli.nim +++ b/tests/integration/1_minute/testcli.nim @@ -1,4 +1,6 @@ import std/tempfiles +import std/appdirs +import std/paths import codex/conf import codex/utils/fileutils import ../../asynctest @@ -10,51 +12,90 @@ import ../../examples asyncchecksuite "Command line interface": let key = "4242424242424242424242424242424242424242424242424242424242424242" + var tmpDataDir: string + setup: + # Ensure the key file is created with safe permissions + tmpDataDir = createTempDir(prefix = "testcli_", suffix = "", dir = $getTempDir()) + + teardown: + # Remove the temporary data directory after tests + discard removeDir(tmpDataDir) + proc startCodex(args: seq[string]): Future[CodexProcess] {.async.} = - return await CodexProcess.startNode(args, false, "cli-test-node") + var args = args + if not args.anyIt(it.contains("--data-dir")): + args.add("--data-dir=" & tmpDataDir) + + return await CodexProcess.startNode(args, debug = false, "cli-test-node") test "complains when persistence is 
enabled without ethereum account": let node = await startCodex(@["persistence"]) + + defer: + await node.stop() + await node.waitUntilOutput("Persistence enabled, but no Ethereum account was set") - await node.stop() test "complains when ethereum private key file has wrong permissions": let unsafeKeyFile = genTempPath("", "") discard unsafeKeyFile.writeFile(key, 0o666) let node = await startCodex(@["persistence", "--eth-private-key=" & unsafeKeyFile]) + + defer: + await node.stop() + discard removeFile(unsafeKeyFile) + await node.waitUntilOutput( "Ethereum private key file does not have safe file permissions" ) - await node.stop() - discard removeFile(unsafeKeyFile) - let - marketplaceArg = "--marketplace-address=" & $EthAddress.example - expectedDownloadInstruction = - "Proving circuit files are not found. Please run the following to download them:" + let expectedDownloadInstruction = + "Proving circuit files are not found. Please run the following to download them:" test "suggests downloading of circuit files when persistence is enabled without accessible r1cs file": - let node = await startCodex(@["persistence", "prover", marketplaceArg]) + let node = await startCodex( + @[ + "persistence", + "prover", + "--marketplace-address=" & $EthAddress.example, + "--prover-backend=nimgroth16", + ] + ) + + defer: + await node.stop() + await node.waitUntilOutput(expectedDownloadInstruction) - await node.stop() - test "suggests downloading of circuit files when persistence is enabled without accessible wasm file": + test "suggests downloading of circuit files when persistence is enabled without accessible zkey file": let node = await startCodex( @[ - "persistence", "prover", marketplaceArg, + "persistence", + "prover", + "--marketplace-address=" & $EthAddress.example, + "--prover-backend=nimgroth16", "--circom-r1cs=tests/circuits/fixtures/proof_main.r1cs", ] ) + + defer: + await node.stop() + await node.waitUntilOutput(expectedDownloadInstruction) - await node.stop() - test "suggests downloading of circuit files when persistence is enabled without accessible zkey file": + test "suggests downloading of circuit files when persistence is enabled without accessible graph file": let node = await startCodex( @[ - "persistence", "prover", marketplaceArg, + "persistence", + "prover", + "--marketplace-address=" & $EthAddress.example, + "--prover-backend=nimgroth16", "--circom-r1cs=tests/circuits/fixtures/proof_main.r1cs", - "--circom-wasm=tests/circuits/fixtures/proof_main.wasm", + "--circom-zkey=tests/circuits/fixtures/proof_main.zkey", ] ) + + defer: + await node.stop() + await node.waitUntilOutput(expectedDownloadInstruction) - await node.stop() diff --git a/tests/integration/30_minutes/testmarketplace.nim b/tests/integration/30_minutes/testmarketplace.nim index b04626c49..06c55e336 100644 --- a/tests/integration/30_minutes/testmarketplace.nim +++ b/tests/integration/30_minutes/testmarketplace.nim @@ -4,13 +4,28 @@ import ../../examples import ../../contracts/time import ../../contracts/deployment import ./../marketplacesuite +import ../../helpers import ../twonodes import ../nodeconfigs marketplacesuite(name = "Marketplace", stopOnRequestFail = true): let marketplaceConfig = NodeConfigs( - clients: CodexConfigs.init(nodes = 1).some, - providers: CodexConfigs.init(nodes = 1).some, + clients: CodexConfigs + .init(nodes = 1) + .debug() + .withLogFile() + .withLogTopics( + "codex", "codex slots builder", "codex slots sampler", "marketplace", "sales", + "statemachine", "slotqueue", "reservations", + ).some, + 
providers: CodexConfigs + .init(nodes = 1) + .debug() + .withLogFile() + .withLogTopics( + "codex", "codex slots builder", "codex slots sampler", "marketplace", "sales", + "statemachine", "slotqueue", "reservations", + ).some, ) var host: CodexClient @@ -50,6 +65,7 @@ marketplacesuite(name = "Marketplace", stopOnRequestFail = true): # client requests storage let cid = (await client.upload(data)).get + let id = await client.requestStorage( cid, duration = 20 * 60.uint64, @@ -133,14 +149,22 @@ marketplacesuite(name = "Marketplace", stopOnRequestFail = true): test "SP are able to process slots after workers were busy with other slots and ignored them", NodeConfigs( - clients: CodexConfigs.init(nodes = 1) - # .debug() - .some, - providers: CodexConfigs.init(nodes = 2) - # .debug() - # .withLogFile() - # .withLogTopics("marketplace", "sales", "statemachine","slotqueue", "reservations") - .some, + clients: CodexConfigs + .init(nodes = 1) + .debug() + .withLogFile() + .withLogTopics( + "codex", "codex slots builder", "codex slots sampler", "marketplace", "sales", + "statemachine", "slotqueue", "reservations", + ).some, + providers: CodexConfigs + .init(nodes = 2) + .debug() + .withLogFile() + .withLogTopics( + "codex", "codex slots builder", "codex slots sampler", "marketplace", "sales", + "statemachine", "slotqueue", "reservations", + ).some, ): let client0 = clients()[0] let provider0 = providers()[0] @@ -217,23 +241,25 @@ marketplacesuite(name = "Marketplace payouts", stopOnRequestFail = true): NodeConfigs( # Uncomment to start Hardhat automatically, typically so logs can be inspected locally hardhat: HardhatConfig.none, - clients: CodexConfigs.init(nodes = 1) - # .debug() # uncomment to enable console log output.debug() - # .withLogFile() - # # uncomment to output log file to tests/integration/logs/ //_.log - # .withLogTopics("node", "erasure") - .some, - providers: CodexConfigs.init(nodes = 1) - # .debug() # uncomment to enable console log output - # .withLogFile() - # # uncomment to output log file to tests/integration/logs/ //_.log - # .withLogTopics( - # "node", "marketplace", "sales", "reservations", "node", "statemachine" - # ) - .some, + clients: CodexConfigs + .init(nodes = 1) + .debug() + .withLogFile() + .withLogTopics( + "codex", "codex slots builder", "codex slots sampler", "marketplace", "sales", + "statemachine", "slotqueue", "reservations", "erasure", + ).some, + providers: CodexConfigs + .init(nodes = 1) + .debug() + .withLogFile() + .withLogTopics( + "codex", "codex slots builder", "codex slots sampler", "marketplace", "sales", + "statemachine", "slotqueue", "reservations", "erasure", + ).some, ): - let duration = 20.periods - let expiry = 10.periods + let duration = 6.periods + let expiry = 4.periods let data = await RandomChunker.example(blocks = blocks) let client = clients()[0] let provider = providers()[0] @@ -243,15 +269,14 @@ marketplacesuite(name = "Marketplace payouts", stopOnRequestFail = true): let startBalanceClient = await token.balanceOf(client.ethAccount) # provider makes storage available - let datasetSize = datasetSize(blocks, ecNodes, ecTolerance) - let totalAvailabilitySize = (datasetSize div 2).truncate(uint64) + let slotSize = slotSize(blocks, ecNodes, ecTolerance) discard await providerApi.postAvailability( # make availability size small enough that we can't fill all the slots, # thus causing a cancellation - totalSize = totalAvailabilitySize, + totalSize = slotSize.truncate(uint64), duration = duration.uint64, minPricePerBytePerSecond = 
minPricePerBytePerSecond, - totalCollateral = collateralPerByte * totalAvailabilitySize.u256, + totalCollateral = collateralPerByte * slotSize, ) let cid = (await clientApi.upload(data)).get @@ -289,11 +314,10 @@ marketplacesuite(name = "Marketplace payouts", stopOnRequestFail = true): # wait until sale is cancelled await ethProvider.advanceTime(expiry.u256) - await requestCancelledEvent.wait().wait(timeout = chronos.seconds(5)) + await requestCancelledEvent.wait().wait(timeout = chronos.seconds(expiry.int + 10)) await advanceToNextPeriod() - let slotSize = slotSize(blocks, ecNodes, ecTolerance) let pricePerSlotPerSecond = minPricePerBytePerSecond * slotSize check eventually ( @@ -317,16 +341,22 @@ marketplacesuite(name = "Marketplace payouts", stopOnRequestFail = true): test "the collateral is returned after a sale is ignored", NodeConfigs( hardhat: HardhatConfig.none, - clients: CodexConfigs.init(nodes = 1).some, - providers: CodexConfigs.init(nodes = 3) - # .debug() - # uncomment to enable console log output - # .withLogFile() - # uncomment to output log file to tests/integration/logs/ //_.log - # .withLogTopics( - # "node", "marketplace", "sales", "reservations", "statemachine" - # ) - .some, + clients: CodexConfigs + .init(nodes = 1) + .debug() + .withLogFile() + .withLogTopics( + "codex", "codex slots builder", "codex slots sampler", "marketplace", "sales", + "statemachine", "slotqueue", "reservations", "erasure", + ).some, + providers: CodexConfigs + .init(nodes = 3) + .debug() + .withLogFile() + .withLogTopics( + "codex", "codex slots builder", "codex slots sampler", "marketplace", "sales", + "statemachine", "slotqueue", "reservations", "erasure", + ).some, ): let data = await RandomChunker.example(blocks = blocks) let client0 = clients()[0] @@ -394,4 +424,5 @@ marketplacesuite(name = "Marketplace payouts", stopOnRequestFail = true): availability.totalRemainingCollateral == availableSlots * slotSize * minPricePerBytePerSecond, timeout = 30 * 1000, + pollInterval = 100, ) diff --git a/tests/integration/30_minutes/testproofs.nim b/tests/integration/30_minutes/testproofs.nim index b06e4d824..f7a2cd101 100644 --- a/tests/integration/30_minutes/testproofs.nim +++ b/tests/integration/30_minutes/testproofs.nim @@ -64,17 +64,18 @@ marketplacesuite(name = "Hosts submit regular proofs", stopOnRequestFail = false let slotSize = slotSize(blocks, ecNodes, ecTolerance) - discard await waitForRequestToStart(expiry.int) + discard await waitForRequestToStart(expiry.int + 10) var proofWasSubmitted = false proc onProofSubmitted(event: ?!ProofSubmitted) = proofWasSubmitted = event.isOk - let subscription = await marketplace.subscribe(ProofSubmitted, onProofSubmitted) + let proofSubmittedSubscription = + await marketplace.subscribe(ProofSubmitted, onProofSubmitted) check eventually(proofWasSubmitted, timeout = (duration - expiry).int * 1000) - await subscription.unsubscribe() + await proofSubmittedSubscription.unsubscribe() marketplacesuite(name = "Simulate invalid proofs", stopOnRequestFail = false): # TODO: these are very loose tests in that they are not testing EXACTLY how @@ -117,7 +118,7 @@ marketplacesuite(name = "Simulate invalid proofs", stopOnRequestFail = false): .some, ): let client0 = clients()[0].client - let expiry = 10.periods + let expiry = 15.periods let duration = expiry + 10.periods let data = await RandomChunker.example(blocks = blocks) diff --git a/tests/integration/5_minutes/testsales.nim b/tests/integration/5_minutes/testsales.nim index 246d8fc7d..29e636e02 100644 --- 
a/tests/integration/5_minutes/testsales.nim +++ b/tests/integration/5_minutes/testsales.nim @@ -140,7 +140,7 @@ marketplacesuite(name = "Sales", stopOnRequestFail = true): tolerance = 1, ) - discard await waitForRequestToStart() + discard await waitForRequestToStart((10 * 60) + 10) let updatedAvailability = ((await host.getAvailabilities()).get).findItem(availability).get @@ -215,7 +215,7 @@ marketplacesuite(name = "Sales", stopOnRequestFail = true): ) ).get - discard await waitForRequestToStart() + discard await waitForRequestToStart((10 * 60) + 10) let purchase = (await client.getPurchase(id)).get check purchase.error == none string diff --git a/tests/integration/codexclient.nim b/tests/integration/codexclient.nim index 17ed6dd4c..699c376dd 100644 --- a/tests/integration/codexclient.nim +++ b/tests/integration/codexclient.nim @@ -87,12 +87,20 @@ proc getContent( client: CodexClient, url: string, headers: seq[HttpHeaderTuple] = @[] ): Future[string] {.async: (raises: [CancelledError, HttpError]).} = let response = await client.get(url, headers) + + defer: + await response.closeWait() + return await response.body proc info*( client: CodexClient ): Future[?!JsonNode] {.async: (raises: [CancelledError, HttpError]).} = let response = await client.get(client.baseurl & "/debug/info") + + defer: + await response.closeWait() + return JsonNode.parse(await response.body) proc setLogLevel*( @@ -102,6 +110,10 @@ proc setLogLevel*( url = client.baseurl & "/debug/chronicles/loglevel?level=" & level headers = @[("Content-Type", "text/plain")] response = await client.post(url, headers = headers, body = "") + + defer: + await response.closeWait() + assert response.status == 200 proc uploadRaw*( @@ -115,6 +127,10 @@ proc upload*( client: CodexClient, contents: string ): Future[?!Cid] {.async: (raises: [CancelledError, HttpError]).} = let response = await client.uploadRaw(contents) + + defer: + await response.closeWait() + assert response.status == 200 Cid.init(await response.body).mapFailure @@ -136,6 +152,9 @@ proc downloadBytes*( ): Future[?!seq[byte]] {.async: (raises: [CancelledError, HttpError]).} = let response = await client.downloadRaw($cid, local = local) + defer: + await response.closeWait() + if response.status != 200: return failure($response.status) @@ -153,6 +172,9 @@ proc downloadNoStream*( ): Future[?!string] {.async: (raises: [CancelledError, HttpError]).} = let response = await client.post(client.baseurl & "/data/" & $cid & "/network") + defer: + await response.closeWait() + if response.status != 200: return failure($response.status) @@ -164,6 +186,9 @@ proc downloadManifestOnly*( let response = await client.get(client.baseurl & "/data/" & $cid & "/network/manifest") + defer: + await response.closeWait() + if response.status != 200: return failure($response.status) @@ -181,6 +206,9 @@ proc delete*( ): Future[?!void] {.async: (raises: [CancelledError, HttpError]).} = let response = await client.deleteRaw($cid) + defer: + await response.closeWait() + if response.status != 204: return failure($response.status) @@ -198,6 +226,9 @@ proc list*( ): Future[?!RestContentList] {.async: (raises: [CancelledError, HttpError]).} = let response = await client.listRaw() + defer: + await response.closeWait() + if response.status != 200: return failure($response.status) @@ -209,6 +240,9 @@ proc space*( let url = client.baseurl & "/space" let response = await client.get(url) + defer: + await response.closeWait() + if response.status != 200: return failure($response.status) @@ -265,6 +299,9 @@ proc 
requestStorage*( ) body = await response.body + defer: + await response.closeWait() + if response.status != 200: doAssert(false, body) PurchaseId.fromHex(body).catch @@ -326,6 +363,9 @@ proc postAvailability*( until = until, ) + defer: + await response.closeWait() + let body = await response.body doAssert response.status == 201, @@ -389,6 +429,10 @@ proc patchAvailability*( enabled = enabled, until = until, ) + + defer: + await response.closeWait() + doAssert response.status == 204, "expected No Content, got " & $response.status proc getAvailabilities*( diff --git a/tests/integration/multinodes.nim b/tests/integration/multinodes.nim index 42fff1576..c37b7b216 100644 --- a/tests/integration/multinodes.nim +++ b/tests/integration/multinodes.nim @@ -85,7 +85,7 @@ template multinodesuite*(name: string, body: untyped) = # If you want to use a different provider url in the nodes, you can # use withEthProvider config modifier in the node config # to set the desired provider url. E.g.: - # NodeConfigs( + # NodeConfigs( # hardhat: # HardhatConfig.none, # clients: @@ -230,15 +230,15 @@ template multinodesuite*(name: string, body: untyped) = ) config.addCliOption( PersistenceCmd.prover, "--circom-r1cs", - "vendor/codex-contracts-eth/verifier/networks/hardhat/proof_main.r1cs", + "tests/circuits/fixtures/proof_main.r1cs", ) config.addCliOption( - PersistenceCmd.prover, "--circom-wasm", - "vendor/codex-contracts-eth/verifier/networks/hardhat/proof_main.wasm", + PersistenceCmd.prover, "--circom-graph", + "tests/circuits/fixtures/proof_main.bin", ) config.addCliOption( PersistenceCmd.prover, "--circom-zkey", - "vendor/codex-contracts-eth/verifier/networks/hardhat/proof_main.zkey", + "tests/circuits/fixtures/proof_main.zkey", ) return await newCodexProcess(providerIdx, config, Role.Provider) diff --git a/vendor/circom-witnessgen b/vendor/circom-witnessgen new file mode 160000 index 000000000..1291cf0e6 --- /dev/null +++ b/vendor/circom-witnessgen @@ -0,0 +1 @@ +Subproject commit 1291cf0e62ddd829b7be14cccc301af82e033391 diff --git a/vendor/nim-goldilocks-hash b/vendor/nim-goldilocks-hash new file mode 160000 index 000000000..3c5a2bea1 --- /dev/null +++ b/vendor/nim-goldilocks-hash @@ -0,0 +1 @@ +Subproject commit 3c5a2bea154b0712ac9576c17dc4d92c00fe003e diff --git a/vendor/nim-groth16 b/vendor/nim-groth16 new file mode 160000 index 000000000..434170541 --- /dev/null +++ b/vendor/nim-groth16 @@ -0,0 +1 @@ +Subproject commit 434170541e154bcac37a3306d835d2dfe1e81549
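
For readers wiring this up outside the test suite: the new proverfactory.nim added above exposes initializeProver(config, tp), which dispatches on config.proverBackend — the nimgroth16 path resolves the graph/r1cs/zkey files (via getGraphFile/getR1csFile/getZkeyFile, falling back from the explicit --circom-* options to the files under circuitDir), while the circomcompat path resolves r1cs/wasm/zkey — and then builds a Prover through the reworked Prover.new(backend, nSamples, tp) constructors. The sketch below is illustrative only: it mirrors the configuration used in tests/codex/slots/testproverfactory.nim, and the fixture paths, the import list and the standalone framing are assumptions for illustration, not part of this change.

# Minimal usage sketch (assumptions: run from the repository root so the
# tests/circuits/fixtures/* paths resolve, and imports mirror
# tests/codex/slots/testproverfactory.nim from this diff).
import std/net # parseIpAddress
import pkg/taskpools
import pkg/confutils/defs # InputFile
import pkg/codex/conf
import pkg/codex/slots/proofs/backends
import pkg/codex/slots/proofs/proverfactory
import pkg/codex/utils/natutils

let config = CodexConf(
  cmd: StartUpCmd.persistence,
  nat: NatConfig(hasExtIp: false, nat: NatNone),
  metricsAddress: parseIpAddress("127.0.0.1"),
  persistenceCmd: PersistenceCmd.prover,
  proverBackend: ProverBackendCmd.nimgroth16, # or ProverBackendCmd.circomcompat
  circomGraph: InputFile("tests/circuits/fixtures/proof_main.bin"),
  circomR1cs: InputFile("tests/circuits/fixtures/proof_main.r1cs"),
  circomZkey: InputFile("tests/circuits/fixtures/proof_main.zkey"),
  # marketplaceAddress is left unset in this sketch; note that when the
  # circuit files cannot be found, suggestDownloadTool asserts that it is
  # set (it is part of the cirdl download hint), so a real prover
  # configuration should always provide it.
)

let tp = Taskpool.new()

# initializeProver validates the circuit files for the selected backend and
# returns ?!Prover; on failure it logs the cirdl download instructions and
# returns the error instead of raising.
let res = config.initializeProver(tp)
if res.isErr:
  echo "prover initialization failed: ", res.error.msg

On the command line the same selection is exposed through --prover-backend together with --circom-graph, --circom-r1cs and --circom-zkey, as exercised by tests/integration/1_minute/testcli.nim and by the provider configuration in tests/integration/multinodes.nim above.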