diff --git a/.github/workflows/sync-master-main.yaml b/.github/workflows/sync-master-main.yaml index 6a7e9a65002..05a60f08969 100644 --- a/.github/workflows/sync-master-main.yaml +++ b/.github/workflows/sync-master-main.yaml @@ -3,6 +3,10 @@ on: push: branches: - master + +permissions: + contents: write + jobs: sync: runs-on: ubuntu-latest diff --git a/.gitignore b/.gitignore index ea354ba7388..d620e557b4f 100644 --- a/.gitignore +++ b/.gitignore @@ -7,6 +7,7 @@ /lotus-shed /lotus-sim /curio +/sptool /lotus-townhall /lotus-fountain /lotus-stats @@ -36,7 +37,6 @@ build/paramfetch.sh /darwin /linux *.snap -curio devgen.car localnet.json diff --git a/CHANGELOG.md b/CHANGELOG.md index 89aad3563c6..e724b49717b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,16 +3,13 @@ # UNRELEASED ## New features -- feat: CLI: add claim-extend cli (#11711) ([filecoin-project/lotus#11711](https://github.com/filecoin-project/lotus/pull/11711)) ## Improvements -# v1.26.0-rc2 / 2024-03-0y +# v1.26.0 / 2024-03-21 -This is a release candidate of the upcoming MANDATORY Lotus v1.26.0 release, which will deliver the Filecoin network version 22, codenamed Dragon 🐉. +This is the stable release for the upcoming MANDATORY Filecoin network upgrade v22, codenamed Dragon 🐉, which takes effect at epoch 3817920 (2024-04-11T14:00:00Z). -**This release candidate sets the calibration network to upgrade at epoch 1427974, which is 2024-03-11T14:00:00Z** -This release does NOT set the mainnet upgrade epoch yet, in which will be updated in the final release. The Filecoin network version 22 delivers the following FIPs: - [FIP-0063: Switching to new Drand mainnet network](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0063.md) @@ -20,33 +17,38 @@ The Filecoin network version 22 delivers the following FIPs: - [FIP-0076: Direct data onboarding](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0076.md) - [FIP-0083: Add built-in Actor events in the Verified Registry, Miner and Market Actors](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0083.md) +## ☢️ Upgrade Warnings ☢️ + +- This release requires Go v1.21.7 or higher to build Lotus. + ## v13 Builtin Actor Bundle -The actor bundles for the **calibration network** can be checked as follows: +[Builtin actor v13.0.0](https://github.com/filecoin-project/builtin-actors/releases/tag/v13.0.0) is used for this upgrade. 
Make sure that your lotus actor bundle matches the v13 actors manifest by running the following cli after upgrading: ``` lotus state actor-cids --network-version=22 Network Version: 22 Actor Version: 13 -Manifest CID: bafy2bzacea4firkyvt2zzdwqjrws5pyeluaesh6uaid246tommayr4337xpmi + +Manifest CID: bafy2bzacecdhvfmtirtojwhw2tyciu4jkbpsbk5g53oe24br27oy62sn4dc4e Actor CID -account bafk2bzaceb3j36ri5y5mfklgp5emlvrms6g4733ss2j3l7jismrxq6ng3tcc6 -cron bafk2bzaceaz6rocamdxehgpwcbku6wlapwpgzyyvkrploj66mlqptsulf52bs -datacap bafk2bzacea22nv5g3yngpxvonqfj4r2nkfk64y6yw2malicm7odk77x7zuads -eam bafk2bzaceatqtjzj7623i426noaslouvluhz6e3md3vvquqzku5qj3532uaxg -ethaccount bafk2bzacean3hs7ga5csw6g3uu7watxfnqv5uvxviebn3ba6vg4sagwdur5pu -evm bafk2bzacec5ibmbtzuzjgwjmksm2n6zfq3gkicxqywwu7tsscqgdzajpfctxk -init bafk2bzaced5sq72oemz6qwi6yssxwlos2g54zfprslrx5qfhhx2vlgsbvdpcs -multisig bafk2bzacedbgei6jkx36fwdgvoohce4aghvpohqdhoco7p4thszgssms7olv2 -paymentchannel bafk2bzaceasmgmfsi4mjanxlowsub65fmevhzky4toeqbtw4kp6tmu4kxjpgq +account bafk2bzacedxnbtlsqdk76fsfmnhyvsblwyfducerwwtp3mqtx2wbrvs5idl52 +cron bafk2bzacebbopddyn5csb3fsuhh2an4ttd23x6qnwixgohlirj5ahtcudphyc +datacap bafk2bzaceah42tfnhd7xnztawgf46gbvc3m2gudoxshlba2ucmmo2vy67t7ci +eam bafk2bzaceb23bhvvcjsth7cn7vp3gbaphrutsaz7v6hkls3ogotzs4bnhm4mk +ethaccount bafk2bzaceautge6zhuy6jbj3uldwoxwhpywuon6z3xfvmdbzpbdribc6zzmei +evm bafk2bzacedq6v2lyuhgywhlllwmudfj2zufzcauxcsvvd34m2ek5xr55mvh2q +init bafk2bzacedr4xacm3fts4vilyeiacjr2hpmwzclyzulbdo24lrfxbtau2wbai +multisig bafk2bzacecr5zqarfqak42xqcfeulsxlavcltawsx2fvc7zsjtby6ti4b3wqc +paymentchannel bafk2bzacebntdhfmyc24e7tm52ggx5tnw4i3hrr3jmllsepv3mibez4hywsa2 placeholder bafk2bzacedfvut2myeleyq67fljcrw4kkmn5pb5dpyozovj7jpoez5irnc3ro -reward bafk2bzacedjyp6ll5ez27dfgldjj4tntxfvyp4pa5zkk7s5uhipzqjyx2gmuc -storagemarket bafk2bzaceabolct6qdnefwcrtati2us3sxtxfghyqk6aamfhl6byyefmtssqi -storageminer bafk2bzaceckzw3v7wqliyggvjvihz4wywchnnsie4frfvkm3fm5znb64mofri -storagepower bafk2bzacea7t4wynzjajl442mpdqbnh3wusjusqtnzgpvefvweh4n2tgzgqhu -system bafk2bzacedjnrb5glewazsxpcx6rwiuhl4kwrfcqolyprn6rrjtlzmthlhdq6 -verifiedregistry bafk2bzacednskl3bykz5qpo54z2j2p4q44t5of4ktd6vs6ymmg2zebsbxazkm +reward bafk2bzacedq4q2kwkruu4xm7rkyygumlbw2yt4nimna2ivea4qarvtkohnuwu +storagemarket bafk2bzacebjtoltdviyznpj34hh5qp6u257jnnbjole5rhqfixm7ug3epvrfu +storageminer bafk2bzacebf4rrqyk7gcfggggul6nfpzay7f2ordnkwm7z2wcf4mq6r7i77t2 +storagepower bafk2bzacecjy4dkulvxppg3ocbmeixe2wgg6yxoyjxrm4ko2fm3uhpvfvam6e +system bafk2bzacecyf523quuq2kdjfdvyty446z2ounmamtgtgeqnr3ynlu5cqrlt6e +verifiedregistry bafk2bzacedkxehp7y7iyukbcje3wbpqcvufisos6exatkanyrbotoecdkrbta ``` ## Migration @@ -63,7 +65,6 @@ For certain node operators, such as full archival nodes or systems that need to ## New features - feat: api: new verified registry methods to get all allocations and claims (#11631) ([filecoin-project/lotus#11631](https://github.com/filecoin-project/lotus/pull/11631)) -- new: add forest bootstrap nodes (#11636) ([filecoin-project/lotus#11636](https://github.com/filecoin-project/lotus/pull/11636)) - feat: sealing: Support nv22 DDO features in the sealing pipeline (#11226) ([filecoin-project/lotus#11226](https://github.com/filecoin-project/lotus/pull/11226)) - feat: implement FIP-0063 ([filecoin-project/lotus#11572](https://github.com/filecoin-project/lotus/pull/11572)) - feat: events: Add Lotus APIs to consume smart contract and built-in actor events ([filecoin-project/lotus#11618](https://github.com/filecoin-project/lotus/pull/11618)) @@ -150,7 +151,14 @@ Both 
`GetActorEventsRaw` and `SubscribeActorEventsRaw` take a filter parameter w A future Lotus release may include `GetActorEvents` and `SubscribeActorEvents` methods which will provide a more user-friendly interface to actor events, including deserialization of event data. +### Events Configuration Changes + +All configuration options previously under `Fevm.Events` are now in the top-level `Events` section along with the new `Events.EnableActorEventsAPI` option mentioned above. If you have non-default options in `[Events]` under `[Fevm]` in your configuration file, please move them to the top-level `[Events]`. + +While `Fevm.Events.*` options are deprecated and replaced by `Events.*`, any existing custom values will be respected if their new form isn't set, but a warning will be printed to standard error upon startup. Support for these deprecated options will be removed in a future Lotus release, so please migrate your configuration promptly. + ### GetAllClaims and GetAllAlocations + Additionally the methods `GetAllAllocations` and `GetAllClaims` has been added to the Lotus API. These methods lists all the available allocations and claims available in the actor state. ### Lotus CLI @@ -185,11 +193,12 @@ OPTIONS: ``` ## Dependencies -- github.com/filecoin-project/go-state-types (v0.12.8 -> v0.13.0-rc.2) +- github.com/filecoin-project/go-state-types (v0.12.8 -> v0.13.1) - chore: deps: update to go-state-types v13.0.0-rc.1 ([filecoin-project/lotus#11662](https://github.com/filecoin-project/lotus/pull/11662)) - chore: deps: update to go-state-types v13.0.0-rc.2 ([filecoin-project/lotus#11675](https://github.com/filecoin-project/lotus/pull/11675)) - chore: deps: update to go-multiaddr v0.12.2 (#11602) ([filecoin-project/lotus#11602](https://github.com/filecoin-project/lotus/pull/11602)) - feat: fvm: update the FVM/FFI to v4.1 (#11608) (#11612) ([filecoin-project/lotus#11612](https://github.com/filecoin-project/lotus/pull/11612)) +- chore: deps: update builtin-actors, GST, verified claims tests ([filecoin-project/lotus#11768](https://github.com/filecoin-project/lotus/pull/11768)) ## Others - Remove PL operated bootstrap nodes from mainnet.pi ([filecoin-project/lotus#11491](https://github.com/filecoin-project/lotus/pull/11491)) @@ -199,6 +208,11 @@ OPTIONS: - fix: add UpgradePhoenixHeight to StateGetNetworkParams (#11648) ([filecoin-project/lotus#11648](https://github.com/filecoin-project/lotus/pull/11648)) - feat: drand quicknet: allow scheduling drand quicknet upgrade before nv22 on 2k devnet ([filecoin-project/lotus#11667]https://github.com/filecoin-project/lotus/pull/11667) - chore: backport #11632 to release/v1.26.0 ([filecoin-project/lotus#11667](https://github.com/filecoin-project/lotus/pull/11667)) +- release: bump to v1.26.0-rc2 ([filecoin-project/lotus#11691](https://github.com/filecoin-project/lotus/pull/11691)) +- Docs: Drand: document the meaning of "IsChained ([filecoin-project/lotus#11692](https://github.com/filecoin-project/lotus/pull/11692)) +- chore: remove old calibnet bootstrappers ([filecoin-project/lotus#11702](https://github.com/filecoin-project/lotus/pull/11702)) +- chore: Add lotus-provider to build to match install ([filecoin-project/lotus#11616](https://github.com/filecoin-project/lotus/pull/11616)) +- new: add forest bootstrap nodes (#11636) ([filecoin-project/lotus#11636](https://github.com/filecoin-project/lotus/pull/11636)) # v1.25.2 / 2024-01-11 diff --git a/Makefile b/Makefile index 6a1c9cc8a8e..83e3fd89da2 100644 --- a/Makefile +++ b/Makefile @@ -66,7 +66,7 @@ 
CLEAN+=build/.update-modules deps: $(BUILD_DEPS) .PHONY: deps -build-devnets: build lotus-seed lotus-shed curio +build-devnets: build lotus-seed lotus-shed curio sptool .PHONY: build-devnets debug: GOFLAGS+=-tags=debug @@ -106,6 +106,12 @@ BINS+=curio cu2k: GOFLAGS+=-tags=2k cu2k: curio +sptool: $(BUILD_DEPS) + rm -f sptool + $(GOCC) build $(GOFLAGS) -o sptool ./cmd/sptool +.PHONY: sptool +BINS+=sptool + lotus-worker: $(BUILD_DEPS) rm -f lotus-worker $(GOCC) build $(GOFLAGS) -o lotus-worker ./cmd/lotus-worker @@ -124,13 +130,13 @@ lotus-gateway: $(BUILD_DEPS) .PHONY: lotus-gateway BINS+=lotus-gateway -build: lotus lotus-miner lotus-worker curio +build: lotus lotus-miner lotus-worker curio sptool @[[ $$(type -P "lotus") ]] && echo "Caution: you have \ an existing lotus binary in your PATH. This may cause problems if you don't run 'sudo make install'" || true .PHONY: build -install: install-daemon install-miner install-worker install-curio +install: install-daemon install-miner install-worker install-curio install-sptool install-daemon: install -C ./lotus /usr/local/bin/lotus @@ -141,6 +147,9 @@ install-miner: install-curio: install -C ./curio /usr/local/bin/curio +install-sptool: + install -C ./sptool /usr/local/bin/sptool + install-worker: install -C ./lotus-worker /usr/local/bin/lotus-worker @@ -159,6 +168,9 @@ uninstall-miner: uninstall-curio: rm -f /usr/local/bin/curio +uninstall-sptool: + rm -f /usr/local/bin/sptool + uninstall-worker: rm -f /usr/local/bin/lotus-worker @@ -260,7 +272,7 @@ install-miner-service: install-miner install-daemon-service @echo "To start the service, run: 'sudo systemctl start lotus-miner'" @echo "To enable the service on startup, run: 'sudo systemctl enable lotus-miner'" -install-curio-service: install-curio install-daemon-service +install-curio-service: install-curio install-sptool install-daemon-service mkdir -p /etc/systemd/system mkdir -p /var/log/lotus install -C -m 0644 ./scripts/curio.service /etc/systemd/system/curio.service @@ -401,12 +413,12 @@ gen: actors-code-gen type-gen cfgdoc-gen docsgen api-gen circleci jen: gen -snap: lotus lotus-miner lotus-worker curio +snap: lotus lotus-miner lotus-worker curio sptool snapcraft # snapcraft upload ./lotus_*.snap # separate from gen because it needs binaries -docsgen-cli: lotus lotus-miner lotus-worker curio +docsgen-cli: lotus lotus-miner lotus-worker curio sptool python3 ./scripts/generate-lotus-cli.py ./lotus config default > documentation/en/default-lotus-config.toml ./lotus-miner config default > documentation/en/default-lotus-miner-config.toml diff --git a/blockstore/ipfs.go b/blockstore/ipfs.go index 8e4224535df..f0606519ffb 100644 --- a/blockstore/ipfs.go +++ b/blockstore/ipfs.go @@ -5,9 +5,7 @@ import ( "context" "io" - iface "github.com/ipfs/boxo/coreiface" - "github.com/ipfs/boxo/coreiface/options" - "github.com/ipfs/boxo/coreiface/path" + "github.com/ipfs/boxo/path" blocks "github.com/ipfs/go-block-format" "github.com/ipfs/go-cid" "github.com/multiformats/go-multiaddr" @@ -15,6 +13,8 @@ import ( "golang.org/x/xerrors" rpc "github.com/filecoin-project/kubo-api-client" + iface "github.com/filecoin-project/kubo-api-client/coreiface" + "github.com/filecoin-project/kubo-api-client/coreiface/options" ) type IPFSBlockstore struct { @@ -83,7 +83,7 @@ func (i *IPFSBlockstore) DeleteBlock(ctx context.Context, cid cid.Cid) error { } func (i *IPFSBlockstore) Has(ctx context.Context, cid cid.Cid) (bool, error) { - _, err := i.offlineAPI.Block().Stat(ctx, path.IpldPath(cid)) + _, err := 
i.offlineAPI.Block().Stat(ctx, path.FromCid(cid)) if err != nil { // The underlying client is running in Offline mode. // Stat() will fail with an err if the block isn't in the @@ -99,7 +99,7 @@ func (i *IPFSBlockstore) Has(ctx context.Context, cid cid.Cid) (bool, error) { } func (i *IPFSBlockstore) Get(ctx context.Context, cid cid.Cid) (blocks.Block, error) { - rd, err := i.api.Block().Get(ctx, path.IpldPath(cid)) + rd, err := i.api.Block().Get(ctx, path.FromCid(cid)) if err != nil { return nil, xerrors.Errorf("getting ipfs block: %w", err) } @@ -113,7 +113,7 @@ func (i *IPFSBlockstore) Get(ctx context.Context, cid cid.Cid) (blocks.Block, er } func (i *IPFSBlockstore) GetSize(ctx context.Context, cid cid.Cid) (int, error) { - st, err := i.api.Block().Stat(ctx, path.IpldPath(cid)) + st, err := i.api.Block().Stat(ctx, path.FromCid(cid)) if err != nil { return 0, xerrors.Errorf("getting ipfs block: %w", err) } diff --git a/build/actors/v13.tar.zst b/build/actors/v13.tar.zst index 77565abc97b..1b852decece 100644 Binary files a/build/actors/v13.tar.zst and b/build/actors/v13.tar.zst differ diff --git a/build/builtin_actors_gen.go b/build/builtin_actors_gen.go index e8772ee4d4f..cd52b515703 100644 --- a/build/builtin_actors_gen.go +++ b/build/builtin_actors_gen.go @@ -120,8 +120,8 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet }, { Network: "butterflynet", Version: 13, - BundleGitTag: "v13.0.0-rc.3", - ManifestCid: MustParseCid("bafy2bzaceaqx5xa4cwso24rjiu2ketjlztrqlac6dkyol7tlyuhzrle3zfbos"), + BundleGitTag: "v13.0.0", + ManifestCid: MustParseCid("bafy2bzacec75zk7ufzwx6tg5avls5fxdjx5asaqmd2bfqdvkqrkzoxgyflosu"), Actors: map[string]cid.Cid{ "account": MustParseCid("bafk2bzacedl533kwbzouqxibejpwp6syfdekvmzy4vmmno6j4iaydbdmv4xek"), "cron": MustParseCid("bafk2bzacecimv5xnuwyoqgxk26qt4xqpgntleret475pnh35s3vvhqtdct4ow"), @@ -138,7 +138,7 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet "storageminer": MustParseCid("bafk2bzacebedx7iaa2ruspxvghkg46ez7un5b7oiijjtnvddq2aot5wk7p7ry"), "storagepower": MustParseCid("bafk2bzacebvne7m2l3hxxw4xa6oujol75x35yqpnlqiwx74jilyrop4cs7cse"), "system": MustParseCid("bafk2bzaceacjmlxrvydlud77ilpzbscez46yedx6zjsj6olxsdeuv6d4x4cwe"), - "verifiedregistry": MustParseCid("bafk2bzaceaf2po4fxf7gw7cdvulwxxtvnsvzfn4gff5w267qnz7r44ywk25c6"), + "verifiedregistry": MustParseCid("bafk2bzacebs5muoq7ft2wgqojhjio7a4vltbyprqkmlr43ojlzbil4nwvj3jg"), }, }, { Network: "calibrationnet", @@ -387,8 +387,8 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet }, { Network: "caterpillarnet", Version: 13, - BundleGitTag: "v13.0.0-rc.3", - ManifestCid: MustParseCid("bafy2bzacecozgyaqlzq4qebq52uogmrk6ahk7z2i4qfkh5iv235bpqqv7w24m"), + BundleGitTag: "v13.0.0", + ManifestCid: MustParseCid("bafy2bzacedu7kk2zngxp7y3lynhtaht6vgadgn5jzkxe5nuowtwzasnogx63w"), Actors: map[string]cid.Cid{ "account": MustParseCid("bafk2bzacecro3uo6ypqhfzwdhnamzcole5qmhrbkx7qny6t2qsrcpqxelt6s2"), "cron": MustParseCid("bafk2bzaceam3kci46y4siltbw7f4itoap34kp7b7pvn2fco5s2bvnotomwdbe"), @@ -405,7 +405,7 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet "storageminer": MustParseCid("bafk2bzaceardbn5a7aq5jxl7efr4btmsbl7txnxm4hrrd3llyhujuc2cr5vcs"), "storagepower": MustParseCid("bafk2bzacear4563jznjqyseoy42xl6kenyqk6umv6xl3bp5bsjb3hbs6sp6bm"), "system": MustParseCid("bafk2bzacecc5oavxivfnvirx2g7megpdf6lugooyoc2wijloju247xzjcdezy"), - "verifiedregistry": 
MustParseCid("bafk2bzacecpqldvrs6i7xzbyizkpdvrick3cahrbdptmimdsrpnxu6k4xs4pm"), + "verifiedregistry": MustParseCid("bafk2bzacebnkdt42mpf5emypo6iroux3hszfh5yt54v2mmnnura3ketholly4"), }, }, { Network: "devnet", @@ -516,8 +516,8 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet }, { Network: "devnet", Version: 13, - BundleGitTag: "v13.0.0-rc.3", - ManifestCid: MustParseCid("bafy2bzaceap34qfq4emg4fp3xd7bxtzt7pvkaj37kunqm2ccvttchtlljw7d4"), + BundleGitTag: "v13.0.0", + ManifestCid: MustParseCid("bafy2bzacecn7uxgehrqbcs462ktl2h23u23cmduy2etqj6xrd6tkkja56fna4"), Actors: map[string]cid.Cid{ "account": MustParseCid("bafk2bzacebev3fu5geeehpx577b3kvza4xsmmggmepjj7rlsnr27hpoq27q2i"), "cron": MustParseCid("bafk2bzacedalzqahtuz2bmnf7uawbcujfhhe5xzv5ys5ufadu6ggs3tcu6lsy"), @@ -534,7 +534,7 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet "storageminer": MustParseCid("bafk2bzacecsputz6xygjfyrvx2d7bxkpp7b5v4icrmpckec7gnbabx2w377qs"), "storagepower": MustParseCid("bafk2bzaceceyaa5yjwhxvvcqouob4l746zp5nesivr6enhtpimakdtby6kafi"), "system": MustParseCid("bafk2bzaceaxg6k5vuozxlemfi5hv663m6jcawzu5puboo4znj73i36e3tsovs"), - "verifiedregistry": MustParseCid("bafk2bzacebjwc4fp4n556agi5i4pccuzn4bhn2tl24l4cskgvmwgadycff3oo"), + "verifiedregistry": MustParseCid("bafk2bzacea2czkb4vt2iiiwdb6e57qfwqse4mk2pcyvwjmdl5ojbnla57oh2u"), }, }, { Network: "hyperspace", @@ -668,8 +668,8 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet }, { Network: "mainnet", Version: 13, - BundleGitTag: "v13.0.0-rc.3", - ManifestCid: MustParseCid("bafy2bzacecoplaet2m4kzueqgutjxpl76bhmuiq5hmo3ueighbnxas3rj4dvy"), + BundleGitTag: "v13.0.0", + ManifestCid: MustParseCid("bafy2bzacecdhvfmtirtojwhw2tyciu4jkbpsbk5g53oe24br27oy62sn4dc4e"), Actors: map[string]cid.Cid{ "account": MustParseCid("bafk2bzacedxnbtlsqdk76fsfmnhyvsblwyfducerwwtp3mqtx2wbrvs5idl52"), "cron": MustParseCid("bafk2bzacebbopddyn5csb3fsuhh2an4ttd23x6qnwixgohlirj5ahtcudphyc"), @@ -686,7 +686,7 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet "storageminer": MustParseCid("bafk2bzacebf4rrqyk7gcfggggul6nfpzay7f2ordnkwm7z2wcf4mq6r7i77t2"), "storagepower": MustParseCid("bafk2bzacecjy4dkulvxppg3ocbmeixe2wgg6yxoyjxrm4ko2fm3uhpvfvam6e"), "system": MustParseCid("bafk2bzacecyf523quuq2kdjfdvyty446z2ounmamtgtgeqnr3ynlu5cqrlt6e"), - "verifiedregistry": MustParseCid("bafk2bzaceblqlrece7lezbp42lfba5ojlyxuv3vcbkldw45wpdadqwqslev3g"), + "verifiedregistry": MustParseCid("bafk2bzacedkxehp7y7iyukbcje3wbpqcvufisos6exatkanyrbotoecdkrbta"), }, }, { Network: "testing", @@ -797,8 +797,8 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet }, { Network: "testing", Version: 13, - BundleGitTag: "v13.0.0-rc.3", - ManifestCid: MustParseCid("bafy2bzacedcrzpgb4jac75auzcjkh55bxipdiospgvjsivumnqlvg2rp2ahmg"), + BundleGitTag: "v13.0.0", + ManifestCid: MustParseCid("bafy2bzacedg47dqxmtgzjch6i42kth72esd7w23gujyd6c6oppg3n6auag5ou"), Actors: map[string]cid.Cid{ "account": MustParseCid("bafk2bzaceb3tncntgeqvzzr5fzhvpsc5ntv3tpqrsh4jst4irfyzpkdyigibc"), "cron": MustParseCid("bafk2bzacecwwasmhixpgtshczm5cfspwciyawc25mrefknqhlxfrd6m57tqmc"), @@ -815,7 +815,7 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet "storageminer": MustParseCid("bafk2bzaceailclue4dba2edjethfjw6ycufcwsx4qjjmgsh77xcyprmogdjvu"), "storagepower": MustParseCid("bafk2bzaceaqw6dhdjlqovhk3p4lb4sb25i5d6mhln2ir5m7tj6m4fegkgkinw"), "system": 
MustParseCid("bafk2bzaceby6aiiosnrtb5kzlmrvd4k3o27oo3idmbd6llydz2uqibbp23pzq"), - "verifiedregistry": MustParseCid("bafk2bzacebqwmxch4np2nwzi2yt6vkciy2mp75otwoipulkmfxly3ifhj5g6i"), + "verifiedregistry": MustParseCid("bafk2bzaceadw6mxuyb6ipaq3fhekk7u5osyyiis3c4wbkvysy2ut6qfsua5zs"), }, }, { Network: "testing-fake-proofs", @@ -926,8 +926,8 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet }, { Network: "testing-fake-proofs", Version: 13, - BundleGitTag: "v13.0.0-rc.3", - ManifestCid: MustParseCid("bafy2bzaceaeepylii2u3lvuvrbdureocn6cuizhaq6o6ivmtzldweqf675w5s"), + BundleGitTag: "v13.0.0", + ManifestCid: MustParseCid("bafy2bzaceaf7fz33sp2i5ag5xg5ompn3dwppqlbwfacrwuvzaqdbqrtni7m5q"), Actors: map[string]cid.Cid{ "account": MustParseCid("bafk2bzaceb3tncntgeqvzzr5fzhvpsc5ntv3tpqrsh4jst4irfyzpkdyigibc"), "cron": MustParseCid("bafk2bzacecwwasmhixpgtshczm5cfspwciyawc25mrefknqhlxfrd6m57tqmc"), @@ -944,6 +944,6 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet "storageminer": MustParseCid("bafk2bzaceb6atn3k6yhmskgmc3lgfiwpzpfmaxzacohtnb2hivme2oroycqr6"), "storagepower": MustParseCid("bafk2bzacedameh56mp2g4y7nprhax5sddbzcmpk5p7l523l45rtn2wjc6ah4e"), "system": MustParseCid("bafk2bzaceby6aiiosnrtb5kzlmrvd4k3o27oo3idmbd6llydz2uqibbp23pzq"), - "verifiedregistry": MustParseCid("bafk2bzacebqwmxch4np2nwzi2yt6vkciy2mp75otwoipulkmfxly3ifhj5g6i"), + "verifiedregistry": MustParseCid("bafk2bzaceadw6mxuyb6ipaq3fhekk7u5osyyiis3c4wbkvysy2ut6qfsua5zs"), }, }} diff --git a/build/openrpc/full.json.gz b/build/openrpc/full.json.gz index 09793ae3323..eb2a3aae54c 100644 Binary files a/build/openrpc/full.json.gz and b/build/openrpc/full.json.gz differ diff --git a/build/openrpc/gateway.json.gz b/build/openrpc/gateway.json.gz index bc012704306..1bd72f65612 100644 Binary files a/build/openrpc/gateway.json.gz and b/build/openrpc/gateway.json.gz differ diff --git a/build/openrpc/miner.json.gz b/build/openrpc/miner.json.gz new file mode 100644 index 00000000000..9929d440f4e Binary files /dev/null and b/build/openrpc/miner.json.gz differ diff --git a/build/openrpc/worker.json.gz b/build/openrpc/worker.json.gz new file mode 100644 index 00000000000..97b3be27395 Binary files /dev/null and b/build/openrpc/worker.json.gz differ diff --git a/build/params_mainnet.go b/build/params_mainnet.go index c3c1b131bb1..ba871eccad7 100644 --- a/build/params_mainnet.go +++ b/build/params_mainnet.go @@ -99,11 +99,11 @@ const UpgradeThunderHeight = UpgradeLightningHeight + 2880*21 // 2023-12-12T13:30:00Z const UpgradeWatermelonHeight = 3469380 -// 2024-04-02T14:00:00Z - Epoch will be updated in final release -var UpgradeDragonHeight = abi.ChainEpoch(999999999999999) +// 2024-04-11T14:00:00Z +var UpgradeDragonHeight = abi.ChainEpoch(3817920) // This epoch, 120 epochs after the "rest" of the nv22 upgrade, is when we switch to Drand quicknet -// 2024-04-02T15:00:00Z +// 2024-04-11T15:00:00Z var UpgradePhoenixHeight = UpgradeDragonHeight + 120 // This fix upgrade only ran on calibrationnet diff --git a/chain/actors/builtin/miner/actor.go.template b/chain/actors/builtin/miner/actor.go.template index 0f7204ec733..c29590bba28 100644 --- a/chain/actors/builtin/miner/actor.go.template +++ b/chain/actors/builtin/miner/actor.go.template @@ -17,7 +17,6 @@ import ( "github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/types" - minertypes13 "github.com/filecoin-project/go-state-types/builtin/v13/miner" minertypes 
"github.com/filecoin-project/go-state-types/builtin/v9/miner" "github.com/filecoin-project/go-state-types/manifest" diff --git a/chain/events/filter/index.go b/chain/events/filter/index.go index 83998767e1e..4d9538c089e 100644 --- a/chain/events/filter/index.go +++ b/chain/events/filter/index.go @@ -536,12 +536,11 @@ func (ei *EventIndex) CollectEvents(ctx context.Context, te *TipSetEvents, rever return nil } -// prefillFilter fills a filter's collection of events from the historic index +// PrefillFilter fills a filter's collection of events from the historic index func (ei *EventIndex) prefillFilter(ctx context.Context, f *eventFilter, excludeReverted bool) error { - var ( - clauses, joins []string - values []any - ) + clauses := []string{} + values := []any{} + joins := []string{} if f.tipsetCid != cid.Undef { clauses = append(clauses, "event.tipset_key_cid=?") diff --git a/cli/clicommands/cmd.go b/cli/clicommands/cmd.go new file mode 100644 index 00000000000..a37ce329acc --- /dev/null +++ b/cli/clicommands/cmd.go @@ -0,0 +1,30 @@ +package clicommands + +import ( + "github.com/urfave/cli/v2" + + lcli "github.com/filecoin-project/lotus/cli" +) + +var Commands = []*cli.Command{ + lcli.WithCategory("basic", lcli.SendCmd), + lcli.WithCategory("basic", lcli.WalletCmd), + lcli.WithCategory("basic", lcli.InfoCmd), + lcli.WithCategory("basic", lcli.ClientCmd), + lcli.WithCategory("basic", lcli.MultisigCmd), + lcli.WithCategory("basic", lcli.FilplusCmd), + lcli.WithCategory("basic", lcli.PaychCmd), + lcli.WithCategory("developer", lcli.AuthCmd), + lcli.WithCategory("developer", lcli.MpoolCmd), + lcli.WithCategory("developer", StateCmd), + lcli.WithCategory("developer", lcli.ChainCmd), + lcli.WithCategory("developer", lcli.LogCmd), + lcli.WithCategory("developer", lcli.WaitApiCmd), + lcli.WithCategory("developer", lcli.FetchParamCmd), + lcli.WithCategory("developer", lcli.EvmCmd), + lcli.WithCategory("network", lcli.NetCmd), + lcli.WithCategory("network", lcli.SyncCmd), + lcli.WithCategory("status", lcli.StatusCmd), + lcli.PprofCmd, + lcli.VersionCmd, +} diff --git a/cli/clicommands/state.go b/cli/clicommands/state.go new file mode 100644 index 00000000000..e990cceb0be --- /dev/null +++ b/cli/clicommands/state.go @@ -0,0 +1,70 @@ +// Package clicommands contains only the cli.Command definitions that are +// common to sptool and miner. These are here because they can't be referenced +// in cli/spcli or cli/ because of the import cycle with all the other cli functions. 
+package clicommands + +import ( + "github.com/urfave/cli/v2" + + "github.com/filecoin-project/go-address" + + lcli "github.com/filecoin-project/lotus/cli" + "github.com/filecoin-project/lotus/cli/spcli" +) + +var StateCmd = &cli.Command{ + Name: "state", + Usage: "Interact with and query filecoin chain state", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "tipset", + Usage: "specify tipset to call method on (pass comma separated array of cids)", + }, + }, + Subcommands: []*cli.Command{ + lcli.StatePowerCmd, + lcli.StateSectorsCmd, + lcli.StateActiveSectorsCmd, + lcli.StateListActorsCmd, + lcli.StateListMinersCmd, + lcli.StateCircSupplyCmd, + lcli.StateSectorCmd, + lcli.StateGetActorCmd, + lcli.StateLookupIDCmd, + lcli.StateReplayCmd, + lcli.StateSectorSizeCmd, + lcli.StateReadStateCmd, + lcli.StateListMessagesCmd, + lcli.StateComputeStateCmd, + lcli.StateCallCmd, + lcli.StateGetDealSetCmd, + lcli.StateWaitMsgCmd, + lcli.StateSearchMsgCmd, + StateMinerInfo, + lcli.StateMarketCmd, + lcli.StateExecTraceCmd, + lcli.StateNtwkVersionCmd, + lcli.StateMinerProvingDeadlineCmd, + lcli.StateSysActorCIDsCmd, + }, +} + +var StateMinerInfo = &cli.Command{ + Name: "miner-info", + Usage: "Retrieve miner information", + ArgsUsage: "[minerAddress]", + Action: func(cctx *cli.Context) error { + addressGetter := func(_ *cli.Context) (address.Address, error) { + if cctx.NArg() != 1 { + return address.Address{}, lcli.IncorrectNumArgs(cctx) + } + + return address.NewFromString(cctx.Args().First()) + } + err := spcli.InfoCmd(addressGetter).Action(cctx) + if err != nil { + return err + } + return nil + }, +} diff --git a/cli/client.go b/cli/client.go index 81299b8fb3e..302e31e98be 100644 --- a/cli/client.go +++ b/cli/client.go @@ -74,7 +74,7 @@ func GetCidEncoder(cctx *cli.Context) (cidenc.Encoder, error) { return e, nil } -var clientCmd = &cli.Command{ +var ClientCmd = &cli.Command{ Name: "client", Usage: "Make deals, store data, retrieve data", Subcommands: []*cli.Command{ diff --git a/cli/cmd.go b/cli/cmd.go index 802df0c99ac..76c0ab300a6 100644 --- a/cli/cmd.go +++ b/cli/cmd.go @@ -66,29 +66,6 @@ var CommonCommands = []*cli.Command{ VersionCmd, } -var Commands = []*cli.Command{ - WithCategory("basic", sendCmd), - WithCategory("basic", walletCmd), - WithCategory("basic", infoCmd), - WithCategory("basic", clientCmd), - WithCategory("basic", multisigCmd), - WithCategory("basic", filplusCmd), - WithCategory("basic", paychCmd), - WithCategory("developer", AuthCmd), - WithCategory("developer", MpoolCmd), - WithCategory("developer", StateCmd), - WithCategory("developer", ChainCmd), - WithCategory("developer", LogCmd), - WithCategory("developer", WaitApiCmd), - WithCategory("developer", FetchParamCmd), - WithCategory("developer", EvmCmd), - WithCategory("network", NetCmd), - WithCategory("network", SyncCmd), - WithCategory("status", StatusCmd), - PprofCmd, - VersionCmd, -} - func WithCategory(cat string, cmd *cli.Command) *cli.Command { cmd.Category = strings.ToUpper(cat) return cmd diff --git a/cli/filplus.go b/cli/filplus.go index fbb922a24a2..b8e88749871 100644 --- a/cli/filplus.go +++ b/cli/filplus.go @@ -39,7 +39,7 @@ import ( "github.com/filecoin-project/lotus/lib/tablewriter" ) -var filplusCmd = &cli.Command{ +var FilplusCmd = &cli.Command{ Name: "filplus", Usage: "Interact with the verified registry actor used by Filplus", Flags: []cli.Flag{}, @@ -934,7 +934,11 @@ var filplusSignRemoveDataCapProposal = &cli.Command{ var filplusExtendClaimCmd = &cli.Command{ Name: "extend-claim", - Usage: "extend claim 
expiration (TermMax)", + Usage: "extends claim expiration (TermMax)", + UsageText: `Extends claim expiration (TermMax). +If the client is original client then claim can be extended to maximum 5 years and no Datacap is required. +If the client id different then claim can be extended up to maximum 5 years from now and Datacap is required. +`, Flags: []cli.Flag{ &cli.Int64Flag{ Name: "term-max", @@ -966,6 +970,11 @@ var filplusExtendClaimCmd = &cli.Command{ Usage: "number of block confirmations to wait for", Value: int(build.MessageConfidence), }, + &cli.IntFlag{ + Name: "batch-size", + Usage: "number of extend requests per batch. If set incorrectly, this will lead to out of gas error", + Value: 500, + }, }, ArgsUsage: " ... or ...", Action: func(cctx *cli.Context) error { @@ -1069,7 +1078,7 @@ var filplusExtendClaimCmd = &cli.Command{ } } - msgs, err := CreateExtendClaimMsg(ctx, api, claimMap, miners, clientAddr, abi.ChainEpoch(tmax), all, cctx.Bool("assume-yes")) + msgs, err := CreateExtendClaimMsg(ctx, api, claimMap, miners, clientAddr, abi.ChainEpoch(tmax), all, cctx.Bool("assume-yes"), cctx.Int("batch-size")) if err != nil { return err } @@ -1122,7 +1131,7 @@ type ProvInfo struct { // 6. Extend all claims for multiple miner IDs with different client address (2 messages) // 7. Extend specified claims for a miner ID with different client address (2 messages) // 8. Extend specific claims for specific miner ID with different client address (2 messages) -func CreateExtendClaimMsg(ctx context.Context, api api.FullNode, pcm map[verifregtypes13.ClaimId]ProvInfo, miners []string, wallet address.Address, tmax abi.ChainEpoch, all, assumeYes bool) ([]*types.Message, error) { +func CreateExtendClaimMsg(ctx context.Context, api api.FullNode, pcm map[verifregtypes13.ClaimId]ProvInfo, miners []string, wallet address.Address, tmax abi.ChainEpoch, all, assumeYes bool, batchSize int) ([]*types.Message, error) { ac, err := api.StateLookupID(ctx, wallet, types.EmptyTSK) if err != nil { @@ -1141,7 +1150,7 @@ func CreateExtendClaimMsg(ctx context.Context, api api.FullNode, pcm map[verifre } var terms []verifregtypes13.ClaimTerm - var newClaims []verifregtypes13.ClaimExtensionRequest + newClaims := make(map[verifregtypes13.ClaimExtensionRequest]big.Int) rDataCap := big.NewInt(0) // If --all is set @@ -1162,17 +1171,23 @@ func CreateExtendClaimMsg(ctx context.Context, api api.FullNode, pcm map[verifre for claimID, claim := range claims { claimID := claimID claim := claim - if claim.TermMax < tmax && claim.TermStart+claim.TermMax > head.Height() { - // If client is not same - needs to burn datacap - if claim.Client != wid { - newClaims = append(newClaims, verifregtypes13.ClaimExtensionRequest{ + // If the client is not the original client - burn datacap + if claim.Client != wid { + // The new duration should be greater than the original deal duration and claim should not already be expired + if head.Height()+tmax-claim.TermStart > claim.TermMax-claim.TermStart && claim.TermStart+claim.TermMax > head.Height() { + req := verifregtypes13.ClaimExtensionRequest{ Claim: verifregtypes13.ClaimId(claimID), Provider: abi.ActorID(mid), - TermMax: tmax, - }) + TermMax: head.Height() + tmax - claim.TermStart, + } + newClaims[req] = big.NewInt(int64(claim.Size)) rDataCap.Add(big.NewInt(int64(claim.Size)).Int, rDataCap.Int) - continue } + // If new duration shorter than the original duration then do nothing + continue + } + // For original client, compare duration(TermMax) and claim should not already be expired + if 
claim.TermMax < tmax && claim.TermStart+claim.TermMax > head.Height() { terms = append(terms, verifregtypes13.ClaimTerm{ ClaimId: verifregtypes13.ClaimId(claimID), TermMax: tmax, @@ -1204,17 +1219,23 @@ func CreateExtendClaimMsg(ctx context.Context, api api.FullNode, pcm map[verifre if !ok { return nil, xerrors.Errorf("claim %d not found for provider %s", claimID, miners[0]) } - if claim.TermMax < tmax && claim.TermStart+claim.TermMax > head.Height() { - // If client is not same - needs to burn datacap - if claim.Client != wid { - newClaims = append(newClaims, verifregtypes13.ClaimExtensionRequest{ + // If the client is not the original client - burn datacap + if claim.Client != wid { + // The new duration should be greater than the original deal duration and claim should not already be expired + if head.Height()+tmax-claim.TermStart > claim.TermMax-claim.TermStart && claim.TermStart+claim.TermMax > head.Height() { + req := verifregtypes13.ClaimExtensionRequest{ Claim: claimID, Provider: abi.ActorID(mid), - TermMax: tmax, - }) + TermMax: head.Height() + tmax - claim.TermStart, + } + newClaims[req] = big.NewInt(int64(claim.Size)) rDataCap.Add(big.NewInt(int64(claim.Size)).Int, rDataCap.Int) - continue } + // If new duration shorter than the original duration then do nothing + continue + } + // For original client, compare duration(TermMax) and claim should not already be expired + if claim.TermMax < tmax && claim.TermStart+claim.TermMax > head.Height() { terms = append(terms, verifregtypes13.ClaimTerm{ ClaimId: claimID, TermMax: tmax, @@ -1235,17 +1256,23 @@ func CreateExtendClaimMsg(ctx context.Context, api api.FullNode, pcm map[verifre if claim == nil { return nil, xerrors.Errorf("claim %d not found in the actor state", claimID) } - if claim.TermMax < tmax && claim.TermStart+claim.TermMax > head.Height() { - // If client is not same - needs to burn datacap - if claim.Client != wid { - newClaims = append(newClaims, verifregtypes13.ClaimExtensionRequest{ + // If the client is not the original client - burn datacap + if claim.Client != wid { + // The new duration should be greater than the original deal duration and claim should not already be expired + if head.Height()+tmax-claim.TermStart > claim.TermMax-claim.TermStart && claim.TermStart+claim.TermMax > head.Height() { + req := verifregtypes13.ClaimExtensionRequest{ Claim: claimID, Provider: prov.ID, - TermMax: tmax, - }) + TermMax: head.Height() + tmax - claim.TermStart, + } + newClaims[req] = big.NewInt(int64(claim.Size)) rDataCap.Add(big.NewInt(int64(claim.Size)).Int, rDataCap.Int) - continue } + // If new duration shorter than the original duration then do nothing + continue + } + // For original client, compare duration(TermMax) and claim should not already be expired + if claim.TermMax < tmax && claim.TermStart+claim.TermMax > head.Height() { terms = append(terms, verifregtypes13.ClaimTerm{ ClaimId: claimID, TermMax: tmax, @@ -1258,22 +1285,29 @@ func CreateExtendClaimMsg(ctx context.Context, api api.FullNode, pcm map[verifre var msgs []*types.Message if len(terms) > 0 { - params, err := actors.SerializeParams(&verifregtypes13.ExtendClaimTermsParams{ - Terms: terms, - }) + // Batch in 500 to avoid running out of gas + for i := 0; i < len(terms); i += batchSize { + batchEnd := i + batchSize + if batchEnd > len(terms) { + batchEnd = len(terms) + } - if err != nil { - return nil, xerrors.Errorf("failed to searialise the parameters: %s", err) - } + batch := terms[i:batchEnd] - oclaimMsg := &types.Message{ - To: verifreg.Address, - From: 
wallet, - Method: verifreg.Methods.ExtendClaimTerms, - Params: params, + params, err := actors.SerializeParams(&verifregtypes13.ExtendClaimTermsParams{ + Terms: batch, + }) + if err != nil { + return nil, xerrors.Errorf("failed to searialise the parameters: %s", err) + } + oclaimMsg := &types.Message{ + To: verifreg.Address, + From: wallet, + Method: verifreg.Methods.ExtendClaimTerms, + Params: params, + } + msgs = append(msgs, oclaimMsg) } - - msgs = append(msgs, oclaimMsg) } if len(newClaims) > 0 { @@ -1292,32 +1326,6 @@ func CreateExtendClaimMsg(ctx context.Context, api api.FullNode, pcm map[verifre return nil, xerrors.Errorf("requested datacap %s is greater then the available datacap %s", rDataCap, aDataCap) } - ncparams, err := actors.SerializeParams(&verifregtypes13.AllocationRequests{ - Extensions: newClaims, - }) - - if err != nil { - return nil, xerrors.Errorf("failed to searialise the parameters: %s", err) - } - - transferParams, err := actors.SerializeParams(&datacap2.TransferParams{ - To: builtin.VerifiedRegistryActorAddr, - Amount: big.Mul(rDataCap, builtin.TokenPrecision), - OperatorData: ncparams, - }) - - if err != nil { - return nil, xerrors.Errorf("failed to serialize transfer parameters: %s", err) - } - - nclaimMsg := &types.Message{ - To: builtin.DatacapActorAddr, - From: wallet, - Method: datacap.Methods.TransferExported, - Params: transferParams, - Value: big.Zero(), - } - if !assumeYes { out := fmt.Sprintf("Some of the specified allocation have a different client address and will require %d Datacap to extend. Proceed? Yes [Y/y] / No [N/n], Ctrl+C (^C) to exit", rDataCap.Int) validate := func(input string) error { @@ -1353,7 +1361,54 @@ func CreateExtendClaimMsg(ctx context.Context, api api.FullNode, pcm map[verifre } } - msgs = append(msgs, nclaimMsg) + // Create a map of just keys, so we can easily batch based on the numeric keys + keys := make([]verifregtypes13.ClaimExtensionRequest, 0, len(newClaims)) + for k := range newClaims { + keys = append(keys, k) + } + + // Batch in 500 to avoid running out of gas + for i := 0; i < len(keys); i += batchSize { + batchEnd := i + batchSize + if batchEnd > len(newClaims) { + batchEnd = len(newClaims) + } + + batch := keys[i:batchEnd] + + // Calculate Datacap for this batch + dcap := big.NewInt(0) + for _, k := range batch { + dc := newClaims[k] + dcap.Add(dcap.Int, dc.Int) + } + + ncparams, err := actors.SerializeParams(&verifregtypes13.AllocationRequests{ + Extensions: batch, + }) + if err != nil { + return nil, xerrors.Errorf("failed to searialise the parameters: %s", err) + } + + transferParams, err := actors.SerializeParams(&datacap2.TransferParams{ + To: builtin.VerifiedRegistryActorAddr, + Amount: big.Mul(dcap, builtin.TokenPrecision), + OperatorData: ncparams, + }) + + if err != nil { + return nil, xerrors.Errorf("failed to serialize transfer parameters: %s", err) + } + + nclaimMsg := &types.Message{ + To: builtin.DatacapActorAddr, + From: wallet, + Method: datacap.Methods.TransferExported, + Params: transferParams, + Value: big.Zero(), + } + msgs = append(msgs, nclaimMsg) + } } return msgs, nil diff --git a/cli/info.go b/cli/info.go index 8b36be4889b..a406fc48014 100644 --- a/cli/info.go +++ b/cli/info.go @@ -23,7 +23,7 @@ import ( "github.com/filecoin-project/lotus/journal/alerting" ) -var infoCmd = &cli.Command{ +var InfoCmd = &cli.Command{ Name: "info", Usage: "Print node info", Action: infoCmdAct, diff --git a/cli/multisig.go b/cli/multisig.go index 1af2a4c9e4e..290cf6700e2 100644 --- a/cli/multisig.go +++ 
b/cli/multisig.go @@ -32,7 +32,7 @@ import ( "github.com/filecoin-project/lotus/chain/types" ) -var multisigCmd = &cli.Command{ +var MultisigCmd = &cli.Command{ Name: "msig", Usage: "Interact with a multisig wallet", Flags: []cli.Flag{ diff --git a/cli/paych.go b/cli/paych.go index 1067d091376..46b043d6a6d 100644 --- a/cli/paych.go +++ b/cli/paych.go @@ -20,7 +20,7 @@ import ( "github.com/filecoin-project/lotus/paychmgr" ) -var paychCmd = &cli.Command{ +var PaychCmd = &cli.Command{ Name: "paych", Usage: "Manage payment channels", Subcommands: []*cli.Command{ diff --git a/cli/send.go b/cli/send.go index cfa2515c07b..89c79e109bd 100644 --- a/cli/send.go +++ b/cli/send.go @@ -19,7 +19,7 @@ import ( "github.com/filecoin-project/lotus/chain/types/ethtypes" ) -var sendCmd = &cli.Command{ +var SendCmd = &cli.Command{ Name: "send", Usage: "Send funds between accounts", ArgsUsage: "[targetAddress] [amount]", diff --git a/cli/send_test.go b/cli/send_test.go index 2c59a9641f6..59b8942f44b 100644 --- a/cli/send_test.go +++ b/cli/send_test.go @@ -45,7 +45,7 @@ func TestSendCLI(t *testing.T) { oneFil := abi.TokenAmount(types.MustParseFIL("1")) t.Run("simple", func(t *testing.T) { - app, mockSrvcs, buf, done := newMockApp(t, sendCmd) + app, mockSrvcs, buf, done := newMockApp(t, SendCmd) defer done() arbtProto := &api.MessagePrototype{ @@ -76,7 +76,7 @@ func TestSendEthereum(t *testing.T) { oneFil := abi.TokenAmount(types.MustParseFIL("1")) t.Run("simple", func(t *testing.T) { - app, mockSrvcs, buf, done := newMockApp(t, sendCmd) + app, mockSrvcs, buf, done := newMockApp(t, SendCmd) defer done() testEthAddr, err := ethtypes.CastEthAddress(make([]byte, 20)) diff --git a/cli/spcli/actor.go b/cli/spcli/actor.go new file mode 100644 index 00000000000..296d5ffb194 --- /dev/null +++ b/cli/spcli/actor.go @@ -0,0 +1,1240 @@ +package spcli + +import ( + "bytes" + "fmt" + "strconv" + + cbor "github.com/ipfs/go-ipld-cbor" + "github.com/libp2p/go-libp2p/core/peer" + ma "github.com/multiformats/go-multiaddr" + "github.com/urfave/cli/v2" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-bitfield" + rlepluslazy "github.com/filecoin-project/go-bitfield/rle" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/builtin" + "github.com/filecoin-project/go-state-types/builtin/v9/miner" + "github.com/filecoin-project/go-state-types/network" + + "github.com/filecoin-project/lotus/api" + lapi "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/blockstore" + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/adt" + lminer "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/types" + lcli "github.com/filecoin-project/lotus/cli" + "github.com/filecoin-project/lotus/node/impl" +) + +func ActorWithdrawCmd(getActor ActorAddressGetter) *cli.Command { + return &cli.Command{ + Name: "withdraw", + Usage: "withdraw available balance to beneficiary", + ArgsUsage: "[amount (FIL)]", + Flags: []cli.Flag{ + &cli.IntFlag{ + Name: "confidence", + Usage: "number of block confirmations to wait for", + Value: int(build.MessageConfidence), + }, + &cli.BoolFlag{ + Name: "beneficiary", + Usage: "send withdraw message from the beneficiary address", + }, + }, + Action: func(cctx *cli.Context) error { + amount := abi.NewTokenAmount(0) + + if 
cctx.Args().Present() { + f, err := types.ParseFIL(cctx.Args().First()) + if err != nil { + return xerrors.Errorf("parsing 'amount' argument: %w", err) + } + + amount = abi.TokenAmount(f) + } + + api, acloser, err := lcli.GetFullNodeAPIV1(cctx) + if err != nil { + return err + } + defer acloser() + + ctx := lcli.ReqContext(cctx) + + maddr, err := getActor(cctx) + if err != nil { + return err + } + + res, err := impl.WithdrawBalance(ctx, api, maddr, amount, !cctx.IsSet("beneficiary")) + if err != nil { + return err + } + + fmt.Printf("Requested withdrawal in message %s\nwaiting for it to be included in a block..\n", res) + + // wait for it to get mined into a block + wait, err := api.StateWaitMsg(ctx, res, uint64(cctx.Int("confidence")), lapi.LookbackNoLimit, true) + if err != nil { + return xerrors.Errorf("Timeout waiting for withdrawal message %s", res) + } + + if wait.Receipt.ExitCode.IsError() { + return xerrors.Errorf("Failed to execute withdrawal message %s: %w", wait.Message, wait.Receipt.ExitCode.Error()) + } + + nv, err := api.StateNetworkVersion(ctx, wait.TipSet) + if err != nil { + return err + } + + if nv >= network.Version14 { + var withdrawn abi.TokenAmount + if err := withdrawn.UnmarshalCBOR(bytes.NewReader(wait.Receipt.Return)); err != nil { + return err + } + + fmt.Printf("Successfully withdrew %s \n", types.FIL(withdrawn)) + if withdrawn.LessThan(amount) { + fmt.Printf("Note that this is less than the requested amount of %s\n", types.FIL(amount)) + } + } + + return nil + }, + } +} + +func ActorSetAddrsCmd(getActor ActorAddressGetter) *cli.Command { + return &cli.Command{ + Name: "set-addresses", + Aliases: []string{"set-addrs"}, + Usage: "set addresses that your miner can be publicly dialed on", + ArgsUsage: "", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "from", + Usage: "optionally specify the account to send the message from", + }, + &cli.Int64Flag{ + Name: "gas-limit", + Usage: "set gas limit", + Value: 0, + }, + &cli.BoolFlag{ + Name: "unset", + Usage: "unset address", + Value: false, + }, + }, + Action: func(cctx *cli.Context) error { + args := cctx.Args().Slice() + unset := cctx.Bool("unset") + if len(args) == 0 && !unset { + return cli.ShowSubcommandHelp(cctx) + } + if len(args) > 0 && unset { + return fmt.Errorf("unset can only be used with no arguments") + } + + api, acloser, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer acloser() + + ctx := lcli.ReqContext(cctx) + + var addrs []abi.Multiaddrs + for _, a := range args { + maddr, err := ma.NewMultiaddr(a) + if err != nil { + return fmt.Errorf("failed to parse %q as a multiaddr: %w", a, err) + } + + maddrNop2p, strip := ma.SplitFunc(maddr, func(c ma.Component) bool { + return c.Protocol().Code == ma.P_P2P + }) + + if strip != nil { + fmt.Println("Stripping peerid ", strip, " from ", maddr) + } + addrs = append(addrs, maddrNop2p.Bytes()) + } + + maddr, err := getActor(cctx) + if err != nil { + return err + } + + minfo, err := api.StateMinerInfo(ctx, maddr, types.EmptyTSK) + if err != nil { + return err + } + + fromAddr := minfo.Worker + if from := cctx.String("from"); from != "" { + addr, err := address.NewFromString(from) + if err != nil { + return err + } + + fromAddr = addr + } + + fromId, err := api.StateLookupID(ctx, fromAddr, types.EmptyTSK) + if err != nil { + return err + } + + if !isController(minfo, fromId) { + return xerrors.Errorf("sender isn't a controller of miner: %s", fromId) + } + + params, err := actors.SerializeParams(&miner.ChangeMultiaddrsParams{NewMultiaddrs: 
addrs}) + if err != nil { + return err + } + + gasLimit := cctx.Int64("gas-limit") + + smsg, err := api.MpoolPushMessage(ctx, &types.Message{ + To: maddr, + From: fromId, + Value: types.NewInt(0), + GasLimit: gasLimit, + Method: builtin.MethodsMiner.ChangeMultiaddrs, + Params: params, + }, nil) + if err != nil { + return err + } + + fmt.Printf("Requested multiaddrs change in message %s\n", smsg.Cid()) + return nil + + }, + } +} + +func ActorSetPeeridCmd(getActor ActorAddressGetter) *cli.Command { + return &cli.Command{ + Name: "set-peer-id", + Usage: "set the peer id of your miner", + ArgsUsage: "", + Flags: []cli.Flag{ + &cli.Int64Flag{ + Name: "gas-limit", + Usage: "set gas limit", + Value: 0, + }, + }, + Action: func(cctx *cli.Context) error { + + api, acloser, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer acloser() + + ctx := lcli.ReqContext(cctx) + + if cctx.NArg() != 1 { + return lcli.IncorrectNumArgs(cctx) + } + + pid, err := peer.Decode(cctx.Args().Get(0)) + if err != nil { + return fmt.Errorf("failed to parse input as a peerId: %w", err) + } + + maddr, err := getActor(cctx) + if err != nil { + return err + } + + minfo, err := api.StateMinerInfo(ctx, maddr, types.EmptyTSK) + if err != nil { + return err + } + + params, err := actors.SerializeParams(&miner.ChangePeerIDParams{NewID: abi.PeerID(pid)}) + if err != nil { + return err + } + + gasLimit := cctx.Int64("gas-limit") + + smsg, err := api.MpoolPushMessage(ctx, &types.Message{ + To: maddr, + From: minfo.Worker, + Value: types.NewInt(0), + GasLimit: gasLimit, + Method: builtin.MethodsMiner.ChangePeerID, + Params: params, + }, nil) + if err != nil { + return err + } + + fmt.Printf("Requested peerid change in message %s\n", smsg.Cid()) + return nil + + }, + } +} + +func ActorRepayDebtCmd(getActor ActorAddressGetter) *cli.Command { + return &cli.Command{ + Name: "repay-debt", + Usage: "pay down a miner's debt", + ArgsUsage: "[amount (FIL)]", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "from", + Usage: "optionally specify the account to send funds from", + }, + }, + Action: func(cctx *cli.Context) error { + api, acloser, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer acloser() + + ctx := lcli.ReqContext(cctx) + + maddr, err := getActor(cctx) + if err != nil { + return err + } + + mi, err := api.StateMinerInfo(ctx, maddr, types.EmptyTSK) + if err != nil { + return err + } + + var amount abi.TokenAmount + if cctx.Args().Present() { + f, err := types.ParseFIL(cctx.Args().First()) + if err != nil { + return xerrors.Errorf("parsing 'amount' argument: %w", err) + } + + amount = abi.TokenAmount(f) + } else { + mact, err := api.StateGetActor(ctx, maddr, types.EmptyTSK) + if err != nil { + return err + } + + store := adt.WrapStore(ctx, cbor.NewCborStore(blockstore.NewAPIBlockstore(api))) + + mst, err := lminer.Load(store, mact) + if err != nil { + return err + } + + amount, err = mst.FeeDebt() + if err != nil { + return err + } + + } + + fromAddr := mi.Worker + if from := cctx.String("from"); from != "" { + addr, err := address.NewFromString(from) + if err != nil { + return err + } + + fromAddr = addr + } + + fromId, err := api.StateLookupID(ctx, fromAddr, types.EmptyTSK) + if err != nil { + return err + } + + if !isController(mi, fromId) { + return xerrors.Errorf("sender isn't a controller of miner: %s", fromId) + } + + smsg, err := api.MpoolPushMessage(ctx, &types.Message{ + To: maddr, + From: fromId, + Value: amount, + Method: builtin.MethodsMiner.RepayDebt, + Params: nil, + }, 
nil) + if err != nil { + return err + } + + fmt.Printf("Sent repay debt message %s\n", smsg.Cid()) + + return nil + }, + } +} + +func ActorControlCmd(getActor ActorAddressGetter, actorControlListCmd *cli.Command) *cli.Command { + return &cli.Command{ + Name: "control", + Usage: "Manage control addresses", + Subcommands: []*cli.Command{ + actorControlListCmd, + actorControlSet(getActor), + }, + } +} + +func actorControlSet(getActor ActorAddressGetter) *cli.Command { + return &cli.Command{ + Name: "set", + Usage: "Set control address(-es)", + ArgsUsage: "[...address]", + Flags: []cli.Flag{ + &cli.BoolFlag{ + Name: "really-do-it", + Usage: "Actually send transaction performing the action", + Value: false, + }, + }, + Action: func(cctx *cli.Context) error { + + api, acloser, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer acloser() + + ctx := lcli.ReqContext(cctx) + + maddr, err := getActor(cctx) + if err != nil { + return err + } + + mi, err := api.StateMinerInfo(ctx, maddr, types.EmptyTSK) + if err != nil { + return err + } + + del := map[address.Address]struct{}{} + existing := map[address.Address]struct{}{} + for _, controlAddress := range mi.ControlAddresses { + ka, err := api.StateAccountKey(ctx, controlAddress, types.EmptyTSK) + if err != nil { + return err + } + + del[ka] = struct{}{} + existing[ka] = struct{}{} + } + + var toSet []address.Address + + for i, as := range cctx.Args().Slice() { + a, err := address.NewFromString(as) + if err != nil { + return xerrors.Errorf("parsing address %d: %w", i, err) + } + + ka, err := api.StateAccountKey(ctx, a, types.EmptyTSK) + if err != nil { + return err + } + + // make sure the address exists on chain + _, err = api.StateLookupID(ctx, ka, types.EmptyTSK) + if err != nil { + return xerrors.Errorf("looking up %s: %w", ka, err) + } + + delete(del, ka) + toSet = append(toSet, ka) + } + + for a := range del { + fmt.Println("Remove", a) + } + for _, a := range toSet { + if _, exists := existing[a]; !exists { + fmt.Println("Add", a) + } + } + + if !cctx.Bool("really-do-it") { + fmt.Println("Pass --really-do-it to actually execute this action") + return nil + } + + cwp := &miner.ChangeWorkerAddressParams{ + NewWorker: mi.Worker, + NewControlAddrs: toSet, + } + + sp, err := actors.SerializeParams(cwp) + if err != nil { + return xerrors.Errorf("serializing params: %w", err) + } + + smsg, err := api.MpoolPushMessage(ctx, &types.Message{ + From: mi.Owner, + To: maddr, + Method: builtin.MethodsMiner.ChangeWorkerAddress, + + Value: big.Zero(), + Params: sp, + }, nil) + if err != nil { + return xerrors.Errorf("mpool push: %w", err) + } + + fmt.Println("Message CID:", smsg.Cid()) + + return nil + }, + } +} + +func ActorSetOwnerCmd(getActor ActorAddressGetter) *cli.Command { + return &cli.Command{ + Name: "set-owner", + Usage: "Set owner address (this command should be invoked twice, first with the old owner as the senderAddress, and then with the new owner)", + ArgsUsage: "[newOwnerAddress senderAddress]", + Flags: []cli.Flag{ + &cli.BoolFlag{ + Name: "really-do-it", + Usage: "Actually send transaction performing the action", + Value: false, + }, + }, + Action: func(cctx *cli.Context) error { + if cctx.NArg() != 2 { + return lcli.IncorrectNumArgs(cctx) + } + + if !cctx.Bool("really-do-it") { + fmt.Println("Pass --really-do-it to actually execute this action") + return nil + } + + api, acloser, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer acloser() + + ctx := lcli.ReqContext(cctx) + + na, err := 
address.NewFromString(cctx.Args().First()) + if err != nil { + return err + } + + newAddrId, err := api.StateLookupID(ctx, na, types.EmptyTSK) + if err != nil { + return err + } + + fa, err := address.NewFromString(cctx.Args().Get(1)) + if err != nil { + return err + } + + fromAddrId, err := api.StateLookupID(ctx, fa, types.EmptyTSK) + if err != nil { + return err + } + + maddr, err := getActor(cctx) + if err != nil { + return err + } + + mi, err := api.StateMinerInfo(ctx, maddr, types.EmptyTSK) + if err != nil { + return err + } + + if fromAddrId != mi.Owner && fromAddrId != newAddrId { + return xerrors.New("from address must either be the old owner or the new owner") + } + + sp, err := actors.SerializeParams(&newAddrId) + if err != nil { + return xerrors.Errorf("serializing params: %w", err) + } + + smsg, err := api.MpoolPushMessage(ctx, &types.Message{ + From: fromAddrId, + To: maddr, + Method: builtin.MethodsMiner.ChangeOwnerAddress, + Value: big.Zero(), + Params: sp, + }, nil) + if err != nil { + return xerrors.Errorf("mpool push: %w", err) + } + + fmt.Println("Message CID:", smsg.Cid()) + + // wait for it to get mined into a block + wait, err := api.StateWaitMsg(ctx, smsg.Cid(), build.MessageConfidence) + if err != nil { + return err + } + + // check it executed successfully + if wait.Receipt.ExitCode.IsError() { + fmt.Println("owner change failed!") + return err + } + + fmt.Println("message succeeded!") + + return nil + }, + } +} + +func ActorProposeChangeWorkerCmd(getActor ActorAddressGetter) *cli.Command { + return &cli.Command{ + Name: "propose-change-worker", + Usage: "Propose a worker address change", + ArgsUsage: "[address]", + Flags: []cli.Flag{ + &cli.BoolFlag{ + Name: "really-do-it", + Usage: "Actually send transaction performing the action", + Value: false, + }, + }, + Action: func(cctx *cli.Context) error { + if !cctx.Args().Present() { + return fmt.Errorf("must pass address of new worker address") + } + + api, acloser, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer acloser() + + ctx := lcli.ReqContext(cctx) + + na, err := address.NewFromString(cctx.Args().First()) + if err != nil { + return err + } + + newAddr, err := api.StateLookupID(ctx, na, types.EmptyTSK) + if err != nil { + return err + } + + maddr, err := getActor(cctx) + if err != nil { + return err + } + + mi, err := api.StateMinerInfo(ctx, maddr, types.EmptyTSK) + if err != nil { + return err + } + + if mi.NewWorker.Empty() { + if mi.Worker == newAddr { + return fmt.Errorf("worker address already set to %s", na) + } + } else { + if mi.NewWorker == newAddr { + return fmt.Errorf("change to worker address %s already pending", na) + } + } + + if !cctx.Bool("really-do-it") { + fmt.Fprintln(cctx.App.Writer, "Pass --really-do-it to actually execute this action") + return nil + } + + cwp := &miner.ChangeWorkerAddressParams{ + NewWorker: newAddr, + NewControlAddrs: mi.ControlAddresses, + } + + sp, err := actors.SerializeParams(cwp) + if err != nil { + return xerrors.Errorf("serializing params: %w", err) + } + + smsg, err := api.MpoolPushMessage(ctx, &types.Message{ + From: mi.Owner, + To: maddr, + Method: builtin.MethodsMiner.ChangeWorkerAddress, + Value: big.Zero(), + Params: sp, + }, nil) + if err != nil { + return xerrors.Errorf("mpool push: %w", err) + } + + fmt.Fprintln(cctx.App.Writer, "Propose Message CID:", smsg.Cid()) + + // wait for it to get mined into a block + wait, err := api.StateWaitMsg(ctx, smsg.Cid(), build.MessageConfidence) + if err != nil { + return err + } + + // check 
it executed successfully + if wait.Receipt.ExitCode.IsError() { + return fmt.Errorf("propose worker change failed") + } + + mi, err = api.StateMinerInfo(ctx, maddr, wait.TipSet) + if err != nil { + return err + } + if mi.NewWorker != newAddr { + return fmt.Errorf("Proposed worker address change not reflected on chain: expected '%s', found '%s'", na, mi.NewWorker) + } + + fmt.Fprintf(cctx.App.Writer, "Worker key change to %s successfully sent, change happens at height %d.\n", na, mi.WorkerChangeEpoch) + fmt.Fprintf(cctx.App.Writer, "If you have no active deadlines, call 'confirm-change-worker' at or after height %d to complete.\n", mi.WorkerChangeEpoch) + + return nil + }, + } +} + +func ActorProposeChangeBeneficiaryCmd(getActor ActorAddressGetter) *cli.Command { + + return &cli.Command{ + Name: "propose-change-beneficiary", + Usage: "Propose a beneficiary address change", + ArgsUsage: "[beneficiaryAddress quota expiration]", + Flags: []cli.Flag{ + &cli.BoolFlag{ + Name: "really-do-it", + Usage: "Actually send transaction performing the action", + Value: false, + }, + &cli.BoolFlag{ + Name: "overwrite-pending-change", + Usage: "Overwrite the current beneficiary change proposal", + Value: false, + }, + &cli.StringFlag{ + Name: "actor", + Usage: "specify the address of miner actor", + }, + }, + Action: func(cctx *cli.Context) error { + if cctx.NArg() != 3 { + return lcli.IncorrectNumArgs(cctx) + } + + api, acloser, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return xerrors.Errorf("getting fullnode api: %w", err) + } + defer acloser() + + ctx := lcli.ReqContext(cctx) + + na, err := address.NewFromString(cctx.Args().Get(0)) + if err != nil { + return xerrors.Errorf("parsing beneficiary address: %w", err) + } + + newAddr, err := api.StateLookupID(ctx, na, types.EmptyTSK) + if err != nil { + return xerrors.Errorf("looking up new beneficiary address: %w", err) + } + + quota, err := types.ParseFIL(cctx.Args().Get(1)) + if err != nil { + return xerrors.Errorf("parsing quota: %w", err) + } + + expiration, err := strconv.ParseInt(cctx.Args().Get(2), 10, 64) + if err != nil { + return xerrors.Errorf("parsing expiration: %w", err) + } + + maddr, err := getActor(cctx) + if err != nil { + return xerrors.Errorf("getting miner address: %w", err) + } + + mi, err := api.StateMinerInfo(ctx, maddr, types.EmptyTSK) + if err != nil { + return xerrors.Errorf("getting miner info: %w", err) + } + + if mi.Beneficiary == mi.Owner && newAddr == mi.Owner { + return fmt.Errorf("beneficiary %s already set to owner address", mi.Beneficiary) + } + + if mi.PendingBeneficiaryTerm != nil { + fmt.Println("WARNING: replacing Pending Beneficiary Term of:") + fmt.Println("Beneficiary: ", mi.PendingBeneficiaryTerm.NewBeneficiary) + fmt.Println("Quota:", mi.PendingBeneficiaryTerm.NewQuota) + fmt.Println("Expiration Epoch:", mi.PendingBeneficiaryTerm.NewExpiration) + + if !cctx.Bool("overwrite-pending-change") { + return fmt.Errorf("must pass --overwrite-pending-change to replace current pending beneficiary change. Please review CAREFULLY") + } + } + + if !cctx.Bool("really-do-it") { + fmt.Println("Pass --really-do-it to actually execute this action. 
Review what you're about to approve CAREFULLY please") + return nil + } + + params := &miner.ChangeBeneficiaryParams{ + NewBeneficiary: newAddr, + NewQuota: abi.TokenAmount(quota), + NewExpiration: abi.ChainEpoch(expiration), + } + + sp, err := actors.SerializeParams(params) + if err != nil { + return xerrors.Errorf("serializing params: %w", err) + } + + smsg, err := api.MpoolPushMessage(ctx, &types.Message{ + From: mi.Owner, + To: maddr, + Method: builtin.MethodsMiner.ChangeBeneficiary, + Value: big.Zero(), + Params: sp, + }, nil) + if err != nil { + return xerrors.Errorf("mpool push: %w", err) + } + + fmt.Println("Propose Message CID:", smsg.Cid()) + + // wait for it to get mined into a block + wait, err := api.StateWaitMsg(ctx, smsg.Cid(), build.MessageConfidence) + if err != nil { + return xerrors.Errorf("waiting for message to be included in block: %w", err) + } + + // check it executed successfully + if wait.Receipt.ExitCode.IsError() { + return fmt.Errorf("propose beneficiary change failed") + } + + updatedMinerInfo, err := api.StateMinerInfo(ctx, maddr, wait.TipSet) + if err != nil { + return xerrors.Errorf("getting miner info: %w", err) + } + + if updatedMinerInfo.PendingBeneficiaryTerm == nil && updatedMinerInfo.Beneficiary == newAddr { + fmt.Println("Beneficiary address successfully changed") + } else { + fmt.Println("Beneficiary address change awaiting additional confirmations") + } + + return nil + }, + } +} + +func ActorConfirmChangeWorkerCmd(getActor ActorAddressGetter) *cli.Command { + return &cli.Command{ + Name: "confirm-change-worker", + Usage: "Confirm a worker address change", + ArgsUsage: "[address]", + Flags: []cli.Flag{ + &cli.BoolFlag{ + Name: "really-do-it", + Usage: "Actually send transaction performing the action", + Value: false, + }, + }, + Action: func(cctx *cli.Context) error { + if !cctx.Args().Present() { + return fmt.Errorf("must pass address of new worker address") + } + + api, acloser, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer acloser() + + ctx := lcli.ReqContext(cctx) + + na, err := address.NewFromString(cctx.Args().First()) + if err != nil { + return err + } + + newAddr, err := api.StateLookupID(ctx, na, types.EmptyTSK) + if err != nil { + return err + } + + maddr, err := getActor(cctx) + if err != nil { + return err + } + + mi, err := api.StateMinerInfo(ctx, maddr, types.EmptyTSK) + if err != nil { + return err + } + + if mi.NewWorker.Empty() { + return xerrors.Errorf("no worker key change proposed") + } else if mi.NewWorker != newAddr { + return xerrors.Errorf("worker key %s does not match current worker key proposal %s", newAddr, mi.NewWorker) + } + + if head, err := api.ChainHead(ctx); err != nil { + return xerrors.Errorf("failed to get the chain head: %w", err) + } else if head.Height() < mi.WorkerChangeEpoch { + return xerrors.Errorf("worker key change cannot be confirmed until %d, current height is %d", mi.WorkerChangeEpoch, head.Height()) + } + + if !cctx.Bool("really-do-it") { + fmt.Println("Pass --really-do-it to actually execute this action") + return nil + } + + smsg, err := api.MpoolPushMessage(ctx, &types.Message{ + From: mi.Owner, + To: maddr, + Method: builtin.MethodsMiner.ConfirmChangeWorkerAddress, + Value: big.Zero(), + }, nil) + if err != nil { + return xerrors.Errorf("mpool push: %w", err) + } + + fmt.Println("Confirm Message CID:", smsg.Cid()) + + // wait for it to get mined into a block + wait, err := api.StateWaitMsg(ctx, smsg.Cid(), build.MessageConfidence) + if err != nil { + return err + } + 
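+ // Verify the outcome: the receipt must report success, and the miner info re-read at the
+ // inclusion tipset should now show the confirmed worker key on chain.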
+ // check it executed successfully + if wait.Receipt.ExitCode.IsError() { + fmt.Fprintln(cctx.App.Writer, "Worker change failed!") + return err + } + + mi, err = api.StateMinerInfo(ctx, maddr, wait.TipSet) + if err != nil { + return err + } + if mi.Worker != newAddr { + return fmt.Errorf("Confirmed worker address change not reflected on chain: expected '%s', found '%s'", newAddr, mi.Worker) + } + + return nil + }, + } +} + +func ActorConfirmChangeBeneficiaryCmd(getActor ActorAddressGetter) *cli.Command { + return &cli.Command{ + Name: "confirm-change-beneficiary", + Usage: "Confirm a beneficiary address change", + ArgsUsage: "[minerID]", + Flags: []cli.Flag{ + &cli.BoolFlag{ + Name: "really-do-it", + Usage: "Actually send transaction performing the action", + Value: false, + }, + &cli.BoolFlag{ + Name: "existing-beneficiary", + Usage: "send confirmation from the existing beneficiary address", + }, + &cli.BoolFlag{ + Name: "new-beneficiary", + Usage: "send confirmation from the new beneficiary address", + }, + }, + Action: func(cctx *cli.Context) error { + if cctx.NArg() != 1 { + return lcli.IncorrectNumArgs(cctx) + } + + api, acloser, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return xerrors.Errorf("getting fullnode api: %w", err) + } + defer acloser() + + ctx := lcli.ReqContext(cctx) + + maddr, err := address.NewFromString(cctx.Args().First()) + if err != nil { + return xerrors.Errorf("parsing beneficiary address: %w", err) + } + + mi, err := api.StateMinerInfo(ctx, maddr, types.EmptyTSK) + if err != nil { + return xerrors.Errorf("getting miner info: %w", err) + } + + if mi.PendingBeneficiaryTerm == nil { + return fmt.Errorf("no pending beneficiary term found for miner %s", maddr) + } + + if (cctx.IsSet("existing-beneficiary") && cctx.IsSet("new-beneficiary")) || (!cctx.IsSet("existing-beneficiary") && !cctx.IsSet("new-beneficiary")) { + return lcli.ShowHelp(cctx, fmt.Errorf("must pass exactly one of --existing-beneficiary or --new-beneficiary")) + } + + var fromAddr address.Address + if cctx.IsSet("existing-beneficiary") { + if mi.PendingBeneficiaryTerm.ApprovedByBeneficiary { + return fmt.Errorf("beneficiary change already approved by current beneficiary") + } + fromAddr = mi.Beneficiary + } else { + if mi.PendingBeneficiaryTerm.ApprovedByNominee { + return fmt.Errorf("beneficiary change already approved by new beneficiary") + } + fromAddr = mi.PendingBeneficiaryTerm.NewBeneficiary + } + + fmt.Println("Confirming Pending Beneficiary Term of:") + fmt.Println("Beneficiary: ", mi.PendingBeneficiaryTerm.NewBeneficiary) + fmt.Println("Quota:", mi.PendingBeneficiaryTerm.NewQuota) + fmt.Println("Expiration Epoch:", mi.PendingBeneficiaryTerm.NewExpiration) + + if !cctx.Bool("really-do-it") { + fmt.Println("Pass --really-do-it to actually execute this action. 
Review what you're about to approve CAREFULLY please") + return nil + } + + params := &miner.ChangeBeneficiaryParams{ + NewBeneficiary: mi.PendingBeneficiaryTerm.NewBeneficiary, + NewQuota: mi.PendingBeneficiaryTerm.NewQuota, + NewExpiration: mi.PendingBeneficiaryTerm.NewExpiration, + } + + sp, err := actors.SerializeParams(params) + if err != nil { + return xerrors.Errorf("serializing params: %w", err) + } + + smsg, err := api.MpoolPushMessage(ctx, &types.Message{ + From: fromAddr, + To: maddr, + Method: builtin.MethodsMiner.ChangeBeneficiary, + Value: big.Zero(), + Params: sp, + }, nil) + if err != nil { + return xerrors.Errorf("mpool push: %w", err) + } + + fmt.Println("Confirm Message CID:", smsg.Cid()) + + // wait for it to get mined into a block + wait, err := api.StateWaitMsg(ctx, smsg.Cid(), build.MessageConfidence) + if err != nil { + return xerrors.Errorf("waiting for message to be included in block: %w", err) + } + + // check it executed successfully + if wait.Receipt.ExitCode.IsError() { + return fmt.Errorf("confirm beneficiary change failed with code %d", wait.Receipt.ExitCode) + } + + updatedMinerInfo, err := api.StateMinerInfo(ctx, maddr, types.EmptyTSK) + if err != nil { + return err + } + + if updatedMinerInfo.PendingBeneficiaryTerm == nil && updatedMinerInfo.Beneficiary == mi.PendingBeneficiaryTerm.NewBeneficiary { + fmt.Println("Beneficiary address successfully changed") + } else { + fmt.Println("Beneficiary address change awaiting additional confirmations") + } + + return nil + }, + } +} + +func ActorCompactAllocatedCmd(getActor ActorAddressGetter) *cli.Command { + return &cli.Command{ + Name: "compact-allocated", + Usage: "compact allocated sectors bitfield", + Flags: []cli.Flag{ + &cli.Uint64Flag{ + Name: "mask-last-offset", + Usage: "Mask sector IDs from 0 to 'highest_allocated - offset'", + }, + &cli.Uint64Flag{ + Name: "mask-upto-n", + Usage: "Mask sector IDs from 0 to 'n'", + }, + &cli.BoolFlag{ + Name: "really-do-it", + Usage: "Actually send transaction performing the action", + Value: false, + }, + }, + Action: func(cctx *cli.Context) error { + if !cctx.Bool("really-do-it") { + fmt.Println("Pass --really-do-it to actually execute this action") + return nil + } + + if !cctx.Args().Present() { + return xerrors.Errorf("must pass address of new owner address") + } + + api, acloser, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer acloser() + + ctx := lcli.ReqContext(cctx) + + maddr, err := getActor(cctx) + if err != nil { + return err + } + + mact, err := api.StateGetActor(ctx, maddr, types.EmptyTSK) + if err != nil { + return err + } + + store := adt.WrapStore(ctx, cbor.NewCborStore(blockstore.NewAPIBlockstore(api))) + + mst, err := lminer.Load(store, mact) + if err != nil { + return err + } + + allocs, err := mst.GetAllocatedSectors() + if err != nil { + return err + } + + var maskBf bitfield.BitField + + { + exclusiveFlags := []string{"mask-last-offset", "mask-upto-n"} + hasFlag := false + for _, f := range exclusiveFlags { + if hasFlag && cctx.IsSet(f) { + return xerrors.Errorf("more than one 'mask` flag set") + } + hasFlag = hasFlag || cctx.IsSet(f) + } + } + switch { + case cctx.IsSet("mask-last-offset"): + last, err := allocs.Last() + if err != nil { + return err + } + + m := cctx.Uint64("mask-last-offset") + if last <= m+1 { + return xerrors.Errorf("highest allocated sector lower than mask offset %d: %d", m+1, last) + } + // securty to not brick a miner + if last > 1<<60 { + return xerrors.Errorf("very high last sector number, 
refusing to mask: %d", last) + } + + maskBf, err = bitfield.NewFromIter(&rlepluslazy.RunSliceIterator{ + Runs: []rlepluslazy.Run{{Val: true, Len: last - m}}}) + if err != nil { + return xerrors.Errorf("forming bitfield: %w", err) + } + case cctx.IsSet("mask-upto-n"): + n := cctx.Uint64("mask-upto-n") + maskBf, err = bitfield.NewFromIter(&rlepluslazy.RunSliceIterator{ + Runs: []rlepluslazy.Run{{Val: true, Len: n}}}) + if err != nil { + return xerrors.Errorf("forming bitfield: %w", err) + } + default: + return xerrors.Errorf("no 'mask' flags set") + } + + mi, err := api.StateMinerInfo(ctx, maddr, types.EmptyTSK) + if err != nil { + return err + } + + params := &miner.CompactSectorNumbersParams{ + MaskSectorNumbers: maskBf, + } + + sp, err := actors.SerializeParams(params) + if err != nil { + return xerrors.Errorf("serializing params: %w", err) + } + + smsg, err := api.MpoolPushMessage(ctx, &types.Message{ + From: mi.Worker, + To: maddr, + Method: builtin.MethodsMiner.CompactSectorNumbers, + Value: big.Zero(), + Params: sp, + }, nil) + if err != nil { + return xerrors.Errorf("mpool push: %w", err) + } + + fmt.Println("CompactSectorNumbers Message CID:", smsg.Cid()) + + // wait for it to get mined into a block + wait, err := api.StateWaitMsg(ctx, smsg.Cid(), build.MessageConfidence) + if err != nil { + return err + } + + // check it executed successfully + if wait.Receipt.ExitCode.IsError() { + fmt.Println("Sector Bitfield compaction failed") + return err + } + + return nil + }, + } +} + +func isController(mi api.MinerInfo, addr address.Address) bool { + if addr == mi.Owner || addr == mi.Worker { + return true + } + + for _, ca := range mi.ControlAddresses { + if addr == ca { + return true + } + } + + return false +} diff --git a/cli/spcli/info.go b/cli/spcli/info.go new file mode 100644 index 00000000000..69436b2c7d5 --- /dev/null +++ b/cli/spcli/info.go @@ -0,0 +1,121 @@ +package spcli + +import ( + "fmt" + + "github.com/fatih/color" + "github.com/multiformats/go-multiaddr" + "github.com/urfave/cli/v2" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/big" + + "github.com/filecoin-project/lotus/chain/types" + lcli "github.com/filecoin-project/lotus/cli" + cliutil "github.com/filecoin-project/lotus/cli/util" +) + +func InfoCmd(getActorAddress ActorAddressGetter) *cli.Command { + return &cli.Command{ + Name: "info", + Usage: "Print miner actor info", + Action: func(cctx *cli.Context) error { + api, closer, err := cliutil.GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer closer() + + ctx := cliutil.ReqContext(cctx) + + ts, err := lcli.LoadTipSet(ctx, cctx, api) + if err != nil { + return err + } + + addr, err := getActorAddress(cctx) + if err != nil { + return err + } + mi, err := api.StateMinerInfo(ctx, addr, ts.Key()) + if err != nil { + return err + } + + availableBalance, err := api.StateMinerAvailableBalance(ctx, addr, ts.Key()) + if err != nil { + return xerrors.Errorf("getting miner available balance: %w", err) + } + fmt.Printf("Available Balance: %s\n", types.FIL(availableBalance)) + fmt.Printf("Owner:\t%s\n", mi.Owner) + fmt.Printf("Worker:\t%s\n", mi.Worker) + for i, controlAddress := range mi.ControlAddresses { + fmt.Printf("Control %d: \t%s\n", i, controlAddress) + } + if mi.Beneficiary != address.Undef { + fmt.Printf("Beneficiary:\t%s\n", mi.Beneficiary) + if mi.Beneficiary != mi.Owner { + fmt.Printf("Beneficiary Quota:\t%s\n", mi.BeneficiaryTerm.Quota) + fmt.Printf("Beneficiary Used Quota:\t%s\n", 
mi.BeneficiaryTerm.UsedQuota) + fmt.Printf("Beneficiary Expiration:\t%s\n", mi.BeneficiaryTerm.Expiration) + } + } + if mi.PendingBeneficiaryTerm != nil { + fmt.Printf("Pending Beneficiary Term:\n") + fmt.Printf("New Beneficiary:\t%s\n", mi.PendingBeneficiaryTerm.NewBeneficiary) + fmt.Printf("New Quota:\t%s\n", mi.PendingBeneficiaryTerm.NewQuota) + fmt.Printf("New Expiration:\t%s\n", mi.PendingBeneficiaryTerm.NewExpiration) + fmt.Printf("Approved By Beneficiary:\t%t\n", mi.PendingBeneficiaryTerm.ApprovedByBeneficiary) + fmt.Printf("Approved By Nominee:\t%t\n", mi.PendingBeneficiaryTerm.ApprovedByNominee) + } + + fmt.Printf("PeerID:\t%s\n", mi.PeerId) + fmt.Printf("Multiaddrs:\t") + for _, addr := range mi.Multiaddrs { + a, err := multiaddr.NewMultiaddrBytes(addr) + if err != nil { + return xerrors.Errorf("undecodable listen address: %w", err) + } + fmt.Printf("%s ", a) + } + fmt.Println() + fmt.Printf("Consensus Fault End:\t%d\n", mi.ConsensusFaultElapsed) + + fmt.Printf("SectorSize:\t%s (%d)\n", types.SizeStr(types.NewInt(uint64(mi.SectorSize))), mi.SectorSize) + pow, err := api.StateMinerPower(ctx, addr, ts.Key()) + if err != nil { + return err + } + + fmt.Printf("Byte Power: %s / %s (%0.4f%%)\n", + color.BlueString(types.SizeStr(pow.MinerPower.RawBytePower)), + types.SizeStr(pow.TotalPower.RawBytePower), + types.BigDivFloat( + types.BigMul(pow.MinerPower.RawBytePower, big.NewInt(100)), + pow.TotalPower.RawBytePower, + ), + ) + + fmt.Printf("Actual Power: %s / %s (%0.4f%%)\n", + color.GreenString(types.DeciStr(pow.MinerPower.QualityAdjPower)), + types.DeciStr(pow.TotalPower.QualityAdjPower), + types.BigDivFloat( + types.BigMul(pow.MinerPower.QualityAdjPower, big.NewInt(100)), + pow.TotalPower.QualityAdjPower, + ), + ) + + fmt.Println() + + cd, err := api.StateMinerProvingDeadline(ctx, addr, ts.Key()) + if err != nil { + return xerrors.Errorf("getting miner info: %w", err) + } + + fmt.Printf("Proving Period Start:\t%s\n", cliutil.EpochTime(cd.CurrentEpoch, cd.PeriodStart)) + + return nil + }, + } +} diff --git a/cli/spcli/proving.go b/cli/spcli/proving.go new file mode 100644 index 00000000000..ed4251f1b3c --- /dev/null +++ b/cli/spcli/proving.go @@ -0,0 +1,451 @@ +package spcli + +import ( + "bytes" + "fmt" + "os" + "strconv" + "strings" + "text/tabwriter" + "time" + + "github.com/fatih/color" + "github.com/urfave/cli/v2" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-bitfield" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/dline" + + "github.com/filecoin-project/lotus/blockstore" + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/store" + "github.com/filecoin-project/lotus/chain/types" + lcli "github.com/filecoin-project/lotus/cli" + cliutil "github.com/filecoin-project/lotus/cli/util" +) + +func ProvingInfoCmd(getActorAddress ActorAddressGetter) *cli.Command { + return &cli.Command{ + Name: "info", + Usage: "View current state information", + Action: func(cctx *cli.Context) error { + api, acloser, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer acloser() + + ctx := lcli.ReqContext(cctx) + + maddr, err := getActorAddress(cctx) + if err != nil { + return err + } + + head, err := api.ChainHead(ctx) + if err != nil { + return xerrors.Errorf("getting chain head: %w", err) + } + + mact, err := api.StateGetActor(ctx, maddr, head.Key()) + if err != nil { + return err + } + + stor := store.ActorStore(ctx, 
blockstore.NewAPIBlockstore(api)) + + mas, err := miner.Load(stor, mact) + if err != nil { + return err + } + + cd, err := api.StateMinerProvingDeadline(ctx, maddr, head.Key()) + if err != nil { + return xerrors.Errorf("getting miner info: %w", err) + } + + fmt.Printf("Miner: %s\n", color.BlueString("%s", maddr)) + + proving := uint64(0) + faults := uint64(0) + recovering := uint64(0) + curDeadlineSectors := uint64(0) + + if err := mas.ForEachDeadline(func(dlIdx uint64, dl miner.Deadline) error { + return dl.ForEachPartition(func(partIdx uint64, part miner.Partition) error { + if bf, err := part.LiveSectors(); err != nil { + return err + } else if count, err := bf.Count(); err != nil { + return err + } else { + proving += count + if dlIdx == cd.Index { + curDeadlineSectors += count + } + } + + if bf, err := part.FaultySectors(); err != nil { + return err + } else if count, err := bf.Count(); err != nil { + return err + } else { + faults += count + } + + if bf, err := part.RecoveringSectors(); err != nil { + return err + } else if count, err := bf.Count(); err != nil { + return err + } else { + recovering += count + } + + return nil + }) + }); err != nil { + return xerrors.Errorf("walking miner deadlines and partitions: %w", err) + } + + var faultPerc float64 + if proving > 0 { + faultPerc = float64(faults * 100 / proving) + } + + fmt.Printf("Current Epoch: %d\n", cd.CurrentEpoch) + + fmt.Printf("Proving Period Boundary: %d\n", cd.PeriodStart%cd.WPoStProvingPeriod) + fmt.Printf("Proving Period Start: %s\n", cliutil.EpochTimeTs(cd.CurrentEpoch, cd.PeriodStart, head)) + fmt.Printf("Next Period Start: %s\n\n", cliutil.EpochTimeTs(cd.CurrentEpoch, cd.PeriodStart+cd.WPoStProvingPeriod, head)) + + fmt.Printf("Faults: %d (%.2f%%)\n", faults, faultPerc) + fmt.Printf("Recovering: %d\n", recovering) + + fmt.Printf("Deadline Index: %d\n", cd.Index) + fmt.Printf("Deadline Sectors: %d\n", curDeadlineSectors) + fmt.Printf("Deadline Open: %s\n", cliutil.EpochTime(cd.CurrentEpoch, cd.Open)) + fmt.Printf("Deadline Close: %s\n", cliutil.EpochTime(cd.CurrentEpoch, cd.Close)) + fmt.Printf("Deadline Challenge: %s\n", cliutil.EpochTime(cd.CurrentEpoch, cd.Challenge)) + fmt.Printf("Deadline FaultCutoff: %s\n", cliutil.EpochTime(cd.CurrentEpoch, cd.FaultCutoff)) + return nil + }, + } +} + +func ProvingDeadlinesCmd(getActorAddress ActorAddressGetter) *cli.Command { + return &cli.Command{ + Name: "deadlines", + Usage: "View the current proving period deadlines information", + Flags: []cli.Flag{ + &cli.BoolFlag{ + Name: "all", + Usage: "Count all sectors (only live sectors are counted by default)", + Aliases: []string{"a"}, + }, + }, + Action: func(cctx *cli.Context) error { + api, acloser, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer acloser() + + ctx := lcli.ReqContext(cctx) + + maddr, err := getActorAddress(cctx) + if err != nil { + return err + } + + deadlines, err := api.StateMinerDeadlines(ctx, maddr, types.EmptyTSK) + if err != nil { + return xerrors.Errorf("getting deadlines: %w", err) + } + + di, err := api.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK) + if err != nil { + return xerrors.Errorf("getting deadlines: %w", err) + } + + head, err := api.ChainHead(ctx) + if err != nil { + return err + } + + fmt.Printf("Miner: %s\n", color.BlueString("%s", maddr)) + + tw := tabwriter.NewWriter(os.Stdout, 2, 4, 2, ' ', 0) + _, _ = fmt.Fprintln(tw, "deadline\topen\tpartitions\tsectors (faults)\tproven partitions") + + for dlIdx, deadline := range deadlines { + partitions, err := 
api.StateMinerPartitions(ctx, maddr, uint64(dlIdx), types.EmptyTSK) + if err != nil { + return xerrors.Errorf("getting partitions for deadline %d: %w", dlIdx, err) + } + + provenPartitions, err := deadline.PostSubmissions.Count() + if err != nil { + return err + } + + sectors := uint64(0) + faults := uint64(0) + var partitionCount int + + for _, partition := range partitions { + if !cctx.Bool("all") { + sc, err := partition.LiveSectors.Count() + if err != nil { + return err + } + + if sc > 0 { + partitionCount++ + } + + sectors += sc + } else { + sc, err := partition.AllSectors.Count() + if err != nil { + return err + } + + partitionCount++ + sectors += sc + } + + fc, err := partition.FaultySectors.Count() + if err != nil { + return err + } + + faults += fc + } + + var cur string + if di.Index == uint64(dlIdx) { + cur += "\t(current)" + } + + _, _ = fmt.Fprintf(tw, "%d\t%s\t%d\t%d (%d)\t%d%s\n", dlIdx, deadlineOpenTime(head, uint64(dlIdx), di), + partitionCount, sectors, faults, provenPartitions, cur) + } + + return tw.Flush() + }, + } +} + +func deadlineOpenTime(ts *types.TipSet, dlIdx uint64, di *dline.Info) string { + gapIdx := dlIdx - di.Index + gapHeight := uint64(di.WPoStProvingPeriod) / di.WPoStPeriodDeadlines * gapIdx + + openHeight := di.Open + abi.ChainEpoch(gapHeight) + genesisBlockTimestamp := ts.MinTimestamp() - uint64(ts.Height())*build.BlockDelaySecs + + return time.Unix(int64(genesisBlockTimestamp+build.BlockDelaySecs*uint64(openHeight)), 0).Format(time.TimeOnly) +} + +func ProvingDeadlineInfoCmd(getActorAddress ActorAddressGetter) *cli.Command { + return &cli.Command{ + Name: "deadline", + Usage: "View the current proving period deadline information by its index", + Flags: []cli.Flag{ + &cli.BoolFlag{ + Name: "sector-nums", + Aliases: []string{"n"}, + Usage: "Print sector/fault numbers belonging to this deadline", + }, + &cli.BoolFlag{ + Name: "bitfield", + Aliases: []string{"b"}, + Usage: "Print partition bitfield stats", + }, + }, + ArgsUsage: "", + Action: func(cctx *cli.Context) error { + + if cctx.NArg() != 1 { + return lcli.IncorrectNumArgs(cctx) + } + + dlIdx, err := strconv.ParseUint(cctx.Args().Get(0), 10, 64) + if err != nil { + return xerrors.Errorf("could not parse deadline index: %w", err) + } + + api, acloser, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer acloser() + + ctx := lcli.ReqContext(cctx) + + maddr, err := getActorAddress(cctx) + if err != nil { + return err + } + + deadlines, err := api.StateMinerDeadlines(ctx, maddr, types.EmptyTSK) + if err != nil { + return xerrors.Errorf("getting deadlines: %w", err) + } + + di, err := api.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK) + if err != nil { + return xerrors.Errorf("getting deadlines: %w", err) + } + + partitions, err := api.StateMinerPartitions(ctx, maddr, dlIdx, types.EmptyTSK) + if err != nil { + return xerrors.Errorf("getting partitions for deadline %d: %w", dlIdx, err) + } + + head, err := api.ChainHead(ctx) + if err != nil { + return err + } + + provenPartitions, err := deadlines[dlIdx].PostSubmissions.Count() + if err != nil { + return err + } + + fmt.Printf("Deadline Index: %d\n", dlIdx) + fmt.Printf("Deadline Open: %s\n", deadlineOpenTime(head, dlIdx, di)) + fmt.Printf("Partitions: %d\n", len(partitions)) + fmt.Printf("Proven Partitions: %d\n", provenPartitions) + fmt.Printf("Current: %t\n\n", di.Index == dlIdx) + + for pIdx, partition := range partitions { + fmt.Printf("Partition Index: %d\n", pIdx) + + printStats := func(bf bitfield.BitField, name 
string) error { + count, err := bf.Count() + if err != nil { + return err + } + + rit, err := bf.RunIterator() + if err != nil { + return err + } + + if cctx.Bool("bitfield") { + var ones, zeros, oneRuns, zeroRuns, invalid uint64 + for rit.HasNext() { + r, err := rit.NextRun() + if err != nil { + return xerrors.Errorf("next run: %w", err) + } + if !r.Valid() { + invalid++ + } + if r.Val { + ones += r.Len + oneRuns++ + } else { + zeros += r.Len + zeroRuns++ + } + } + + var buf bytes.Buffer + if err := bf.MarshalCBOR(&buf); err != nil { + return err + } + sz := len(buf.Bytes()) + szstr := types.SizeStr(types.NewInt(uint64(sz))) + + fmt.Printf("\t%s Sectors:%s%d (bitfield - runs %d+%d=%d - %d 0s %d 1s - %d inv - %s %dB)\n", name, strings.Repeat(" ", 18-len(name)), count, zeroRuns, oneRuns, zeroRuns+oneRuns, zeros, ones, invalid, szstr, sz) + } else { + fmt.Printf("\t%s Sectors:%s%d\n", name, strings.Repeat(" ", 18-len(name)), count) + } + + if cctx.Bool("sector-nums") { + nums, err := bf.All(count) + if err != nil { + return err + } + fmt.Printf("\t%s Sector Numbers:%s%v\n", name, strings.Repeat(" ", 12-len(name)), nums) + } + + return nil + } + + if err := printStats(partition.AllSectors, "All"); err != nil { + return err + } + if err := printStats(partition.LiveSectors, "Live"); err != nil { + return err + } + if err := printStats(partition.ActiveSectors, "Active"); err != nil { + return err + } + if err := printStats(partition.FaultySectors, "Faulty"); err != nil { + return err + } + if err := printStats(partition.RecoveringSectors, "Recovering"); err != nil { + return err + } + } + return nil + }, + } +} + +func ProvingFaultsCmd(getActorAddress ActorAddressGetter) *cli.Command { + return &cli.Command{ + Name: "faults", + Usage: "View the currently known proving faulty sectors information", + Action: func(cctx *cli.Context) error { + api, acloser, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer acloser() + + ctx := lcli.ReqContext(cctx) + + stor := store.ActorStore(ctx, blockstore.NewAPIBlockstore(api)) + + maddr, err := getActorAddress(cctx) + if err != nil { + return err + } + + mact, err := api.StateGetActor(ctx, maddr, types.EmptyTSK) + if err != nil { + return err + } + + mas, err := miner.Load(stor, mact) + if err != nil { + return err + } + + fmt.Printf("Miner: %s\n", color.BlueString("%s", maddr)) + + tw := tabwriter.NewWriter(os.Stdout, 2, 4, 2, ' ', 0) + _, _ = fmt.Fprintln(tw, "deadline\tpartition\tsectors") + err = mas.ForEachDeadline(func(dlIdx uint64, dl miner.Deadline) error { + return dl.ForEachPartition(func(partIdx uint64, part miner.Partition) error { + faults, err := part.FaultySectors() + if err != nil { + return err + } + return faults.ForEach(func(num uint64) error { + _, _ = fmt.Fprintf(tw, "%d\t%d\t%d\n", dlIdx, partIdx, num) + return nil + }) + }) + }) + if err != nil { + return err + } + return tw.Flush() + }, + } +} diff --git a/cli/spcli/sectors.go b/cli/spcli/sectors.go new file mode 100644 index 00000000000..5528c6438a2 --- /dev/null +++ b/cli/spcli/sectors.go @@ -0,0 +1,1398 @@ +package spcli + +import ( + "bufio" + "encoding/csv" + "encoding/json" + "errors" + "fmt" + "math" + "os" + "sort" + "strconv" + "strings" + "time" + + "github.com/fatih/color" + cbor "github.com/ipfs/go-ipld-cbor" + "github.com/urfave/cli/v2" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-bitfield" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" 
+ "github.com/filecoin-project/go-state-types/builtin" + miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner" + + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/blockstore" + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/actors/builtin/verifreg" + "github.com/filecoin-project/lotus/chain/actors/policy" + "github.com/filecoin-project/lotus/chain/types" + lcli "github.com/filecoin-project/lotus/cli" + cliutil "github.com/filecoin-project/lotus/cli/util" + "github.com/filecoin-project/lotus/lib/tablewriter" +) + +type OnDiskInfoGetter func(cctx *cli.Context, id abi.SectorNumber, onChainInfo bool) (api.SectorInfo, error) + +func SectorsStatusCmd(getActorAddress ActorAddressGetter, getOnDiskInfo OnDiskInfoGetter) *cli.Command { + return &cli.Command{ + Name: "status", + Usage: "Get the seal status of a sector by its number", + ArgsUsage: "", + Flags: []cli.Flag{ + &cli.BoolFlag{ + Name: "log", + Usage: "display event log", + Aliases: []string{"l"}, + }, + &cli.BoolFlag{ + Name: "on-chain-info", + Usage: "show sector on chain info", + Aliases: []string{"c"}, + }, + &cli.BoolFlag{ + Name: "partition-info", + Usage: "show partition related info", + Aliases: []string{"p"}, + }, + &cli.BoolFlag{ + Name: "proof", + Usage: "print snark proof bytes as hex", + }, + }, + Action: func(cctx *cli.Context) error { + ctx := lcli.ReqContext(cctx) + + if cctx.NArg() != 1 { + return lcli.IncorrectNumArgs(cctx) + } + + id, err := strconv.ParseUint(cctx.Args().First(), 10, 64) + if err != nil { + return err + } + + onChainInfo := cctx.Bool("on-chain-info") + + var status api.SectorInfo + if getOnDiskInfo != nil { + status, err = getOnDiskInfo(cctx, abi.SectorNumber(id), onChainInfo) + if err != nil { + return err + } + fmt.Printf("SectorID:\t%d\n", status.SectorID) + fmt.Printf("Status:\t\t%s\n", status.State) + fmt.Printf("CIDcommD:\t%s\n", status.CommD) + fmt.Printf("CIDcommR:\t%s\n", status.CommR) + fmt.Printf("Ticket:\t\t%x\n", status.Ticket.Value) + fmt.Printf("TicketH:\t%d\n", status.Ticket.Epoch) + fmt.Printf("Seed:\t\t%x\n", status.Seed.Value) + fmt.Printf("SeedH:\t\t%d\n", status.Seed.Epoch) + fmt.Printf("Precommit:\t%s\n", status.PreCommitMsg) + fmt.Printf("Commit:\t\t%s\n", status.CommitMsg) + if cctx.Bool("proof") { + fmt.Printf("Proof:\t\t%x\n", status.Proof) + } + fmt.Printf("Deals:\t\t%v\n", status.Deals) + fmt.Printf("Retries:\t%d\n", status.Retries) + if status.LastErr != "" { + fmt.Printf("Last Error:\t\t%s\n", status.LastErr) + } + + fmt.Printf("\nExpiration Info\n") + fmt.Printf("OnTime:\t\t%v\n", status.OnTime) + fmt.Printf("Early:\t\t%v\n", status.Early) + + } else { + onChainInfo = true + } + + maddr, err := getActorAddress(cctx) + if err != nil { + return err + } + + if onChainInfo { + fullApi, closer, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer closer() + + head, err := fullApi.ChainHead(ctx) + if err != nil { + return xerrors.Errorf("getting chain head: %w", err) + } + + status, err := fullApi.StateSectorGetInfo(ctx, maddr, abi.SectorNumber(id), head.Key()) + if err != nil { + return err + } + + mid, err := address.IDFromAddress(maddr) + if err != nil { + return err + } + fmt.Printf("\nSector On Chain Info\n") + fmt.Printf("SealProof:\t\t%x\n", status.SealProof) + fmt.Printf("Activation:\t\t%v\n", status.Activation) + 
fmt.Printf("Expiration:\t\t%v\n", status.Expiration) + fmt.Printf("DealWeight:\t\t%v\n", status.DealWeight) + fmt.Printf("VerifiedDealWeight:\t\t%v\n", status.VerifiedDealWeight) + fmt.Printf("InitialPledge:\t\t%v\n", types.FIL(status.InitialPledge)) + fmt.Printf("SectorID:\t\t{Miner: %v, Number: %v}\n", abi.ActorID(mid), status.SectorNumber) + } + + if cctx.Bool("partition-info") { + fullApi, nCloser, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer nCloser() + + maddr, err := getActorAddress(cctx) + if err != nil { + return err + } + + mact, err := fullApi.StateGetActor(ctx, maddr, types.EmptyTSK) + if err != nil { + return err + } + + tbs := blockstore.NewTieredBstore(blockstore.NewAPIBlockstore(fullApi), blockstore.NewMemory()) + mas, err := miner.Load(adt.WrapStore(ctx, cbor.NewCborStore(tbs)), mact) + if err != nil { + return err + } + + errFound := errors.New("found") + if err := mas.ForEachDeadline(func(dlIdx uint64, dl miner.Deadline) error { + return dl.ForEachPartition(func(partIdx uint64, part miner.Partition) error { + pas, err := part.AllSectors() + if err != nil { + return err + } + + set, err := pas.IsSet(id) + if err != nil { + return err + } + if set { + fmt.Printf("\nDeadline:\t%d\n", dlIdx) + fmt.Printf("Partition:\t%d\n", partIdx) + + checkIn := func(name string, bg func() (bitfield.BitField, error)) error { + bf, err := bg() + if err != nil { + return err + } + + set, err := bf.IsSet(id) + if err != nil { + return err + } + setstr := "no" + if set { + setstr = "yes" + } + fmt.Printf("%s: \t%s\n", name, setstr) + return nil + } + + if err := checkIn("Unproven", part.UnprovenSectors); err != nil { + return err + } + if err := checkIn("Live", part.LiveSectors); err != nil { + return err + } + if err := checkIn("Active", part.ActiveSectors); err != nil { + return err + } + if err := checkIn("Faulty", part.FaultySectors); err != nil { + return err + } + if err := checkIn("Recovering", part.RecoveringSectors); err != nil { + return err + } + + return errFound + } + + return nil + }) + }); err != errFound { + if err != nil { + return err + } + + fmt.Println("\nNot found in any partition") + } + } + + if cctx.Bool("log") { + fmt.Printf("--------\nEvent Log:\n") + + for i, l := range status.Log { + fmt.Printf("%d.\t%s:\t[%s]\t%s\n", i, time.Unix(int64(l.Timestamp), 0), l.Kind, l.Message) + if l.Trace != "" { + fmt.Printf("\t%s\n", l.Trace) + } + } + } + return nil + }, + } +} + +func SectorsListUpgradeBoundsCmd(getActorAddress ActorAddressGetter) *cli.Command { + return &cli.Command{ + Name: "upgrade-bounds", + Usage: "Output upgrade bounds for available sectors", + Flags: []cli.Flag{ + &cli.IntFlag{ + Name: "buckets", + Value: 25, + }, + &cli.BoolFlag{ + Name: "csv", + Usage: "output machine-readable values", + }, + &cli.BoolFlag{ + Name: "deal-terms", + Usage: "bucket by how many deal-sectors can start at a given expiration", + }, + }, + Action: func(cctx *cli.Context) error { + fullApi, closer2, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer closer2() + + ctx := lcli.ReqContext(cctx) + + head, err := fullApi.ChainHead(ctx) + if err != nil { + return xerrors.Errorf("getting chain head: %w", err) + } + + maddr, err := getActorAddress(cctx) + if err != nil { + return err + } + + list, err := fullApi.StateMinerActiveSectors(ctx, maddr, head.Key()) + if err != nil { + return err + } + filter := bitfield.New() + + for _, s := range list { + filter.Set(uint64(s.SectorNumber)) + } + sset, err := fullApi.StateMinerSectors(ctx, 
maddr, &filter, head.Key()) + if err != nil { + return err + } + + if len(sset) == 0 { + return nil + } + + var minExpiration, maxExpiration abi.ChainEpoch + + for _, s := range sset { + if s.Expiration < minExpiration || minExpiration == 0 { + minExpiration = s.Expiration + } + if s.Expiration > maxExpiration { + maxExpiration = s.Expiration + } + } + + buckets := cctx.Int("buckets") + bucketSize := (maxExpiration - minExpiration) / abi.ChainEpoch(buckets) + bucketCounts := make([]int, buckets+1) + + for b := range bucketCounts { + bucketMin := minExpiration + abi.ChainEpoch(b)*bucketSize + bucketMax := minExpiration + abi.ChainEpoch(b+1)*bucketSize + + if cctx.Bool("deal-terms") { + bucketMax = bucketMax + policy.MarketDefaultAllocationTermBuffer + } + + for _, s := range sset { + isInBucket := s.Expiration >= bucketMin && s.Expiration < bucketMax + + if isInBucket { + bucketCounts[b]++ + } + } + + } + + // Creating CSV writer + writer := csv.NewWriter(os.Stdout) + + // Writing CSV headers + err = writer.Write([]string{"Max Expiration in Bucket", "Sector Count"}) + if err != nil { + return xerrors.Errorf("writing csv headers: %w", err) + } + + // Writing bucket details + + if cctx.Bool("csv") { + for i := 0; i < buckets; i++ { + maxExp := minExpiration + abi.ChainEpoch(i+1)*bucketSize + + timeStr := strconv.FormatInt(int64(maxExp), 10) + + err = writer.Write([]string{ + timeStr, + strconv.Itoa(bucketCounts[i]), + }) + if err != nil { + return xerrors.Errorf("writing csv row: %w", err) + } + } + + // Flush to make sure all data is written to the underlying writer + writer.Flush() + + if err := writer.Error(); err != nil { + return xerrors.Errorf("flushing csv writer: %w", err) + } + + return nil + } + + tw := tablewriter.New( + tablewriter.Col("Bucket Expiration"), + tablewriter.Col("Sector Count"), + tablewriter.Col("Bar"), + ) + + var barCols = 40 + var maxCount int + + for _, c := range bucketCounts { + if c > maxCount { + maxCount = c + } + } + + for i := 0; i < buckets; i++ { + maxExp := minExpiration + abi.ChainEpoch(i+1)*bucketSize + timeStr := cliutil.EpochTime(head.Height(), maxExp) + + tw.Write(map[string]interface{}{ + "Bucket Expiration": timeStr, + "Sector Count": color.YellowString("%d", bucketCounts[i]), + "Bar": "[" + color.GreenString(strings.Repeat("|", bucketCounts[i]*barCols/maxCount)) + strings.Repeat(" ", barCols-bucketCounts[i]*barCols/maxCount) + "]", + }) + } + + return tw.Flush(os.Stdout) + }, + } +} + +func SectorPreCommitsCmd(getActorAddress ActorAddressGetter) *cli.Command { + return &cli.Command{ + Name: "precommits", + Usage: "Print on-chain precommit info", + Action: func(cctx *cli.Context) error { + ctx := lcli.ReqContext(cctx) + mapi, closer, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer closer() + maddr, err := getActorAddress(cctx) + if err != nil { + return err + } + mact, err := mapi.StateGetActor(ctx, maddr, types.EmptyTSK) + if err != nil { + return err + } + store := adt.WrapStore(ctx, cbor.NewCborStore(blockstore.NewAPIBlockstore(mapi))) + mst, err := miner.Load(store, mact) + if err != nil { + return err + } + preCommitSector := make([]miner.SectorPreCommitOnChainInfo, 0) + err = mst.ForEachPrecommittedSector(func(info miner.SectorPreCommitOnChainInfo) error { + preCommitSector = append(preCommitSector, info) + return err + }) + less := func(i, j int) bool { + return preCommitSector[i].Info.SectorNumber <= preCommitSector[j].Info.SectorNumber + } + sort.Slice(preCommitSector, less) + for _, info := range 
preCommitSector { + fmt.Printf("%s: %s\n", info.Info.SectorNumber, info.PreCommitEpoch) + } + + return nil + }, + } +} + +func SectorsCheckExpireCmd(getActorAddress ActorAddressGetter) *cli.Command { + return &cli.Command{ + Name: "check-expire", + Usage: "Inspect expiring sectors", + Flags: []cli.Flag{ + &cli.Int64Flag{ + Name: "cutoff", + Usage: "skip sectors whose current expiration is more than epochs from now, defaults to 60 days", + Value: 172800, + }, + }, + Action: func(cctx *cli.Context) error { + + fullApi, nCloser, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer nCloser() + + ctx := lcli.ReqContext(cctx) + + maddr, err := getActorAddress(cctx) + if err != nil { + return err + } + + head, err := fullApi.ChainHead(ctx) + if err != nil { + return err + } + currEpoch := head.Height() + + nv, err := fullApi.StateNetworkVersion(ctx, types.EmptyTSK) + if err != nil { + return err + } + + sectors, err := fullApi.StateMinerActiveSectors(ctx, maddr, types.EmptyTSK) + if err != nil { + return err + } + + n := 0 + for _, s := range sectors { + if s.Expiration-currEpoch <= abi.ChainEpoch(cctx.Int64("cutoff")) { + sectors[n] = s + n++ + } + } + sectors = sectors[:n] + + sort.Slice(sectors, func(i, j int) bool { + if sectors[i].Expiration == sectors[j].Expiration { + return sectors[i].SectorNumber < sectors[j].SectorNumber + } + return sectors[i].Expiration < sectors[j].Expiration + }) + + tw := tablewriter.New( + tablewriter.Col("ID"), + tablewriter.Col("SealProof"), + tablewriter.Col("InitialPledge"), + tablewriter.Col("Activation"), + tablewriter.Col("Expiration"), + tablewriter.Col("MaxExpiration"), + tablewriter.Col("MaxExtendNow")) + + for _, sector := range sectors { + MaxExpiration := sector.Activation + policy.GetSectorMaxLifetime(sector.SealProof, nv) + maxExtension, err := policy.GetMaxSectorExpirationExtension(nv) + if err != nil { + return xerrors.Errorf("failed to get max extension: %w", err) + } + + MaxExtendNow := currEpoch + maxExtension + + if MaxExtendNow > MaxExpiration { + MaxExtendNow = MaxExpiration + } + + tw.Write(map[string]interface{}{ + "ID": sector.SectorNumber, + "SealProof": sector.SealProof, + "InitialPledge": types.FIL(sector.InitialPledge).Short(), + "Activation": cliutil.EpochTime(currEpoch, sector.Activation), + "Expiration": cliutil.EpochTime(currEpoch, sector.Expiration), + "MaxExpiration": cliutil.EpochTime(currEpoch, MaxExpiration), + "MaxExtendNow": cliutil.EpochTime(currEpoch, MaxExtendNow), + }) + } + + return tw.Flush(os.Stdout) + }, + } +} + +func SectorsExtendCmd(getActorAddress ActorAddressGetter) *cli.Command { + return &cli.Command{ + Name: "extend", + Usage: "Extend expiring sectors while not exceeding each sector's max life", + ArgsUsage: "", + Flags: []cli.Flag{ + &cli.Int64Flag{ + Name: "from", + Usage: "only consider sectors whose current expiration epoch is in the range of [from, to], defaults to: now + 120 (1 hour)", + }, + &cli.Int64Flag{ + Name: "to", + Usage: "only consider sectors whose current expiration epoch is in the range of [from, to], defaults to: now + 92160 (32 days)", + }, + &cli.StringFlag{ + Name: "sector-file", + Usage: "provide a file containing one sector number in each line, ignoring above selecting criteria", + }, + &cli.StringFlag{ + Name: "exclude", + Usage: "optionally provide a file containing excluding sectors", + }, + &cli.Int64Flag{ + Name: "extension", + Usage: "try to extend selected sectors by this number of epochs, defaults to 540 days", + Value: 1555200, + }, + &cli.Int64Flag{ + 
Name: "new-expiration", + Usage: "try to extend selected sectors to this epoch, ignoring extension", + }, + &cli.BoolFlag{ + Name: "only-cc", + Usage: "only extend CC sectors (useful for making sector ready for snap upgrade)", + }, + &cli.BoolFlag{ + Name: "drop-claims", + Usage: "drop claims for sectors that can be extended, but only by dropping some of their verified power claims", + }, + &cli.Int64Flag{ + Name: "tolerance", + Usage: "don't try to extend sectors by fewer than this number of epochs, defaults to 7 days", + Value: 20160, + }, + &cli.StringFlag{ + Name: "max-fee", + Usage: "use up to this amount of FIL for one message. pass this flag to avoid message congestion.", + Value: "0", + }, + &cli.Int64Flag{ + Name: "max-sectors", + Usage: "the maximum number of sectors contained in each message", + }, + &cli.BoolFlag{ + Name: "really-do-it", + Usage: "pass this flag to really extend sectors, otherwise will only print out json representation of parameters", + }, + }, + Action: func(cctx *cli.Context) error { + mf, err := types.ParseFIL(cctx.String("max-fee")) + if err != nil { + return err + } + + spec := &api.MessageSendSpec{MaxFee: abi.TokenAmount(mf)} + + fullApi, nCloser, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer nCloser() + + ctx := lcli.ReqContext(cctx) + + maddr, err := getActorAddress(cctx) + if err != nil { + return err + } + + head, err := fullApi.ChainHead(ctx) + if err != nil { + return err + } + currEpoch := head.Height() + + nv, err := fullApi.StateNetworkVersion(ctx, types.EmptyTSK) + if err != nil { + return err + } + + activeSet, err := fullApi.StateMinerActiveSectors(ctx, maddr, types.EmptyTSK) + if err != nil { + return err + } + + activeSectorsInfo := make(map[abi.SectorNumber]*miner.SectorOnChainInfo, len(activeSet)) + for _, info := range activeSet { + activeSectorsInfo[info.SectorNumber] = info + } + + mact, err := fullApi.StateGetActor(ctx, maddr, types.EmptyTSK) + if err != nil { + return err + } + + tbs := blockstore.NewTieredBstore(blockstore.NewAPIBlockstore(fullApi), blockstore.NewMemory()) + adtStore := adt.WrapStore(ctx, cbor.NewCborStore(tbs)) + mas, err := miner.Load(adtStore, mact) + if err != nil { + return err + } + + activeSectorsLocation := make(map[abi.SectorNumber]*miner.SectorLocation, len(activeSet)) + + if err := mas.ForEachDeadline(func(dlIdx uint64, dl miner.Deadline) error { + return dl.ForEachPartition(func(partIdx uint64, part miner.Partition) error { + pas, err := part.ActiveSectors() + if err != nil { + return err + } + + return pas.ForEach(func(i uint64) error { + activeSectorsLocation[abi.SectorNumber(i)] = &miner.SectorLocation{ + Deadline: dlIdx, + Partition: partIdx, + } + return nil + }) + }) + }); err != nil { + return err + } + + excludeSet := make(map[abi.SectorNumber]struct{}) + if cctx.IsSet("exclude") { + excludeSectors, err := getSectorsFromFile(cctx.String("exclude")) + if err != nil { + return err + } + + for _, id := range excludeSectors { + excludeSet[id] = struct{}{} + } + } + + var sectors []abi.SectorNumber + if cctx.Args().Present() { + if cctx.IsSet("sector-file") { + return xerrors.Errorf("sector-file specified along with command line params") + } + + for i, s := range cctx.Args().Slice() { + id, err := strconv.ParseUint(s, 10, 64) + if err != nil { + return xerrors.Errorf("could not parse sector %d: %w", i, err) + } + + sectors = append(sectors, abi.SectorNumber(id)) + } + } else if cctx.IsSet("sector-file") { + sectors, err = getSectorsFromFile(cctx.String("sector-file")) + 
if err != nil { + return err + } + } else { + from := currEpoch + 120 + to := currEpoch + 92160 + + if cctx.IsSet("from") { + from = abi.ChainEpoch(cctx.Int64("from")) + } + + if cctx.IsSet("to") { + to = abi.ChainEpoch(cctx.Int64("to")) + } + + for _, si := range activeSet { + if si.Expiration >= from && si.Expiration <= to { + sectors = append(sectors, si.SectorNumber) + } + } + } + + var sis []*miner.SectorOnChainInfo + for _, id := range sectors { + if _, exclude := excludeSet[id]; exclude { + continue + } + + si, found := activeSectorsInfo[id] + if !found { + return xerrors.Errorf("sector %d is not active", id) + } + if len(si.DealIDs) > 0 && cctx.Bool("only-cc") { + continue + } + + sis = append(sis, si) + } + + withinTolerance := func(a, b abi.ChainEpoch) bool { + diff := a - b + if diff < 0 { + diff = -diff + } + + return diff <= abi.ChainEpoch(cctx.Int64("tolerance")) + } + + extensions := map[miner.SectorLocation]map[abi.ChainEpoch][]abi.SectorNumber{} + for _, si := range sis { + extension := abi.ChainEpoch(cctx.Int64("extension")) + newExp := si.Expiration + extension + + if cctx.IsSet("new-expiration") { + newExp = abi.ChainEpoch(cctx.Int64("new-expiration")) + } + + maxExtension, err := policy.GetMaxSectorExpirationExtension(nv) + if err != nil { + return xerrors.Errorf("failed to get max extension: %w", err) + } + + maxExtendNow := currEpoch + maxExtension + if newExp > maxExtendNow { + newExp = maxExtendNow + } + + maxExp := si.Activation + policy.GetSectorMaxLifetime(si.SealProof, nv) + if newExp > maxExp { + newExp = maxExp + } + + if newExp <= si.Expiration || withinTolerance(newExp, si.Expiration) { + continue + } + + l, found := activeSectorsLocation[si.SectorNumber] + if !found { + return xerrors.Errorf("location for sector %d not found", si.SectorNumber) + } + + es, found := extensions[*l] + if !found { + ne := make(map[abi.ChainEpoch][]abi.SectorNumber) + ne[newExp] = []abi.SectorNumber{si.SectorNumber} + extensions[*l] = ne + } else { + added := false + for exp := range es { + if withinTolerance(newExp, exp) { + es[exp] = append(es[exp], si.SectorNumber) + added = true + break + } + } + + if !added { + es[newExp] = []abi.SectorNumber{si.SectorNumber} + } + } + } + + verifregAct, err := fullApi.StateGetActor(ctx, builtin.VerifiedRegistryActorAddr, types.EmptyTSK) + if err != nil { + return xerrors.Errorf("failed to lookup verifreg actor: %w", err) + } + + verifregSt, err := verifreg.Load(adtStore, verifregAct) + if err != nil { + return xerrors.Errorf("failed to load verifreg state: %w", err) + } + + claimsMap, err := verifregSt.GetClaims(maddr) + if err != nil { + return xerrors.Errorf("failed to lookup claims for miner: %w", err) + } + + claimIdsBySector, err := verifregSt.GetClaimIdsBySector(maddr) + if err != nil { + return xerrors.Errorf("failed to lookup claim IDs by sector: %w", err) + } + + sectorsMax, err := policy.GetAddressedSectorsMax(nv) + if err != nil { + return err + } + + declMax, err := policy.GetDeclarationsMax(nv) + if err != nil { + return err + } + + addrSectors := sectorsMax + if cctx.Int("max-sectors") != 0 { + addrSectors = cctx.Int("max-sectors") + if addrSectors > sectorsMax { + return xerrors.Errorf("the specified max-sectors exceeds the maximum limit") + } + } + + var params []miner.ExtendSectorExpiration2Params + + p := miner.ExtendSectorExpiration2Params{} + scount := 0 + + for l, exts := range extensions { + for newExp, numbers := range exts { + sectorsWithoutClaimsToExtend := bitfield.New() + var sectorsWithClaims []miner.SectorClaim 
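+ // Split the sectors that share this new expiration into those without verified claims
+ // (extended via a plain bitfield) and those whose claims must be explicitly maintained
+ // or, when --drop-claims is set and FIP-0045 limits allow, dropped.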
+ for _, sectorNumber := range numbers { + claimIdsToMaintain := make([]verifreg.ClaimId, 0) + claimIdsToDrop := make([]verifreg.ClaimId, 0) + cannotExtendSector := false + claimIds, ok := claimIdsBySector[sectorNumber] + // Nothing to check, add to ccSectors + if !ok { + sectorsWithoutClaimsToExtend.Set(uint64(sectorNumber)) + } else { + for _, claimId := range claimIds { + claim, ok := claimsMap[claimId] + if !ok { + return xerrors.Errorf("failed to find claim for claimId %d", claimId) + } + claimExpiration := claim.TermStart + claim.TermMax + // can be maintained in the extended sector + if claimExpiration > newExp { + claimIdsToMaintain = append(claimIdsToMaintain, claimId) + } else { + sectorInfo, ok := activeSectorsInfo[sectorNumber] + if !ok { + return xerrors.Errorf("failed to find sector in active sector set: %w", err) + } + if !cctx.Bool("drop-claims") || + // FIP-0045 requires the claim minimum duration to have passed + currEpoch <= (claim.TermStart+claim.TermMin) || + // FIP-0045 requires the sector to be in its last 30 days of life + (currEpoch <= sectorInfo.Expiration-builtin.EndOfLifeClaimDropPeriod) { + fmt.Printf("skipping sector %d because claim %d does not live long enough \n", sectorNumber, claimId) + cannotExtendSector = true + break + } + + claimIdsToDrop = append(claimIdsToDrop, claimId) + } + } + if cannotExtendSector { + continue + } + + if len(claimIdsToMaintain)+len(claimIdsToDrop) != 0 { + sectorsWithClaims = append(sectorsWithClaims, miner.SectorClaim{ + SectorNumber: sectorNumber, + MaintainClaims: claimIdsToMaintain, + DropClaims: claimIdsToDrop, + }) + } + } + } + + sectorsWithoutClaimsCount, err := sectorsWithoutClaimsToExtend.Count() + if err != nil { + return xerrors.Errorf("failed to count cc sectors: %w", err) + } + + sectorsInDecl := int(sectorsWithoutClaimsCount) + len(sectorsWithClaims) + scount += sectorsInDecl + + if scount > addrSectors || len(p.Extensions) >= declMax { + params = append(params, p) + p = miner.ExtendSectorExpiration2Params{} + scount = sectorsInDecl + } + + p.Extensions = append(p.Extensions, miner.ExpirationExtension2{ + Deadline: l.Deadline, + Partition: l.Partition, + Sectors: SectorNumsToBitfield(numbers), + SectorsWithClaims: sectorsWithClaims, + NewExpiration: newExp, + }) + + } + } + + // if we have any sectors, then one last append is needed here + if scount != 0 { + params = append(params, p) + } + + if len(params) == 0 { + fmt.Println("nothing to extend") + return nil + } + + mi, err := fullApi.StateMinerInfo(ctx, maddr, types.EmptyTSK) + if err != nil { + return xerrors.Errorf("getting miner info: %w", err) + } + + stotal := 0 + + for i := range params { + scount := 0 + for _, ext := range params[i].Extensions { + count, err := ext.Sectors.Count() + if err != nil { + return err + } + scount += int(count) + } + fmt.Printf("Extending %d sectors: ", scount) + stotal += scount + + if !cctx.Bool("really-do-it") { + pp, err := NewPseudoExtendParams(¶ms[i]) + if err != nil { + return err + } + + data, err := json.MarshalIndent(pp, "", " ") + if err != nil { + return err + } + + fmt.Println("\n", string(data)) + continue + } + + sp, aerr := actors.SerializeParams(¶ms[i]) + if aerr != nil { + return xerrors.Errorf("serializing params: %w", err) + } + + smsg, err := fullApi.MpoolPushMessage(ctx, &types.Message{ + From: mi.Worker, + To: maddr, + Method: builtin.MethodsMiner.ExtendSectorExpiration2, + Value: big.Zero(), + Params: sp, + }, spec) + if err != nil { + return xerrors.Errorf("mpool push message: %w", err) + } + + 
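+ // Only reached with --really-do-it: the dry-run path above prints the JSON-encoded
+ // pseudo params for review and continues without pushing a message.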
fmt.Println(smsg.Cid()) + } + + fmt.Printf("%d sectors extended\n", stotal) + + return nil + }, + } +} + +func SectorNumsToBitfield(sectors []abi.SectorNumber) bitfield.BitField { + var numbers []uint64 + for _, sector := range sectors { + numbers = append(numbers, uint64(sector)) + } + + return bitfield.NewFromSet(numbers) +} + +func getSectorsFromFile(filePath string) ([]abi.SectorNumber, error) { + file, err := os.Open(filePath) + if err != nil { + return nil, err + } + + scanner := bufio.NewScanner(file) + sectors := make([]abi.SectorNumber, 0) + + for scanner.Scan() { + line := scanner.Text() + + id, err := strconv.ParseUint(line, 10, 64) + if err != nil { + return nil, xerrors.Errorf("could not parse %s as sector id: %s", line, err) + } + + sectors = append(sectors, abi.SectorNumber(id)) + } + + if err = file.Close(); err != nil { + return nil, err + } + + return sectors, nil +} + +func NewPseudoExtendParams(p *miner.ExtendSectorExpiration2Params) (*PseudoExtendSectorExpirationParams, error) { + res := PseudoExtendSectorExpirationParams{} + for _, ext := range p.Extensions { + scount, err := ext.Sectors.Count() + if err != nil { + return nil, err + } + + sectors, err := ext.Sectors.All(scount) + if err != nil { + return nil, err + } + + res.Extensions = append(res.Extensions, PseudoExpirationExtension{ + Deadline: ext.Deadline, + Partition: ext.Partition, + Sectors: ArrayToString(sectors), + NewExpiration: ext.NewExpiration, + }) + } + return &res, nil +} + +type PseudoExtendSectorExpirationParams struct { + Extensions []PseudoExpirationExtension +} + +type PseudoExpirationExtension struct { + Deadline uint64 + Partition uint64 + Sectors string + NewExpiration abi.ChainEpoch +} + +// ArrayToString Example: {1,3,4,5,8,9} -> "1,3-5,8-9" +func ArrayToString(array []uint64) string { + sort.Slice(array, func(i, j int) bool { + return array[i] < array[j] + }) + + var sarray []string + s := "" + + for i, elm := range array { + if i == 0 { + s = strconv.FormatUint(elm, 10) + continue + } + if elm == array[i-1] { + continue // filter out duplicates + } else if elm == array[i-1]+1 { + s = strings.Split(s, "-")[0] + "-" + strconv.FormatUint(elm, 10) + } else { + sarray = append(sarray, s) + s = strconv.FormatUint(elm, 10) + } + } + + if s != "" { + sarray = append(sarray, s) + } + + return strings.Join(sarray, ",") +} + +func SectorsCompactPartitionsCmd(getActorAddress ActorAddressGetter) *cli.Command { + return &cli.Command{ + Name: "compact-partitions", + Usage: "removes dead sectors from partitions and reduces the number of partitions used if possible", + Flags: []cli.Flag{ + &cli.Uint64Flag{ + Name: "deadline", + Usage: "the deadline to compact the partitions in", + Required: true, + }, + &cli.Int64SliceFlag{ + Name: "partitions", + Usage: "list of partitions to compact sectors in", + Required: true, + }, + &cli.BoolFlag{ + Name: "really-do-it", + Usage: "Actually send transaction performing the action", + Value: false, + }, + }, + Action: func(cctx *cli.Context) error { + fullNodeAPI, acloser, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer acloser() + + ctx := lcli.ReqContext(cctx) + + maddr, err := getActorAddress(cctx) + if err != nil { + return err + } + + minfo, err := fullNodeAPI.StateMinerInfo(ctx, maddr, types.EmptyTSK) + if err != nil { + return err + } + + deadline := cctx.Uint64("deadline") + if deadline > miner.WPoStPeriodDeadlines { + return fmt.Errorf("deadline %d out of range", deadline) + } + + parts := cctx.Int64Slice("partitions") + if 
len(parts) <= 0 { + return fmt.Errorf("must include at least one partition to compact") + } + fmt.Printf("compacting %d partitions\n", len(parts)) + + var makeMsgForPartitions func(partitionsBf bitfield.BitField) ([]*types.Message, error) + makeMsgForPartitions = func(partitionsBf bitfield.BitField) ([]*types.Message, error) { + params := miner.CompactPartitionsParams{ + Deadline: deadline, + Partitions: partitionsBf, + } + + sp, aerr := actors.SerializeParams(¶ms) + if aerr != nil { + return nil, xerrors.Errorf("serializing params: %w", err) + } + + msg := &types.Message{ + From: minfo.Worker, + To: maddr, + Method: builtin.MethodsMiner.CompactPartitions, + Value: big.Zero(), + Params: sp, + } + + estimatedMsg, err := fullNodeAPI.GasEstimateMessageGas(ctx, msg, nil, types.EmptyTSK) + if err != nil && errors.Is(err, &api.ErrOutOfGas{}) { + // the message is too big -- split into 2 + partitionsSlice, err := partitionsBf.All(math.MaxUint64) + if err != nil { + return nil, err + } + + partitions1 := bitfield.New() + for i := 0; i < len(partitionsSlice)/2; i++ { + partitions1.Set(uint64(i)) + } + + msgs1, err := makeMsgForPartitions(partitions1) + if err != nil { + return nil, err + } + + // time for the second half + partitions2 := bitfield.New() + for i := len(partitionsSlice) / 2; i < len(partitionsSlice); i++ { + partitions2.Set(uint64(i)) + } + + msgs2, err := makeMsgForPartitions(partitions2) + if err != nil { + return nil, err + } + + return append(msgs1, msgs2...), nil + } else if err != nil { + return nil, err + } + + return []*types.Message{estimatedMsg}, nil + } + + partitions := bitfield.New() + for _, partition := range parts { + partitions.Set(uint64(partition)) + } + + msgs, err := makeMsgForPartitions(partitions) + if err != nil { + return xerrors.Errorf("failed to make messages: %w", err) + } + + // Actually send the messages if really-do-it provided, simulate otherwise + if cctx.Bool("really-do-it") { + smsgs, err := fullNodeAPI.MpoolBatchPushMessage(ctx, msgs, nil) + if err != nil { + return xerrors.Errorf("mpool push: %w", err) + } + + if len(smsgs) == 1 { + fmt.Printf("Requested compact partitions in message %s\n", smsgs[0].Cid()) + } else { + fmt.Printf("Requested compact partitions in %d messages\n\n", len(smsgs)) + for _, v := range smsgs { + fmt.Println(v.Cid()) + } + } + + for _, v := range smsgs { + wait, err := fullNodeAPI.StateWaitMsg(ctx, v.Cid(), 2) + if err != nil { + return err + } + + // check it executed successfully + if wait.Receipt.ExitCode.IsError() { + fmt.Println(cctx.App.Writer, "compact partitions msg %s failed!", v.Cid()) + return err + } + } + + return nil + } + + for i, v := range msgs { + fmt.Printf("total of %d CompactPartitions msgs would be sent\n", len(msgs)) + + estMsg, err := fullNodeAPI.GasEstimateMessageGas(ctx, v, nil, types.EmptyTSK) + if err != nil { + return err + } + + fmt.Printf("msg %d would cost up to %s\n", i+1, types.FIL(estMsg.RequiredFunds())) + } + + return nil + + }, + } +} + +func TerminateSectorCmd(getActorAddress ActorAddressGetter) *cli.Command { + return &cli.Command{ + Name: "terminate", + Usage: "Forcefully terminate a sector (WARNING: This means losing power and pay a one-time termination penalty(including collateral) for the terminated sector)", + ArgsUsage: "[sectorNum1 sectorNum2 ...]", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "actor", + Usage: "specify the address of miner actor", + }, + &cli.BoolFlag{ + Name: "really-do-it", + Usage: "pass this flag if you know what you are doing", + }, + &cli.StringFlag{ 
+ Name: "from", + Usage: "specify the address to send the terminate message from", + }, + }, + Action: func(cctx *cli.Context) error { + if cctx.NArg() < 1 { + return lcli.ShowHelp(cctx, fmt.Errorf("at least one sector must be specified")) + } + + var maddr address.Address + if act := cctx.String("actor"); act != "" { + var err error + maddr, err = address.NewFromString(act) + if err != nil { + return fmt.Errorf("parsing address %s: %w", act, err) + } + } + + if !cctx.Bool("really-do-it") { + return fmt.Errorf("this is a command for advanced users, only use it if you are sure of what you are doing") + } + + nodeApi, closer, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer closer() + + ctx := lcli.ReqContext(cctx) + + if maddr.Empty() { + maddr, err = getActorAddress(cctx) + if err != nil { + return err + } + } + + mi, err := nodeApi.StateMinerInfo(ctx, maddr, types.EmptyTSK) + if err != nil { + return err + } + + terminationDeclarationParams := []miner2.TerminationDeclaration{} + + for _, sn := range cctx.Args().Slice() { + sectorNum, err := strconv.ParseUint(sn, 10, 64) + if err != nil { + return fmt.Errorf("could not parse sector number: %w", err) + } + + sectorbit := bitfield.New() + sectorbit.Set(sectorNum) + + loca, err := nodeApi.StateSectorPartition(ctx, maddr, abi.SectorNumber(sectorNum), types.EmptyTSK) + if err != nil { + return fmt.Errorf("get state sector partition %s", err) + } + + para := miner2.TerminationDeclaration{ + Deadline: loca.Deadline, + Partition: loca.Partition, + Sectors: sectorbit, + } + + terminationDeclarationParams = append(terminationDeclarationParams, para) + } + + terminateSectorParams := &miner2.TerminateSectorsParams{ + Terminations: terminationDeclarationParams, + } + + sp, err := actors.SerializeParams(terminateSectorParams) + if err != nil { + return xerrors.Errorf("serializing params: %w", err) + } + + var fromAddr address.Address + if from := cctx.String("from"); from != "" { + var err error + fromAddr, err = address.NewFromString(from) + if err != nil { + return fmt.Errorf("parsing address %s: %w", from, err) + } + } else { + fromAddr = mi.Worker + } + + smsg, err := nodeApi.MpoolPushMessage(ctx, &types.Message{ + From: fromAddr, + To: maddr, + Method: builtin.MethodsMiner.TerminateSectors, + + Value: big.Zero(), + Params: sp, + }, nil) + if err != nil { + return xerrors.Errorf("mpool push message: %w", err) + } + + fmt.Println("sent termination message:", smsg.Cid()) + + wait, err := nodeApi.StateWaitMsg(ctx, smsg.Cid(), uint64(cctx.Int("confidence"))) + if err != nil { + return err + } + + if wait.Receipt.ExitCode.IsError() { + return fmt.Errorf("terminate sectors message returned exit %d", wait.Receipt.ExitCode) + } + + return nil + }, + } +} diff --git a/cli/spcli/statemeta.go b/cli/spcli/statemeta.go new file mode 100644 index 00000000000..72de87807b8 --- /dev/null +++ b/cli/spcli/statemeta.go @@ -0,0 +1,95 @@ +package spcli + +import ( + "github.com/fatih/color" + + sealing "github.com/filecoin-project/lotus/storage/pipeline" +) + +type StateMeta struct { + I int + Col color.Attribute + State sealing.SectorState +} + +var StateOrder = map[sealing.SectorState]StateMeta{} +var StateList = []StateMeta{ + {Col: 39, State: "Total"}, + {Col: color.FgGreen, State: sealing.Proving}, + {Col: color.FgGreen, State: sealing.Available}, + {Col: color.FgGreen, State: sealing.UpdateActivating}, + + {Col: color.FgMagenta, State: sealing.ReceiveSector}, + + {Col: color.FgBlue, State: sealing.Empty}, + {Col: color.FgBlue, State: 
sealing.WaitDeals}, + {Col: color.FgBlue, State: sealing.AddPiece}, + {Col: color.FgBlue, State: sealing.SnapDealsWaitDeals}, + {Col: color.FgBlue, State: sealing.SnapDealsAddPiece}, + + {Col: color.FgRed, State: sealing.UndefinedSectorState}, + {Col: color.FgYellow, State: sealing.Packing}, + {Col: color.FgYellow, State: sealing.GetTicket}, + {Col: color.FgYellow, State: sealing.PreCommit1}, + {Col: color.FgYellow, State: sealing.PreCommit2}, + {Col: color.FgYellow, State: sealing.PreCommitting}, + {Col: color.FgYellow, State: sealing.PreCommitWait}, + {Col: color.FgYellow, State: sealing.SubmitPreCommitBatch}, + {Col: color.FgYellow, State: sealing.PreCommitBatchWait}, + {Col: color.FgYellow, State: sealing.WaitSeed}, + {Col: color.FgYellow, State: sealing.Committing}, + {Col: color.FgYellow, State: sealing.CommitFinalize}, + {Col: color.FgYellow, State: sealing.SubmitCommit}, + {Col: color.FgYellow, State: sealing.CommitWait}, + {Col: color.FgYellow, State: sealing.SubmitCommitAggregate}, + {Col: color.FgYellow, State: sealing.CommitAggregateWait}, + {Col: color.FgYellow, State: sealing.FinalizeSector}, + {Col: color.FgYellow, State: sealing.SnapDealsPacking}, + {Col: color.FgYellow, State: sealing.UpdateReplica}, + {Col: color.FgYellow, State: sealing.ProveReplicaUpdate}, + {Col: color.FgYellow, State: sealing.SubmitReplicaUpdate}, + {Col: color.FgYellow, State: sealing.ReplicaUpdateWait}, + {Col: color.FgYellow, State: sealing.WaitMutable}, + {Col: color.FgYellow, State: sealing.FinalizeReplicaUpdate}, + {Col: color.FgYellow, State: sealing.ReleaseSectorKey}, + + {Col: color.FgCyan, State: sealing.Terminating}, + {Col: color.FgCyan, State: sealing.TerminateWait}, + {Col: color.FgCyan, State: sealing.TerminateFinality}, + {Col: color.FgCyan, State: sealing.TerminateFailed}, + {Col: color.FgCyan, State: sealing.Removing}, + {Col: color.FgCyan, State: sealing.Removed}, + {Col: color.FgCyan, State: sealing.AbortUpgrade}, + + {Col: color.FgRed, State: sealing.FailedUnrecoverable}, + {Col: color.FgRed, State: sealing.AddPieceFailed}, + {Col: color.FgRed, State: sealing.SealPreCommit1Failed}, + {Col: color.FgRed, State: sealing.SealPreCommit2Failed}, + {Col: color.FgRed, State: sealing.PreCommitFailed}, + {Col: color.FgRed, State: sealing.ComputeProofFailed}, + {Col: color.FgRed, State: sealing.RemoteCommitFailed}, + {Col: color.FgRed, State: sealing.CommitFailed}, + {Col: color.FgRed, State: sealing.CommitFinalizeFailed}, + {Col: color.FgRed, State: sealing.PackingFailed}, + {Col: color.FgRed, State: sealing.FinalizeFailed}, + {Col: color.FgRed, State: sealing.Faulty}, + {Col: color.FgRed, State: sealing.FaultReported}, + {Col: color.FgRed, State: sealing.FaultedFinal}, + {Col: color.FgRed, State: sealing.RemoveFailed}, + {Col: color.FgRed, State: sealing.DealsExpired}, + {Col: color.FgRed, State: sealing.RecoverDealIDs}, + {Col: color.FgRed, State: sealing.SnapDealsAddPieceFailed}, + {Col: color.FgRed, State: sealing.SnapDealsDealsExpired}, + {Col: color.FgRed, State: sealing.ReplicaUpdateFailed}, + {Col: color.FgRed, State: sealing.ReleaseSectorKeyFailed}, + {Col: color.FgRed, State: sealing.FinalizeReplicaUpdateFailed}, +} + +func init() { + for i, state := range StateList { + StateOrder[state.State] = StateMeta{ + I: i, + Col: state.Col, + } + } +} diff --git a/cli/spcli/util.go b/cli/spcli/util.go new file mode 100644 index 00000000000..71ac371fec7 --- /dev/null +++ b/cli/spcli/util.go @@ -0,0 +1,9 @@ +package spcli + +import ( + "github.com/urfave/cli/v2" + + 
"github.com/filecoin-project/go-address" +) + +type ActorAddressGetter func(cctx *cli.Context) (address address.Address, err error) diff --git a/cli/state.go b/cli/state.go index f7d7e7127a9..343e68b5389 100644 --- a/cli/state.go +++ b/cli/state.go @@ -17,10 +17,8 @@ import ( "text/tabwriter" "time" - "github.com/fatih/color" "github.com/ipfs/go-cid" cbor "github.com/ipfs/go-ipld-cbor" - "github.com/multiformats/go-multiaddr" "github.com/urfave/cli/v2" cbg "github.com/whyrusleeping/cbor-gen" "golang.org/x/xerrors" @@ -47,43 +45,6 @@ import ( cliutil "github.com/filecoin-project/lotus/cli/util" ) -var StateCmd = &cli.Command{ - Name: "state", - Usage: "Interact with and query filecoin chain state", - Flags: []cli.Flag{ - &cli.StringFlag{ - Name: "tipset", - Usage: "specify tipset to call method on (pass comma separated array of cids)", - }, - }, - Subcommands: []*cli.Command{ - StatePowerCmd, - StateSectorsCmd, - StateActiveSectorsCmd, - StateListActorsCmd, - StateListMinersCmd, - StateCircSupplyCmd, - StateSectorCmd, - StateGetActorCmd, - StateLookupIDCmd, - StateReplayCmd, - StateSectorSizeCmd, - StateReadStateCmd, - StateListMessagesCmd, - StateComputeStateCmd, - StateCallCmd, - StateGetDealSetCmd, - StateWaitMsgCmd, - StateSearchMsgCmd, - StateMinerInfo, - StateMarketCmd, - StateExecTraceCmd, - StateNtwkVersionCmd, - StateMinerProvingDeadlineCmd, - StateSysActorCIDsCmd, - }, -} - var StateMinerProvingDeadlineCmd = &cli.Command{ Name: "miner-proving-deadline", Usage: "Retrieve information about a given miner's proving deadline", @@ -127,114 +88,6 @@ var StateMinerProvingDeadlineCmd = &cli.Command{ }, } -var StateMinerInfo = &cli.Command{ - Name: "miner-info", - Usage: "Retrieve miner information", - ArgsUsage: "[minerAddress]", - Action: func(cctx *cli.Context) error { - api, closer, err := GetFullNodeAPI(cctx) - if err != nil { - return err - } - defer closer() - - ctx := ReqContext(cctx) - - if cctx.NArg() != 1 { - return IncorrectNumArgs(cctx) - } - - addr, err := address.NewFromString(cctx.Args().First()) - if err != nil { - return err - } - - ts, err := LoadTipSet(ctx, cctx, api) - if err != nil { - return err - } - - mi, err := api.StateMinerInfo(ctx, addr, ts.Key()) - if err != nil { - return err - } - - availableBalance, err := api.StateMinerAvailableBalance(ctx, addr, ts.Key()) - if err != nil { - return xerrors.Errorf("getting miner available balance: %w", err) - } - fmt.Printf("Available Balance: %s\n", types.FIL(availableBalance)) - fmt.Printf("Owner:\t%s\n", mi.Owner) - fmt.Printf("Worker:\t%s\n", mi.Worker) - for i, controlAddress := range mi.ControlAddresses { - fmt.Printf("Control %d: \t%s\n", i, controlAddress) - } - if mi.Beneficiary != address.Undef { - fmt.Printf("Beneficiary:\t%s\n", mi.Beneficiary) - if mi.Beneficiary != mi.Owner { - fmt.Printf("Beneficiary Quota:\t%s\n", mi.BeneficiaryTerm.Quota) - fmt.Printf("Beneficiary Used Quota:\t%s\n", mi.BeneficiaryTerm.UsedQuota) - fmt.Printf("Beneficiary Expiration:\t%s\n", mi.BeneficiaryTerm.Expiration) - } - } - if mi.PendingBeneficiaryTerm != nil { - fmt.Printf("Pending Beneficiary Term:\n") - fmt.Printf("New Beneficiary:\t%s\n", mi.PendingBeneficiaryTerm.NewBeneficiary) - fmt.Printf("New Quota:\t%s\n", mi.PendingBeneficiaryTerm.NewQuota) - fmt.Printf("New Expiration:\t%s\n", mi.PendingBeneficiaryTerm.NewExpiration) - fmt.Printf("Approved By Beneficiary:\t%t\n", mi.PendingBeneficiaryTerm.ApprovedByBeneficiary) - fmt.Printf("Approved By Nominee:\t%t\n", mi.PendingBeneficiaryTerm.ApprovedByNominee) - } - - 
fmt.Printf("PeerID:\t%s\n", mi.PeerId) - fmt.Printf("Multiaddrs:\t") - for _, addr := range mi.Multiaddrs { - a, err := multiaddr.NewMultiaddrBytes(addr) - if err != nil { - return xerrors.Errorf("undecodable listen address: %w", err) - } - fmt.Printf("%s ", a) - } - fmt.Println() - fmt.Printf("Consensus Fault End:\t%d\n", mi.ConsensusFaultElapsed) - - fmt.Printf("SectorSize:\t%s (%d)\n", types.SizeStr(types.NewInt(uint64(mi.SectorSize))), mi.SectorSize) - pow, err := api.StateMinerPower(ctx, addr, ts.Key()) - if err != nil { - return err - } - - fmt.Printf("Byte Power: %s / %s (%0.4f%%)\n", - color.BlueString(types.SizeStr(pow.MinerPower.RawBytePower)), - types.SizeStr(pow.TotalPower.RawBytePower), - types.BigDivFloat( - types.BigMul(pow.MinerPower.RawBytePower, big.NewInt(100)), - pow.TotalPower.RawBytePower, - ), - ) - - fmt.Printf("Actual Power: %s / %s (%0.4f%%)\n", - color.GreenString(types.DeciStr(pow.MinerPower.QualityAdjPower)), - types.DeciStr(pow.TotalPower.QualityAdjPower), - types.BigDivFloat( - types.BigMul(pow.MinerPower.QualityAdjPower, big.NewInt(100)), - pow.TotalPower.QualityAdjPower, - ), - ) - - fmt.Println() - - cd, err := api.StateMinerProvingDeadline(ctx, addr, ts.Key()) - if err != nil { - return xerrors.Errorf("getting miner info: %w", err) - } - - fmt.Printf("Proving Period Start:\t%s\n", cliutil.EpochTime(cd.CurrentEpoch, cd.PeriodStart)) - - return nil - }, -} - func ParseTipSetString(ts string) ([]cid.Cid, error) { strs := strings.Split(ts, ",") diff --git a/cli/wallet.go b/cli/wallet.go index faf7bc23955..4af8dca58b8 100644 --- a/cli/wallet.go +++ b/cli/wallet.go @@ -27,7 +27,7 @@ import ( "github.com/filecoin-project/lotus/lib/tablewriter" ) -var walletCmd = &cli.Command{ +var WalletCmd = &cli.Command{ Name: "wallet", Usage: "Manage wallet", Subcommands: []*cli.Command{ diff --git a/cmd/curio/config.go b/cmd/curio/config.go index 20f2d059788..2938118adea 100644 --- a/cmd/curio/config.go +++ b/cmd/curio/config.go @@ -31,7 +31,6 @@ var configCmd = &cli.Command{ configViewCmd, configRmCmd, configEditCmd, - configMigrateCmd, configNewCmd, }, } diff --git a/cmd/curio/main.go b/cmd/curio/main.go index 8c64af37984..2175d24c5f8 100644 --- a/cmd/curio/main.go +++ b/cmd/curio/main.go @@ -25,6 +25,10 @@ import ( var log = logging.Logger("main") +const ( + FlagMinerRepo = "miner-repo" +) + func setupCloseHandler() { c := make(chan os.Signal, 1) signal.Notify(c, os.Interrupt, syscall.SIGTERM) @@ -49,7 +53,6 @@ func main() { testCmd, webCmd, guidedsetup.GuidedsetupCmd, - configMigrateCmd, sealCmd, } diff --git a/cmd/curio/migrate.go b/cmd/curio/migrate.go index 5cf55bcb42a..06ab7d0f9a3 100644 --- a/cmd/curio/migrate.go +++ b/cmd/curio/migrate.go @@ -1,71 +1 @@ package main - -import ( - "fmt" - - "github.com/urfave/cli/v2" - "golang.org/x/xerrors" - - cliutil "github.com/filecoin-project/lotus/cli/util" - "github.com/filecoin-project/lotus/cmd/curio/guidedsetup" - "github.com/filecoin-project/lotus/node/repo" -) - -var configMigrateCmd = &cli.Command{ - Name: "from-miner", - Usage: "Express a database config (for curio) from an existing miner.", - Description: "Express a database config (for curio) from an existing miner.", - Flags: []cli.Flag{ - &cli.StringFlag{ - Name: FlagMinerRepo, - Aliases: []string{FlagMinerRepoDeprecation}, - EnvVars: []string{"LOTUS_MINER_PATH", "LOTUS_STORAGE_PATH"}, - Value: "~/.lotusminer", - Usage: fmt.Sprintf("Specify miner repo path. 
flag(%s) and env(LOTUS_STORAGE_PATH) are DEPRECATION, will REMOVE SOON", FlagMinerRepoDeprecation), - }, - &cli.StringFlag{ - Name: "repo", - EnvVars: []string{"LOTUS_PATH"}, - Hidden: true, - Value: "~/.lotus", - }, - &cli.StringFlag{ - Name: "to-layer", - Aliases: []string{"t"}, - Usage: "The layer name for this data push. 'base' is recommended for single-miner setup.", - }, - &cli.BoolFlag{ - Name: "overwrite", - Aliases: []string{"o"}, - Usage: "Use this with --to-layer to replace an existing layer", - }, - }, - Action: fromMiner, -} - -const ( - FlagMinerRepo = "miner-repo" -) - -const FlagMinerRepoDeprecation = "storagerepo" - -func fromMiner(cctx *cli.Context) (err error) { - minerRepoPath := cctx.String(FlagMinerRepo) - layerName := cctx.String("to-layer") - overwrite := cctx.Bool("overwrite") - - // Populate API Key - _, header, err := cliutil.GetRawAPI(cctx, repo.FullNode, "v0") - if err != nil { - return fmt.Errorf("cannot read API: %w", err) - } - - ainfo, err := cliutil.GetAPIInfo(&cli.Context{}, repo.FullNode) - if err != nil { - return xerrors.Errorf(`could not get API info for FullNode: %w - Set the environment variable to the value of "lotus auth api-info --perm=admin"`, err) - } - chainApiInfo := header.Get("Authorization")[7:] + ":" + ainfo.Addr - _, err = guidedsetup.SaveConfigToLayer(minerRepoPath, layerName, overwrite, chainApiInfo) - return err -} diff --git a/cmd/curio/run.go b/cmd/curio/run.go index 35fdf4a4d03..5974a540503 100644 --- a/cmd/curio/run.go +++ b/cmd/curio/run.go @@ -5,7 +5,6 @@ import ( "fmt" "os" "strings" - "time" "github.com/pkg/errors" "github.com/urfave/cli/v2" @@ -128,7 +127,7 @@ var runCmd = &cli.Command{ if err != nil { return nil } - defer taskEngine.GracefullyTerminate(time.Hour) + defer taskEngine.GracefullyTerminate() err = rpc.ListenAndServe(ctx, dependencies, shutdownChan) // Monitor for shutdown. 
if err != nil { diff --git a/cmd/lotus-miner/actor.go b/cmd/lotus-miner/actor.go index 6d76cc07fdc..1ff613fc15a 100644 --- a/cmd/lotus-miner/actor.go +++ b/cmd/lotus-miner/actor.go @@ -1,38 +1,20 @@ package main import ( - "bytes" "fmt" "os" - "strconv" "strings" "github.com/fatih/color" - "github.com/ipfs/go-cid" - cbor "github.com/ipfs/go-ipld-cbor" - "github.com/libp2p/go-libp2p/core/peer" - ma "github.com/multiformats/go-multiaddr" "github.com/urfave/cli/v2" - "golang.org/x/xerrors" "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-bitfield" - rlepluslazy "github.com/filecoin-project/go-bitfield/rle" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/big" - "github.com/filecoin-project/go-state-types/builtin" - "github.com/filecoin-project/go-state-types/builtin/v9/miner" - "github.com/filecoin-project/go-state-types/network" - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/blockstore" - "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/lotus/chain/actors" - "github.com/filecoin-project/lotus/chain/actors/adt" + lapi "github.com/filecoin-project/lotus/api" builtin2 "github.com/filecoin-project/lotus/chain/actors/builtin" - lminer "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/types" lcli "github.com/filecoin-project/lotus/cli" + "github.com/filecoin-project/lotus/cli/spcli" "github.com/filecoin-project/lotus/lib/tablewriter" ) @@ -40,399 +22,37 @@ var actorCmd = &cli.Command{ Name: "actor", Usage: "manipulate the miner actor", Subcommands: []*cli.Command{ - actorSetAddrsCmd, - actorWithdrawCmd, - actorRepayDebtCmd, - actorSetPeeridCmd, - actorSetOwnerCmd, - actorControl, - actorProposeChangeWorker, - actorConfirmChangeWorker, - actorCompactAllocatedCmd, - actorProposeChangeBeneficiary, - actorConfirmChangeBeneficiary, + spcli.ActorSetAddrsCmd(LMActorGetter), + spcli.ActorWithdrawCmd(LMActorGetter), + spcli.ActorRepayDebtCmd(LMActorGetter), + spcli.ActorSetPeeridCmd(LMActorGetter), + spcli.ActorSetOwnerCmd(LMConfigOrActorGetter), + spcli.ActorControlCmd(LMConfigOrActorGetter, actorControlListCmd), + spcli.ActorProposeChangeWorkerCmd(LMActorGetter), + spcli.ActorConfirmChangeWorkerCmd(LMActorGetter), + spcli.ActorCompactAllocatedCmd(LMActorGetter), + spcli.ActorProposeChangeBeneficiaryCmd(LMActorGetter), + spcli.ActorConfirmChangeBeneficiaryCmd(LMConfigOrActorGetter), }, } -var actorSetAddrsCmd = &cli.Command{ - Name: "set-addresses", - Aliases: []string{"set-addrs"}, - Usage: "set addresses that your miner can be publicly dialed on", - ArgsUsage: "", - Flags: []cli.Flag{ - &cli.StringFlag{ - Name: "from", - Usage: "optionally specify the account to send the message from", - }, - &cli.Int64Flag{ - Name: "gas-limit", - Usage: "set gas limit", - Value: 0, - }, - &cli.BoolFlag{ - Name: "unset", - Usage: "unset address", - Value: false, - }, - }, - Action: func(cctx *cli.Context) error { - args := cctx.Args().Slice() - unset := cctx.Bool("unset") - if len(args) == 0 && !unset { - return cli.ShowSubcommandHelp(cctx) - } - if len(args) > 0 && unset { - return fmt.Errorf("unset can only be used with no arguments") - } - - minerApi, closer, err := lcli.GetStorageMinerAPI(cctx) - if err != nil { - return err - } - defer closer() - - api, acloser, err := lcli.GetFullNodeAPI(cctx) - if err != nil { - return err - } - defer acloser() - - ctx := lcli.ReqContext(cctx) - - var addrs []abi.Multiaddrs - for _, a := range args 
{ - maddr, err := ma.NewMultiaddr(a) - if err != nil { - return fmt.Errorf("failed to parse %q as a multiaddr: %w", a, err) - } - - maddrNop2p, strip := ma.SplitFunc(maddr, func(c ma.Component) bool { - return c.Protocol().Code == ma.P_P2P - }) - - if strip != nil { - fmt.Println("Stripping peerid ", strip, " from ", maddr) - } - addrs = append(addrs, maddrNop2p.Bytes()) - } - - maddr, err := minerApi.ActorAddress(ctx) - if err != nil { - return err - } - - minfo, err := api.StateMinerInfo(ctx, maddr, types.EmptyTSK) - if err != nil { - return err - } - - fromAddr := minfo.Worker - if from := cctx.String("from"); from != "" { - addr, err := address.NewFromString(from) - if err != nil { - return err - } - - fromAddr = addr - } - - fromId, err := api.StateLookupID(ctx, fromAddr, types.EmptyTSK) - if err != nil { - return err - } - - if !isController(minfo, fromId) { - return xerrors.Errorf("sender isn't a controller of miner: %s", fromId) - } - - params, err := actors.SerializeParams(&miner.ChangeMultiaddrsParams{NewMultiaddrs: addrs}) - if err != nil { - return err - } - - gasLimit := cctx.Int64("gas-limit") - - smsg, err := api.MpoolPushMessage(ctx, &types.Message{ - To: maddr, - From: fromId, - Value: types.NewInt(0), - GasLimit: gasLimit, - Method: builtin.MethodsMiner.ChangeMultiaddrs, - Params: params, - }, nil) - if err != nil { - return err - } - - fmt.Printf("Requested multiaddrs change in message %s\n", smsg.Cid()) - return nil - - }, -} - -var actorSetPeeridCmd = &cli.Command{ - Name: "set-peer-id", - Usage: "set the peer id of your miner", - ArgsUsage: "", - Flags: []cli.Flag{ - &cli.Int64Flag{ - Name: "gas-limit", - Usage: "set gas limit", - Value: 0, - }, - }, - Action: func(cctx *cli.Context) error { - minerApi, closer, err := lcli.GetStorageMinerAPI(cctx) - if err != nil { - return err - } - defer closer() - - api, acloser, err := lcli.GetFullNodeAPI(cctx) - if err != nil { - return err - } - defer acloser() - - ctx := lcli.ReqContext(cctx) - - if cctx.NArg() != 1 { - return lcli.IncorrectNumArgs(cctx) - } - - pid, err := peer.Decode(cctx.Args().Get(0)) - if err != nil { - return fmt.Errorf("failed to parse input as a peerId: %w", err) - } - - maddr, err := minerApi.ActorAddress(ctx) - if err != nil { - return err - } - - minfo, err := api.StateMinerInfo(ctx, maddr, types.EmptyTSK) - if err != nil { - return err - } - - params, err := actors.SerializeParams(&miner.ChangePeerIDParams{NewID: abi.PeerID(pid)}) - if err != nil { - return err - } - - gasLimit := cctx.Int64("gas-limit") - - smsg, err := api.MpoolPushMessage(ctx, &types.Message{ - To: maddr, - From: minfo.Worker, - Value: types.NewInt(0), - GasLimit: gasLimit, - Method: builtin.MethodsMiner.ChangePeerID, - Params: params, - }, nil) - if err != nil { - return err - } - - fmt.Printf("Requested peerid change in message %s\n", smsg.Cid()) - return nil - - }, -} - -var actorWithdrawCmd = &cli.Command{ - Name: "withdraw", - Usage: "withdraw available balance to beneficiary", - ArgsUsage: "[amount (FIL)]", - Flags: []cli.Flag{ - &cli.IntFlag{ - Name: "confidence", - Usage: "number of block confirmations to wait for", - Value: int(build.MessageConfidence), - }, - &cli.BoolFlag{ - Name: "beneficiary", - Usage: "send withdraw message from the beneficiary address", - }, - }, - Action: func(cctx *cli.Context) error { - amount := abi.NewTokenAmount(0) - - if cctx.Args().Present() { - f, err := types.ParseFIL(cctx.Args().First()) - if err != nil { - return xerrors.Errorf("parsing 'amount' argument: %w", err) - } - - amount = 
abi.TokenAmount(f) - } - - minerApi, closer, err := lcli.GetStorageMinerAPI(cctx) - if err != nil { - return err - } - defer closer() - - api, acloser, err := lcli.GetFullNodeAPI(cctx) - if err != nil { - return err - } - defer acloser() - - ctx := lcli.ReqContext(cctx) - - var res cid.Cid - if cctx.IsSet("beneficiary") { - res, err = minerApi.BeneficiaryWithdrawBalance(ctx, amount) - } else { - res, err = minerApi.ActorWithdrawBalance(ctx, amount) - } - if err != nil { - return err - } - - fmt.Printf("Requested withdrawal in message %s\nwaiting for it to be included in a block..\n", res) - - // wait for it to get mined into a block - wait, err := api.StateWaitMsg(ctx, res, uint64(cctx.Int("confidence"))) - if err != nil { - return xerrors.Errorf("Timeout waiting for withdrawal message %s", res) - } - - if wait.Receipt.ExitCode.IsError() { - return xerrors.Errorf("Failed to execute withdrawal message %s: %w", wait.Message, wait.Receipt.ExitCode.Error()) - } - - nv, err := api.StateNetworkVersion(ctx, wait.TipSet) - if err != nil { - return err - } - - if nv >= network.Version14 { - var withdrawn abi.TokenAmount - if err := withdrawn.UnmarshalCBOR(bytes.NewReader(wait.Receipt.Return)); err != nil { - return err - } - - fmt.Printf("Successfully withdrew %s \n", types.FIL(withdrawn)) - if withdrawn.LessThan(amount) { - fmt.Printf("Note that this is less than the requested amount of %s\n", types.FIL(amount)) - } - } - - return nil - }, +func LMConfigOrActorGetter(cctx *cli.Context) (address.Address, error) { + ctx := lcli.ReqContext(cctx) + return getActorAddress(ctx, cctx) } -var actorRepayDebtCmd = &cli.Command{ - Name: "repay-debt", - Usage: "pay down a miner's debt", - ArgsUsage: "[amount (FIL)]", - Flags: []cli.Flag{ - &cli.StringFlag{ - Name: "from", - Usage: "optionally specify the account to send funds from", - }, - }, - Action: func(cctx *cli.Context) error { - minerApi, closer, err := lcli.GetStorageMinerAPI(cctx) - if err != nil { - return err - } - defer closer() - - api, acloser, err := lcli.GetFullNodeAPI(cctx) - if err != nil { - return err - } - defer acloser() - - ctx := lcli.ReqContext(cctx) - - maddr, err := minerApi.ActorAddress(ctx) - if err != nil { - return err - } - - mi, err := api.StateMinerInfo(ctx, maddr, types.EmptyTSK) - if err != nil { - return err - } - - var amount abi.TokenAmount - if cctx.Args().Present() { - f, err := types.ParseFIL(cctx.Args().First()) - if err != nil { - return xerrors.Errorf("parsing 'amount' argument: %w", err) - } - - amount = abi.TokenAmount(f) - } else { - mact, err := api.StateGetActor(ctx, maddr, types.EmptyTSK) - if err != nil { - return err - } - - store := adt.WrapStore(ctx, cbor.NewCborStore(blockstore.NewAPIBlockstore(api))) - - mst, err := lminer.Load(store, mact) - if err != nil { - return err - } - - amount, err = mst.FeeDebt() - if err != nil { - return err - } - - } - - fromAddr := mi.Worker - if from := cctx.String("from"); from != "" { - addr, err := address.NewFromString(from) - if err != nil { - return err - } - - fromAddr = addr - } - - fromId, err := api.StateLookupID(ctx, fromAddr, types.EmptyTSK) - if err != nil { - return err - } - - if !isController(mi, fromId) { - return xerrors.Errorf("sender isn't a controller of miner: %s", fromId) - } - - smsg, err := api.MpoolPushMessage(ctx, &types.Message{ - To: maddr, - From: fromId, - Value: amount, - Method: builtin.MethodsMiner.RepayDebt, - Params: nil, - }, nil) - if err != nil { - return err - } - - fmt.Printf("Sent repay debt message %s\n", smsg.Cid()) - - return 
nil - }, -} +func getControlAddresses(cctx *cli.Context, actor address.Address) (lapi.AddressConfig, error) { + minerApi, closer, err := lcli.GetStorageMinerAPI(cctx) + if err != nil { + return lapi.AddressConfig{}, err + } + defer closer() -var actorControl = &cli.Command{ - Name: "control", - Usage: "Manage control addresses", - Subcommands: []*cli.Command{ - actorControlList, - actorControlSet, - }, + ctx := lcli.ReqContext(cctx) + return minerApi.ActorAddressConfig(ctx) } -var actorControlList = &cli.Command{ +var actorControlListCmd = &cli.Command{ Name: "list", Usage: "Get currently set control addresses", Flags: []cli.Flag{ @@ -441,12 +61,6 @@ var actorControlList = &cli.Command{ }, }, Action: func(cctx *cli.Context) error { - minerApi, closer, err := lcli.GetStorageMinerAPI(cctx) - if err != nil { - return err - } - defer closer() - api, acloser, err := lcli.GetFullNodeAPIV1(cctx) if err != nil { return err @@ -455,7 +69,7 @@ var actorControlList = &cli.Command{ ctx := lcli.ReqContext(cctx) - maddr, err := getActorAddress(ctx, cctx) + maddr, err := LMActorOrEnvGetter(cctx) if err != nil { return err } @@ -473,11 +87,10 @@ var actorControlList = &cli.Command{ tablewriter.Col("balance"), ) - ac, err := minerApi.ActorAddressConfig(ctx) + ac, err := getControlAddresses(cctx, maddr) if err != nil { return err } - commit := map[address.Address]struct{}{} precommit := map[address.Address]struct{}{} terminate := map[address.Address]struct{}{} @@ -600,853 +213,3 @@ var actorControlList = &cli.Command{ return tw.Flush(os.Stdout) }, } - -var actorControlSet = &cli.Command{ - Name: "set", - Usage: "Set control address(-es)", - ArgsUsage: "[...address]", - Flags: []cli.Flag{ - &cli.BoolFlag{ - Name: "really-do-it", - Usage: "Actually send transaction performing the action", - Value: false, - }, - }, - Action: func(cctx *cli.Context) error { - minerApi, closer, err := lcli.GetStorageMinerAPI(cctx) - if err != nil { - return err - } - defer closer() - - api, acloser, err := lcli.GetFullNodeAPI(cctx) - if err != nil { - return err - } - defer acloser() - - ctx := lcli.ReqContext(cctx) - - maddr, err := minerApi.ActorAddress(ctx) - if err != nil { - return err - } - - mi, err := api.StateMinerInfo(ctx, maddr, types.EmptyTSK) - if err != nil { - return err - } - - del := map[address.Address]struct{}{} - existing := map[address.Address]struct{}{} - for _, controlAddress := range mi.ControlAddresses { - ka, err := api.StateAccountKey(ctx, controlAddress, types.EmptyTSK) - if err != nil { - return err - } - - del[ka] = struct{}{} - existing[ka] = struct{}{} - } - - var toSet []address.Address - - for i, as := range cctx.Args().Slice() { - a, err := address.NewFromString(as) - if err != nil { - return xerrors.Errorf("parsing address %d: %w", i, err) - } - - ka, err := api.StateAccountKey(ctx, a, types.EmptyTSK) - if err != nil { - return err - } - - // make sure the address exists on chain - _, err = api.StateLookupID(ctx, ka, types.EmptyTSK) - if err != nil { - return xerrors.Errorf("looking up %s: %w", ka, err) - } - - delete(del, ka) - toSet = append(toSet, ka) - } - - for a := range del { - fmt.Println("Remove", a) - } - for _, a := range toSet { - if _, exists := existing[a]; !exists { - fmt.Println("Add", a) - } - } - - if !cctx.Bool("really-do-it") { - fmt.Println("Pass --really-do-it to actually execute this action") - return nil - } - - cwp := &miner.ChangeWorkerAddressParams{ - NewWorker: mi.Worker, - NewControlAddrs: toSet, - } - - sp, err := actors.SerializeParams(cwp) - if err != nil { - 
return xerrors.Errorf("serializing params: %w", err) - } - - smsg, err := api.MpoolPushMessage(ctx, &types.Message{ - From: mi.Owner, - To: maddr, - Method: builtin.MethodsMiner.ChangeWorkerAddress, - - Value: big.Zero(), - Params: sp, - }, nil) - if err != nil { - return xerrors.Errorf("mpool push: %w", err) - } - - fmt.Println("Message CID:", smsg.Cid()) - - return nil - }, -} - -var actorSetOwnerCmd = &cli.Command{ - Name: "set-owner", - Usage: "Set owner address (this command should be invoked twice, first with the old owner as the senderAddress, and then with the new owner)", - ArgsUsage: "[newOwnerAddress senderAddress]", - Flags: []cli.Flag{ - &cli.BoolFlag{ - Name: "really-do-it", - Usage: "Actually send transaction performing the action", - Value: false, - }, - }, - Action: func(cctx *cli.Context) error { - if cctx.NArg() != 2 { - return lcli.IncorrectNumArgs(cctx) - } - - if !cctx.Bool("really-do-it") { - fmt.Println("Pass --really-do-it to actually execute this action") - return nil - } - - api, acloser, err := lcli.GetFullNodeAPI(cctx) - if err != nil { - return err - } - defer acloser() - - ctx := lcli.ReqContext(cctx) - - na, err := address.NewFromString(cctx.Args().First()) - if err != nil { - return err - } - - newAddrId, err := api.StateLookupID(ctx, na, types.EmptyTSK) - if err != nil { - return err - } - - fa, err := address.NewFromString(cctx.Args().Get(1)) - if err != nil { - return err - } - - fromAddrId, err := api.StateLookupID(ctx, fa, types.EmptyTSK) - if err != nil { - return err - } - - maddr, err := getActorAddress(ctx, cctx) - if err != nil { - return err - } - - mi, err := api.StateMinerInfo(ctx, maddr, types.EmptyTSK) - if err != nil { - return err - } - - if fromAddrId != mi.Owner && fromAddrId != newAddrId { - return xerrors.New("from address must either be the old owner or the new owner") - } - - sp, err := actors.SerializeParams(&newAddrId) - if err != nil { - return xerrors.Errorf("serializing params: %w", err) - } - - smsg, err := api.MpoolPushMessage(ctx, &types.Message{ - From: fromAddrId, - To: maddr, - Method: builtin.MethodsMiner.ChangeOwnerAddress, - Value: big.Zero(), - Params: sp, - }, nil) - if err != nil { - return xerrors.Errorf("mpool push: %w", err) - } - - fmt.Println("Message CID:", smsg.Cid()) - - // wait for it to get mined into a block - wait, err := api.StateWaitMsg(ctx, smsg.Cid(), build.MessageConfidence) - if err != nil { - return err - } - - // check it executed successfully - if wait.Receipt.ExitCode.IsError() { - fmt.Println("owner change failed!") - return err - } - - fmt.Println("message succeeded!") - - return nil - }, -} - -var actorProposeChangeWorker = &cli.Command{ - Name: "propose-change-worker", - Usage: "Propose a worker address change", - ArgsUsage: "[address]", - Flags: []cli.Flag{ - &cli.BoolFlag{ - Name: "really-do-it", - Usage: "Actually send transaction performing the action", - Value: false, - }, - }, - Action: func(cctx *cli.Context) error { - if !cctx.Args().Present() { - return fmt.Errorf("must pass address of new worker address") - } - - minerApi, closer, err := lcli.GetStorageMinerAPI(cctx) - if err != nil { - return err - } - defer closer() - - api, acloser, err := lcli.GetFullNodeAPI(cctx) - if err != nil { - return err - } - defer acloser() - - ctx := lcli.ReqContext(cctx) - - na, err := address.NewFromString(cctx.Args().First()) - if err != nil { - return err - } - - newAddr, err := api.StateLookupID(ctx, na, types.EmptyTSK) - if err != nil { - return err - } - - maddr, err := 
minerApi.ActorAddress(ctx) - if err != nil { - return err - } - - mi, err := api.StateMinerInfo(ctx, maddr, types.EmptyTSK) - if err != nil { - return err - } - - if mi.NewWorker.Empty() { - if mi.Worker == newAddr { - return fmt.Errorf("worker address already set to %s", na) - } - } else { - if mi.NewWorker == newAddr { - return fmt.Errorf("change to worker address %s already pending", na) - } - } - - if !cctx.Bool("really-do-it") { - fmt.Fprintln(cctx.App.Writer, "Pass --really-do-it to actually execute this action") - return nil - } - - cwp := &miner.ChangeWorkerAddressParams{ - NewWorker: newAddr, - NewControlAddrs: mi.ControlAddresses, - } - - sp, err := actors.SerializeParams(cwp) - if err != nil { - return xerrors.Errorf("serializing params: %w", err) - } - - smsg, err := api.MpoolPushMessage(ctx, &types.Message{ - From: mi.Owner, - To: maddr, - Method: builtin.MethodsMiner.ChangeWorkerAddress, - Value: big.Zero(), - Params: sp, - }, nil) - if err != nil { - return xerrors.Errorf("mpool push: %w", err) - } - - fmt.Fprintln(cctx.App.Writer, "Propose Message CID:", smsg.Cid()) - - // wait for it to get mined into a block - wait, err := api.StateWaitMsg(ctx, smsg.Cid(), build.MessageConfidence) - if err != nil { - return err - } - - // check it executed successfully - if wait.Receipt.ExitCode.IsError() { - return fmt.Errorf("propose worker change failed") - } - - mi, err = api.StateMinerInfo(ctx, maddr, wait.TipSet) - if err != nil { - return err - } - if mi.NewWorker != newAddr { - return fmt.Errorf("Proposed worker address change not reflected on chain: expected '%s', found '%s'", na, mi.NewWorker) - } - - fmt.Fprintf(cctx.App.Writer, "Worker key change to %s successfully sent, change happens at height %d.\n", na, mi.WorkerChangeEpoch) - fmt.Fprintf(cctx.App.Writer, "If you have no active deadlines, call 'confirm-change-worker' at or after height %d to complete.\n", mi.WorkerChangeEpoch) - - return nil - }, -} - -var actorProposeChangeBeneficiary = &cli.Command{ - Name: "propose-change-beneficiary", - Usage: "Propose a beneficiary address change", - ArgsUsage: "[beneficiaryAddress quota expiration]", - Flags: []cli.Flag{ - &cli.BoolFlag{ - Name: "really-do-it", - Usage: "Actually send transaction performing the action", - Value: false, - }, - &cli.BoolFlag{ - Name: "overwrite-pending-change", - Usage: "Overwrite the current beneficiary change proposal", - Value: false, - }, - &cli.StringFlag{ - Name: "actor", - Usage: "specify the address of miner actor", - }, - }, - Action: func(cctx *cli.Context) error { - if cctx.NArg() != 3 { - return lcli.IncorrectNumArgs(cctx) - } - - api, acloser, err := lcli.GetFullNodeAPI(cctx) - if err != nil { - return xerrors.Errorf("getting fullnode api: %w", err) - } - defer acloser() - - ctx := lcli.ReqContext(cctx) - - na, err := address.NewFromString(cctx.Args().Get(0)) - if err != nil { - return xerrors.Errorf("parsing beneficiary address: %w", err) - } - - newAddr, err := api.StateLookupID(ctx, na, types.EmptyTSK) - if err != nil { - return xerrors.Errorf("looking up new beneficiary address: %w", err) - } - - quota, err := types.ParseFIL(cctx.Args().Get(1)) - if err != nil { - return xerrors.Errorf("parsing quota: %w", err) - } - - expiration, err := strconv.ParseInt(cctx.Args().Get(2), 10, 64) - if err != nil { - return xerrors.Errorf("parsing expiration: %w", err) - } - - maddr, err := getActorAddress(ctx, cctx) - if err != nil { - return xerrors.Errorf("getting miner address: %w", err) - } - - mi, err := api.StateMinerInfo(ctx, maddr, 
types.EmptyTSK) - if err != nil { - return xerrors.Errorf("getting miner info: %w", err) - } - - if mi.Beneficiary == mi.Owner && newAddr == mi.Owner { - return fmt.Errorf("beneficiary %s already set to owner address", mi.Beneficiary) - } - - if mi.PendingBeneficiaryTerm != nil { - fmt.Println("WARNING: replacing Pending Beneficiary Term of:") - fmt.Println("Beneficiary: ", mi.PendingBeneficiaryTerm.NewBeneficiary) - fmt.Println("Quota:", mi.PendingBeneficiaryTerm.NewQuota) - fmt.Println("Expiration Epoch:", mi.PendingBeneficiaryTerm.NewExpiration) - - if !cctx.Bool("overwrite-pending-change") { - return fmt.Errorf("must pass --overwrite-pending-change to replace current pending beneficiary change. Please review CAREFULLY") - } - } - - if !cctx.Bool("really-do-it") { - fmt.Println("Pass --really-do-it to actually execute this action. Review what you're about to approve CAREFULLY please") - return nil - } - - params := &miner.ChangeBeneficiaryParams{ - NewBeneficiary: newAddr, - NewQuota: abi.TokenAmount(quota), - NewExpiration: abi.ChainEpoch(expiration), - } - - sp, err := actors.SerializeParams(params) - if err != nil { - return xerrors.Errorf("serializing params: %w", err) - } - - smsg, err := api.MpoolPushMessage(ctx, &types.Message{ - From: mi.Owner, - To: maddr, - Method: builtin.MethodsMiner.ChangeBeneficiary, - Value: big.Zero(), - Params: sp, - }, nil) - if err != nil { - return xerrors.Errorf("mpool push: %w", err) - } - - fmt.Println("Propose Message CID:", smsg.Cid()) - - // wait for it to get mined into a block - wait, err := api.StateWaitMsg(ctx, smsg.Cid(), build.MessageConfidence) - if err != nil { - return xerrors.Errorf("waiting for message to be included in block: %w", err) - } - - // check it executed successfully - if wait.Receipt.ExitCode.IsError() { - return fmt.Errorf("propose beneficiary change failed") - } - - updatedMinerInfo, err := api.StateMinerInfo(ctx, maddr, wait.TipSet) - if err != nil { - return xerrors.Errorf("getting miner info: %w", err) - } - - if updatedMinerInfo.PendingBeneficiaryTerm == nil && updatedMinerInfo.Beneficiary == newAddr { - fmt.Println("Beneficiary address successfully changed") - } else { - fmt.Println("Beneficiary address change awaiting additional confirmations") - } - - return nil - }, -} - -var actorConfirmChangeWorker = &cli.Command{ - Name: "confirm-change-worker", - Usage: "Confirm a worker address change", - ArgsUsage: "[address]", - Flags: []cli.Flag{ - &cli.BoolFlag{ - Name: "really-do-it", - Usage: "Actually send transaction performing the action", - Value: false, - }, - }, - Action: func(cctx *cli.Context) error { - if !cctx.Args().Present() { - return fmt.Errorf("must pass address of new worker address") - } - - minerApi, closer, err := lcli.GetStorageMinerAPI(cctx) - if err != nil { - return err - } - defer closer() - - api, acloser, err := lcli.GetFullNodeAPI(cctx) - if err != nil { - return err - } - defer acloser() - - ctx := lcli.ReqContext(cctx) - - na, err := address.NewFromString(cctx.Args().First()) - if err != nil { - return err - } - - newAddr, err := api.StateLookupID(ctx, na, types.EmptyTSK) - if err != nil { - return err - } - - maddr, err := minerApi.ActorAddress(ctx) - if err != nil { - return err - } - - mi, err := api.StateMinerInfo(ctx, maddr, types.EmptyTSK) - if err != nil { - return err - } - - if mi.NewWorker.Empty() { - return xerrors.Errorf("no worker key change proposed") - } else if mi.NewWorker != newAddr { - return xerrors.Errorf("worker key %s does not match current worker key proposal %s", 
newAddr, mi.NewWorker) - } - - if head, err := api.ChainHead(ctx); err != nil { - return xerrors.Errorf("failed to get the chain head: %w", err) - } else if head.Height() < mi.WorkerChangeEpoch { - return xerrors.Errorf("worker key change cannot be confirmed until %d, current height is %d", mi.WorkerChangeEpoch, head.Height()) - } - - if !cctx.Bool("really-do-it") { - fmt.Println("Pass --really-do-it to actually execute this action") - return nil - } - - smsg, err := api.MpoolPushMessage(ctx, &types.Message{ - From: mi.Owner, - To: maddr, - Method: builtin.MethodsMiner.ConfirmChangeWorkerAddress, - Value: big.Zero(), - }, nil) - if err != nil { - return xerrors.Errorf("mpool push: %w", err) - } - - fmt.Println("Confirm Message CID:", smsg.Cid()) - - // wait for it to get mined into a block - wait, err := api.StateWaitMsg(ctx, smsg.Cid(), build.MessageConfidence) - if err != nil { - return err - } - - // check it executed successfully - if wait.Receipt.ExitCode.IsError() { - fmt.Fprintln(cctx.App.Writer, "Worker change failed!") - return err - } - - mi, err = api.StateMinerInfo(ctx, maddr, wait.TipSet) - if err != nil { - return err - } - if mi.Worker != newAddr { - return fmt.Errorf("Confirmed worker address change not reflected on chain: expected '%s', found '%s'", newAddr, mi.Worker) - } - - return nil - }, -} - -var actorConfirmChangeBeneficiary = &cli.Command{ - Name: "confirm-change-beneficiary", - Usage: "Confirm a beneficiary address change", - ArgsUsage: "[minerID]", - Flags: []cli.Flag{ - &cli.BoolFlag{ - Name: "really-do-it", - Usage: "Actually send transaction performing the action", - Value: false, - }, - &cli.BoolFlag{ - Name: "existing-beneficiary", - Usage: "send confirmation from the existing beneficiary address", - }, - &cli.BoolFlag{ - Name: "new-beneficiary", - Usage: "send confirmation from the new beneficiary address", - }, - }, - Action: func(cctx *cli.Context) error { - if cctx.NArg() != 1 { - return lcli.IncorrectNumArgs(cctx) - } - - api, acloser, err := lcli.GetFullNodeAPI(cctx) - if err != nil { - return xerrors.Errorf("getting fullnode api: %w", err) - } - defer acloser() - - ctx := lcli.ReqContext(cctx) - - maddr, err := address.NewFromString(cctx.Args().First()) - if err != nil { - return xerrors.Errorf("parsing beneficiary address: %w", err) - } - - mi, err := api.StateMinerInfo(ctx, maddr, types.EmptyTSK) - if err != nil { - return xerrors.Errorf("getting miner info: %w", err) - } - - if mi.PendingBeneficiaryTerm == nil { - return fmt.Errorf("no pending beneficiary term found for miner %s", maddr) - } - - if (cctx.IsSet("existing-beneficiary") && cctx.IsSet("new-beneficiary")) || (!cctx.IsSet("existing-beneficiary") && !cctx.IsSet("new-beneficiary")) { - return lcli.ShowHelp(cctx, fmt.Errorf("must pass exactly one of --existing-beneficiary or --new-beneficiary")) - } - - var fromAddr address.Address - if cctx.IsSet("existing-beneficiary") { - if mi.PendingBeneficiaryTerm.ApprovedByBeneficiary { - return fmt.Errorf("beneficiary change already approved by current beneficiary") - } - fromAddr = mi.Beneficiary - } else { - if mi.PendingBeneficiaryTerm.ApprovedByNominee { - return fmt.Errorf("beneficiary change already approved by new beneficiary") - } - fromAddr = mi.PendingBeneficiaryTerm.NewBeneficiary - } - - fmt.Println("Confirming Pending Beneficiary Term of:") - fmt.Println("Beneficiary: ", mi.PendingBeneficiaryTerm.NewBeneficiary) - fmt.Println("Quota:", mi.PendingBeneficiaryTerm.NewQuota) - fmt.Println("Expiration Epoch:", 
mi.PendingBeneficiaryTerm.NewExpiration) - - if !cctx.Bool("really-do-it") { - fmt.Println("Pass --really-do-it to actually execute this action. Review what you're about to approve CAREFULLY please") - return nil - } - - params := &miner.ChangeBeneficiaryParams{ - NewBeneficiary: mi.PendingBeneficiaryTerm.NewBeneficiary, - NewQuota: mi.PendingBeneficiaryTerm.NewQuota, - NewExpiration: mi.PendingBeneficiaryTerm.NewExpiration, - } - - sp, err := actors.SerializeParams(params) - if err != nil { - return xerrors.Errorf("serializing params: %w", err) - } - - smsg, err := api.MpoolPushMessage(ctx, &types.Message{ - From: fromAddr, - To: maddr, - Method: builtin.MethodsMiner.ChangeBeneficiary, - Value: big.Zero(), - Params: sp, - }, nil) - if err != nil { - return xerrors.Errorf("mpool push: %w", err) - } - - fmt.Println("Confirm Message CID:", smsg.Cid()) - - // wait for it to get mined into a block - wait, err := api.StateWaitMsg(ctx, smsg.Cid(), build.MessageConfidence) - if err != nil { - return xerrors.Errorf("waiting for message to be included in block: %w", err) - } - - // check it executed successfully - if wait.Receipt.ExitCode.IsError() { - return fmt.Errorf("confirm beneficiary change failed with code %d", wait.Receipt.ExitCode) - } - - updatedMinerInfo, err := api.StateMinerInfo(ctx, maddr, types.EmptyTSK) - if err != nil { - return err - } - - if updatedMinerInfo.PendingBeneficiaryTerm == nil && updatedMinerInfo.Beneficiary == mi.PendingBeneficiaryTerm.NewBeneficiary { - fmt.Println("Beneficiary address successfully changed") - } else { - fmt.Println("Beneficiary address change awaiting additional confirmations") - } - - return nil - }, -} - -var actorCompactAllocatedCmd = &cli.Command{ - Name: "compact-allocated", - Usage: "compact allocated sectors bitfield", - Flags: []cli.Flag{ - &cli.Uint64Flag{ - Name: "mask-last-offset", - Usage: "Mask sector IDs from 0 to 'highest_allocated - offset'", - }, - &cli.Uint64Flag{ - Name: "mask-upto-n", - Usage: "Mask sector IDs from 0 to 'n'", - }, - &cli.BoolFlag{ - Name: "really-do-it", - Usage: "Actually send transaction performing the action", - Value: false, - }, - }, - Action: func(cctx *cli.Context) error { - if !cctx.Bool("really-do-it") { - fmt.Println("Pass --really-do-it to actually execute this action") - return nil - } - - if !cctx.Args().Present() { - return fmt.Errorf("must pass address of new owner address") - } - - minerApi, closer, err := lcli.GetStorageMinerAPI(cctx) - if err != nil { - return err - } - defer closer() - - api, acloser, err := lcli.GetFullNodeAPI(cctx) - if err != nil { - return err - } - defer acloser() - - ctx := lcli.ReqContext(cctx) - - maddr, err := minerApi.ActorAddress(ctx) - if err != nil { - return err - } - - mact, err := api.StateGetActor(ctx, maddr, types.EmptyTSK) - if err != nil { - return err - } - - store := adt.WrapStore(ctx, cbor.NewCborStore(blockstore.NewAPIBlockstore(api))) - - mst, err := lminer.Load(store, mact) - if err != nil { - return err - } - - allocs, err := mst.GetAllocatedSectors() - if err != nil { - return err - } - - var maskBf bitfield.BitField - - { - exclusiveFlags := []string{"mask-last-offset", "mask-upto-n"} - hasFlag := false - for _, f := range exclusiveFlags { - if hasFlag && cctx.IsSet(f) { - return xerrors.Errorf("more than one 'mask` flag set") - } - hasFlag = hasFlag || cctx.IsSet(f) - } - } - switch { - case cctx.IsSet("mask-last-offset"): - last, err := allocs.Last() - if err != nil { - return err - } - - m := cctx.Uint64("mask-last-offset") - if last <= m+1 { - 
return xerrors.Errorf("highest allocated sector lower than mask offset %d: %d", m+1, last) - } - // securty to not brick a miner - if last > 1<<60 { - return xerrors.Errorf("very high last sector number, refusing to mask: %d", last) - } - - maskBf, err = bitfield.NewFromIter(&rlepluslazy.RunSliceIterator{ - Runs: []rlepluslazy.Run{{Val: true, Len: last - m}}}) - if err != nil { - return xerrors.Errorf("forming bitfield: %w", err) - } - case cctx.IsSet("mask-upto-n"): - n := cctx.Uint64("mask-upto-n") - maskBf, err = bitfield.NewFromIter(&rlepluslazy.RunSliceIterator{ - Runs: []rlepluslazy.Run{{Val: true, Len: n}}}) - if err != nil { - return xerrors.Errorf("forming bitfield: %w", err) - } - default: - return xerrors.Errorf("no 'mask' flags set") - } - - mi, err := api.StateMinerInfo(ctx, maddr, types.EmptyTSK) - if err != nil { - return err - } - - params := &miner.CompactSectorNumbersParams{ - MaskSectorNumbers: maskBf, - } - - sp, err := actors.SerializeParams(params) - if err != nil { - return xerrors.Errorf("serializing params: %w", err) - } - - smsg, err := api.MpoolPushMessage(ctx, &types.Message{ - From: mi.Worker, - To: maddr, - Method: builtin.MethodsMiner.CompactSectorNumbers, - Value: big.Zero(), - Params: sp, - }, nil) - if err != nil { - return xerrors.Errorf("mpool push: %w", err) - } - - fmt.Println("CompactSectorNumbers Message CID:", smsg.Cid()) - - // wait for it to get mined into a block - wait, err := api.StateWaitMsg(ctx, smsg.Cid(), build.MessageConfidence) - if err != nil { - return err - } - - // check it executed successfully - if wait.Receipt.ExitCode.IsError() { - fmt.Println("Propose owner change failed!") - return err - } - - return nil - }, -} - -func isController(mi api.MinerInfo, addr address.Address) bool { - if addr == mi.Owner || addr == mi.Worker { - return true - } - - for _, ca := range mi.ControlAddresses { - if addr == ca { - return true - } - } - - return false -} diff --git a/cmd/lotus-miner/actor_test.go b/cmd/lotus-miner/actor_test.go index dfb4522137c..5f9e923e680 100644 --- a/cmd/lotus-miner/actor_test.go +++ b/cmd/lotus-miner/actor_test.go @@ -19,6 +19,7 @@ import ( "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/chain/actors/policy" "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/cli/spcli" "github.com/filecoin-project/lotus/itests/kit" "github.com/filecoin-project/lotus/node/repo" ) @@ -67,7 +68,7 @@ func TestWorkerKeyChange(t *testing.T) { // Initialize wallet. kit.SendFunds(ctx, t, client1, newKey, abi.NewTokenAmount(0)) - require.NoError(t, run(actorProposeChangeWorker, "--really-do-it", newKey.String())) + require.NoError(t, run(spcli.ActorProposeChangeWorkerCmd(LMActorGetter), "--really-do-it", newKey.String())) result := output.String() output.Reset() @@ -82,12 +83,12 @@ func TestWorkerKeyChange(t *testing.T) { require.NotZero(t, targetEpoch) // Too early. 
- require.Error(t, run(actorConfirmChangeWorker, "--really-do-it", newKey.String())) + require.Error(t, run(spcli.ActorConfirmChangeWorkerCmd(LMActorGetter), "--really-do-it", newKey.String())) output.Reset() client1.WaitTillChain(ctx, kit.HeightAtLeast(abi.ChainEpoch(targetEpoch))) - require.NoError(t, run(actorConfirmChangeWorker, "--really-do-it", newKey.String())) + require.NoError(t, run(spcli.ActorConfirmChangeWorkerCmd(LMActorGetter), "--really-do-it", newKey.String())) output.Reset() head, err := client1.ChainHead(ctx) diff --git a/cmd/lotus-miner/info.go b/cmd/lotus-miner/info.go index 6d8ade340ef..52b230daab0 100644 --- a/cmd/lotus-miner/info.go +++ b/cmd/lotus-miner/info.go @@ -29,6 +29,7 @@ import ( "github.com/filecoin-project/lotus/chain/actors/builtin/reward" "github.com/filecoin-project/lotus/chain/types" lcli "github.com/filecoin-project/lotus/cli" + "github.com/filecoin-project/lotus/cli/spcli" cliutil "github.com/filecoin-project/lotus/cli/util" "github.com/filecoin-project/lotus/journal/alerting" sealing "github.com/filecoin-project/lotus/storage/pipeline" @@ -369,94 +370,6 @@ func handleMiningInfo(ctx context.Context, cctx *cli.Context, fullapi v1api.Full return nil } -type stateMeta struct { - i int - col color.Attribute - state sealing.SectorState -} - -var stateOrder = map[sealing.SectorState]stateMeta{} -var stateList = []stateMeta{ - {col: 39, state: "Total"}, - {col: color.FgGreen, state: sealing.Proving}, - {col: color.FgGreen, state: sealing.Available}, - {col: color.FgGreen, state: sealing.UpdateActivating}, - - {col: color.FgMagenta, state: sealing.ReceiveSector}, - - {col: color.FgBlue, state: sealing.Empty}, - {col: color.FgBlue, state: sealing.WaitDeals}, - {col: color.FgBlue, state: sealing.AddPiece}, - {col: color.FgBlue, state: sealing.SnapDealsWaitDeals}, - {col: color.FgBlue, state: sealing.SnapDealsAddPiece}, - - {col: color.FgRed, state: sealing.UndefinedSectorState}, - {col: color.FgYellow, state: sealing.Packing}, - {col: color.FgYellow, state: sealing.GetTicket}, - {col: color.FgYellow, state: sealing.PreCommit1}, - {col: color.FgYellow, state: sealing.PreCommit2}, - {col: color.FgYellow, state: sealing.PreCommitting}, - {col: color.FgYellow, state: sealing.PreCommitWait}, - {col: color.FgYellow, state: sealing.SubmitPreCommitBatch}, - {col: color.FgYellow, state: sealing.PreCommitBatchWait}, - {col: color.FgYellow, state: sealing.WaitSeed}, - {col: color.FgYellow, state: sealing.Committing}, - {col: color.FgYellow, state: sealing.CommitFinalize}, - {col: color.FgYellow, state: sealing.SubmitCommit}, - {col: color.FgYellow, state: sealing.CommitWait}, - {col: color.FgYellow, state: sealing.SubmitCommitAggregate}, - {col: color.FgYellow, state: sealing.CommitAggregateWait}, - {col: color.FgYellow, state: sealing.FinalizeSector}, - {col: color.FgYellow, state: sealing.SnapDealsPacking}, - {col: color.FgYellow, state: sealing.UpdateReplica}, - {col: color.FgYellow, state: sealing.ProveReplicaUpdate}, - {col: color.FgYellow, state: sealing.SubmitReplicaUpdate}, - {col: color.FgYellow, state: sealing.ReplicaUpdateWait}, - {col: color.FgYellow, state: sealing.WaitMutable}, - {col: color.FgYellow, state: sealing.FinalizeReplicaUpdate}, - {col: color.FgYellow, state: sealing.ReleaseSectorKey}, - - {col: color.FgCyan, state: sealing.Terminating}, - {col: color.FgCyan, state: sealing.TerminateWait}, - {col: color.FgCyan, state: sealing.TerminateFinality}, - {col: color.FgCyan, state: sealing.TerminateFailed}, - {col: color.FgCyan, state: sealing.Removing}, 
- {col: color.FgCyan, state: sealing.Removed}, - {col: color.FgCyan, state: sealing.AbortUpgrade}, - - {col: color.FgRed, state: sealing.FailedUnrecoverable}, - {col: color.FgRed, state: sealing.AddPieceFailed}, - {col: color.FgRed, state: sealing.SealPreCommit1Failed}, - {col: color.FgRed, state: sealing.SealPreCommit2Failed}, - {col: color.FgRed, state: sealing.PreCommitFailed}, - {col: color.FgRed, state: sealing.ComputeProofFailed}, - {col: color.FgRed, state: sealing.RemoteCommitFailed}, - {col: color.FgRed, state: sealing.CommitFailed}, - {col: color.FgRed, state: sealing.CommitFinalizeFailed}, - {col: color.FgRed, state: sealing.PackingFailed}, - {col: color.FgRed, state: sealing.FinalizeFailed}, - {col: color.FgRed, state: sealing.Faulty}, - {col: color.FgRed, state: sealing.FaultReported}, - {col: color.FgRed, state: sealing.FaultedFinal}, - {col: color.FgRed, state: sealing.RemoveFailed}, - {col: color.FgRed, state: sealing.DealsExpired}, - {col: color.FgRed, state: sealing.RecoverDealIDs}, - {col: color.FgRed, state: sealing.SnapDealsAddPieceFailed}, - {col: color.FgRed, state: sealing.SnapDealsDealsExpired}, - {col: color.FgRed, state: sealing.ReplicaUpdateFailed}, - {col: color.FgRed, state: sealing.ReleaseSectorKeyFailed}, - {col: color.FgRed, state: sealing.FinalizeReplicaUpdateFailed}, -} - -func init() { - for i, state := range stateList { - stateOrder[state.state] = stateMeta{ - i: i, - col: state.col, - } - } -} - func sectorsInfo(ctx context.Context, mapi api.StorageMiner) error { summary, err := mapi.SectorsSummary(ctx) if err != nil { @@ -471,17 +384,17 @@ func sectorsInfo(ctx context.Context, mapi api.StorageMiner) error { } buckets["Total"] = total - var sorted []stateMeta + var sorted []spcli.StateMeta for state, i := range buckets { - sorted = append(sorted, stateMeta{i: i, state: state}) + sorted = append(sorted, spcli.StateMeta{I: i, State: state}) } sort.Slice(sorted, func(i, j int) bool { - return stateOrder[sorted[i].state].i < stateOrder[sorted[j].state].i + return spcli.StateOrder[sorted[i].State].I < spcli.StateOrder[sorted[j].State].I }) for _, s := range sorted { - _, _ = color.New(stateOrder[s.state].col).Printf("\t%s: %d\n", s.state, s.i) + _, _ = color.New(spcli.StateOrder[s.State].Col).Printf("\t%s: %d\n", s.State, s.I) } return nil diff --git a/cmd/lotus-miner/info_all.go b/cmd/lotus-miner/info_all.go index 2cf07385c00..5b83467a2f8 100644 --- a/cmd/lotus-miner/info_all.go +++ b/cmd/lotus-miner/info_all.go @@ -8,6 +8,7 @@ import ( "github.com/urfave/cli/v2" lcli "github.com/filecoin-project/lotus/cli" + "github.com/filecoin-project/lotus/cli/spcli" ) var _test = false @@ -82,17 +83,17 @@ var infoAllCmd = &cli.Command{ } fmt.Println("\n#: Proving Info") - if err := provingInfoCmd.Action(cctx); err != nil { + if err := spcli.ProvingInfoCmd(LMActorOrEnvGetter).Action(cctx); err != nil { fmt.Println("ERROR: ", err) } fmt.Println("\n#: Proving Deadlines") - if err := provingDeadlinesCmd.Action(cctx); err != nil { + if err := spcli.ProvingDeadlinesCmd(LMActorOrEnvGetter).Action(cctx); err != nil { fmt.Println("ERROR: ", err) } fmt.Println("\n#: Proving Faults") - if err := provingFaultsCmd.Action(cctx); err != nil { + if err := spcli.ProvingFaultsCmd(LMActorOrEnvGetter).Action(cctx); err != nil { fmt.Println("ERROR: ", err) } @@ -237,7 +238,7 @@ var infoAllCmd = &cli.Command{ fmt.Printf("\n##: Sector %d Status\n", s) fs := &flag.FlagSet{} - for _, f := range sectorsStatusCmd.Flags { + for _, f := range spcli.SectorsStatusCmd(LMActorOrEnvGetter, 
getOnDiskInfo).Flags { if err := f.Apply(fs); err != nil { fmt.Println("ERROR: ", err) } @@ -246,7 +247,7 @@ var infoAllCmd = &cli.Command{ fmt.Println("ERROR: ", err) } - if err := sectorsStatusCmd.Action(cli.NewContext(cctx.App, fs, cctx)); err != nil { + if err := spcli.SectorsStatusCmd(LMActorOrEnvGetter, getOnDiskInfo).Action(cli.NewContext(cctx.App, fs, cctx)); err != nil { fmt.Println("ERROR: ", err) } diff --git a/cmd/lotus-miner/main.go b/cmd/lotus-miner/main.go index 911e98e260a..1fc7abfa8da 100644 --- a/cmd/lotus-miner/main.go +++ b/cmd/lotus-miner/main.go @@ -197,3 +197,17 @@ func setHidden(cmd *cli.Command) *cli.Command { cmd.Hidden = true return cmd } + +func LMActorOrEnvGetter(cctx *cli.Context) (address.Address, error) { + return getActorAddress(cctx.Context, cctx) +} + +func LMActorGetter(cctx *cli.Context) (address.Address, error) { + minerApi, closer, err := lcli.GetStorageMinerAPI(cctx) + if err != nil { + return address.Undef, err + } + defer closer() + + return minerApi.ActorAddress(cctx.Context) +} diff --git a/cmd/lotus-miner/precommits-info.go b/cmd/lotus-miner/precommits-info.go deleted file mode 100644 index 3f9e8c92742..00000000000 --- a/cmd/lotus-miner/precommits-info.go +++ /dev/null @@ -1,56 +0,0 @@ -package main - -import ( - "fmt" - "sort" - - cbor "github.com/ipfs/go-ipld-cbor" - "github.com/urfave/cli/v2" - - "github.com/filecoin-project/specs-actors/v7/actors/util/adt" - - "github.com/filecoin-project/lotus/blockstore" - "github.com/filecoin-project/lotus/chain/actors/builtin/miner" - "github.com/filecoin-project/lotus/chain/types" - lcli "github.com/filecoin-project/lotus/cli" -) - -var sectorPreCommitsCmd = &cli.Command{ - Name: "precommits", - Usage: "Print on-chain precommit info", - Action: func(cctx *cli.Context) error { - ctx := lcli.ReqContext(cctx) - mapi, closer, err := lcli.GetFullNodeAPI(cctx) - if err != nil { - return err - } - defer closer() - maddr, err := getActorAddress(ctx, cctx) - if err != nil { - return err - } - mact, err := mapi.StateGetActor(ctx, maddr, types.EmptyTSK) - if err != nil { - return err - } - store := adt.WrapStore(ctx, cbor.NewCborStore(blockstore.NewAPIBlockstore(mapi))) - mst, err := miner.Load(store, mact) - if err != nil { - return err - } - preCommitSector := make([]miner.SectorPreCommitOnChainInfo, 0) - err = mst.ForEachPrecommittedSector(func(info miner.SectorPreCommitOnChainInfo) error { - preCommitSector = append(preCommitSector, info) - return err - }) - less := func(i, j int) bool { - return preCommitSector[i].Info.SectorNumber <= preCommitSector[j].Info.SectorNumber - } - sort.Slice(preCommitSector, less) - for _, info := range preCommitSector { - fmt.Printf("%s: %s\n", info.Info.SectorNumber, info.PreCommitEpoch) - } - - return nil - }, -} diff --git a/cmd/lotus-miner/proving.go b/cmd/lotus-miner/proving.go index 575dded5a7c..9048da8e2b3 100644 --- a/cmd/lotus-miner/proving.go +++ b/cmd/lotus-miner/proving.go @@ -1,12 +1,10 @@ package main import ( - "bytes" "encoding/json" "fmt" "os" "strconv" - "strings" "sync" "text/tabwriter" "time" @@ -17,18 +15,13 @@ import ( "golang.org/x/xerrors" "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-bitfield" "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/dline" "github.com/filecoin-project/go-state-types/proof" - "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/lotus/chain/actors/builtin/miner" - 
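
Editor's note: the two helpers added to `cmd/lotus-miner/main.go` above (`LMActorOrEnvGetter`, `LMActorGetter`) exist so that the shared `cli/spcli` command constructors can be reused by more than one binary; each frontend only supplies its own way of answering "which miner actor is this command about". The spcli-side type name for that getter is not shown in this hunk, so the sketch below assumes a plain `func(*cli.Context) (address.Address, error)` signature (which both LM getters satisfy) and uses illustrative names, not the real spcli identifiers.

```
package example

import (
	"fmt"

	"github.com/urfave/cli/v2"

	"github.com/filecoin-project/go-address"
)

// ActorAddressGetter abstracts how a frontend resolves the miner address:
// lotus-miner asks its own miner API (LMActorGetter above), while another
// caller might read a flag or environment variable instead.
type ActorAddressGetter func(cctx *cli.Context) (address.Address, error)

// InfoCmd builds the same *cli.Command for every frontend; only the address
// resolution is injected. This mirrors the spcli.ProvingInfoCmd(...) calls in
// the diff, but is a sketch rather than the actual spcli implementation.
func InfoCmd(getActor ActorAddressGetter) *cli.Command {
	return &cli.Command{
		Name:  "info",
		Usage: "View current state information",
		Action: func(cctx *cli.Context) error {
			maddr, err := getActor(cctx)
			if err != nil {
				return err
			}
			fmt.Printf("Miner: %s\n", maddr)
			// ... look up proving-period state for maddr, as the shared command does
			return nil
		},
	}
}
```
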
"github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" lcli "github.com/filecoin-project/lotus/cli" - cliutil "github.com/filecoin-project/lotus/cli/util" + "github.com/filecoin-project/lotus/cli/spcli" "github.com/filecoin-project/lotus/storage/sealer/storiface" ) @@ -36,10 +29,10 @@ var provingCmd = &cli.Command{ Name: "proving", Usage: "View proving information", Subcommands: []*cli.Command{ - provingInfoCmd, - provingDeadlinesCmd, - provingDeadlineInfoCmd, - provingFaultsCmd, + spcli.ProvingInfoCmd(LMActorOrEnvGetter), + spcli.ProvingDeadlinesCmd(LMActorOrEnvGetter), + spcli.ProvingDeadlineInfoCmd(LMActorOrEnvGetter), + spcli.ProvingFaultsCmd(LMActorOrEnvGetter), provingCheckProvableCmd, workersCmd(false), provingComputeCmd, @@ -47,422 +40,6 @@ var provingCmd = &cli.Command{ }, } -var provingFaultsCmd = &cli.Command{ - Name: "faults", - Usage: "View the currently known proving faulty sectors information", - Action: func(cctx *cli.Context) error { - api, acloser, err := lcli.GetFullNodeAPI(cctx) - if err != nil { - return err - } - defer acloser() - - ctx := lcli.ReqContext(cctx) - - stor := store.ActorStore(ctx, blockstore.NewAPIBlockstore(api)) - - maddr, err := getActorAddress(ctx, cctx) - if err != nil { - return err - } - - mact, err := api.StateGetActor(ctx, maddr, types.EmptyTSK) - if err != nil { - return err - } - - mas, err := miner.Load(stor, mact) - if err != nil { - return err - } - - fmt.Printf("Miner: %s\n", color.BlueString("%s", maddr)) - - tw := tabwriter.NewWriter(os.Stdout, 2, 4, 2, ' ', 0) - _, _ = fmt.Fprintln(tw, "deadline\tpartition\tsectors") - err = mas.ForEachDeadline(func(dlIdx uint64, dl miner.Deadline) error { - return dl.ForEachPartition(func(partIdx uint64, part miner.Partition) error { - faults, err := part.FaultySectors() - if err != nil { - return err - } - return faults.ForEach(func(num uint64) error { - _, _ = fmt.Fprintf(tw, "%d\t%d\t%d\n", dlIdx, partIdx, num) - return nil - }) - }) - }) - if err != nil { - return err - } - return tw.Flush() - }, -} - -var provingInfoCmd = &cli.Command{ - Name: "info", - Usage: "View current state information", - Action: func(cctx *cli.Context) error { - api, acloser, err := lcli.GetFullNodeAPI(cctx) - if err != nil { - return err - } - defer acloser() - - ctx := lcli.ReqContext(cctx) - - maddr, err := getActorAddress(ctx, cctx) - if err != nil { - return err - } - - head, err := api.ChainHead(ctx) - if err != nil { - return xerrors.Errorf("getting chain head: %w", err) - } - - mact, err := api.StateGetActor(ctx, maddr, head.Key()) - if err != nil { - return err - } - - stor := store.ActorStore(ctx, blockstore.NewAPIBlockstore(api)) - - mas, err := miner.Load(stor, mact) - if err != nil { - return err - } - - cd, err := api.StateMinerProvingDeadline(ctx, maddr, head.Key()) - if err != nil { - return xerrors.Errorf("getting miner info: %w", err) - } - - fmt.Printf("Miner: %s\n", color.BlueString("%s", maddr)) - - proving := uint64(0) - faults := uint64(0) - recovering := uint64(0) - curDeadlineSectors := uint64(0) - - if err := mas.ForEachDeadline(func(dlIdx uint64, dl miner.Deadline) error { - return dl.ForEachPartition(func(partIdx uint64, part miner.Partition) error { - if bf, err := part.LiveSectors(); err != nil { - return err - } else if count, err := bf.Count(); err != nil { - return err - } else { - proving += count - if dlIdx == cd.Index { - curDeadlineSectors += count - } - } - - if bf, err := part.FaultySectors(); err != nil { - return err - } else if count, err := 
bf.Count(); err != nil { - return err - } else { - faults += count - } - - if bf, err := part.RecoveringSectors(); err != nil { - return err - } else if count, err := bf.Count(); err != nil { - return err - } else { - recovering += count - } - - return nil - }) - }); err != nil { - return xerrors.Errorf("walking miner deadlines and partitions: %w", err) - } - - var faultPerc float64 - if proving > 0 { - faultPerc = float64(faults * 100 / proving) - } - - fmt.Printf("Current Epoch: %d\n", cd.CurrentEpoch) - - fmt.Printf("Proving Period Boundary: %d\n", cd.PeriodStart%cd.WPoStProvingPeriod) - fmt.Printf("Proving Period Start: %s\n", cliutil.EpochTimeTs(cd.CurrentEpoch, cd.PeriodStart, head)) - fmt.Printf("Next Period Start: %s\n\n", cliutil.EpochTimeTs(cd.CurrentEpoch, cd.PeriodStart+cd.WPoStProvingPeriod, head)) - - fmt.Printf("Faults: %d (%.2f%%)\n", faults, faultPerc) - fmt.Printf("Recovering: %d\n", recovering) - - fmt.Printf("Deadline Index: %d\n", cd.Index) - fmt.Printf("Deadline Sectors: %d\n", curDeadlineSectors) - fmt.Printf("Deadline Open: %s\n", cliutil.EpochTime(cd.CurrentEpoch, cd.Open)) - fmt.Printf("Deadline Close: %s\n", cliutil.EpochTime(cd.CurrentEpoch, cd.Close)) - fmt.Printf("Deadline Challenge: %s\n", cliutil.EpochTime(cd.CurrentEpoch, cd.Challenge)) - fmt.Printf("Deadline FaultCutoff: %s\n", cliutil.EpochTime(cd.CurrentEpoch, cd.FaultCutoff)) - return nil - }, -} - -var provingDeadlinesCmd = &cli.Command{ - Name: "deadlines", - Usage: "View the current proving period deadlines information", - Flags: []cli.Flag{ - &cli.BoolFlag{ - Name: "all", - Usage: "Count all sectors (only live sectors are counted by default)", - Aliases: []string{"a"}, - }, - }, - Action: func(cctx *cli.Context) error { - api, acloser, err := lcli.GetFullNodeAPI(cctx) - if err != nil { - return err - } - defer acloser() - - ctx := lcli.ReqContext(cctx) - - maddr, err := getActorAddress(ctx, cctx) - if err != nil { - return err - } - - deadlines, err := api.StateMinerDeadlines(ctx, maddr, types.EmptyTSK) - if err != nil { - return xerrors.Errorf("getting deadlines: %w", err) - } - - di, err := api.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK) - if err != nil { - return xerrors.Errorf("getting deadlines: %w", err) - } - - head, err := api.ChainHead(ctx) - if err != nil { - return err - } - - fmt.Printf("Miner: %s\n", color.BlueString("%s", maddr)) - - tw := tabwriter.NewWriter(os.Stdout, 2, 4, 2, ' ', 0) - _, _ = fmt.Fprintln(tw, "deadline\topen\tpartitions\tsectors (faults)\tproven partitions") - - for dlIdx, deadline := range deadlines { - partitions, err := api.StateMinerPartitions(ctx, maddr, uint64(dlIdx), types.EmptyTSK) - if err != nil { - return xerrors.Errorf("getting partitions for deadline %d: %w", dlIdx, err) - } - - provenPartitions, err := deadline.PostSubmissions.Count() - if err != nil { - return err - } - - sectors := uint64(0) - faults := uint64(0) - var partitionCount int - - for _, partition := range partitions { - if !cctx.Bool("all") { - sc, err := partition.LiveSectors.Count() - if err != nil { - return err - } - - if sc > 0 { - partitionCount++ - } - - sectors += sc - } else { - sc, err := partition.AllSectors.Count() - if err != nil { - return err - } - - partitionCount++ - sectors += sc - } - - fc, err := partition.FaultySectors.Count() - if err != nil { - return err - } - - faults += fc - } - - var cur string - if di.Index == uint64(dlIdx) { - cur += "\t(current)" - } - - _, _ = fmt.Fprintf(tw, "%d\t%s\t%d\t%d (%d)\t%d%s\n", dlIdx, deadlineOpenTime(head, 
uint64(dlIdx), di), - partitionCount, sectors, faults, provenPartitions, cur) - } - - return tw.Flush() - }, -} - -func deadlineOpenTime(ts *types.TipSet, dlIdx uint64, di *dline.Info) string { - gapIdx := dlIdx - di.Index - gapHeight := uint64(di.WPoStProvingPeriod) / di.WPoStPeriodDeadlines * gapIdx - - openHeight := di.Open + abi.ChainEpoch(gapHeight) - genesisBlockTimestamp := ts.MinTimestamp() - uint64(ts.Height())*build.BlockDelaySecs - - return time.Unix(int64(genesisBlockTimestamp+build.BlockDelaySecs*uint64(openHeight)), 0).Format(time.TimeOnly) -} - -var provingDeadlineInfoCmd = &cli.Command{ - Name: "deadline", - Usage: "View the current proving period deadline information by its index", - Flags: []cli.Flag{ - &cli.BoolFlag{ - Name: "sector-nums", - Aliases: []string{"n"}, - Usage: "Print sector/fault numbers belonging to this deadline", - }, - &cli.BoolFlag{ - Name: "bitfield", - Aliases: []string{"b"}, - Usage: "Print partition bitfield stats", - }, - }, - ArgsUsage: "", - Action: func(cctx *cli.Context) error { - - if cctx.NArg() != 1 { - return lcli.IncorrectNumArgs(cctx) - } - - dlIdx, err := strconv.ParseUint(cctx.Args().Get(0), 10, 64) - if err != nil { - return xerrors.Errorf("could not parse deadline index: %w", err) - } - - api, acloser, err := lcli.GetFullNodeAPI(cctx) - if err != nil { - return err - } - defer acloser() - - ctx := lcli.ReqContext(cctx) - - maddr, err := getActorAddress(ctx, cctx) - if err != nil { - return err - } - - deadlines, err := api.StateMinerDeadlines(ctx, maddr, types.EmptyTSK) - if err != nil { - return xerrors.Errorf("getting deadlines: %w", err) - } - - di, err := api.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK) - if err != nil { - return xerrors.Errorf("getting deadlines: %w", err) - } - - partitions, err := api.StateMinerPartitions(ctx, maddr, dlIdx, types.EmptyTSK) - if err != nil { - return xerrors.Errorf("getting partitions for deadline %d: %w", dlIdx, err) - } - - head, err := api.ChainHead(ctx) - if err != nil { - return err - } - - provenPartitions, err := deadlines[dlIdx].PostSubmissions.Count() - if err != nil { - return err - } - - fmt.Printf("Deadline Index: %d\n", dlIdx) - fmt.Printf("Deadline Open: %s\n", deadlineOpenTime(head, dlIdx, di)) - fmt.Printf("Partitions: %d\n", len(partitions)) - fmt.Printf("Proven Partitions: %d\n", provenPartitions) - fmt.Printf("Current: %t\n\n", di.Index == dlIdx) - - for pIdx, partition := range partitions { - fmt.Printf("Partition Index: %d\n", pIdx) - - printStats := func(bf bitfield.BitField, name string) error { - count, err := bf.Count() - if err != nil { - return err - } - - rit, err := bf.RunIterator() - if err != nil { - return err - } - - if cctx.Bool("bitfield") { - var ones, zeros, oneRuns, zeroRuns, invalid uint64 - for rit.HasNext() { - r, err := rit.NextRun() - if err != nil { - return xerrors.Errorf("next run: %w", err) - } - if !r.Valid() { - invalid++ - } - if r.Val { - ones += r.Len - oneRuns++ - } else { - zeros += r.Len - zeroRuns++ - } - } - - var buf bytes.Buffer - if err := bf.MarshalCBOR(&buf); err != nil { - return err - } - sz := len(buf.Bytes()) - szstr := types.SizeStr(types.NewInt(uint64(sz))) - - fmt.Printf("\t%s Sectors:%s%d (bitfield - runs %d+%d=%d - %d 0s %d 1s - %d inv - %s %dB)\n", name, strings.Repeat(" ", 18-len(name)), count, zeroRuns, oneRuns, zeroRuns+oneRuns, zeros, ones, invalid, szstr, sz) - } else { - fmt.Printf("\t%s Sectors:%s%d\n", name, strings.Repeat(" ", 18-len(name)), count) - } - - if cctx.Bool("sector-nums") { - nums, err := 
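
Editor's note: the removed `deadlineOpenTime` helper above converts a deadline index into a wall-clock open time by deriving the genesis timestamp from the current tipset and adding one block delay per epoch until that deadline opens. A standalone sketch of the same arithmetic follows, with hard-coded mainnet-style parameters (30-second blocks, 48 deadlines per 2880-epoch proving period); the real helper reads these from `dline.Info` and `build.BlockDelaySecs`, and the genesis timestamp below is illustrative.

```
package main

import (
	"fmt"
	"time"
)

const (
	blockDelaySecs  = 30   // seconds per epoch (mainnet)
	provingPeriod   = 2880 // epochs per proving period (24h on mainnet)
	periodDeadlines = 48   // deadlines per proving period
)

// deadlineOpen returns the wall-clock time at which deadline dlIdx opens,
// given the currently open deadline index, its open epoch, and the genesis
// time. Like the original helper, it assumes dlIdx is at or after curIdx.
func deadlineOpen(genesis time.Time, curIdx, dlIdx, curOpenEpoch uint64) time.Time {
	gap := (provingPeriod / periodDeadlines) * (dlIdx - curIdx) // epochs until dlIdx opens
	openEpoch := curOpenEpoch + gap
	return genesis.Add(time.Duration(openEpoch*blockDelaySecs) * time.Second)
}

func main() {
	genesis := time.Unix(1598306400, 0) // illustrative genesis timestamp
	// Deadline 0 is open at epoch 100000; when does deadline 5 open?
	fmt.Println(deadlineOpen(genesis, 0, 5, 100000).Format(time.TimeOnly))
}
```
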
bf.All(count) - if err != nil { - return err - } - fmt.Printf("\t%s Sector Numbers:%s%v\n", name, strings.Repeat(" ", 12-len(name)), nums) - } - - return nil - } - - if err := printStats(partition.AllSectors, "All"); err != nil { - return err - } - if err := printStats(partition.LiveSectors, "Live"); err != nil { - return err - } - if err := printStats(partition.ActiveSectors, "Active"); err != nil { - return err - } - if err := printStats(partition.FaultySectors, "Faulty"); err != nil { - return err - } - if err := printStats(partition.RecoveringSectors, "Recovering"); err != nil { - return err - } - } - return nil - }, -} - var provingCheckProvableCmd = &cli.Command{ Name: "check", Usage: "Check sectors provable", diff --git a/cmd/lotus-miner/sectors.go b/cmd/lotus-miner/sectors.go index 3e4439eb87a..a3ffb833594 100644 --- a/cmd/lotus-miner/sectors.go +++ b/cmd/lotus-miner/sectors.go @@ -3,10 +3,7 @@ package main import ( "bufio" "encoding/csv" - "encoding/json" - "errors" "fmt" - "math" "os" "sort" "strconv" @@ -23,18 +20,16 @@ import ( "github.com/filecoin-project/go-bitfield" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" - "github.com/filecoin-project/go-state-types/builtin" "github.com/filecoin-project/go-state-types/network" "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/blockstore" - "github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" - "github.com/filecoin-project/lotus/chain/actors/builtin/verifreg" "github.com/filecoin-project/lotus/chain/actors/policy" "github.com/filecoin-project/lotus/chain/types" lcli "github.com/filecoin-project/lotus/cli" + "github.com/filecoin-project/lotus/cli/spcli" cliutil "github.com/filecoin-project/lotus/cli/util" "github.com/filecoin-project/lotus/lib/result" "github.com/filecoin-project/lotus/lib/strle" @@ -48,16 +43,16 @@ var sectorsCmd = &cli.Command{ Name: "sectors", Usage: "interact with sector store", Subcommands: []*cli.Command{ - sectorsStatusCmd, + spcli.SectorsStatusCmd(LMActorOrEnvGetter, getOnDiskInfo), sectorsListCmd, sectorsRefsCmd, sectorsUpdateCmd, sectorsPledgeCmd, sectorsNumbersCmd, - sectorPreCommitsCmd, - sectorsCheckExpireCmd, + spcli.SectorPreCommitsCmd(LMActorOrEnvGetter), + spcli.SectorsCheckExpireCmd(LMActorOrEnvGetter), sectorsExpiredCmd, - sectorsExtendCmd, + spcli.SectorsExtendCmd(LMActorOrEnvGetter), sectorsTerminateCmd, sectorsRemoveCmd, sectorsSnapUpCmd, @@ -67,11 +62,20 @@ var sectorsCmd = &cli.Command{ sectorsCapacityCollateralCmd, sectorsBatching, sectorsRefreshPieceMatchingCmd, - sectorsCompactPartitionsCmd, + spcli.SectorsCompactPartitionsCmd(LMActorOrEnvGetter), sectorsUnsealCmd, }, } +func getOnDiskInfo(cctx *cli.Context, id abi.SectorNumber, onChainInfo bool) (api.SectorInfo, error) { + minerApi, closer, err := lcli.GetStorageMinerAPI(cctx) + if err != nil { + return api.SectorInfo{}, err + } + defer closer() + return minerApi.SectorsStatus(cctx.Context, id, onChainInfo) +} + var sectorsPledgeCmd = &cli.Command{ Name: "pledge", Usage: "store random data in a sector", @@ -94,187 +98,6 @@ var sectorsPledgeCmd = &cli.Command{ }, } -var sectorsStatusCmd = &cli.Command{ - Name: "status", - Usage: "Get the seal status of a sector by its number", - ArgsUsage: "", - Flags: []cli.Flag{ - &cli.BoolFlag{ - Name: "log", - Usage: "display event log", - Aliases: []string{"l"}, - }, - &cli.BoolFlag{ - Name: "on-chain-info", - Usage: 
"show sector on chain info", - Aliases: []string{"c"}, - }, - &cli.BoolFlag{ - Name: "partition-info", - Usage: "show partition related info", - Aliases: []string{"p"}, - }, - &cli.BoolFlag{ - Name: "proof", - Usage: "print snark proof bytes as hex", - }, - }, - Action: func(cctx *cli.Context) error { - minerApi, closer, err := lcli.GetStorageMinerAPI(cctx) - if err != nil { - return err - } - defer closer() - ctx := lcli.ReqContext(cctx) - - if cctx.NArg() != 1 { - return lcli.IncorrectNumArgs(cctx) - } - - id, err := strconv.ParseUint(cctx.Args().First(), 10, 64) - if err != nil { - return err - } - - onChainInfo := cctx.Bool("on-chain-info") - status, err := minerApi.SectorsStatus(ctx, abi.SectorNumber(id), onChainInfo) - if err != nil { - return err - } - - fmt.Printf("SectorID:\t%d\n", status.SectorID) - fmt.Printf("Status:\t\t%s\n", status.State) - fmt.Printf("CIDcommD:\t%s\n", status.CommD) - fmt.Printf("CIDcommR:\t%s\n", status.CommR) - fmt.Printf("Ticket:\t\t%x\n", status.Ticket.Value) - fmt.Printf("TicketH:\t%d\n", status.Ticket.Epoch) - fmt.Printf("Seed:\t\t%x\n", status.Seed.Value) - fmt.Printf("SeedH:\t\t%d\n", status.Seed.Epoch) - fmt.Printf("Precommit:\t%s\n", status.PreCommitMsg) - fmt.Printf("Commit:\t\t%s\n", status.CommitMsg) - if cctx.Bool("proof") { - fmt.Printf("Proof:\t\t%x\n", status.Proof) - } - fmt.Printf("Deals:\t\t%v\n", status.Deals) - fmt.Printf("Retries:\t%d\n", status.Retries) - if status.LastErr != "" { - fmt.Printf("Last Error:\t\t%s\n", status.LastErr) - } - - if onChainInfo { - fmt.Printf("\nSector On Chain Info\n") - fmt.Printf("SealProof:\t\t%x\n", status.SealProof) - fmt.Printf("Activation:\t\t%v\n", status.Activation) - fmt.Printf("Expiration:\t\t%v\n", status.Expiration) - fmt.Printf("DealWeight:\t\t%v\n", status.DealWeight) - fmt.Printf("VerifiedDealWeight:\t\t%v\n", status.VerifiedDealWeight) - fmt.Printf("InitialPledge:\t\t%v\n", types.FIL(status.InitialPledge)) - fmt.Printf("\nExpiration Info\n") - fmt.Printf("OnTime:\t\t%v\n", status.OnTime) - fmt.Printf("Early:\t\t%v\n", status.Early) - } - - if cctx.Bool("partition-info") { - fullApi, nCloser, err := lcli.GetFullNodeAPI(cctx) - if err != nil { - return err - } - defer nCloser() - - maddr, err := getActorAddress(ctx, cctx) - if err != nil { - return err - } - - mact, err := fullApi.StateGetActor(ctx, maddr, types.EmptyTSK) - if err != nil { - return err - } - - tbs := blockstore.NewTieredBstore(blockstore.NewAPIBlockstore(fullApi), blockstore.NewMemory()) - mas, err := miner.Load(adt.WrapStore(ctx, cbor.NewCborStore(tbs)), mact) - if err != nil { - return err - } - - errFound := errors.New("found") - if err := mas.ForEachDeadline(func(dlIdx uint64, dl miner.Deadline) error { - return dl.ForEachPartition(func(partIdx uint64, part miner.Partition) error { - pas, err := part.AllSectors() - if err != nil { - return err - } - - set, err := pas.IsSet(id) - if err != nil { - return err - } - if set { - fmt.Printf("\nDeadline:\t%d\n", dlIdx) - fmt.Printf("Partition:\t%d\n", partIdx) - - checkIn := func(name string, bg func() (bitfield.BitField, error)) error { - bf, err := bg() - if err != nil { - return err - } - - set, err := bf.IsSet(id) - if err != nil { - return err - } - setstr := "no" - if set { - setstr = "yes" - } - fmt.Printf("%s: \t%s\n", name, setstr) - return nil - } - - if err := checkIn("Unproven", part.UnprovenSectors); err != nil { - return err - } - if err := checkIn("Live", part.LiveSectors); err != nil { - return err - } - if err := checkIn("Active", part.ActiveSectors); err != nil 
{ - return err - } - if err := checkIn("Faulty", part.FaultySectors); err != nil { - return err - } - if err := checkIn("Recovering", part.RecoveringSectors); err != nil { - return err - } - - return errFound - } - - return nil - }) - }); err != errFound { - if err != nil { - return err - } - - fmt.Println("\nNot found in any partition") - } - } - - if cctx.Bool("log") { - fmt.Printf("--------\nEvent Log:\n") - - for i, l := range status.Log { - fmt.Printf("%d.\t%s:\t[%s]\t%s\n", i, time.Unix(int64(l.Timestamp), 0), l.Kind, l.Message) - if l.Trace != "" { - fmt.Printf("\t%s\n", l.Trace) - } - } - } - return nil - }, -} - var sectorsListCmd = &cli.Command{ Name: "list", Usage: "List sectors", @@ -494,7 +317,7 @@ var sectorsListCmd = &cli.Command{ m := map[string]interface{}{ "ID": s, - "State": color.New(stateOrder[sealing.SectorState(st.State)].col).Sprint(st.State), + "State": color.New(spcli.StateOrder[sealing.SectorState(st.State)].Col).Sprint(st.State), "OnChain": yesno(inSSet), "Active": yesno(inASet), } @@ -778,654 +601,6 @@ var sectorsRefsCmd = &cli.Command{ }, } -var sectorsCheckExpireCmd = &cli.Command{ - Name: "check-expire", - Usage: "Inspect expiring sectors", - Flags: []cli.Flag{ - &cli.Int64Flag{ - Name: "cutoff", - Usage: "skip sectors whose current expiration is more than epochs from now, defaults to 60 days", - Value: 172800, - }, - }, - Action: func(cctx *cli.Context) error { - - fullApi, nCloser, err := lcli.GetFullNodeAPI(cctx) - if err != nil { - return err - } - defer nCloser() - - ctx := lcli.ReqContext(cctx) - - maddr, err := getActorAddress(ctx, cctx) - if err != nil { - return err - } - - head, err := fullApi.ChainHead(ctx) - if err != nil { - return err - } - currEpoch := head.Height() - - nv, err := fullApi.StateNetworkVersion(ctx, types.EmptyTSK) - if err != nil { - return err - } - - sectors, err := fullApi.StateMinerActiveSectors(ctx, maddr, types.EmptyTSK) - if err != nil { - return err - } - - n := 0 - for _, s := range sectors { - if s.Expiration-currEpoch <= abi.ChainEpoch(cctx.Int64("cutoff")) { - sectors[n] = s - n++ - } - } - sectors = sectors[:n] - - sort.Slice(sectors, func(i, j int) bool { - if sectors[i].Expiration == sectors[j].Expiration { - return sectors[i].SectorNumber < sectors[j].SectorNumber - } - return sectors[i].Expiration < sectors[j].Expiration - }) - - tw := tablewriter.New( - tablewriter.Col("ID"), - tablewriter.Col("SealProof"), - tablewriter.Col("InitialPledge"), - tablewriter.Col("Activation"), - tablewriter.Col("Expiration"), - tablewriter.Col("MaxExpiration"), - tablewriter.Col("MaxExtendNow")) - - for _, sector := range sectors { - MaxExpiration := sector.Activation + policy.GetSectorMaxLifetime(sector.SealProof, nv) - maxExtension, err := policy.GetMaxSectorExpirationExtension(nv) - if err != nil { - return xerrors.Errorf("failed to get max extension: %w", err) - } - - MaxExtendNow := currEpoch + maxExtension - - if MaxExtendNow > MaxExpiration { - MaxExtendNow = MaxExpiration - } - - tw.Write(map[string]interface{}{ - "ID": sector.SectorNumber, - "SealProof": sector.SealProof, - "InitialPledge": types.FIL(sector.InitialPledge).Short(), - "Activation": cliutil.EpochTime(currEpoch, sector.Activation), - "Expiration": cliutil.EpochTime(currEpoch, sector.Expiration), - "MaxExpiration": cliutil.EpochTime(currEpoch, MaxExpiration), - "MaxExtendNow": cliutil.EpochTime(currEpoch, MaxExtendNow), - }) - } - - return tw.Flush(os.Stdout) - }, -} - -type PseudoExpirationExtension struct { - Deadline uint64 - Partition uint64 - Sectors 
string - NewExpiration abi.ChainEpoch -} - -type PseudoExtendSectorExpirationParams struct { - Extensions []PseudoExpirationExtension -} - -func NewPseudoExtendParams(p *miner.ExtendSectorExpiration2Params) (*PseudoExtendSectorExpirationParams, error) { - res := PseudoExtendSectorExpirationParams{} - for _, ext := range p.Extensions { - scount, err := ext.Sectors.Count() - if err != nil { - return nil, err - } - - sectors, err := ext.Sectors.All(scount) - if err != nil { - return nil, err - } - - res.Extensions = append(res.Extensions, PseudoExpirationExtension{ - Deadline: ext.Deadline, - Partition: ext.Partition, - Sectors: ArrayToString(sectors), - NewExpiration: ext.NewExpiration, - }) - } - return &res, nil -} - -// ArrayToString Example: {1,3,4,5,8,9} -> "1,3-5,8-9" -func ArrayToString(array []uint64) string { - sort.Slice(array, func(i, j int) bool { - return array[i] < array[j] - }) - - var sarray []string - s := "" - - for i, elm := range array { - if i == 0 { - s = strconv.FormatUint(elm, 10) - continue - } - if elm == array[i-1] { - continue // filter out duplicates - } else if elm == array[i-1]+1 { - s = strings.Split(s, "-")[0] + "-" + strconv.FormatUint(elm, 10) - } else { - sarray = append(sarray, s) - s = strconv.FormatUint(elm, 10) - } - } - - if s != "" { - sarray = append(sarray, s) - } - - return strings.Join(sarray, ",") -} - -func getSectorsFromFile(filePath string) ([]abi.SectorNumber, error) { - file, err := os.Open(filePath) - if err != nil { - return nil, err - } - - scanner := bufio.NewScanner(file) - sectors := make([]abi.SectorNumber, 0) - - for scanner.Scan() { - line := scanner.Text() - - id, err := strconv.ParseUint(line, 10, 64) - if err != nil { - return nil, xerrors.Errorf("could not parse %s as sector id: %s", line, err) - } - - sectors = append(sectors, abi.SectorNumber(id)) - } - - if err = file.Close(); err != nil { - return nil, err - } - - return sectors, nil -} - -func SectorNumsToBitfield(sectors []abi.SectorNumber) bitfield.BitField { - var numbers []uint64 - for _, sector := range sectors { - numbers = append(numbers, uint64(sector)) - } - - return bitfield.NewFromSet(numbers) -} - -var sectorsExtendCmd = &cli.Command{ - Name: "extend", - Usage: "Extend expiring sectors while not exceeding each sector's max life", - ArgsUsage: "", - Flags: []cli.Flag{ - &cli.Int64Flag{ - Name: "from", - Usage: "only consider sectors whose current expiration epoch is in the range of [from, to], defaults to: now + 120 (1 hour)", - }, - &cli.Int64Flag{ - Name: "to", - Usage: "only consider sectors whose current expiration epoch is in the range of [from, to], defaults to: now + 92160 (32 days)", - }, - &cli.StringFlag{ - Name: "sector-file", - Usage: "provide a file containing one sector number in each line, ignoring above selecting criteria", - }, - &cli.StringFlag{ - Name: "exclude", - Usage: "optionally provide a file containing excluding sectors", - }, - &cli.Int64Flag{ - Name: "extension", - Usage: "try to extend selected sectors by this number of epochs, defaults to 540 days", - Value: 1555200, - }, - &cli.Int64Flag{ - Name: "new-expiration", - Usage: "try to extend selected sectors to this epoch, ignoring extension", - }, - &cli.BoolFlag{ - Name: "only-cc", - Usage: "only extend CC sectors (useful for making sector ready for snap upgrade)", - }, - &cli.BoolFlag{ - Name: "drop-claims", - Usage: "drop claims for sectors that can be extended, but only by dropping some of their verified power claims", - }, - &cli.Int64Flag{ - Name: "tolerance", - Usage: "don't 
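
Editor's note: the removed `ArrayToString` helper documents a single example in its comment; a few concrete cases make the sort/de-duplicate/range-collapse behavior easier to see. The snippet below copies the removed function body verbatim so it runs on its own, and assumes the behavior is unchanged wherever the helper now lives.

```
package main

import (
	"fmt"
	"sort"
	"strconv"
	"strings"
)

// ArrayToString Example: {1,3,4,5,8,9} -> "1,3-5,8-9" (copied from the removed code)
func ArrayToString(array []uint64) string {
	sort.Slice(array, func(i, j int) bool {
		return array[i] < array[j]
	})

	var sarray []string
	s := ""

	for i, elm := range array {
		if i == 0 {
			s = strconv.FormatUint(elm, 10)
			continue
		}
		if elm == array[i-1] {
			continue // filter out duplicates
		} else if elm == array[i-1]+1 {
			s = strings.Split(s, "-")[0] + "-" + strconv.FormatUint(elm, 10)
		} else {
			sarray = append(sarray, s)
			s = strconv.FormatUint(elm, 10)
		}
	}

	if s != "" {
		sarray = append(sarray, s)
	}

	return strings.Join(sarray, ",")
}

func main() {
	fmt.Println(ArrayToString([]uint64{1, 3, 4, 5, 8, 9}))    // 1,3-5,8-9
	fmt.Println(ArrayToString([]uint64{9, 8, 5, 4, 4, 3, 1})) // 1,3-5,8-9: input is sorted and de-duplicated first
	fmt.Println(ArrayToString([]uint64{2, 2, 2}))             // 2
}
```
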
try to extend sectors by fewer than this number of epochs, defaults to 7 days", - Value: 20160, - }, - &cli.StringFlag{ - Name: "max-fee", - Usage: "use up to this amount of FIL for one message. pass this flag to avoid message congestion.", - Value: "0", - }, - &cli.Int64Flag{ - Name: "max-sectors", - Usage: "the maximum number of sectors contained in each message", - }, - &cli.BoolFlag{ - Name: "really-do-it", - Usage: "pass this flag to really extend sectors, otherwise will only print out json representation of parameters", - }, - }, - Action: func(cctx *cli.Context) error { - mf, err := types.ParseFIL(cctx.String("max-fee")) - if err != nil { - return err - } - - spec := &api.MessageSendSpec{MaxFee: abi.TokenAmount(mf)} - - fullApi, nCloser, err := lcli.GetFullNodeAPI(cctx) - if err != nil { - return err - } - defer nCloser() - - ctx := lcli.ReqContext(cctx) - - maddr, err := getActorAddress(ctx, cctx) - if err != nil { - return err - } - - head, err := fullApi.ChainHead(ctx) - if err != nil { - return err - } - currEpoch := head.Height() - - nv, err := fullApi.StateNetworkVersion(ctx, types.EmptyTSK) - if err != nil { - return err - } - - activeSet, err := fullApi.StateMinerActiveSectors(ctx, maddr, types.EmptyTSK) - if err != nil { - return err - } - - activeSectorsInfo := make(map[abi.SectorNumber]*miner.SectorOnChainInfo, len(activeSet)) - for _, info := range activeSet { - activeSectorsInfo[info.SectorNumber] = info - } - - mact, err := fullApi.StateGetActor(ctx, maddr, types.EmptyTSK) - if err != nil { - return err - } - - tbs := blockstore.NewTieredBstore(blockstore.NewAPIBlockstore(fullApi), blockstore.NewMemory()) - adtStore := adt.WrapStore(ctx, cbor.NewCborStore(tbs)) - mas, err := miner.Load(adtStore, mact) - if err != nil { - return err - } - - activeSectorsLocation := make(map[abi.SectorNumber]*miner.SectorLocation, len(activeSet)) - - if err := mas.ForEachDeadline(func(dlIdx uint64, dl miner.Deadline) error { - return dl.ForEachPartition(func(partIdx uint64, part miner.Partition) error { - pas, err := part.ActiveSectors() - if err != nil { - return err - } - - return pas.ForEach(func(i uint64) error { - activeSectorsLocation[abi.SectorNumber(i)] = &miner.SectorLocation{ - Deadline: dlIdx, - Partition: partIdx, - } - return nil - }) - }) - }); err != nil { - return err - } - - excludeSet := make(map[abi.SectorNumber]struct{}) - if cctx.IsSet("exclude") { - excludeSectors, err := getSectorsFromFile(cctx.String("exclude")) - if err != nil { - return err - } - - for _, id := range excludeSectors { - excludeSet[id] = struct{}{} - } - } - - var sectors []abi.SectorNumber - if cctx.Args().Present() { - if cctx.IsSet("sector-file") { - return xerrors.Errorf("sector-file specified along with command line params") - } - - for i, s := range cctx.Args().Slice() { - id, err := strconv.ParseUint(s, 10, 64) - if err != nil { - return xerrors.Errorf("could not parse sector %d: %w", i, err) - } - - sectors = append(sectors, abi.SectorNumber(id)) - } - } else if cctx.IsSet("sector-file") { - sectors, err = getSectorsFromFile(cctx.String("sector-file")) - if err != nil { - return err - } - } else { - from := currEpoch + 120 - to := currEpoch + 92160 - - if cctx.IsSet("from") { - from = abi.ChainEpoch(cctx.Int64("from")) - } - - if cctx.IsSet("to") { - to = abi.ChainEpoch(cctx.Int64("to")) - } - - for _, si := range activeSet { - if si.Expiration >= from && si.Expiration <= to { - sectors = append(sectors, si.SectorNumber) - } - } - } - - var sis []*miner.SectorOnChainInfo - for _, id := 
range sectors { - if _, exclude := excludeSet[id]; exclude { - continue - } - - si, found := activeSectorsInfo[id] - if !found { - return xerrors.Errorf("sector %d is not active", id) - } - if len(si.DealIDs) > 0 && cctx.Bool("only-cc") { - continue - } - - sis = append(sis, si) - } - - withinTolerance := func(a, b abi.ChainEpoch) bool { - diff := a - b - if diff < 0 { - diff = -diff - } - - return diff <= abi.ChainEpoch(cctx.Int64("tolerance")) - } - - extensions := map[miner.SectorLocation]map[abi.ChainEpoch][]abi.SectorNumber{} - for _, si := range sis { - extension := abi.ChainEpoch(cctx.Int64("extension")) - newExp := si.Expiration + extension - - if cctx.IsSet("new-expiration") { - newExp = abi.ChainEpoch(cctx.Int64("new-expiration")) - } - - maxExtension, err := policy.GetMaxSectorExpirationExtension(nv) - if err != nil { - return xerrors.Errorf("failed to get max extension: %w", err) - } - - maxExtendNow := currEpoch + maxExtension - if newExp > maxExtendNow { - newExp = maxExtendNow - } - - maxExp := si.Activation + policy.GetSectorMaxLifetime(si.SealProof, nv) - if newExp > maxExp { - newExp = maxExp - } - - if newExp <= si.Expiration || withinTolerance(newExp, si.Expiration) { - continue - } - - l, found := activeSectorsLocation[si.SectorNumber] - if !found { - return xerrors.Errorf("location for sector %d not found", si.SectorNumber) - } - - es, found := extensions[*l] - if !found { - ne := make(map[abi.ChainEpoch][]abi.SectorNumber) - ne[newExp] = []abi.SectorNumber{si.SectorNumber} - extensions[*l] = ne - } else { - added := false - for exp := range es { - if withinTolerance(newExp, exp) { - es[exp] = append(es[exp], si.SectorNumber) - added = true - break - } - } - - if !added { - es[newExp] = []abi.SectorNumber{si.SectorNumber} - } - } - } - - verifregAct, err := fullApi.StateGetActor(ctx, builtin.VerifiedRegistryActorAddr, types.EmptyTSK) - if err != nil { - return xerrors.Errorf("failed to lookup verifreg actor: %w", err) - } - - verifregSt, err := verifreg.Load(adtStore, verifregAct) - if err != nil { - return xerrors.Errorf("failed to load verifreg state: %w", err) - } - - claimsMap, err := verifregSt.GetClaims(maddr) - if err != nil { - return xerrors.Errorf("failed to lookup claims for miner: %w", err) - } - - claimIdsBySector, err := verifregSt.GetClaimIdsBySector(maddr) - if err != nil { - return xerrors.Errorf("failed to lookup claim IDs by sector: %w", err) - } - - sectorsMax, err := policy.GetAddressedSectorsMax(nv) - if err != nil { - return err - } - - declMax, err := policy.GetDeclarationsMax(nv) - if err != nil { - return err - } - - addrSectors := sectorsMax - if cctx.Int("max-sectors") != 0 { - addrSectors = cctx.Int("max-sectors") - if addrSectors > sectorsMax { - return xerrors.Errorf("the specified max-sectors exceeds the maximum limit") - } - } - - var params []miner.ExtendSectorExpiration2Params - - p := miner.ExtendSectorExpiration2Params{} - scount := 0 - - for l, exts := range extensions { - for newExp, numbers := range exts { - sectorsWithoutClaimsToExtend := bitfield.New() - var sectorsWithClaims []miner.SectorClaim - for _, sectorNumber := range numbers { - claimIdsToMaintain := make([]verifreg.ClaimId, 0) - claimIdsToDrop := make([]verifreg.ClaimId, 0) - cannotExtendSector := false - claimIds, ok := claimIdsBySector[sectorNumber] - // Nothing to check, add to ccSectors - if !ok { - sectorsWithoutClaimsToExtend.Set(uint64(sectorNumber)) - } else { - for _, claimId := range claimIds { - claim, ok := claimsMap[claimId] - if !ok { - return 
xerrors.Errorf("failed to find claim for claimId %d", claimId) - } - claimExpiration := claim.TermStart + claim.TermMax - // can be maintained in the extended sector - if claimExpiration > newExp { - claimIdsToMaintain = append(claimIdsToMaintain, claimId) - } else { - sectorInfo, ok := activeSectorsInfo[sectorNumber] - if !ok { - return xerrors.Errorf("failed to find sector in active sector set: %w", err) - } - if !cctx.Bool("drop-claims") || - // FIP-0045 requires the claim minimum duration to have passed - currEpoch <= (claim.TermStart+claim.TermMin) || - // FIP-0045 requires the sector to be in its last 30 days of life - (currEpoch <= sectorInfo.Expiration-builtin.EndOfLifeClaimDropPeriod) { - fmt.Printf("skipping sector %d because claim %d does not live long enough \n", sectorNumber, claimId) - cannotExtendSector = true - break - } - - claimIdsToDrop = append(claimIdsToDrop, claimId) - } - } - if cannotExtendSector { - continue - } - - if len(claimIdsToMaintain)+len(claimIdsToDrop) != 0 { - sectorsWithClaims = append(sectorsWithClaims, miner.SectorClaim{ - SectorNumber: sectorNumber, - MaintainClaims: claimIdsToMaintain, - DropClaims: claimIdsToDrop, - }) - } - } - } - - sectorsWithoutClaimsCount, err := sectorsWithoutClaimsToExtend.Count() - if err != nil { - return xerrors.Errorf("failed to count cc sectors: %w", err) - } - - sectorsInDecl := int(sectorsWithoutClaimsCount) + len(sectorsWithClaims) - scount += sectorsInDecl - - if scount > addrSectors || len(p.Extensions) >= declMax { - params = append(params, p) - p = miner.ExtendSectorExpiration2Params{} - scount = sectorsInDecl - } - - p.Extensions = append(p.Extensions, miner.ExpirationExtension2{ - Deadline: l.Deadline, - Partition: l.Partition, - Sectors: SectorNumsToBitfield(numbers), - SectorsWithClaims: sectorsWithClaims, - NewExpiration: newExp, - }) - - } - } - - // if we have any sectors, then one last append is needed here - if scount != 0 { - params = append(params, p) - } - - if len(params) == 0 { - fmt.Println("nothing to extend") - return nil - } - - mi, err := fullApi.StateMinerInfo(ctx, maddr, types.EmptyTSK) - if err != nil { - return xerrors.Errorf("getting miner info: %w", err) - } - - stotal := 0 - - for i := range params { - scount := 0 - for _, ext := range params[i].Extensions { - count, err := ext.Sectors.Count() - if err != nil { - return err - } - scount += int(count) - } - fmt.Printf("Extending %d sectors: ", scount) - stotal += scount - - if !cctx.Bool("really-do-it") { - pp, err := NewPseudoExtendParams(¶ms[i]) - if err != nil { - return err - } - - data, err := json.MarshalIndent(pp, "", " ") - if err != nil { - return err - } - - fmt.Println("\n", string(data)) - continue - } - - sp, aerr := actors.SerializeParams(¶ms[i]) - if aerr != nil { - return xerrors.Errorf("serializing params: %w", err) - } - - smsg, err := fullApi.MpoolPushMessage(ctx, &types.Message{ - From: mi.Worker, - To: maddr, - Method: builtin.MethodsMiner.ExtendSectorExpiration2, - Value: big.Zero(), - Params: sp, - }, spec) - if err != nil { - return xerrors.Errorf("mpool push message: %w", err) - } - - fmt.Println(smsg.Cid()) - } - - fmt.Printf("%d sectors extended\n", stotal) - - return nil - }, -} - var sectorsTerminateCmd = &cli.Command{ Name: "terminate", Usage: "Terminate sector on-chain then remove (WARNING: This means losing power and collateral for the removed sector)", @@ -2238,175 +1413,6 @@ func yesno(b bool) string { return color.RedString("NO") } -var sectorsCompactPartitionsCmd = &cli.Command{ - Name: 
"compact-partitions", - Usage: "removes dead sectors from partitions and reduces the number of partitions used if possible", - Flags: []cli.Flag{ - &cli.Uint64Flag{ - Name: "deadline", - Usage: "the deadline to compact the partitions in", - Required: true, - }, - &cli.Int64SliceFlag{ - Name: "partitions", - Usage: "list of partitions to compact sectors in", - Required: true, - }, - &cli.BoolFlag{ - Name: "really-do-it", - Usage: "Actually send transaction performing the action", - Value: false, - }, - &cli.StringFlag{ - Name: "actor", - Usage: "Specify the address of the miner to run this command", - }, - }, - Action: func(cctx *cli.Context) error { - fullNodeAPI, acloser, err := lcli.GetFullNodeAPI(cctx) - if err != nil { - return err - } - defer acloser() - - ctx := lcli.ReqContext(cctx) - - maddr, err := getActorAddress(ctx, cctx) - if err != nil { - return err - } - - minfo, err := fullNodeAPI.StateMinerInfo(ctx, maddr, types.EmptyTSK) - if err != nil { - return err - } - - deadline := cctx.Uint64("deadline") - if deadline > miner.WPoStPeriodDeadlines { - return fmt.Errorf("deadline %d out of range", deadline) - } - - parts := cctx.Int64Slice("partitions") - if len(parts) <= 0 { - return fmt.Errorf("must include at least one partition to compact") - } - fmt.Printf("compacting %d partitions\n", len(parts)) - - var makeMsgForPartitions func(partitionsBf bitfield.BitField) ([]*types.Message, error) - makeMsgForPartitions = func(partitionsBf bitfield.BitField) ([]*types.Message, error) { - params := miner.CompactPartitionsParams{ - Deadline: deadline, - Partitions: partitionsBf, - } - - sp, aerr := actors.SerializeParams(¶ms) - if aerr != nil { - return nil, xerrors.Errorf("serializing params: %w", err) - } - - msg := &types.Message{ - From: minfo.Worker, - To: maddr, - Method: builtin.MethodsMiner.CompactPartitions, - Value: big.Zero(), - Params: sp, - } - - estimatedMsg, err := fullNodeAPI.GasEstimateMessageGas(ctx, msg, nil, types.EmptyTSK) - if err != nil && xerrors.Is(err, &api.ErrOutOfGas{}) { - // the message is too big -- split into 2 - partitionsSlice, err := partitionsBf.All(math.MaxUint64) - if err != nil { - return nil, err - } - - partitions1 := bitfield.New() - for i := 0; i < len(partitionsSlice)/2; i++ { - partitions1.Set(uint64(i)) - } - - msgs1, err := makeMsgForPartitions(partitions1) - if err != nil { - return nil, err - } - - // time for the second half - partitions2 := bitfield.New() - for i := len(partitionsSlice) / 2; i < len(partitionsSlice); i++ { - partitions2.Set(uint64(i)) - } - - msgs2, err := makeMsgForPartitions(partitions2) - if err != nil { - return nil, err - } - - return append(msgs1, msgs2...), nil - } else if err != nil { - return nil, err - } - - return []*types.Message{estimatedMsg}, nil - } - - partitions := bitfield.New() - for _, partition := range parts { - partitions.Set(uint64(partition)) - } - - msgs, err := makeMsgForPartitions(partitions) - if err != nil { - return xerrors.Errorf("failed to make messages: %w", err) - } - - // Actually send the messages if really-do-it provided, simulate otherwise - if cctx.Bool("really-do-it") { - smsgs, err := fullNodeAPI.MpoolBatchPushMessage(ctx, msgs, nil) - if err != nil { - return xerrors.Errorf("mpool push: %w", err) - } - - if len(smsgs) == 1 { - fmt.Printf("Requested compact partitions in message %s\n", smsgs[0].Cid()) - } else { - fmt.Printf("Requested compact partitions in %d messages\n\n", len(smsgs)) - for _, v := range smsgs { - fmt.Println(v.Cid()) - } - } - - for _, v := range smsgs { - wait, 
err := fullNodeAPI.StateWaitMsg(ctx, v.Cid(), 2) - if err != nil { - return err - } - - // check it executed successfully - if wait.Receipt.ExitCode.IsError() { - fmt.Println(cctx.App.Writer, "compact partitions msg %s failed!", v.Cid()) - return err - } - } - - return nil - } - - for i, v := range msgs { - fmt.Printf("total of %d CompactPartitions msgs would be sent\n", len(msgs)) - - estMsg, err := fullNodeAPI.GasEstimateMessageGas(ctx, v, nil, types.EmptyTSK) - if err != nil { - return err - } - - fmt.Printf("msg %d would cost up to %s\n", i+1, types.FIL(estMsg.RequiredFunds())) - } - - return nil - - }, -} - var sectorsNumbersCmd = &cli.Command{ Name: "numbers", Usage: "manage sector number assignments", diff --git a/cmd/lotus-miner/storage.go b/cmd/lotus-miner/storage.go index fdd5b569656..b39fe2bf750 100644 --- a/cmd/lotus-miner/storage.go +++ b/cmd/lotus-miner/storage.go @@ -27,6 +27,7 @@ import ( "github.com/filecoin-project/lotus/api/v0api" "github.com/filecoin-project/lotus/chain/types" lcli "github.com/filecoin-project/lotus/cli" + "github.com/filecoin-project/lotus/cli/spcli" "github.com/filecoin-project/lotus/lib/tablewriter" sealing "github.com/filecoin-project/lotus/storage/pipeline" "github.com/filecoin-project/lotus/storage/sealer/fsutil" @@ -803,7 +804,7 @@ var storageListSectorsCmd = &cli.Command{ "Storage": color.New(sc1).Sprint(e.storage), "Sector": e.id, "Type": e.ft.String(), - "State": color.New(stateOrder[sealing.SectorState(e.state)].col).Sprint(e.state), + "State": color.New(spcli.StateOrder[sealing.SectorState(e.state)].Col).Sprint(e.state), "Primary": maybeStr(e.primary, color.FgGreen, "primary") + maybeStr(e.copy, color.FgBlue, "copy") + maybeStr(e.main, color.FgRed, "main"), "Path use": maybeStr(e.seal, color.FgMagenta, "seal ") + maybeStr(e.store, color.FgCyan, "store"), "URLs": e.urls, @@ -995,7 +996,7 @@ var storageLocks = &cli.Command{ return xerrors.Errorf("getting sector status(%d): %w", lock.Sector.Number, err) } - lockstr := fmt.Sprintf("%d\t%s\t", lock.Sector.Number, color.New(stateOrder[sealing.SectorState(st.State)].col).Sprint(st.State)) + lockstr := fmt.Sprintf("%d\t%s\t", lock.Sector.Number, color.New(spcli.StateOrder[sealing.SectorState(st.State)].Col).Sprint(st.State)) for i := 0; i < storiface.FileTypes; i++ { if lock.Write[i] > 0 { diff --git a/cmd/lotus-shed/adl.go b/cmd/lotus-shed/adl.go new file mode 100644 index 00000000000..762f78b6c82 --- /dev/null +++ b/cmd/lotus-shed/adl.go @@ -0,0 +1,124 @@ +package main + +import ( + "encoding/json" + "fmt" + "io" + "os" + + "github.com/ipfs/go-cid" + cbor "github.com/ipfs/go-ipld-cbor" + "github.com/ipld/go-car" + "github.com/urfave/cli/v2" + cbg "github.com/whyrusleeping/cbor-gen" + "golang.org/x/xerrors" + + adt13 "github.com/filecoin-project/go-state-types/builtin/v13/util/adt" + + "github.com/filecoin-project/lotus/blockstore" + "github.com/filecoin-project/lotus/chain/actors/adt" +) + +var adlCmd = &cli.Command{ + Name: "adl", + Usage: "adl manipulation commands", + Subcommands: []*cli.Command{ + adlAmtCmd, + }, +} + +var adlAmtCmd = &cli.Command{ + Name: "amt", + Usage: "AMT manipulation commands", + Subcommands: []*cli.Command{ + adlAmtGetCmd, + }, +} + +var adlAmtGetCmd = &cli.Command{ + Name: "get", + Usage: "Get an element from an AMT", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "car-file", + Usage: "write a car file with two hamts (use lotus-shed export-car)", + }, + &cli.IntFlag{ + Name: "bitwidth", + Usage: "bitwidth of the HAMT", + Value: 5, + }, + &cli.StringFlag{ + Name: 
"root", + Usage: "root cid of the HAMT", + }, + &cli.Int64Flag{ + Name: "key", + Usage: "key to get", + }, + }, + Action: func(cctx *cli.Context) error { + bs := blockstore.NewMemorySync() + + f, err := os.Open(cctx.String("car-file")) + if err != nil { + return err + } + defer func(f *os.File) { + _ = f.Close() + }(f) + + cr, err := car.NewCarReader(f) + if err != nil { + return err + } + + for { + blk, err := cr.Next() + if err != nil { + if err == io.EOF { + break + } + return err + } + + if err := bs.Put(cctx.Context, blk); err != nil { + return err + } + } + + root, err := cid.Parse(cctx.String("root")) + if err != nil { + return err + } + + m, err := adt13.AsArray(adt.WrapStore(cctx.Context, cbor.NewCborStore(bs)), root, cctx.Int("bitwidth")) + if err != nil { + return err + } + + var out cbg.Deferred + ok, err := m.Get(cctx.Uint64("key"), &out) + if err != nil { + return err + } + if !ok { + return xerrors.Errorf("no such element") + } + + fmt.Printf("RAW: %x\n", out.Raw) + fmt.Println("----") + + var i interface{} + if err := cbor.DecodeInto(out.Raw, &i); err == nil { + ij, err := json.MarshalIndent(i, "", " ") + if err != nil { + return err + } + + fmt.Println(string(ij)) + } + + return nil + }, +} diff --git a/cmd/lotus-shed/diff.go b/cmd/lotus-shed/diff.go index 981dc850c62..a8eac657514 100644 --- a/cmd/lotus-shed/diff.go +++ b/cmd/lotus-shed/diff.go @@ -1,20 +1,31 @@ package main import ( + "bytes" "context" + "encoding/json" "fmt" "io" + "os" + "github.com/fatih/color" "github.com/ipfs/go-cid" + cbor "github.com/ipfs/go-ipld-cbor" + "github.com/ipld/go-car" "github.com/urfave/cli/v2" "golang.org/x/xerrors" + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-amt-ipld/v4" + "github.com/filecoin-project/go-hamt-ipld/v3" "github.com/filecoin-project/go-state-types/abi" miner9 "github.com/filecoin-project/go-state-types/builtin/v9/miner" + "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" lcli "github.com/filecoin-project/lotus/cli" + "github.com/filecoin-project/lotus/lib/must" "github.com/filecoin-project/lotus/node/repo" ) @@ -24,6 +35,8 @@ var diffCmd = &cli.Command{ Subcommands: []*cli.Command{ diffStateTrees, diffMinerStates, + diffHAMTs, + diffAMTs, }, } @@ -64,7 +77,9 @@ var diffMinerStates = &cli.Command{ return err } - defer lkrepo.Close() //nolint:errcheck + defer func(lkrepo repo.LockedRepo) { + _ = lkrepo.Close() + }(lkrepo) bs, err := lkrepo.Blockstore(ctx, repo.UniversalBlockstore) if err != nil { @@ -258,3 +273,247 @@ var diffStateTrees = &cli.Command{ return nil }, } + +var diffHAMTs = &cli.Command{ + Name: "hamts", + Usage: "diff two HAMTs", + ArgsUsage: " ", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "car-file", + Usage: "write a car file with two hamts (use lotus-shed export-car)", + }, + &cli.IntFlag{ + Name: "bitwidth", + Usage: "bitwidth of the HAMT", + Value: 5, + }, + &cli.StringFlag{ + Name: "key-type", + Usage: "type of the key", + Value: "uint", + }, + }, + Action: func(cctx *cli.Context) error { + var bs blockstore.Blockstore = blockstore.NewMemorySync() + + if cctx.IsSet("car-file") { + f, err := os.Open(cctx.String("car-file")) + if err != nil { + return err + } + defer func(f *os.File) { + _ = f.Close() + }(f) + + cr, err := car.NewCarReader(f) + if err != nil { + return err + } + + for { + blk, err := cr.Next() + if err != nil { + if err == io.EOF { + break + } + return err + } + + if err := bs.Put(cctx.Context, blk); err != nil 
{ + return err + } + } + } else { + // use running node + api, closer, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return xerrors.Errorf("connect to full node: %w", err) + } + defer closer() + + bs = blockstore.NewAPIBlockstore(api) + } + + cidA, err := cid.Parse(cctx.Args().Get(0)) + if err != nil { + return err + } + + cidB, err := cid.Parse(cctx.Args().Get(1)) + if err != nil { + return err + } + + cst := cbor.NewCborStore(bs) + + var keyParser func(k string) (interface{}, error) + switch cctx.String("key-type") { + case "uint": + keyParser = func(k string) (interface{}, error) { + return abi.ParseUIntKey(k) + } + case "actor": + keyParser = func(k string) (interface{}, error) { + return address.NewFromBytes([]byte(k)) + } + default: + return fmt.Errorf("unknown key type: %s", cctx.String("key-type")) + } + + diffs, err := hamt.Diff(cctx.Context, cst, cst, cidA, cidB, hamt.UseTreeBitWidth(cctx.Int("bitwidth"))) + if err != nil { + return err + } + + for _, d := range diffs { + switch d.Type { + case hamt.Add: + color.Green("+ Add %v", must.One(keyParser(d.Key))) + case hamt.Remove: + color.Red("- Remove %v", must.One(keyParser(d.Key))) + case hamt.Modify: + color.Yellow("~ Modify %v", must.One(keyParser(d.Key))) + } + } + + return nil + }, +} + +var diffAMTs = &cli.Command{ + Name: "amts", + Usage: "diff two AMTs", + ArgsUsage: " ", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "car-file", + Usage: "write a car file with two amts (use lotus-shed export-car)", + }, + &cli.UintFlag{ + Name: "bitwidth", + Usage: "bitwidth of the AMT", + Value: 5, + }, + }, + Action: func(cctx *cli.Context) error { + var bs blockstore.Blockstore = blockstore.NewMemorySync() + + if cctx.IsSet("car-file") { + f, err := os.Open(cctx.String("car-file")) + if err != nil { + return err + } + defer func(f *os.File) { + _ = f.Close() + }(f) + + cr, err := car.NewCarReader(f) + if err != nil { + return err + } + + for { + blk, err := cr.Next() + if err != nil { + if err == io.EOF { + break + } + return err + } + + if err := bs.Put(cctx.Context, blk); err != nil { + return err + } + } + } else { + // use running node + api, closer, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return xerrors.Errorf("connect to full node: %w", err) + } + defer closer() + + bs = blockstore.NewAPIBlockstore(api) + } + + cidA, err := cid.Parse(cctx.Args().Get(0)) + if err != nil { + return err + } + + cidB, err := cid.Parse(cctx.Args().Get(1)) + if err != nil { + return err + } + + cst := cbor.NewCborStore(bs) + + diffs, err := amt.Diff(cctx.Context, cst, cst, cidA, cidB, amt.UseTreeBitWidth(cctx.Uint("bitwidth"))) + if err != nil { + return err + } + + for _, d := range diffs { + switch d.Type { + case amt.Add: + color.Green("+ Add %v", d.Key) + case amt.Remove: + color.Red("- Remove %v", d.Key) + case amt.Modify: + color.Yellow("~ Modify %v", d.Key) + + var vb, va interface{} + err := cbor.DecodeInto(d.Before.Raw, &vb) + if err != nil { + return err + } + err = cbor.DecodeInto(d.After.Raw, &va) + if err != nil { + return err + } + + vjsonb, err := json.MarshalIndent(vb, " ", " ") + if err != nil { + return err + } + vjsona, err := json.MarshalIndent(va, " ", " ") + if err != nil { + return err + } + + linesb := bytes.Split(vjsonb, []byte("\n")) // - + linesa := bytes.Split(vjsona, []byte("\n")) // + + + maxLen := len(linesb) + if len(linesa) > maxLen { + maxLen = len(linesa) + } + + for i := 0; i < maxLen; i++ { + // Check if 'linesb' has run out of lines but 'linesa' hasn't + if i >= len(linesb) && i < len(linesa) { 
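
Editor's note: the `--key-type` flag on the new `diff hamts` command exists because HAMT keys in Filecoin state are raw bytes whose interpretation depends on the map being diffed: some maps use varint-encoded integer keys, while the state-tree actors HAMT is keyed by actor address bytes (which is how the migration diff further below renders its keys). A minimal helper mirroring that switch, using only the two parsers the command itself uses:

```
package example

import (
	"fmt"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-state-types/abi"
)

// parseHAMTKey renders a raw HAMT key either as a varint-encoded uint or as
// an actor address, matching the --key-type choices of `lotus-shed diff hamts`.
func parseHAMTKey(keyType, k string) (interface{}, error) {
	switch keyType {
	case "uint":
		return abi.ParseUIntKey(k)
	case "actor":
		return address.NewFromBytes([]byte(k))
	default:
		return nil, fmt.Errorf("unknown key type: %s", keyType)
	}
}
```
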
+ color.Green("+ %s\n", linesa[i]) + continue + } + // Check if 'linesa' has run out of lines but 'linesb' hasn't + if i >= len(linesa) && i < len(linesb) { + color.Red("- %s\n", linesb[i]) + continue + } + // Compare lines if both slices have lines at index i + if !bytes.Equal(linesb[i], linesa[i]) { + color.Red("- %s\n", linesb[i]) + color.Green("+ %s\n", linesa[i]) + } else { + // Print the line if it is the same in both slices + fmt.Printf(" %s\n", linesb[i]) + } + } + + } + } + + return nil + }, +} diff --git a/cmd/lotus-shed/main.go b/cmd/lotus-shed/main.go index e9f9f3b6bd1..4770f714597 100644 --- a/cmd/lotus-shed/main.go +++ b/cmd/lotus-shed/main.go @@ -91,6 +91,7 @@ func main() { FevmAnalyticsCmd, mismatchesCmd, blockCmd, + adlCmd, lpUtilCmd, } diff --git a/cmd/lotus-shed/migrations.go b/cmd/lotus-shed/migrations.go index 96e4747b7ef..febe833d75e 100644 --- a/cmd/lotus-shed/migrations.go +++ b/cmd/lotus-shed/migrations.go @@ -1,27 +1,41 @@ package main import ( + "bytes" "context" + "encoding/json" "fmt" "os" "path/filepath" "strconv" "time" + "github.com/fatih/color" + "github.com/ipfs/boxo/blockservice" + "github.com/ipfs/boxo/exchange/offline" + "github.com/ipfs/boxo/ipld/merkledag" "github.com/ipfs/go-cid" "github.com/ipfs/go-datastore" + cbornode "github.com/ipfs/go-ipld-cbor" + "github.com/ipld/go-car" "github.com/urfave/cli/v2" cbg "github.com/whyrusleeping/cbor-gen" "golang.org/x/xerrors" ffi "github.com/filecoin-project/filecoin-ffi" "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-amt-ipld/v4" + "github.com/filecoin-project/go-hamt-ipld/v3" "github.com/filecoin-project/go-state-types/abi" actorstypes "github.com/filecoin-project/go-state-types/actors" + "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/builtin" v10 "github.com/filecoin-project/go-state-types/builtin/v10" v11 "github.com/filecoin-project/go-state-types/builtin/v11" v12 "github.com/filecoin-project/go-state-types/builtin/v12" + v13 "github.com/filecoin-project/go-state-types/builtin/v13" + market13 "github.com/filecoin-project/go-state-types/builtin/v13/market" + adt13 "github.com/filecoin-project/go-state-types/builtin/v13/util/adt" market8 "github.com/filecoin-project/go-state-types/builtin/v8/market" adt8 "github.com/filecoin-project/go-state-types/builtin/v8/util/adt" v9 "github.com/filecoin-project/go-state-types/builtin/v9" @@ -53,6 +67,7 @@ import ( "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/vm" lcli "github.com/filecoin-project/lotus/cli" + "github.com/filecoin-project/lotus/lib/must" "github.com/filecoin-project/lotus/node/repo" "github.com/filecoin-project/lotus/storage/sealer/ffiwrapper" ) @@ -72,6 +87,9 @@ var migrationsCmd = &cli.Command{ &cli.BoolFlag{ Name: "check-invariants", }, + &cli.StringFlag{ + Name: "export-bad-migration", + }, }, Action: func(cctx *cli.Context) error { fmt.Println("REMINDER: If you are running this, you likely want to ALSO run the continuity testing tool!") @@ -215,6 +233,31 @@ var migrationsCmd = &cli.Command{ cachedMigrationTime := time.Since(startTime) if newCid1 != newCid2 { + { + if err := printStateDiff(ctx, network.Version(nv), newCid2, newCid1, bs); err != nil { + fmt.Println("failed to print state diff: ", err) + } + } + + if cctx.IsSet("export-bad-migration") { + fi, err := os.Create(cctx.String("export-bad-migration")) + if err != nil { + return xerrors.Errorf("opening the output file: %w", err) + } + + defer fi.Close() //nolint:errcheck + + 
roots := []cid.Cid{newCid1, newCid2} + + dag := merkledag.NewDAGService(blockservice.New(bs, offline.Exchange(bs))) + err = car.WriteCarWithWalker(ctx, dag, roots, fi, carWalkFunc) + if err != nil { + return err + } + + fmt.Println("exported bad migration to ", cctx.String("export-bad-migration")) + } + return xerrors.Errorf("got different results with and without the cache: %s, %s", newCid1, newCid2) } @@ -246,6 +289,8 @@ func getMigrationFuncsForNetwork(nv network.Version) (UpgradeActorsFunc, PreUpgr return filcns.UpgradeActorsV11, filcns.PreUpgradeActorsV11, checkNv19Invariants, nil case network.Version21: return filcns.UpgradeActorsV12, filcns.PreUpgradeActorsV12, checkNv21Invariants, nil + case network.Version22: + return filcns.UpgradeActorsV13, filcns.PreUpgradeActorsV13, checkNv22Invariants, nil default: return nil, nil, nil, xerrors.Errorf("migration not implemented for nv%d", nv) } @@ -255,6 +300,357 @@ type UpgradeActorsFunc = func(context.Context, *stmgr.StateManager, stmgr.Migrat type PreUpgradeActorsFunc = func(context.Context, *stmgr.StateManager, stmgr.MigrationCache, cid.Cid, abi.ChainEpoch, *types.TipSet) error type CheckInvariantsFunc = func(context.Context, cid.Cid, cid.Cid, blockstore.Blockstore, abi.ChainEpoch) error +func printStateDiff(ctx context.Context, nv network.Version, newCid1, newCid2 cid.Cid, bs blockstore.Blockstore) error { + // migration diff + var sra, srb types.StateRoot + cst := cbornode.NewCborStore(bs) + + if err := cst.Get(ctx, newCid1, &sra); err != nil { + return err + } + if err := cst.Get(ctx, newCid2, &srb); err != nil { + return err + } + + if sra.Version != srb.Version { + fmt.Println("state root versions do not match: ", sra.Version, srb.Version) + } + if sra.Info != srb.Info { + fmt.Println("state root infos do not match: ", sra.Info, srb.Info) + } + if sra.Actors != srb.Actors { + fmt.Println("state root actors do not match: ", sra.Actors, srb.Actors) + if err := printActorsDiff(ctx, cst, nv, sra.Actors, srb.Actors); err != nil { + return err + } + } + + return nil +} + +func printActorsDiff(ctx context.Context, cst *cbornode.BasicIpldStore, nv network.Version, a, b cid.Cid) error { + // actor diff, a b are a hamt + + diffs, err := hamt.Diff(ctx, cst, cst, a, b, hamt.UseTreeBitWidth(builtin.DefaultHamtBitwidth)) + if err != nil { + return err + } + + keyParser := func(k string) (interface{}, error) { + return address.NewFromBytes([]byte(k)) + } + + for _, d := range diffs { + switch d.Type { + case hamt.Add: + color.Green("+ Add %v", must.One(keyParser(d.Key))) + case hamt.Remove: + color.Red("- Remove %v", must.One(keyParser(d.Key))) + case hamt.Modify: + addr := must.One(keyParser(d.Key)).(address.Address) + color.Yellow("~ Modify %v", addr) + var aa, bb types.ActorV5 + + if err := aa.UnmarshalCBOR(bytes.NewReader(d.Before.Raw)); err != nil { + return err + } + if err := bb.UnmarshalCBOR(bytes.NewReader(d.After.Raw)); err != nil { + return err + } + + if err := printActorDiff(ctx, cst, nv, addr, aa, bb); err != nil { + return err + } + } + } + + return nil +} + +func printActorDiff(ctx context.Context, cst *cbornode.BasicIpldStore, nv network.Version, addr address.Address, a, b types.ActorV5) error { + if a.Code != b.Code { + fmt.Println(" Code: ", a.Code, b.Code) + } + if a.Head != b.Head { + fmt.Println(" Head: ", a.Head, b.Head) + } + if a.Nonce != b.Nonce { + fmt.Println(" Nonce: ", a.Nonce, b.Nonce) + } + if big.Cmp(a.Balance, b.Balance) == 0 { + fmt.Println(" Balance: ", a.Balance, b.Balance) + } + + switch addr.String() { + case 
"f05": + if err := printMarketActorDiff(ctx, cst, nv, a.Head, b.Head); err != nil { + return err + } + default: + fmt.Println("no logic to diff actor state for ", addr) + } + + return nil +} + +func printMarketActorDiff(ctx context.Context, cst *cbornode.BasicIpldStore, nv network.Version, a, b cid.Cid) error { + if nv != network.Version22 { + return xerrors.Errorf("market actor diff not implemented for nv%d", nv) + } + + var ma, mb market13.State + if err := cst.Get(ctx, a, &ma); err != nil { + return err + } + if err := cst.Get(ctx, b, &mb); err != nil { + return err + } + + if ma.Proposals != mb.Proposals { + fmt.Println(" Proposals: ", ma.Proposals, mb.Proposals) + } + if ma.States != mb.States { + fmt.Println(" States: ", ma.States, mb.States) + + // diff the AMTs + amtDiff, err := amt.Diff(ctx, cst, cst, ma.States, mb.States, amt.UseTreeBitWidth(market13.StatesAmtBitwidth)) + if err != nil { + return err + } + + proposalsArrA, err := adt13.AsArray(adt8.WrapStore(ctx, cst), ma.Proposals, market13.ProposalsAmtBitwidth) + if err != nil { + return err + } + proposalsArrB, err := adt13.AsArray(adt8.WrapStore(ctx, cst), mb.Proposals, market13.ProposalsAmtBitwidth) + if err != nil { + return err + } + + for _, d := range amtDiff { + switch d.Type { + case amt.Add: + color.Green(" state + Add %v", d.Key) + case amt.Remove: + color.Red(" state - Remove %v", d.Key) + case amt.Modify: + color.Yellow(" state ~ Modify %v", d.Key) + + var a, b market13.DealState + if err := a.UnmarshalCBOR(bytes.NewReader(d.Before.Raw)); err != nil { + return err + } + if err := b.UnmarshalCBOR(bytes.NewReader(d.After.Raw)); err != nil { + return err + } + + ja, err := json.Marshal(a) + if err != nil { + return err + } + jb, err := json.Marshal(b) + if err != nil { + return err + } + + fmt.Println(" A: ", string(ja)) + fmt.Println(" B: ", string(jb)) + + var propA, propB market13.DealProposal + + if _, err := proposalsArrA.Get(d.Key, &propA); err != nil { + return err + } + if _, err := proposalsArrB.Get(d.Key, &propB); err != nil { + return err + } + + pab, err := json.Marshal(propA) + if err != nil { + return err + } + pbb, err := json.Marshal(propB) + if err != nil { + return err + } + if string(pab) != string(pbb) { + fmt.Println(" PropA: ", string(pab)) + fmt.Println(" PropB: ", string(pbb)) + } else { + fmt.Println(" Prop: ", string(pab)) + } + + } + } + } + if ma.PendingProposals != mb.PendingProposals { + fmt.Println(" PendingProposals: ", ma.PendingProposals, mb.PendingProposals) + } + if ma.EscrowTable != mb.EscrowTable { + fmt.Println(" EscrowTable: ", ma.EscrowTable, mb.EscrowTable) + } + if ma.LockedTable != mb.LockedTable { + fmt.Println(" LockedTable: ", ma.LockedTable, mb.LockedTable) + } + if ma.NextID != mb.NextID { + fmt.Println(" NextID: ", ma.NextID, mb.NextID) + } + if ma.DealOpsByEpoch != mb.DealOpsByEpoch { + fmt.Println(" DealOpsByEpoch: ", ma.DealOpsByEpoch, mb.DealOpsByEpoch) + } + if ma.LastCron != mb.LastCron { + fmt.Println(" LastCron: ", ma.LastCron, mb.LastCron) + } + if ma.TotalClientLockedCollateral != mb.TotalClientLockedCollateral { + fmt.Println(" TotalClientLockedCollateral: ", ma.TotalClientLockedCollateral, mb.TotalClientLockedCollateral) + } + if ma.TotalProviderLockedCollateral != mb.TotalProviderLockedCollateral { + fmt.Println(" TotalProviderLockedCollateral: ", ma.TotalProviderLockedCollateral, mb.TotalProviderLockedCollateral) + } + if ma.TotalClientStorageFee != mb.TotalClientStorageFee { + fmt.Println(" TotalClientStorageFee: ", ma.TotalClientStorageFee, 
mb.TotalClientStorageFee) + } + if ma.PendingDealAllocationIds != mb.PendingDealAllocationIds { + fmt.Println(" PendingDealAllocationIds: ", ma.PendingDealAllocationIds, mb.PendingDealAllocationIds) + } + if ma.ProviderSectors != mb.ProviderSectors { + fmt.Println(" ProviderSectors: ", ma.ProviderSectors, mb.ProviderSectors) + + // diff the HAMTs + hamtDiff, err := hamt.Diff(ctx, cst, cst, ma.ProviderSectors, mb.ProviderSectors, hamt.UseTreeBitWidth(market13.ProviderSectorsHamtBitwidth)) + if err != nil { + return err + } + + for _, d := range hamtDiff { + spIDk := must.One(abi.ParseUIntKey(d.Key)) + + switch d.Type { + case hamt.Add: + color.Green(" ProviderSectors + Add f0%v", spIDk) + + var b cbg.CborCid + if err := b.UnmarshalCBOR(bytes.NewReader(d.After.Raw)); err != nil { + return err + } + + fmt.Println(" |-B: ", cid.Cid(b).String()) + + inner, err := adt13.AsMap(adt8.WrapStore(ctx, cst), cid.Cid(b), market13.ProviderSectorsHamtBitwidth) + if err != nil { + return err + } + + var ids market13.SectorDealIDs + err = inner.ForEach(&ids, func(k string) error { + sectorNumber := must.One(abi.ParseUIntKey(k)) + + color.Green(" |-- ProviderSectors + Add %v", sectorNumber) + fmt.Printf(" |+: %v\n", ids) + + return nil + }) + if err != nil { + return err + } + case hamt.Remove: + color.Red(" ProviderSectors - Remove f0%v", spIDk) + case hamt.Modify: + color.Yellow(" ProviderSectors ~ Modify f0%v", spIDk) + + var a, b cbg.CborCid + if err := a.UnmarshalCBOR(bytes.NewReader(d.Before.Raw)); err != nil { + return err + } + if err := b.UnmarshalCBOR(bytes.NewReader(d.After.Raw)); err != nil { + return err + } + + fmt.Println(" |-A: ", cid.Cid(b).String()) + fmt.Println(" |-B: ", cid.Cid(a).String()) + + // diff the inner HAMTs + innerHamtDiff, err := hamt.Diff(ctx, cst, cst, cid.Cid(a), cid.Cid(b), hamt.UseTreeBitWidth(market13.ProviderSectorsHamtBitwidth)) + if err != nil { + return err + } + + for _, d := range innerHamtDiff { + sectorNumber := must.One(abi.ParseUIntKey(d.Key)) + + switch d.Type { + case hamt.Add: + var b market13.SectorDealIDs + + if err := b.UnmarshalCBOR(bytes.NewReader(d.After.Raw)); err != nil { + return err + } + + color.Green(" |-- ProviderSectors + Add %v", sectorNumber) + fmt.Printf(" |B: %v\n", b) + case hamt.Remove: + var a market13.SectorDealIDs + + if err := a.UnmarshalCBOR(bytes.NewReader(d.Before.Raw)); err != nil { + return err + } + + color.Red(" |-- ProviderSectors - Remove %v", sectorNumber) + fmt.Printf(" |A: %v\n", a) + case hamt.Modify: + var a, b market13.SectorDealIDs + + if err := a.UnmarshalCBOR(bytes.NewReader(d.Before.Raw)); err != nil { + return err + } + if err := b.UnmarshalCBOR(bytes.NewReader(d.After.Raw)); err != nil { + return err + } + + color.Yellow(" |-- ProviderSectors ~ Modify %v", sectorNumber) + fmt.Printf(" |A: %v\n", a) + fmt.Printf(" |B: %v\n", b) + } + } + } + } + } + + return nil +} + +func checkNv22Invariants(ctx context.Context, oldStateRootCid cid.Cid, newStateRootCid cid.Cid, bs blockstore.Blockstore, epoch abi.ChainEpoch) error { + + actorStore := store.ActorStore(ctx, bs) + startTime := time.Now() + + // Load the new state root. 
+ var newStateRoot types.StateRoot + if err := actorStore.Get(ctx, newStateRootCid, &newStateRoot); err != nil { + return xerrors.Errorf("failed to decode state root: %w", err) + } + + actorCodeCids, err := actors.GetActorCodeIDs(actorstypes.Version13) + if err != nil { + return err + } + newActorTree, err := builtin.LoadTree(actorStore, newStateRoot.Actors) + if err != nil { + return err + } + messages, err := v13.CheckStateInvariants(newActorTree, epoch, actorCodeCids) + if err != nil { + return xerrors.Errorf("checking state invariants: %w", err) + } + + for _, message := range messages.Messages() { + fmt.Println("got the following error: ", message) + } + + fmt.Println("completed invariant checks, took ", time.Since(startTime)) + + return nil +} func checkNv21Invariants(ctx context.Context, oldStateRootCid cid.Cid, newStateRootCid cid.Cid, bs blockstore.Blockstore, epoch abi.ChainEpoch) error { actorStore := store.ActorStore(ctx, bs) diff --git a/cmd/lotus-shed/sectors.go b/cmd/lotus-shed/sectors.go index 899e0f290b4..176f232fe6a 100644 --- a/cmd/lotus-shed/sectors.go +++ b/cmd/lotus-shed/sectors.go @@ -31,6 +31,7 @@ import ( "github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/types" lcli "github.com/filecoin-project/lotus/cli" + "github.com/filecoin-project/lotus/cli/spcli" "github.com/filecoin-project/lotus/lib/parmap" "github.com/filecoin-project/lotus/node/repo" "github.com/filecoin-project/lotus/storage/paths" @@ -44,7 +45,7 @@ var sectorsCmd = &cli.Command{ Usage: "Tools for interacting with sectors", Flags: []cli.Flag{}, Subcommands: []*cli.Command{ - terminateSectorCmd, + spcli.TerminateSectorCmd(shedGetActor), terminateSectorPenaltyEstimationCmd, visAllocatedSectorsCmd, dumpRLESectorCmd, @@ -53,138 +54,14 @@ var sectorsCmd = &cli.Command{ }, } -var terminateSectorCmd = &cli.Command{ - Name: "terminate", - Usage: "Forcefully terminate a sector (WARNING: This means losing power and pay a one-time termination penalty(including collateral) for the terminated sector)", - ArgsUsage: "[sectorNum1 sectorNum2 ...]", - Flags: []cli.Flag{ - &cli.StringFlag{ - Name: "actor", - Usage: "specify the address of miner actor", - }, - &cli.BoolFlag{ - Name: "really-do-it", - Usage: "pass this flag if you know what you are doing", - }, - &cli.StringFlag{ - Name: "from", - Usage: "specify the address to send the terminate message from", - }, - }, - Action: func(cctx *cli.Context) error { - if cctx.NArg() < 1 { - return lcli.ShowHelp(cctx, fmt.Errorf("at least one sector must be specified")) - } - - var maddr address.Address - if act := cctx.String("actor"); act != "" { - var err error - maddr, err = address.NewFromString(act) - if err != nil { - return fmt.Errorf("parsing address %s: %w", act, err) - } - } - - if !cctx.Bool("really-do-it") { - return fmt.Errorf("this is a command for advanced users, only use it if you are sure of what you are doing") - } - - nodeApi, closer, err := lcli.GetFullNodeAPI(cctx) - if err != nil { - return err - } - defer closer() - - ctx := lcli.ReqContext(cctx) - - if maddr.Empty() { - minerApi, acloser, err := lcli.GetStorageMinerAPI(cctx) - if err != nil { - return err - } - defer acloser() - - maddr, err = minerApi.ActorAddress(ctx) - if err != nil { - return err - } - } - - mi, err := nodeApi.StateMinerInfo(ctx, maddr, types.EmptyTSK) - if err != nil { - return err - } - - terminationDeclarationParams := []miner2.TerminationDeclaration{} - - for _, sn := range cctx.Args().Slice() { - sectorNum, err := strconv.ParseUint(sn, 10, 
64) - if err != nil { - return fmt.Errorf("could not parse sector number: %w", err) - } - - sectorbit := bitfield.New() - sectorbit.Set(sectorNum) - - loca, err := nodeApi.StateSectorPartition(ctx, maddr, abi.SectorNumber(sectorNum), types.EmptyTSK) - if err != nil { - return fmt.Errorf("get state sector partition %s", err) - } - - para := miner2.TerminationDeclaration{ - Deadline: loca.Deadline, - Partition: loca.Partition, - Sectors: sectorbit, - } - - terminationDeclarationParams = append(terminationDeclarationParams, para) - } - - terminateSectorParams := &miner2.TerminateSectorsParams{ - Terminations: terminationDeclarationParams, - } - - sp, err := actors.SerializeParams(terminateSectorParams) - if err != nil { - return xerrors.Errorf("serializing params: %w", err) - } - - var fromAddr address.Address - if from := cctx.String("from"); from != "" { - var err error - fromAddr, err = address.NewFromString(from) - if err != nil { - return fmt.Errorf("parsing address %s: %w", from, err) - } - } else { - fromAddr = mi.Worker - } - - smsg, err := nodeApi.MpoolPushMessage(ctx, &types.Message{ - From: fromAddr, - To: maddr, - Method: builtin.MethodsMiner.TerminateSectors, - - Value: big.Zero(), - Params: sp, - }, nil) - if err != nil { - return xerrors.Errorf("mpool push message: %w", err) - } - - fmt.Println("sent termination message:", smsg.Cid()) - - wait, err := nodeApi.StateWaitMsg(ctx, smsg.Cid(), uint64(cctx.Int("confidence"))) - if err != nil { - return err - } - - if wait.Receipt.ExitCode.IsError() { - return fmt.Errorf("terminate sectors message returned exit %d", wait.Receipt.ExitCode) - } +func shedGetActor(cctx *cli.Context) (address.Address, error) { + minerApi, acloser, err := lcli.GetStorageMinerAPI(cctx) + if err != nil { + return address.Address{}, err + } + defer acloser() - return nil - }, + return minerApi.ActorAddress(cctx.Context) } func findPenaltyInInternalExecutions(prefix string, trace []types.ExecutionTrace) { diff --git a/cmd/lotus/main.go b/cmd/lotus/main.go index 85324e466b5..fce9a6136f4 100644 --- a/cmd/lotus/main.go +++ b/cmd/lotus/main.go @@ -13,6 +13,7 @@ import ( "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" lcli "github.com/filecoin-project/lotus/cli" + "github.com/filecoin-project/lotus/cli/clicommands" cliutil "github.com/filecoin-project/lotus/cli/util" "github.com/filecoin-project/lotus/lib/lotuslog" "github.com/filecoin-project/lotus/lib/tracing" @@ -112,7 +113,7 @@ func main() { return nil }, - Commands: append(local, lcli.Commands...), + Commands: append(local, clicommands.Commands...), } app.Setup() diff --git a/cmd/sptool/actor.go b/cmd/sptool/actor.go new file mode 100644 index 00000000000..fb0d5e96655 --- /dev/null +++ b/cmd/sptool/actor.go @@ -0,0 +1,140 @@ +package main + +import ( + "fmt" + "os" + "strings" + + "github.com/fatih/color" + "github.com/urfave/cli/v2" + + "github.com/filecoin-project/go-address" + + builtin2 "github.com/filecoin-project/lotus/chain/actors/builtin" + "github.com/filecoin-project/lotus/chain/types" + lcli "github.com/filecoin-project/lotus/cli" + "github.com/filecoin-project/lotus/cli/spcli" + "github.com/filecoin-project/lotus/lib/tablewriter" +) + +var actorCmd = &cli.Command{ + Name: "actor", + Usage: "Manage Filecoin Miner Actor Metadata", + Subcommands: []*cli.Command{ + spcli.ActorSetAddrsCmd(SPTActorGetter), + spcli.ActorWithdrawCmd(SPTActorGetter), + spcli.ActorRepayDebtCmd(SPTActorGetter), + spcli.ActorSetPeeridCmd(SPTActorGetter), + 
spcli.ActorSetOwnerCmd(SPTActorGetter), + spcli.ActorControlCmd(SPTActorGetter, actorControlListCmd(SPTActorGetter)), + spcli.ActorProposeChangeWorkerCmd(SPTActorGetter), + spcli.ActorConfirmChangeWorkerCmd(SPTActorGetter), + spcli.ActorCompactAllocatedCmd(SPTActorGetter), + spcli.ActorProposeChangeBeneficiaryCmd(SPTActorGetter), + spcli.ActorConfirmChangeBeneficiaryCmd(SPTActorGetter), + }, +} + +func actorControlListCmd(getActor spcli.ActorAddressGetter) *cli.Command { + return &cli.Command{ + Name: "list", + Usage: "Get currently set control addresses. Note: This excludes most roles as they are not known to the immediate chain state.", + Flags: []cli.Flag{ + &cli.BoolFlag{ + Name: "verbose", + }, + }, + Action: func(cctx *cli.Context) error { + api, acloser, err := lcli.GetFullNodeAPIV1(cctx) + if err != nil { + return err + } + defer acloser() + + ctx := lcli.ReqContext(cctx) + + maddr, err := getActor(cctx) + if err != nil { + return err + } + + mi, err := api.StateMinerInfo(ctx, maddr, types.EmptyTSK) + if err != nil { + return err + } + + tw := tablewriter.New( + tablewriter.Col("name"), + tablewriter.Col("ID"), + tablewriter.Col("key"), + tablewriter.Col("use"), + tablewriter.Col("balance"), + ) + + post := map[address.Address]struct{}{} + + for _, ca := range mi.ControlAddresses { + post[ca] = struct{}{} + } + + printKey := func(name string, a address.Address) { + var actor *types.Actor + if actor, err = api.StateGetActor(ctx, a, types.EmptyTSK); err != nil { + fmt.Printf("%s\t%s: error getting actor: %s\n", name, a, err) + return + } + b := actor.Balance + + var k = a + // 'a' maybe a 'robust', in that case, 'StateAccountKey' returns an error. + if builtin2.IsAccountActor(actor.Code) { + if k, err = api.StateAccountKey(ctx, a, types.EmptyTSK); err != nil { + fmt.Printf("%s\t%s: error getting account key: %s\n", name, a, err) + return + } + } + kstr := k.String() + if !cctx.Bool("verbose") { + if len(kstr) > 9 { + kstr = kstr[:6] + "..." 
+ } + } + + bstr := types.FIL(b).String() + switch { + case b.LessThan(types.FromFil(10)): + bstr = color.RedString(bstr) + case b.LessThan(types.FromFil(50)): + bstr = color.YellowString(bstr) + default: + bstr = color.GreenString(bstr) + } + + var uses []string + if a == mi.Worker { + uses = append(uses, color.YellowString("other")) + } + if _, ok := post[a]; ok { + uses = append(uses, color.GreenString("post")) + } + + tw.Write(map[string]interface{}{ + "name": name, + "ID": a, + "key": kstr, + "use": strings.Join(uses, " "), + "balance": bstr, + }) + } + + printKey("owner", mi.Owner) + printKey("worker", mi.Worker) + printKey("beneficiary", mi.Beneficiary) + for i, ca := range mi.ControlAddresses { + printKey(fmt.Sprintf("control-%d", i), ca) + } + + return tw.Flush(os.Stdout) + }, + } +} diff --git a/cmd/sptool/main.go b/cmd/sptool/main.go new file mode 100644 index 00000000000..7970b8db37c --- /dev/null +++ b/cmd/sptool/main.go @@ -0,0 +1,84 @@ +package main + +import ( + "context" + "fmt" + "os" + "os/signal" + + logging "github.com/ipfs/go-log/v2" + "github.com/urfave/cli/v2" + + "github.com/filecoin-project/go-address" + + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/cli/spcli" +) + +var log = logging.Logger("sptool") + +func main() { + local := []*cli.Command{ + actorCmd, + spcli.InfoCmd(SPTActorGetter), + sectorsCmd, + provingCmd, + //multiSigCmd, + } + + app := &cli.App{ + Name: "sptool", + Usage: "Manage Filecoin Miner Actor", + Version: build.UserVersion(), + Commands: local, + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "repo", + EnvVars: []string{"LOTUS_PATH"}, + Hidden: true, + Value: "~/.lotus", // TODO: Consider XDG_DATA_HOME + }, + &cli.StringFlag{ + Name: "log-level", + Value: "info", + }, + &cli.StringFlag{ + Name: "actor", + Required: os.Getenv("LOTUS_DOCS_GENERATION") != "1", + Usage: "miner actor to manage", + EnvVars: []string{"SP_ADDRESS"}, + }, + }, + Before: func(cctx *cli.Context) error { + return logging.SetLogLevel("sptool", cctx.String("sptool")) + }, + } + + // terminate early on ctrl+c + c := make(chan os.Signal, 1) + signal.Notify(c, os.Interrupt) + ctx, cancel := context.WithCancel(context.Background()) + go func() { + <-c + cancel() + fmt.Println("Received interrupt, shutting down... 
Press CTRL+C again to force shutdown") + <-c + fmt.Println("Forcing stop") + os.Exit(1) + }() + + if err := app.RunContext(ctx, os.Args); err != nil { + log.Errorf("%+v", err) + os.Exit(1) + return + } + +} + +func SPTActorGetter(cctx *cli.Context) (address.Address, error) { + addr, err := address.NewFromString(cctx.String("actor")) + if err != nil { + return address.Undef, fmt.Errorf("parsing address: %w", err) + } + return addr, nil +} diff --git a/cmd/sptool/proving.go b/cmd/sptool/proving.go new file mode 100644 index 00000000000..87c67b5f4e5 --- /dev/null +++ b/cmd/sptool/proving.go @@ -0,0 +1,18 @@ +package main + +import ( + "github.com/urfave/cli/v2" + + "github.com/filecoin-project/lotus/cli/spcli" +) + +var provingCmd = &cli.Command{ + Name: "proving", + Usage: "View proving information", + Subcommands: []*cli.Command{ + spcli.ProvingInfoCmd(SPTActorGetter), + spcli.ProvingDeadlinesCmd(SPTActorGetter), + spcli.ProvingDeadlineInfoCmd(SPTActorGetter), + spcli.ProvingFaultsCmd(SPTActorGetter), + }, +} diff --git a/cmd/sptool/sector.go b/cmd/sptool/sector.go new file mode 100644 index 00000000000..8f33053d3c0 --- /dev/null +++ b/cmd/sptool/sector.go @@ -0,0 +1,356 @@ +package main + +import ( + "fmt" + "os" + "sort" + + "github.com/docker/go-units" + "github.com/fatih/color" + cbor "github.com/ipfs/go-ipld-cbor" + "github.com/urfave/cli/v2" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-bitfield" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + + "github.com/filecoin-project/lotus/blockstore" + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/actors/policy" + "github.com/filecoin-project/lotus/chain/types" + lcli "github.com/filecoin-project/lotus/cli" + "github.com/filecoin-project/lotus/cli/spcli" + cliutil "github.com/filecoin-project/lotus/cli/util" + "github.com/filecoin-project/lotus/lib/tablewriter" +) + +var sectorsCmd = &cli.Command{ + Name: "sectors", + Usage: "interact with sector store", + Subcommands: []*cli.Command{ + spcli.SectorsStatusCmd(SPTActorGetter, nil), + sectorsListCmd, // in-house b/c chain-only is so different. 
Needs Curio *web* implementation + spcli.SectorPreCommitsCmd(SPTActorGetter), + spcli.SectorsCheckExpireCmd(SPTActorGetter), + sectorsExpiredCmd, // in-house b/c chain-only is so different + spcli.SectorsExtendCmd(SPTActorGetter), + //spcli.SectorsTerminateCmd(SPTActorGetter), // Could not trace through the state-machine + spcli.SectorsCompactPartitionsCmd(SPTActorGetter), + }} + +var sectorsExpiredCmd = &cli.Command{ + Name: "expired", + Usage: "Get or cleanup expired sectors", + Flags: []cli.Flag{ + &cli.Int64Flag{ + Name: "expired-epoch", + Usage: "epoch at which to check sector expirations", + DefaultText: "WinningPoSt lookback epoch", + }, + }, + Action: func(cctx *cli.Context) error { + fullApi, nCloser, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return xerrors.Errorf("getting fullnode api: %w", err) + } + defer nCloser() + ctx := lcli.ReqContext(cctx) + + head, err := fullApi.ChainHead(ctx) + if err != nil { + return xerrors.Errorf("getting chain head: %w", err) + } + + lbEpoch := abi.ChainEpoch(cctx.Int64("expired-epoch")) + if !cctx.IsSet("expired-epoch") { + nv, err := fullApi.StateNetworkVersion(ctx, head.Key()) + if err != nil { + return xerrors.Errorf("getting network version: %w", err) + } + + lbEpoch = head.Height() - policy.GetWinningPoStSectorSetLookback(nv) + if lbEpoch < 0 { + return xerrors.Errorf("too early to terminate sectors") + } + } + + if cctx.IsSet("confirm-remove-count") && !cctx.IsSet("expired-epoch") { + return xerrors.Errorf("--expired-epoch must be specified with --confirm-remove-count") + } + + lbts, err := fullApi.ChainGetTipSetByHeight(ctx, lbEpoch, head.Key()) + if err != nil { + return xerrors.Errorf("getting lookback tipset: %w", err) + } + + maddr, err := SPTActorGetter(cctx) + if err != nil { + return xerrors.Errorf("getting actor address: %w", err) + } + + // toCheck is a working bitfield which will only contain terminated sectors + toCheck := bitfield.New() + { + sectors, err := fullApi.StateMinerSectors(ctx, maddr, nil, lbts.Key()) + if err != nil { + return xerrors.Errorf("getting sector on chain info: %w", err) + } + + for _, sector := range sectors { + if sector.Expiration <= lbts.Height() { + toCheck.Set(uint64(sector.SectorNumber)) + } + } + } + + mact, err := fullApi.StateGetActor(ctx, maddr, lbts.Key()) + if err != nil { + return err + } + + tbs := blockstore.NewTieredBstore(blockstore.NewAPIBlockstore(fullApi), blockstore.NewMemory()) + mas, err := miner.Load(adt.WrapStore(ctx, cbor.NewCborStore(tbs)), mact) + if err != nil { + return err + } + + alloc, err := mas.GetAllocatedSectors() + if err != nil { + return xerrors.Errorf("getting allocated sectors: %w", err) + } + + // only allocated sectors can be expired, + toCheck, err = bitfield.IntersectBitField(toCheck, *alloc) + if err != nil { + return xerrors.Errorf("intersecting bitfields: %w", err) + } + + if err := mas.ForEachDeadline(func(dlIdx uint64, dl miner.Deadline) error { + return dl.ForEachPartition(func(partIdx uint64, part miner.Partition) error { + live, err := part.LiveSectors() + if err != nil { + return err + } + + toCheck, err = bitfield.SubtractBitField(toCheck, live) + if err != nil { + return err + } + + unproven, err := part.UnprovenSectors() + if err != nil { + return err + } + + toCheck, err = bitfield.SubtractBitField(toCheck, unproven) + + return err + }) + }); err != nil { + return err + } + + err = mas.ForEachPrecommittedSector(func(pci miner.SectorPreCommitOnChainInfo) error { + toCheck.Unset(uint64(pci.Info.SectorNumber)) + return nil + }) + if err 
!= nil { + return err + } + + // toCheck now only contains sectors which either failed to precommit or are expired/terminated + fmt.Printf("Sectors that either failed to precommit or are expired/terminated:\n") + + err = toCheck.ForEach(func(u uint64) error { + fmt.Println(abi.SectorNumber(u)) + + return nil + }) + if err != nil { + return err + } + + return nil + }, +} + +var sectorsListCmd = &cli.Command{ + Name: "list", + Usage: "List sectors", + Flags: []cli.Flag{ + /* + &cli.BoolFlag{ + Name: "show-removed", + Usage: "show removed sectors", + Aliases: []string{"r"}, + }, + &cli.BoolFlag{ + Name: "fast", + Usage: "don't show on-chain info for better performance", + Aliases: []string{"f"}, + }, + &cli.BoolFlag{ + Name: "events", + Usage: "display number of events the sector has received", + Aliases: []string{"e"}, + }, + &cli.BoolFlag{ + Name: "initial-pledge", + Usage: "display initial pledge", + Aliases: []string{"p"}, + }, + &cli.BoolFlag{ + Name: "seal-time", + Usage: "display how long it took for the sector to be sealed", + Aliases: []string{"t"}, + }, + &cli.StringFlag{ + Name: "states", + Usage: "filter sectors by a comma-separated list of states", + }, + &cli.BoolFlag{ + Name: "unproven", + Usage: "only show sectors which aren't in the 'Proving' state", + Aliases: []string{"u"}, + }, + */ + }, + Subcommands: []*cli.Command{ + //sectorsListUpgradeBoundsCmd, + }, + Action: func(cctx *cli.Context) error { + fullApi, closer2, err := lcli.GetFullNodeAPI(cctx) // TODO: consider storing full node address in config + if err != nil { + return err + } + defer closer2() + + ctx := lcli.ReqContext(cctx) + + maddr, err := SPTActorGetter(cctx) + if err != nil { + return err + } + + head, err := fullApi.ChainHead(ctx) + if err != nil { + return err + } + + activeSet, err := fullApi.StateMinerActiveSectors(ctx, maddr, head.Key()) + if err != nil { + return err + } + activeIDs := make(map[abi.SectorNumber]struct{}, len(activeSet)) + for _, info := range activeSet { + activeIDs[info.SectorNumber] = struct{}{} + } + + sset, err := fullApi.StateMinerSectors(ctx, maddr, nil, head.Key()) + if err != nil { + return err + } + commitedIDs := make(map[abi.SectorNumber]struct{}, len(sset)) + for _, info := range sset { + commitedIDs[info.SectorNumber] = struct{}{} + } + + sort.Slice(sset, func(i, j int) bool { + return sset[i].SectorNumber < sset[j].SectorNumber + }) + + tw := tablewriter.New( + tablewriter.Col("ID"), + tablewriter.Col("State"), + tablewriter.Col("OnChain"), + tablewriter.Col("Active"), + tablewriter.Col("Expiration"), + tablewriter.Col("SealTime"), + tablewriter.Col("Events"), + tablewriter.Col("Deals"), + tablewriter.Col("DealWeight"), + tablewriter.Col("VerifiedPower"), + tablewriter.Col("Pledge"), + tablewriter.NewLineCol("Error"), + tablewriter.NewLineCol("RecoveryTimeout")) + + fast := cctx.Bool("fast") + + for _, st := range sset { + s := st.SectorNumber + + _, inSSet := commitedIDs[s] + _, inASet := activeIDs[s] + + const verifiedPowerGainMul = 9 + dw, vp := .0, .0 + { + rdw := big.Add(st.DealWeight, st.VerifiedDealWeight) + dw = float64(big.Div(rdw, big.NewInt(int64(st.Expiration-st.Activation))).Uint64()) + vp = float64(big.Div(big.Mul(st.VerifiedDealWeight, big.NewInt(verifiedPowerGainMul)), big.NewInt(int64(st.Expiration-st.Activation))).Uint64()) + } + + var deals int + for _, deal := range st.DealIDs { + if deal != 0 { + deals++ + } + } + + exp := st.Expiration + // if st.OnTime > 0 && st.OnTime < exp { + // exp = st.OnTime // Can be different when the sector was CC upgraded 
+ // } + + m := map[string]interface{}{ + "ID": s, + //"State": color.New(spcli.StateOrder[sealing.SectorState(st.State)].Col).Sprint(st.State), + "OnChain": yesno(inSSet), + "Active": yesno(inASet), + } + + if deals > 0 { + m["Deals"] = color.GreenString("%d", deals) + } else { + m["Deals"] = color.BlueString("CC") + // if st.ToUpgrade { + // m["Deals"] = color.CyanString("CC(upgrade)") + // } + } + + if !fast { + if !inSSet { + m["Expiration"] = "n/a" + } else { + m["Expiration"] = cliutil.EpochTime(head.Height(), exp) + // if st.Early > 0 { + // m["RecoveryTimeout"] = color.YellowString(cliutil.EpochTime(head.Height(), st.Early)) + // } + } + if inSSet && cctx.Bool("initial-pledge") { + m["Pledge"] = types.FIL(st.InitialPledge).Short() + } + } + + if !fast && deals > 0 { + m["DealWeight"] = units.BytesSize(dw) + if vp > 0 { + m["VerifiedPower"] = color.GreenString(units.BytesSize(vp)) + } + } + + tw.Write(m) + } + + return tw.Flush(os.Stdout) + }, +} + +func yesno(b bool) string { + if b { + return color.GreenString("YES") + } + return color.RedString("NO") +} diff --git a/curiosrc/ffi/sdr_funcs.go b/curiosrc/ffi/sdr_funcs.go index 52d90f70d36..74a3270cf73 100644 --- a/curiosrc/ffi/sdr_funcs.go +++ b/curiosrc/ffi/sdr_funcs.go @@ -80,7 +80,26 @@ func (l *storageProvider) AcquireSector(ctx context.Context, taskID *harmonytask paths = resv.Paths storageIDs = resv.PathIDs releaseStorage = resv.Release + + if len(existing.AllSet()) > 0 { + // there are some "existing" files in the reservation. Some of them may need fetching, so call l.storage.AcquireSector + // (which unlike in the reservation code will be called on the paths.Remote instance) to ensure that the files are + // present locally. Note that we do not care about 'allocate' reqeuests, those files don't exist, and are just + // proposed paths with a reservation of space. 
+ + _, checkPathIDs, err := l.storage.AcquireSector(ctx, sector, existing, storiface.FTNone, sealing, storiface.AcquireMove, storiface.AcquireInto(storiface.PathsWithIDs{Paths: paths, IDs: storageIDs})) + if err != nil { + return storiface.SectorPaths{}, nil, xerrors.Errorf("acquire reserved existing files: %w", err) + } + + // assert that checkPathIDs is the same as storageIDs + if storageIDs.Subset(existing) != checkPathIDs.Subset(existing) { + return storiface.SectorPaths{}, nil, xerrors.Errorf("acquire reserved existing files: pathIDs mismatch %#v != %#v", storageIDs, checkPathIDs) + } + } } else { + // No related reservation, acquire storage as usual + var err error paths, storageIDs, err = l.storage.AcquireSector(ctx, sector, existing, allocate, sealing, storiface.AcquireMove) if err != nil { @@ -142,20 +161,7 @@ func (sb *SealCalls) GenerateSDR(ctx context.Context, taskID harmonytask.TaskID, return nil } -func (sb *SealCalls) TreeD(ctx context.Context, sector storiface.SectorRef, size abi.PaddedPieceSize, data io.Reader, unpaddedData bool) (cid.Cid, error) { - maybeUns := storiface.FTNone - // todo sectors with data - - paths, releaseSector, err := sb.sectors.AcquireSector(ctx, nil, sector, storiface.FTCache, maybeUns, storiface.PathSealing) - if err != nil { - return cid.Undef, xerrors.Errorf("acquiring sector paths: %w", err) - } - defer releaseSector() - - return proof.BuildTreeD(data, unpaddedData, filepath.Join(paths.Cache, proofpaths.TreeDName), size) -} - -func (sb *SealCalls) TreeRC(ctx context.Context, sector storiface.SectorRef, unsealed cid.Cid) (cid.Cid, cid.Cid, error) { +func (sb *SealCalls) TreeDRC(ctx context.Context, sector storiface.SectorRef, unsealed cid.Cid, size abi.PaddedPieceSize, data io.Reader, unpaddedData bool) (cid.Cid, cid.Cid, error) { p1o, err := sb.makePhase1Out(unsealed, sector.ProofType) if err != nil { return cid.Undef, cid.Undef, xerrors.Errorf("make phase1 output: %w", err) @@ -167,6 +173,15 @@ func (sb *SealCalls) TreeRC(ctx context.Context, sector storiface.SectorRef, uns } defer releaseSector() + treeDUnsealed, err := proof.BuildTreeD(data, unpaddedData, filepath.Join(paths.Cache, proofpaths.TreeDName), size) + if err != nil { + return cid.Undef, cid.Undef, xerrors.Errorf("building tree-d: %w", err) + } + + if treeDUnsealed != unsealed { + return cid.Undef, cid.Undef, xerrors.Errorf("tree-d cid mismatch with supplied unsealed cid") + } + { // create sector-sized file at paths.Sealed; PC2 transforms it into a sealed sector in-place ssize, err := sector.ProofType.SectorSize() @@ -210,7 +225,16 @@ func (sb *SealCalls) TreeRC(ctx context.Context, sector storiface.SectorRef, uns } } - return ffi.SealPreCommitPhase2(p1o, paths.Cache, paths.Sealed) + sl, uns, err := ffi.SealPreCommitPhase2(p1o, paths.Cache, paths.Sealed) + if err != nil { + return cid.Undef, cid.Undef, xerrors.Errorf("computing seal proof: %w", err) + } + + if uns != unsealed { + return cid.Undef, cid.Undef, xerrors.Errorf("unsealed cid changed after sealing") + } + + return sl, uns, nil } func (sb *SealCalls) GenerateSynthPoRep() { diff --git a/curiosrc/ffi/task_storage.go b/curiosrc/ffi/task_storage.go index ddc5e00a3b4..30dd904179e 100644 --- a/curiosrc/ffi/task_storage.go +++ b/curiosrc/ffi/task_storage.go @@ -10,7 +10,6 @@ import ( "github.com/filecoin-project/lotus/lib/harmony/harmonytask" "github.com/filecoin-project/lotus/lib/harmony/resources" - "github.com/filecoin-project/lotus/lib/must" "github.com/filecoin-project/lotus/storage/sealer/storiface" ) @@ -111,6 +110,11 @@ 
func (t *TaskStorage) HasCapacity() bool { } func (t *TaskStorage) Claim(taskID int) error { + // TaskStorage Claim Attempts to reserve storage for the task + // A: Create a reservation for files to be allocated + // B: Create a reservation for existing files to be fetched into local storage + // C: Create a reservation for existing files in local storage which may be extended (e.g. sector cache when computing Trees) + ctx := context.Background() sectorRef, err := t.taskToSectorRef(harmonytask.TaskID(taskID)) @@ -121,7 +125,7 @@ func (t *TaskStorage) Claim(taskID int) error { // storage writelock sector lkctx, cancel := context.WithCancel(ctx) - allocate := storiface.FTCache + requestedTypes := t.alloc | t.existing lockAcquireTimuout := time.Second * 10 lockAcquireTimer := time.NewTimer(lockAcquireTimuout) @@ -135,7 +139,7 @@ func (t *TaskStorage) Claim(taskID int) error { } }() - if err := t.sc.sectors.sindex.StorageLock(lkctx, sectorRef.ID(), storiface.FTNone, allocate); err != nil { + if err := t.sc.sectors.sindex.StorageLock(lkctx, sectorRef.ID(), storiface.FTNone, requestedTypes); err != nil { // timer will expire return xerrors.Errorf("claim StorageLock: %w", err) } @@ -149,39 +153,18 @@ func (t *TaskStorage) Claim(taskID int) error { lockAcquireTimer.Reset(0) }() - // find anywhere - // if found return nil, for now - s, err := t.sc.sectors.sindex.StorageFindSector(ctx, sectorRef.ID(), allocate, must.One(sectorRef.RegSealProof.SectorSize()), false) - if err != nil { - return xerrors.Errorf("claim StorageFindSector: %w", err) - } - - lp, err := t.sc.sectors.localStore.Local(ctx) - if err != nil { - return err - } - - // see if there are any non-local sector files in storage - for _, info := range s { - for _, l := range lp { - if l.ID == info.ID { - continue - } - - // TODO: Create reservation for fetching; This will require quite a bit more refactoring, but for now we'll - // only care about new allocations - return nil - } - } - - // acquire a path to make a reservation in - pathsFs, pathIDs, err := t.sc.sectors.localStore.AcquireSector(ctx, sectorRef.Ref(), storiface.FTNone, allocate, storiface.PathSealing, storiface.AcquireMove) + // First see what we have locally. We are putting allocate and existing together because local acquire will look + // for existing files for allocate requests, separately existing files which aren't found locally will be need to + // be fetched, so we will need to create reservations for that too. + // NOTE localStore.AcquireSector does not open or create any files, nor does it reserve space. It only proposes + // paths to be used. 
+ pathsFs, pathIDs, err := t.sc.sectors.localStore.AcquireSector(ctx, sectorRef.Ref(), storiface.FTNone, requestedTypes, t.pathType, storiface.AcquireMove) if err != nil { return err } // reserve the space - release, err := t.sc.sectors.localStore.Reserve(ctx, sectorRef.Ref(), allocate, pathIDs, storiface.FSOverheadSeal) + release, err := t.sc.sectors.localStore.Reserve(ctx, sectorRef.Ref(), requestedTypes, pathIDs, storiface.FSOverheadSeal) if err != nil { return err } diff --git a/curiosrc/seal/task_submit_precommit.go b/curiosrc/seal/task_submit_precommit.go index 9cc8d446b6b..0f896cb93e1 100644 --- a/curiosrc/seal/task_submit_precommit.go +++ b/curiosrc/seal/task_submit_precommit.go @@ -28,6 +28,7 @@ import ( ) type SubmitPrecommitTaskApi interface { + ChainHead(context.Context) (*types.TipSet, error) StateMinerPreCommitDepositForPower(context.Context, address.Address, miner.SectorPreCommitInfo, types.TipSetKey) (big.Int, error) StateMinerInfo(context.Context, address.Address, types.TipSetKey) (api.MinerInfo, error) StateNetworkVersion(context.Context, types.TipSetKey) (network.Version, error) @@ -59,6 +60,8 @@ func NewSubmitPrecommitTask(sp *SealPoller, db *harmonydb.DB, api SubmitPrecommi func (s *SubmitPrecommitTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done bool, err error) { ctx := context.Background() + // 1. Load sector info + var sectorParamsArr []struct { SpID int64 `db:"sp_id"` SectorNumber int64 `db:"sector_number"` @@ -96,6 +99,8 @@ func (s *SubmitPrecommitTask) Do(taskID harmonytask.TaskID, stillOwned func() bo return false, xerrors.Errorf("parsing unsealed CID: %w", err) } + // 2. Prepare message params + params := miner.PreCommitSectorBatchParams2{} expiration := sectorParams.TicketEpoch + miner12.MaxSectorExpirationExtension @@ -157,6 +162,26 @@ func (s *SubmitPrecommitTask) Do(taskID harmonytask.TaskID, stillOwned func() bo params.Sectors[0].Expiration = minExpiration } + // 3. Check precommit + + { + record, err := s.checkPrecommit(ctx, params) + if err != nil { + if record { + _, perr := s.db.Exec(ctx, `UPDATE sectors_sdr_pipeline + SET failed = TRUE, failed_at = NOW(), failed_reason = 'precommit-check', failed_reason_msg = $1 + WHERE task_id_precommit_msg = $2`, err.Error(), taskID) + if perr != nil { + return false, xerrors.Errorf("persisting precommit check error: %w", perr) + } + } + + return record, xerrors.Errorf("checking precommit: %w", err) + } + } + + // 4. 
Prepare and send message + var pbuf bytes.Buffer if err := params.MarshalCBOR(&pbuf); err != nil { return false, xerrors.Errorf("serializing params: %w", err) @@ -210,6 +235,29 @@ func (s *SubmitPrecommitTask) Do(taskID harmonytask.TaskID, stillOwned func() bo return true, nil } +func (s *SubmitPrecommitTask) checkPrecommit(ctx context.Context, params miner.PreCommitSectorBatchParams2) (record bool, err error) { + if len(params.Sectors) != 1 { + return false, xerrors.Errorf("expected 1 sector") + } + + preCommitInfo := params.Sectors[0] + + head, err := s.api.ChainHead(ctx) + if err != nil { + return false, xerrors.Errorf("getting chain head: %w", err) + } + height := head.Height() + + //never commit P2 message before, check ticket expiration + ticketEarliest := height - policy.MaxPreCommitRandomnessLookback + + if preCommitInfo.SealRandEpoch < ticketEarliest { + return true, xerrors.Errorf("ticket expired: seal height: %d, head: %d", preCommitInfo.SealRandEpoch+policy.SealRandomnessLookback, height) + } + + return true, nil +} + func (s *SubmitPrecommitTask) CanAccept(ids []harmonytask.TaskID, engine *harmonytask.TaskEngine) (*harmonytask.TaskID, error) { id := ids[0] return &id, nil diff --git a/curiosrc/seal/task_trees.go b/curiosrc/seal/task_trees.go index fa22f8d4a3d..ccf9ab5aa37 100644 --- a/curiosrc/seal/task_trees.go +++ b/curiosrc/seal/task_trees.go @@ -186,22 +186,12 @@ func (t *TreesTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done ProofType: sectorParams.RegSealProof, } - // D - treeUnsealed, err := t.sc.TreeD(ctx, sref, abi.PaddedPieceSize(ssize), dataReader, unpaddedData) - if err != nil { - return false, xerrors.Errorf("computing tree d: %w", err) - } - - // R / C - sealed, unsealed, err := t.sc.TreeRC(ctx, sref, commd) + // D / R / C + sealed, unsealed, err := t.sc.TreeDRC(ctx, sref, commd, abi.PaddedPieceSize(ssize), dataReader, unpaddedData) if err != nil { return false, xerrors.Errorf("computing tree r and c: %w", err) } - if unsealed != treeUnsealed { - return false, xerrors.Errorf("tree-d and tree-r/c unsealed CIDs disagree") - } - // todo synth porep // todo porep challenge check @@ -228,13 +218,19 @@ func (t *TreesTask) CanAccept(ids []harmonytask.TaskID, engine *harmonytask.Task } func (t *TreesTask) TypeDetails() harmonytask.TaskTypeDetails { + ssize := abi.SectorSize(32 << 30) // todo task details needs taskID to get correct sector size + if isDevnet { + ssize = abi.SectorSize(2 << 20) + } + return harmonytask.TaskTypeDetails{ Max: t.max, Name: "SDRTrees", Cost: resources.Resources{ - Cpu: 1, - Gpu: 1, - Ram: 8000 << 20, // todo + Cpu: 1, + Gpu: 1, + Ram: 8000 << 20, // todo + Storage: t.sc.Storage(t.taskToSector, storiface.FTSealed, storiface.FTCache, ssize, storiface.PathSealing), }, MaxFailures: 3, Follows: nil, @@ -245,6 +241,21 @@ func (t *TreesTask) Adder(taskFunc harmonytask.AddTaskFunc) { t.sp.pollers[pollerTrees].Set(taskFunc) } +func (t *TreesTask) taskToSector(id harmonytask.TaskID) (ffi.SectorRef, error) { + var refs []ffi.SectorRef + + err := t.db.Select(context.Background(), &refs, `SELECT sp_id, sector_number, reg_seal_proof FROM sectors_sdr_pipeline WHERE task_id_tree_r = $1`, id) + if err != nil { + return ffi.SectorRef{}, xerrors.Errorf("getting sector ref: %w", err) + } + + if len(refs) != 1 { + return ffi.SectorRef{}, xerrors.Errorf("expected 1 sector ref, got %d", len(refs)) + } + + return refs[0], nil +} + type UrlPieceReader struct { Url string RawSize int64 // the exact number of bytes read, if we read more or less that's an 
error diff --git a/documentation/en/actor-events-api.md b/documentation/en/actor-events-api.md new file mode 100644 index 00000000000..801d101b59d --- /dev/null +++ b/documentation/en/actor-events-api.md @@ -0,0 +1,430 @@ +# Actor Events and Lotus APIs + +* [Background](#background) +* [ActorEvent structure](#actorevent-structure) +* [Querying Lotus for ActorEvents](#querying-lotus-for-actorevents) +* [Retrieving events from message receipts](#retrieving-events-from-message-receipts) +* [Current builtin actor event schemas](#current-builtin-actor-event-schemas) + * [Verified registry actor events](#verified-registry-actor-events) + * [Verifier balance](#verifier-balance) + * [Allocation](#allocation) + * [Allocation removed](#allocation-removed) + * [Claim](#claim) + * [Claim updated](#claim-updated) + * [Claim removed](#claim-removed) + * [Market actor events](#market-actor-events) + * [Deal published](#deal-published) + * [Deal activated](#deal-activated) + * [Deal terminated](#deal-terminated) + * [Deal completed](#deal-completed) + * [Miner actor events](#miner-actor-events) + * [Sector precommitted](#sector-precommitted) + * [Sector activated](#sector-activated) + * [Sector updated](#sector-updated) + * [Sector terminated](#sector-terminated) + +## Background + +Actor events are a fire-and-forget mechanism for actors in Filecoin to signal events that occur during execution of their methods to external observers. Actor events are intended to be used by tooling and applications that need to observe and react to events that occur within the chain. The events themselves are not stored in chain state, although a root CID for an array (AMT) of all events emitted for a single message is recorded on message receipts, which are themselves referenced as an array (AMT) in the `ParentMessageReceipts` in each `BlockHeader` of a tipset. A node may optionally retain historical events for querying, but this is not guaranteed and not essential as it does not affect the chain state. + +The FVM already has this capability and new events for builtin actors have been added to support a range of new features, starting at network version 22 with a focus on some information gaps for consumers of data onboarding activity insight due to the introduction of [Direct Data Onboarding (DDO)](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0076.md), plus some additional events related to data onboarding, deal lifecycles, sector lifecycles, and DataCap activity. Additional events are expected to be added in the future to support other features and use cases. + +Builtin actor events share basic similarities to the existing events emitted by user-programmed actors in FVM, but each have a specific schema that reflects their specific concerns. They also all use CBOR encoding for their values. There are also new APIs in Lotus to support querying for these events that bear some similarities to the existing FEVM `Eth*` APIs for querying events but are unique to builtin actors. + +## ActorEvent structure + +Introduced in [FIP-0049](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0049.md), events use a structured logging style of composition, containing a list of entries that define properties of the event. The log entries are described below as `EventEntry` and have the same schema for user-programmed and builtin actor events. `ActorEvent` is specifically for representing builtin actor events and includes the list of entries, the actor that emitted the event, and some metadata about the event. 
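+
+To make the "list of entries" shape concrete before the formal schema below, here is a rough sketch of how a single event might be carried. The Go struct is only an illustrative mirror of the schema (Lotus has its own internal types for this), and the `allocation`-style entries and values shown are hypothetical:
+
+```go
+package main
+
+import "fmt"
+
+// EventEntry is an illustrative mirror of the IPLD schema below; it is not
+// the type Lotus itself uses.
+type EventEntry struct {
+    Flags byte   // indexing hints; see the flag values described below
+    Key   string // entry key, e.g. "$type", "id", "client", "provider"
+    Codec uint64 // IPLD codec of Value; 0x51 (CBOR) for builtin actor events
+    Value []byte // value encoded with Codec (CBOR for builtin actor events)
+}
+
+func main() {
+    // A hypothetical "allocation" event is carried as a flat list of entries
+    // rather than as a map; repeated keys are allowed.
+    entries := []EventEntry{
+        {Flags: 0x03, Key: "$type", Codec: 0x51, Value: nil /* CBOR "allocation" */},
+        {Flags: 0x03, Key: "id", Codec: 0x51, Value: nil /* CBOR uint */},
+        {Flags: 0x03, Key: "client", Codec: 0x51, Value: nil /* CBOR uint */},
+        {Flags: 0x03, Key: "provider", Codec: 0x51, Value: nil /* CBOR uint */},
+    }
+    fmt.Println(len(entries), "entries")
+}
+```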
+
+```ipldsch
+type ActorEvent struct {
+    entries [EventEntry] # Event entries in log form.
+    emitter Address # Filecoin address of the actor that emitted this event.
+    # Reverted is set to true if the message that produced this event was reverted because of a
+    # network re-org; in that case, the event should be considered as reverted as well.
+    reverted Bool
+    height ChainEpoch # Height of the tipset that contained the message that produced this event.
+    tipsetCid &Any # CID of the tipset that contained the message that produced this event.
+    msgCid &Any # CID of message that produced this event.
+}
+
+type EventEntry struct {
+    flags Int # A bitmap conveying metadata or hints about this entry.
+    key String # The key of this entry.
+    codec Int # The value's IPLD codec.
+    value Bytes # The value of this entry as a byte string, encoded with 'codec'.
+}
+```
+
+A `flags` field is used to convey metadata or hints about the entry; currently this is used to provide an indication of the suitability of that field for indexing. Suitability for indexing is only a hint, and typically relates to the queryability of the content of that field.
+
+* A `flag` of `0x00` indicates that neither the key nor value are suitable for indexing.
+* A `flag` of `0x01` indicates that the key only is suitable for indexing.
+* A `flag` of `0x02` indicates that the value is suitable for indexing.
+* A `flag` of `0x03` indicates that both the key and value are suitable for indexing.
+
+Typically, events contain entries that use either `0x01` or `0x03` flags.
+
+The structured logging style of composition should be seen in contrast to an alternative representation as a plain map or struct where the keys represent the fields of the event and the values represent the values of those fields. Some entries may duplicate keys, in which case that particular field of the event could be represented as an array. Builtin actor events are sufficiently well defined that translation to such a format is possible, but left up to the user.
+
+## Querying Lotus for ActorEvents
+
+Two Lotus APIs are provided that can be used to obtain direct access to events stored on the node being queried (a node may not have all historical events stored and available for query):
+
+- **[`GetActorEventsRaw`](https://github.com/filecoin-project/lotus/blob/master/documentation/en/api-v1-unstable-methods.md#GetActorEventsRaw)** will return all available historical actor events that match a given *filter* argument.
+- **[`SubscribeActorEventsRaw`](https://github.com/filecoin-project/lotus/blob/master/documentation/en/api-v1-unstable-methods.md#SubscribeActorEventsRaw)** will return a long-lived stream providing all available actor events that match a given *filter* argument as they are generated, optionally also providing a list of historical events. This API is available via websocket from the Lotus API RPC.
+
+Both APIs take an `EventFilter` as an argument to determine which events to return. This event filter optionally comprises the following:
+
+- `fromEpoch` determines when to start looking for matching events, either an epoch (in hex form), the string `earliest` or `latest`. A node is not guaranteed to have historical blocks for a particular epoch; however, `earliest` is intended to provide events from the beginning of the available list.
+- `toEpoch` determines when to stop looking for matching events, either an epoch (in hex form), the string `earliest` or `latest`.
+- `addresses` will match a list of addresses that an event comes *from* (currently just a builtin actor address).
+- `fields` is a key-to-value mapping that matches specific event entries. Each field being matched is a property in the `fields` map and the value of that property is an array of maps, where each entry is a possible matching value for that field. Each possible match contains a `codec` integer (currently just CBOR `0x51` for builtin actor events described in this document) and a `value` bytes blob (Base64 encoded) of the encoded field value (e.g. a Base64 encoded form of a CBOR encoded key string, such as an actor ID or an event ID). Matching first involves finding if an event’s entries contain one of the desired `key`s, then checking that one of the value matchers for that `key` field matches the value. Value matching is performed both on the `codec` and the `value` bytes. If an event’s entry is matched, the entire event is considered a match. This may be used to query for particular event types, such as `allocation`.
+An example `fields` with a single matcher would look like: `"fields": { "abc": [{ "codec": 81, "value": "ZGRhdGE=" }]}` where the key being matched is `abc` with the CBOR codec (`0x51` = `81`) and the value is the unicode string `data` encoded as CBOR (then encoded in Base64 to supply to the filter).
+- `tipsetCid` matches a particular TipSet. If this is provided, both `fromEpoch` and `toEpoch` will be ignored.
+
+Described as an [IPLD Schema](https://ipld.io/docs/schemas/), the event filter is:
+
+```ipldsch
+type EventFilter struct {
+    fromEpoch optional String
+    toEpoch optional String
+    addresses optional [Address]
+    fields optional {String:[ActorEventValue]}
+    tipsetCid optional &Any
+}
+
+type Address string # Address of an actor
+
+type ActorEventValue struct {
+    codec Int # typically the CBOR codec (0x51)
+    value Bytes # typically the CBOR encoded value
+}
+```
+
+## Retrieving events from message receipts
+
+The Lotus API `ChainGetEvents` can be used to retrieve events given an event root CID. This CID is attached to the message receipt that generated the events. The `StateSearchMsg` API can be used to retrieve the message receipt given a message CID; the receipt contains the `EventsRoot` CID. The events returned from `ChainGetEvents` contain roughly the same information as the `ActorEvent` structure, including the `EventEntry` log array.
+
+## Current builtin actor event schemas
+
+Schemas for currently implemented builtin actor events are provided below. They follow the log structure, where each line in the schema table represents an `EventEntry` in the `ActorEvent` entry list. For simplicity, the `flags` are presented as either `k` for `0x01` (index key) or `kv` for `0x03` (index key and value), and the `codec` is always `0x51` for builtin actors so is omitted.
+
+_Note that the "bigint" CBOR encoding format used below is the same as is used for encoding bigints on the Filecoin chain: a byte array representing a big-endian unsigned integer, compatible with the Golang `big.Int` byte representation, with a `0x00` (positive) or `0x01` (negative) prefix; with a zero-length array representing a value of `0`._
+
+The following events are defined in FIP-0083. Additional events will be added here as they are accepted in future FIPs.
+
+### Verified registry actor events
+
+#### Verifier balance
+
+The `verifier-balance` event is emitted when the balance of a verifier is updated in the Verified Registry actor.
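+
+As an example of the `fields` matcher described above, a query for just this event type can match the `$type` key against the CBOR encoding of the string `verifier-balance`. The following is a minimal sketch in Go: the tiny CBOR helper only handles short text strings, the filter layout follows the `EventFilter` shape above, and the `Filecoin.GetActorEventsRaw` JSON-RPC method name is assumed from Lotus's usual `Filecoin.<Method>` naming rather than stated by this document:
+
+```go
+package main
+
+import (
+    "encoding/base64"
+    "encoding/json"
+    "fmt"
+)
+
+// cborTextString encodes a short (< 24 byte) string as a CBOR text string
+// (major type 3). Longer strings need the multi-byte length forms.
+func cborTextString(s string) []byte {
+    if len(s) >= 24 {
+        panic("sketch only handles short strings")
+    }
+    return append([]byte{0x60 | byte(len(s))}, s...)
+}
+
+func main() {
+    // Match events whose "$type" entry is the CBOR string "verifier-balance".
+    matcher := map[string]interface{}{
+        "codec": 81, // 0x51, CBOR
+        "value": base64.StdEncoding.EncodeToString(cborTextString("verifier-balance")),
+    }
+
+    filter := map[string]interface{}{
+        "fromEpoch": "earliest",
+        "toEpoch":   "latest",
+        "fields":    map[string]interface{}{"$type": []interface{}{matcher}},
+    }
+
+    // Request body for a JSON-RPC call; the method name is an assumption here.
+    body, _ := json.Marshal(map[string]interface{}{
+        "jsonrpc": "2.0",
+        "id":      1,
+        "method":  "Filecoin.GetActorEventsRaw",
+        "params":  []interface{}{filter},
+    })
+    fmt.Println(string(body))
+}
+```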
+ +| Key | Value | Flags | +|-------------|-------------------------------------|-------| +| `"$type"` | `"verifier-balance"` (string) | kv | +| `"verifier"`| (int) | kv | +| `"balance"` | (bigint) | k | + +In structured form, this event would look like: + +```ipldsch +type DataCap Bytes # A bigint representing a DataCap + +type VerifierBalanceEvent struct { + verifier Int + balance DataCap +} +``` + +#### Allocation + +The `allocation` event is emitted when a verified client allocates DataCap to a specific data piece and storage provider. + +| Key | Value | Flags | +| ------------ | ----------------------- | ----- | +| `"$type"` | `"allocation"` (string) | kv | +| `"id"` | (int) | kv | +| `"client"` | (int) | kv | +| `"provider"` | (int) | kv | + +In structured form, this event would look like: + +```ipldsch +type AllocationEvent struct { + id Int + client Int + provider Int +} +``` + +#### Allocation removed + +The `allocation-removed` event is emitted when a DataCap allocation that is past its expiration epoch is removed. + +| Key | Value | Flags | +| ------------ | ------------------------------- | ----- | +| `"$type"` | `"allocation-removed"` (string) | kv | +| `"id"` | (int) | kv | +| `"client"` | (int) | kv | +| `"provider"` | (int) | kv | + +In structured form, this event would look like: + +```ipldsch +type AllocationRemovedEvent struct { + id Int + client Int + provider Int +} +``` + +#### Claim + +The `claim` event is emitted when a client allocation is claimed by a storage provider after the corresponding verified data is provably committed to the chain. + +| Key | Value | Flags | +| ------------ | ----------------------- | ----- | +| `"$type"` | `"claim"` (string) | kv | +| `"id"` | (int) | kv | +| `"client"` | (int) | kv | +| `"provider"` | (int) | kv | + +In structured form, this event would look like: + +```ipldsch +type ClaimEvent struct { + id Int + client Int + provider Int +} +``` + +#### Claim updated + +The `claim-updated` event is emitted when the term of an existing allocation is extended by the client. + +| Key | Value | Flags | +| ------------ | -------------------------- | ----- | +| `"$type"` | `"claim-updated"` (string) | kv | +| `"id"` | (int) | kv | +| `"client"` | (int) | kv | +| `"provider"` | (int) | kv | + +In structured form, this event would look like: + +```ipldsch +type ClaimUpdatedEvent struct { + id Int + client Int + provider Int +} +``` + +#### Claim removed + +The `claim-removed` event is emitted when an expired claim is removed by the Verified Registry actor. + +| Key | Value | Flags | +| ------------ | -------------------------- | ----- | +| `"$type"` | `"claim-removed"` (string) | kv | +| `"id"` | (int) | kv | +| `"client"` | (int) | kv | +| `"provider"` | (int) | kv | + +In structured form, this event would look like: + +```ipldsch +type ClaimRemovedEvent struct { + id Int + client Int + provider Int +} +``` + +### Market actor events + +The Market actor emits the following deal lifecycle events: + +#### Deal published + +The `deal-published` event is emitted for each new deal that is successfully published by a storage provider. 
+
+| Key         | Value                             | Flags |
+| ----------- | --------------------------------- | ----- |
+| `"$type"`   | `"deal-published"` (string)       | kv    |
+| `"id"`      | (int)                             | kv    |
+| `"client"`  | (int)                             | kv    |
+| `"provider"`| (int)                             | kv    |
+
+In structured form, this event would look like:
+
+```ipldsch
+type DealPublishedEvent struct {
+  id Int
+  client Int
+  provider Int
+}
+```
+
+#### Deal activated
+
+The `deal-activated` event is emitted for each deal that is successfully activated.
+
+| Key          | Value                             | Flags |
+| ------------ | --------------------------------- | ----- |
+| `"$type"`    | `"deal-activated"` (string)       | kv    |
+| `"id"`       | (int)                             | kv    |
+| `"client"`   | (int)                             | kv    |
+| `"provider"` | (int)                             | kv    |
+
+In structured form, this event would look like:
+
+```ipldsch
+type DealActivatedEvent struct {
+  id Int
+  client Int
+  provider Int
+}
+```
+
+#### Deal terminated
+
+The `deal-terminated` event is emitted by the market actor cron job when it processes deals that were marked as terminated by the `OnMinerSectorsTerminate` method.
+
+[FIP-0074](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0074.md) ensures that terminated deals are processed immediately in the `OnMinerSectorsTerminate` method rather than being submitted for deferred processing to the market actor cron job. As of network version 22, this event will be emitted to indicate that a deal made after network version 22 has been terminated.
+
+| Key         | Value                             | Flags |
+| ----------- | --------------------------------- | ----- |
+| `"$type"`   | `"deal-terminated"` (string)      | kv    |
+| `"id"`      | (int)                             | kv    |
+| `"client"`  | (int)                             | kv    |
+| `"provider"`| (int)                             | kv    |
+
+In structured form, this event would look like:
+
+```ipldsch
+type DealTerminatedEvent struct {
+  id Int
+  client Int
+  provider Int
+}
+```
+
+#### Deal completed
+
+The `deal-completed` event is emitted when a deal is marked as successfully complete by the Market actor cron job. The cron job will deem a deal to be successfully completed if it is past its end epoch without being slashed.
+
+[FIP-0074](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0074.md) ensures that the processing of completed deals is done as part of a method called by the storage provider, making this event available to clients and ensuring that storage providers pay the gas costs of processing deal completion and event emission. This applies to new deals made after network version 22. For deals made before network version 22, this event will be emitted by the market actor cron job.
+
+| Key         | Value                             | Flags |
+| ----------- | --------------------------------- | ----- |
+| `"$type"`   | `"deal-completed"` (string)       | kv    |
+| `"id"`      | (int)                             | kv    |
+| `"client"`  | (int)                             | kv    |
+| `"provider"`| (int)                             | kv    |
+
+In structured form, this event would look like:
+
+```ipldsch
+type DealCompletedEvent struct {
+  id Int
+  client Int
+  provider Int
+}
+```
+
+### Miner actor events
+
+The Miner actor emits the following sector lifecycle events:
+
+#### Sector precommitted
+
+The `sector-precommitted` event is emitted for each new sector that is successfully pre-committed by a storage provider.
+
+| Key        | Value                            | Flags |
+| ---------- | -------------------------------- | ----- |
+| `"$type"`  | `"sector-precommitted"` (string) | kv    |
+| `"sector"` | (int)                            | kv    |
+
+In structured form, this event would look like:
+
+```ipldsch
+type SectorPrecommittedEvent struct {
+  sector Int
+}
+```
+
+#### Sector activated
+
+The `sector-activated` event is emitted for each pre-committed sector that is successfully activated by a storage provider. For now, sector activation corresponds 1:1 with prove-committing a sector, but this can change in the future.
+
+| Key              | Value                                          | Flags |
+| ---------------- | ---------------------------------------------- | ----- |
+| `"$type"`        | `"sector-activated"` (string)                  | kv    |
+| `"sector"`       | (int)                                          | kv    |
+| `"unsealed-cid"` | (nullable CID) (null means sector has no data) | kv    |
+| `"piece-cid"`    | (CID)                                          | kv    |
+| `"piece-size"`   | (int)                                          | k     |
+
+_Note that both `"piece-cid"` and `"piece-size"` entries will be included for each piece in the sector, so the keys are repeated._
+
+In structured form, this event would look like:
+
+```ipldsch
+type PieceDescription struct {
+  cid &Any
+  size Int
+}
+
+type SectorActivatedEvent struct {
+  sector Int
+  unsealedCid nullable &Any
+  pieces [PieceDescription]
+}
+```
+
+#### Sector updated
+
+The `sector-updated` event is emitted for each CC sector that is updated to contain actual sealed data.
+
+| Key              | Value                                          | Flags |
+| ---------------- | ---------------------------------------------- | ----- |
+| `"$type"`        | `"sector-updated"` (string)                    | kv    |
+| `"sector"`       | (int)                                          | kv    |
+| `"unsealed-cid"` | (nullable CID) (null means sector has no data) | kv    |
+| `"piece-cid"`    | (CID)                                          | kv    |
+| `"piece-size"`   | (int)                                          | k     |
+
+_Note that both `"piece-cid"` and `"piece-size"` entries will be included for each piece in the sector, so the keys are repeated._
+
+In structured form, this event would look like:
+
+```ipldsch
+type PieceDescription struct {
+  cid &Any
+  size Int
+}
+
+type SectorUpdatedEvent struct {
+  sector Int
+  unsealedCid nullable &Any
+  pieces [PieceDescription]
+}
+```
+
+#### Sector terminated
+
+The `sector-terminated` event is emitted for each sector that is marked as terminated by a storage provider.
+
+| Key         | Value                          | Flags |
+| ----------- | ------------------------------ | ----- |
+| `"$type"`   | `"sector-terminated"` (string) | kv    |
+| `"sector"`  | (int)                          | kv    |
+
+In structured form, this event would look like:
+
+```ipldsch
+type SectorTerminatedEvent struct {
+  sector Int
+}
+```
diff --git a/documentation/en/cli-curio.md b/documentation/en/cli-curio.md
index 08ed8ab908e..296a45c437d 100644
--- a/documentation/en/cli-curio.md
+++ b/documentation/en/cli-curio.md
@@ -17,15 +17,13 @@ COMMANDS:
   test          Utility functions for testing
   web           Start Curio web interface
   guided-setup  Run the guided setup for migrating from lotus-miner to Curio
-  from-miner    Express a database config (for curio) from an existing miner.
seal Manage the sealing pipeline + auth Manage RPC permissions + log Manage logging + wait-api Wait for lotus api to come online + fetch-params Fetch proving parameters version Print version help, h Shows a list of commands or help for one command - DEVELOPER: - auth Manage RPC permissions - log Manage logging - wait-api Wait for lotus api to come online - fetch-params Fetch proving parameters GLOBAL OPTIONS: --color use color in display output (default: depends on output being a TTY) @@ -106,7 +104,6 @@ COMMANDS: interpret, view, stacked, stack Interpret stacked config layers by this version of curio, with system-generated comments. remove, rm, del, delete Remove a named config layer. edit edit a config layer - from-miner Express a database config (for curio) from an existing miner. new-cluster Create new configuration for a new cluster help, h Shows a list of commands or help for one command @@ -206,24 +203,6 @@ OPTIONS: --help, -h show help ``` -### curio config from-miner -``` -NAME: - curio from-miner - Express a database config (for curio) from an existing miner. - -USAGE: - curio from-miner [command options] [arguments...] - -DESCRIPTION: - Express a database config (for curio) from an existing miner. - -OPTIONS: - --miner-repo value, --storagerepo value Specify miner repo path. flag(storagerepo) and env(LOTUS_STORAGE_PATH) are DEPRECATION, will REMOVE SOON (default: "~/.lotusminer") [$LOTUS_MINER_PATH, $LOTUS_STORAGE_PATH] - --to-layer value, -t value The layer name for this data push. 'base' is recommended for single-miner setup. - --overwrite, -o Use this with --to-layer to replace an existing layer (default: false) - --help, -h show help -``` - ### curio config new-cluster ``` NAME: @@ -334,24 +313,6 @@ OPTIONS: --help, -h show help ``` -## curio from-miner -``` -NAME: - curio from-miner - Express a database config (for curio) from an existing miner. - -USAGE: - curio from-miner [command options] [arguments...] - -DESCRIPTION: - Express a database config (for curio) from an existing miner. - -OPTIONS: - --miner-repo value, --storagerepo value Specify miner repo path. flag(storagerepo) and env(LOTUS_STORAGE_PATH) are DEPRECATION, will REMOVE SOON (default: "~/.lotusminer") [$LOTUS_MINER_PATH, $LOTUS_STORAGE_PATH] - --to-layer value, -t value The layer name for this data push. 'base' is recommended for single-miner setup. - --overwrite, -o Use this with --to-layer to replace an existing layer (default: false) - --help, -h show help -``` - ## curio seal ``` NAME: @@ -386,18 +347,6 @@ OPTIONS: --help, -h show help ``` -## curio version -``` -NAME: - curio version - Print version - -USAGE: - curio version [command options] [arguments...] - -OPTIONS: - --help, -h show help -``` - ## curio auth ``` NAME: @@ -525,9 +474,6 @@ NAME: USAGE: curio wait-api [command options] [arguments...] -CATEGORY: - DEVELOPER - OPTIONS: --timeout value duration to wait till fail (default: 30s) --help, -h show help @@ -541,8 +487,17 @@ NAME: USAGE: curio fetch-params [command options] [sectorSize] -CATEGORY: - DEVELOPER +OPTIONS: + --help, -h show help +``` + +## curio version +``` +NAME: + curio version - Print version + +USAGE: + curio version [command options] [arguments...] 
OPTIONS: --help, -h show help diff --git a/documentation/en/cli-lotus-miner.md b/documentation/en/cli-lotus-miner.md index 0f670427546..ed068624e9a 100644 --- a/documentation/en/cli-lotus-miner.md +++ b/documentation/en/cli-lotus-miner.md @@ -10,21 +10,20 @@ VERSION: 1.27.0-dev COMMANDS: - init Initialize a lotus miner repo - run Start a lotus miner process - stop Stop a running lotus miner - config Manage node config - backup Create node metadata backup - version Print version - help, h Shows a list of commands or help for one command + init Initialize a lotus miner repo + run Start a lotus miner process + stop Stop a running lotus miner + config Manage node config + backup Create node metadata backup + auth Manage RPC permissions + log Manage logging + wait-api Wait for lotus api to come online + fetch-params Fetch proving parameters + version Print version + help, h Shows a list of commands or help for one command CHAIN: actor manipulate the miner actor info Print miner info - DEVELOPER: - auth Manage RPC permissions - log Manage logging - wait-api Wait for lotus api to come online - fetch-params Fetch proving parameters STORAGE: sectors interact with sector store proving View proving information @@ -194,6 +193,150 @@ OPTIONS: --help, -h show help ``` +## lotus-miner auth +``` +NAME: + lotus-miner auth - Manage RPC permissions + +USAGE: + lotus-miner auth command [command options] [arguments...] + +COMMANDS: + create-token Create token + api-info Get token with API info required to connect to this node + help, h Shows a list of commands or help for one command + +OPTIONS: + --help, -h show help +``` + +### lotus-miner auth create-token +``` +NAME: + lotus-miner auth create-token - Create token + +USAGE: + lotus-miner auth create-token [command options] [arguments...] + +OPTIONS: + --perm value permission to assign to the token, one of: read, write, sign, admin + --help, -h show help +``` + +### lotus-miner auth api-info +``` +NAME: + lotus-miner auth api-info - Get token with API info required to connect to this node + +USAGE: + lotus-miner auth api-info [command options] [arguments...] + +OPTIONS: + --perm value permission to assign to the token, one of: read, write, sign, admin + --help, -h show help +``` + +## lotus-miner log +``` +NAME: + lotus-miner log - Manage logging + +USAGE: + lotus-miner log command [command options] [arguments...] + +COMMANDS: + list List log systems + set-level Set log level + alerts Get alert states + help, h Shows a list of commands or help for one command + +OPTIONS: + --help, -h show help +``` + +### lotus-miner log list +``` +NAME: + lotus-miner log list - List log systems + +USAGE: + lotus-miner log list [command options] [arguments...] + +OPTIONS: + --help, -h show help +``` + +### lotus-miner log set-level +``` +NAME: + lotus-miner log set-level - Set log level + +USAGE: + lotus-miner log set-level [command options] [level] + +DESCRIPTION: + Set the log level for logging systems: + + The system flag can be specified multiple times. + + eg) log set-level --system chain --system chainxchg debug + + Available Levels: + debug + info + warn + error + + Environment Variables: + GOLOG_LOG_LEVEL - Default log level for all log systems + GOLOG_LOG_FMT - Change output log format (json, nocolor) + GOLOG_FILE - Write logs to file + GOLOG_OUTPUT - Specify whether to output to file, stderr, stdout or a combination, i.e. 
file+stderr + + +OPTIONS: + --system value [ --system value ] limit to log system + --help, -h show help +``` + +### lotus-miner log alerts +``` +NAME: + lotus-miner log alerts - Get alert states + +USAGE: + lotus-miner log alerts [command options] [arguments...] + +OPTIONS: + --all get all (active and inactive) alerts (default: false) + --help, -h show help +``` + +## lotus-miner wait-api +``` +NAME: + lotus-miner wait-api - Wait for lotus api to come online + +USAGE: + lotus-miner wait-api [command options] [arguments...] + +OPTIONS: + --timeout value duration to wait till fail (default: 30s) + --help, -h show help +``` + +## lotus-miner fetch-params +``` +NAME: + lotus-miner fetch-params - Fetch proving parameters + +USAGE: + lotus-miner fetch-params [command options] [sectorSize] + +OPTIONS: + --help, -h show help +``` + ## lotus-miner version ``` NAME: @@ -444,156 +587,6 @@ OPTIONS: --help, -h show help ``` -## lotus-miner auth -``` -NAME: - lotus-miner auth - Manage RPC permissions - -USAGE: - lotus-miner auth command [command options] [arguments...] - -COMMANDS: - create-token Create token - api-info Get token with API info required to connect to this node - help, h Shows a list of commands or help for one command - -OPTIONS: - --help, -h show help -``` - -### lotus-miner auth create-token -``` -NAME: - lotus-miner auth create-token - Create token - -USAGE: - lotus-miner auth create-token [command options] [arguments...] - -OPTIONS: - --perm value permission to assign to the token, one of: read, write, sign, admin - --help, -h show help -``` - -### lotus-miner auth api-info -``` -NAME: - lotus-miner auth api-info - Get token with API info required to connect to this node - -USAGE: - lotus-miner auth api-info [command options] [arguments...] - -OPTIONS: - --perm value permission to assign to the token, one of: read, write, sign, admin - --help, -h show help -``` - -## lotus-miner log -``` -NAME: - lotus-miner log - Manage logging - -USAGE: - lotus-miner log command [command options] [arguments...] - -COMMANDS: - list List log systems - set-level Set log level - alerts Get alert states - help, h Shows a list of commands or help for one command - -OPTIONS: - --help, -h show help -``` - -### lotus-miner log list -``` -NAME: - lotus-miner log list - List log systems - -USAGE: - lotus-miner log list [command options] [arguments...] - -OPTIONS: - --help, -h show help -``` - -### lotus-miner log set-level -``` -NAME: - lotus-miner log set-level - Set log level - -USAGE: - lotus-miner log set-level [command options] [level] - -DESCRIPTION: - Set the log level for logging systems: - - The system flag can be specified multiple times. - - eg) log set-level --system chain --system chainxchg debug - - Available Levels: - debug - info - warn - error - - Environment Variables: - GOLOG_LOG_LEVEL - Default log level for all log systems - GOLOG_LOG_FMT - Change output log format (json, nocolor) - GOLOG_FILE - Write logs to file - GOLOG_OUTPUT - Specify whether to output to file, stderr, stdout or a combination, i.e. file+stderr - - -OPTIONS: - --system value [ --system value ] limit to log system - --help, -h show help -``` - -### lotus-miner log alerts -``` -NAME: - lotus-miner log alerts - Get alert states - -USAGE: - lotus-miner log alerts [command options] [arguments...] 
- -OPTIONS: - --all get all (active and inactive) alerts (default: false) - --help, -h show help -``` - -## lotus-miner wait-api -``` -NAME: - lotus-miner wait-api - Wait for lotus api to come online - -USAGE: - lotus-miner wait-api [command options] [arguments...] - -CATEGORY: - DEVELOPER - -OPTIONS: - --timeout value duration to wait till fail (default: 30s) - --help, -h show help -``` - -## lotus-miner fetch-params -``` -NAME: - lotus-miner fetch-params - Fetch proving parameters - -USAGE: - lotus-miner fetch-params [command options] [sectorSize] - -CATEGORY: - DEVELOPER - -OPTIONS: - --help, -h show help -``` - ## lotus-miner sectors ``` NAME: @@ -1039,7 +1032,6 @@ OPTIONS: --deadline value the deadline to compact the partitions in (default: 0) --partitions value [ --partitions value ] list of partitions to compact sectors in --really-do-it Actually send transaction performing the action (default: false) - --actor value Specify the address of the miner to run this command --help, -h show help ``` diff --git a/documentation/en/cli-lotus.md b/documentation/en/cli-lotus.md index ad04b68ecb4..36f1e1059a3 100644 --- a/documentation/en/cli-lotus.md +++ b/documentation/en/cli-lotus.md @@ -1192,7 +1192,7 @@ COMMANDS: list-claims List claims available in verified registry actor or made by provider if specified remove-expired-allocations remove expired allocations (if no allocations are specified all eligible allocations are removed) remove-expired-claims remove expired claims (if no claims are specified all eligible claims are removed) - extend-claim extend claim expiration (TermMax) + extend-claim extends claim expiration (TermMax) help, h Shows a list of commands or help for one command OPTIONS: @@ -1329,10 +1329,13 @@ OPTIONS: ### lotus filplus extend-claim ``` NAME: - lotus filplus extend-claim - extend claim expiration (TermMax) + lotus filplus extend-claim - extends claim expiration (TermMax) USAGE: - lotus filplus extend-claim [command options] ... or ... + Extends claim expiration (TermMax). + If the client is original client then claim can be extended to maximum 5 years and no Datacap is required. + If the client id different then claim can be extended up to maximum 5 years from now and Datacap is required. + OPTIONS: --term-max value, --tmax value The maximum period for which a provider can earn quality-adjusted power for the piece (epochs). Default is 5 years. (default: 5256000) @@ -1341,6 +1344,7 @@ OPTIONS: --miner value, -m value, --provider value, -p value [ --miner value, -m value, --provider value, -p value ] storage provider address[es] --assume-yes, -y, --yes automatic yes to prompts; assume 'yes' as answer to all prompts and run non-interactively (default: false) --confidence value number of block confirmations to wait for (default: 5) + --batch-size value number of extend requests per batch. If set incorrectly, this will lead to out of gas error (default: 500) --help, -h show help ``` diff --git a/documentation/en/cli-sptool.md b/documentation/en/cli-sptool.md new file mode 100644 index 00000000000..7b888a884eb --- /dev/null +++ b/documentation/en/cli-sptool.md @@ -0,0 +1,441 @@ +# sptool +``` +NAME: + sptool - Manage Filecoin Miner Actor + +USAGE: + sptool [global options] command [command options] [arguments...] 
+ +VERSION: + 1.27.0-dev + +COMMANDS: + actor Manage Filecoin Miner Actor Metadata + info Print miner actor info + sectors interact with sector store + proving View proving information + help, h Shows a list of commands or help for one command + +GLOBAL OPTIONS: + --log-level value (default: "info") + --actor value miner actor to manage [$SP_ADDRESS] + --help, -h show help + --version, -v print the version +``` + +## sptool actor +``` +NAME: + sptool actor - Manage Filecoin Miner Actor Metadata + +USAGE: + sptool actor command [command options] [arguments...] + +COMMANDS: + set-addresses, set-addrs set addresses that your miner can be publicly dialed on + withdraw withdraw available balance to beneficiary + repay-debt pay down a miner's debt + set-peer-id set the peer id of your miner + set-owner Set owner address (this command should be invoked twice, first with the old owner as the senderAddress, and then with the new owner) + control Manage control addresses + propose-change-worker Propose a worker address change + confirm-change-worker Confirm a worker address change + compact-allocated compact allocated sectors bitfield + propose-change-beneficiary Propose a beneficiary address change + confirm-change-beneficiary Confirm a beneficiary address change + help, h Shows a list of commands or help for one command + +OPTIONS: + --help, -h show help +``` + +### sptool actor set-addresses +``` +NAME: + sptool actor set-addresses - set addresses that your miner can be publicly dialed on + +USAGE: + sptool actor set-addresses [command options] + +OPTIONS: + --from value optionally specify the account to send the message from + --gas-limit value set gas limit (default: 0) + --unset unset address (default: false) + --help, -h show help +``` + +### sptool actor withdraw +``` +NAME: + sptool actor withdraw - withdraw available balance to beneficiary + +USAGE: + sptool actor withdraw [command options] [amount (FIL)] + +OPTIONS: + --confidence value number of block confirmations to wait for (default: 5) + --beneficiary send withdraw message from the beneficiary address (default: false) + --help, -h show help +``` + +### sptool actor repay-debt +``` +NAME: + sptool actor repay-debt - pay down a miner's debt + +USAGE: + sptool actor repay-debt [command options] [amount (FIL)] + +OPTIONS: + --from value optionally specify the account to send funds from + --help, -h show help +``` + +### sptool actor set-peer-id +``` +NAME: + sptool actor set-peer-id - set the peer id of your miner + +USAGE: + sptool actor set-peer-id [command options] + +OPTIONS: + --gas-limit value set gas limit (default: 0) + --help, -h show help +``` + +### sptool actor set-owner +``` +NAME: + sptool actor set-owner - Set owner address (this command should be invoked twice, first with the old owner as the senderAddress, and then with the new owner) + +USAGE: + sptool actor set-owner [command options] [newOwnerAddress senderAddress] + +OPTIONS: + --really-do-it Actually send transaction performing the action (default: false) + --help, -h show help +``` + +### sptool actor control +``` +NAME: + sptool actor control - Manage control addresses + +USAGE: + sptool actor control command [command options] [arguments...] + +COMMANDS: + list Get currently set control addresses. Note: This excludes most roles as they are not known to the immediate chain state. 
+ set Set control address(-es) + help, h Shows a list of commands or help for one command + +OPTIONS: + --help, -h show help +``` + +#### sptool actor control list +``` +NAME: + sptool actor control list - Get currently set control addresses. Note: This excludes most roles as they are not known to the immediate chain state. + +USAGE: + sptool actor control list [command options] [arguments...] + +OPTIONS: + --verbose (default: false) + --help, -h show help +``` + +#### sptool actor control set +``` +NAME: + sptool actor control set - Set control address(-es) + +USAGE: + sptool actor control set [command options] [...address] + +OPTIONS: + --really-do-it Actually send transaction performing the action (default: false) + --help, -h show help +``` + +### sptool actor propose-change-worker +``` +NAME: + sptool actor propose-change-worker - Propose a worker address change + +USAGE: + sptool actor propose-change-worker [command options] [address] + +OPTIONS: + --really-do-it Actually send transaction performing the action (default: false) + --help, -h show help +``` + +### sptool actor confirm-change-worker +``` +NAME: + sptool actor confirm-change-worker - Confirm a worker address change + +USAGE: + sptool actor confirm-change-worker [command options] [address] + +OPTIONS: + --really-do-it Actually send transaction performing the action (default: false) + --help, -h show help +``` + +### sptool actor compact-allocated +``` +NAME: + sptool actor compact-allocated - compact allocated sectors bitfield + +USAGE: + sptool actor compact-allocated [command options] [arguments...] + +OPTIONS: + --mask-last-offset value Mask sector IDs from 0 to 'highest_allocated - offset' (default: 0) + --mask-upto-n value Mask sector IDs from 0 to 'n' (default: 0) + --really-do-it Actually send transaction performing the action (default: false) + --help, -h show help +``` + +### sptool actor propose-change-beneficiary +``` +NAME: + sptool actor propose-change-beneficiary - Propose a beneficiary address change + +USAGE: + sptool actor propose-change-beneficiary [command options] [beneficiaryAddress quota expiration] + +OPTIONS: + --really-do-it Actually send transaction performing the action (default: false) + --overwrite-pending-change Overwrite the current beneficiary change proposal (default: false) + --actor value specify the address of miner actor + --help, -h show help +``` + +### sptool actor confirm-change-beneficiary +``` +NAME: + sptool actor confirm-change-beneficiary - Confirm a beneficiary address change + +USAGE: + sptool actor confirm-change-beneficiary [command options] [minerID] + +OPTIONS: + --really-do-it Actually send transaction performing the action (default: false) + --existing-beneficiary send confirmation from the existing beneficiary address (default: false) + --new-beneficiary send confirmation from the new beneficiary address (default: false) + --help, -h show help +``` + +## sptool info +``` +NAME: + sptool info - Print miner actor info + +USAGE: + sptool info [command options] [arguments...] + +OPTIONS: + --help, -h show help +``` + +## sptool sectors +``` +NAME: + sptool sectors - interact with sector store + +USAGE: + sptool sectors command [command options] [arguments...] 
+ +COMMANDS: + status Get the seal status of a sector by its number + list List sectors + precommits Print on-chain precommit info + check-expire Inspect expiring sectors + expired Get or cleanup expired sectors + extend Extend expiring sectors while not exceeding each sector's max life + compact-partitions removes dead sectors from partitions and reduces the number of partitions used if possible + help, h Shows a list of commands or help for one command + +OPTIONS: + --help, -h show help +``` + +### sptool sectors status +``` +NAME: + sptool sectors status - Get the seal status of a sector by its number + +USAGE: + sptool sectors status [command options] + +OPTIONS: + --log, -l display event log (default: false) + --on-chain-info, -c show sector on chain info (default: false) + --partition-info, -p show partition related info (default: false) + --proof print snark proof bytes as hex (default: false) + --help, -h show help +``` + +### sptool sectors list +``` +NAME: + sptool sectors list - List sectors + +USAGE: + sptool sectors list [command options] [arguments...] + +OPTIONS: + --help, -h show help +``` + +### sptool sectors precommits +``` +NAME: + sptool sectors precommits - Print on-chain precommit info + +USAGE: + sptool sectors precommits [command options] [arguments...] + +OPTIONS: + --help, -h show help +``` + +### sptool sectors check-expire +``` +NAME: + sptool sectors check-expire - Inspect expiring sectors + +USAGE: + sptool sectors check-expire [command options] [arguments...] + +OPTIONS: + --cutoff value skip sectors whose current expiration is more than epochs from now, defaults to 60 days (default: 172800) + --help, -h show help +``` + +### sptool sectors expired +``` +NAME: + sptool sectors expired - Get or cleanup expired sectors + +USAGE: + sptool sectors expired [command options] [arguments...] + +OPTIONS: + --expired-epoch value epoch at which to check sector expirations (default: WinningPoSt lookback epoch) + --help, -h show help +``` + +### sptool sectors extend +``` +NAME: + sptool sectors extend - Extend expiring sectors while not exceeding each sector's max life + +USAGE: + sptool sectors extend [command options] + +OPTIONS: + --from value only consider sectors whose current expiration epoch is in the range of [from, to], defaults to: now + 120 (1 hour) (default: 0) + --to value only consider sectors whose current expiration epoch is in the range of [from, to], defaults to: now + 92160 (32 days) (default: 0) + --sector-file value provide a file containing one sector number in each line, ignoring above selecting criteria + --exclude value optionally provide a file containing excluding sectors + --extension value try to extend selected sectors by this number of epochs, defaults to 540 days (default: 1555200) + --new-expiration value try to extend selected sectors to this epoch, ignoring extension (default: 0) + --only-cc only extend CC sectors (useful for making sector ready for snap upgrade) (default: false) + --drop-claims drop claims for sectors that can be extended, but only by dropping some of their verified power claims (default: false) + --tolerance value don't try to extend sectors by fewer than this number of epochs, defaults to 7 days (default: 20160) + --max-fee value use up to this amount of FIL for one message. pass this flag to avoid message congestion. 
(default: "0") + --max-sectors value the maximum number of sectors contained in each message (default: 0) + --really-do-it pass this flag to really extend sectors, otherwise will only print out json representation of parameters (default: false) + --help, -h show help +``` + +### sptool sectors compact-partitions +``` +NAME: + sptool sectors compact-partitions - removes dead sectors from partitions and reduces the number of partitions used if possible + +USAGE: + sptool sectors compact-partitions [command options] [arguments...] + +OPTIONS: + --deadline value the deadline to compact the partitions in (default: 0) + --partitions value [ --partitions value ] list of partitions to compact sectors in + --really-do-it Actually send transaction performing the action (default: false) + --help, -h show help +``` + +## sptool proving +``` +NAME: + sptool proving - View proving information + +USAGE: + sptool proving command [command options] [arguments...] + +COMMANDS: + info View current state information + deadlines View the current proving period deadlines information + deadline View the current proving period deadline information by its index + faults View the currently known proving faulty sectors information + help, h Shows a list of commands or help for one command + +OPTIONS: + --help, -h show help +``` + +### sptool proving info +``` +NAME: + sptool proving info - View current state information + +USAGE: + sptool proving info [command options] [arguments...] + +OPTIONS: + --help, -h show help +``` + +### sptool proving deadlines +``` +NAME: + sptool proving deadlines - View the current proving period deadlines information + +USAGE: + sptool proving deadlines [command options] [arguments...] + +OPTIONS: + --all, -a Count all sectors (only live sectors are counted by default) (default: false) + --help, -h show help +``` + +### sptool proving deadline +``` +NAME: + sptool proving deadline - View the current proving period deadline information by its index + +USAGE: + sptool proving deadline [command options] + +OPTIONS: + --sector-nums, -n Print sector/fault numbers belonging to this deadline (default: false) + --bitfield, -b Print partition bitfield stats (default: false) + --help, -h show help +``` + +### sptool proving faults +``` +NAME: + sptool proving faults - View the currently known proving faulty sectors information + +USAGE: + sptool proving faults [command options] [arguments...] + +OPTIONS: + --help, -h show help +``` diff --git a/documentation/en/data-onboarding-visibility.md b/documentation/en/data-onboarding-visibility.md new file mode 100644 index 00000000000..3225753dd1d --- /dev/null +++ b/documentation/en/data-onboarding-visibility.md @@ -0,0 +1,71 @@ +# Data Onboarding Visibility + +* [Introduction and background](#introduction-and-background) +* [DDO information flow](#ddo-information-flow) +* [Relevant message contents](#relevant-message-contents) +* [Relevant builtin actor events](#relevant-builtin-actor-events) + +## Introduction and background + +**Direct Data Onboarding** (DDO) as defined in **[FIP-0076](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0076.md)** provides an optional data onboarding path that is both gas-efficient and paves a path toward eventual smart contract mediated onboarding mechanisms. The existing market actor and market actor mediated onboarding pathway remains largely unchanged but is now optional; it is anticipated that the gas savings alone will see a significant shift away from use of the market actor. 
+
+Historically, a large amount of tooling was built around Filecoin that makes use of the market actor (f05) to quantify various data-related metrics. A shift in behaviour toward data onboarding that bypasses the market actor requires adaptation in order to maintain continuity with some of the data-related metrics being collected. This will continue to be true as Filecoin evolves to enable onboarding mechanisms mediated by smart contracts.
+
+**Deal-lifecycle actor events** as defined in **[FIP-0083](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0083.md)** and detailed in [Actor Events and Lotus APIs](./actor-events-api.md) introduced the first batch of fire-and-forget externally observable events for builtin actors. The FVM already had this capability, and these first builtin actor events were added to increase the visibility of information around data onboarding, particularly in light of the introduction of DDO, which will require metrics gatherers to rely on mechanisms other than the market actor to collect data.
+
+Actor events are an optional method for gaining insight into data onboarding activities and data / storage lifecycles. Messages may also be used as a source of truth for data onboarding metrics, but are more complex to consume and may not be suitable for some workflows. For verified (Filecoin Plus) data, the verified registry actor (f06) should be used as the primary source of truth for data lifecycles; however, FIP-0076 introduces the possibility of "sparkling data" which is not verified and not mediated through the builtin market actor or possibly any other actor. This data currently still requires piece commitments to be detailed as part of a miner’s sector commitments, and so may be observed through messages and actor events that carry sector commitment piece manifests.
+
+## DDO information flow
+
+The most basic direct onboarding workflow as viewed by the chain is simply:
+
+- At PreCommit, an SP must specify a sector’s data commitment (unsealed CID, CommP) *(but does not need to specify the structure of that data nor any deals or verified allocations)*.
+- At ProveCommit or ReplicaUpdate, an SP specifies the pieces of data (CommP and their size) comprising a sector in order to satisfy the data commitment.
+
+This basic form does not touch either the market actor or the verified registry actor. Importantly, it does not result in piece information being stored on chain, even though the ProveCommit message contains this information for the purpose of verifying the sector commitment. This is the most significant change from onboarding mechanics prior to network version 22.
+
+There are two possible additions to this flow:
+
+- Prior to PreCommit, an SP publishes a storage deal to the builtin market actor, in which case deal information exists on chain as it does with non-DDO deals today; then
+  - at ProveCommit, or ReplicaUpdate, the SP can notify an actor of the commitment. Currently this can only be the builtin market actor (in the future this may be a list of arbitrary user defined actors), in which case it will be used to activate a deal previously proposed on chain.
+- At ProveCommit, or ReplicaUpdate, the SP can claim DataCap that was previously allocated by the client for a particular piece.
+
+💡 **The builtin market actor should not be used as a single source of truth regarding data onboarding activities.** The builtin market actor is only a source of truth for data onboarding mediated by the builtin market actor.
+
+💡 **The builtin market actor should not be used as a source of truth regarding verified claims and metrics related to FIL+ usage (size, clients, providers).** The `VerifiedClaim` property of `DealState` has been removed from the builtin market actor. Instead, the verified registry should be used as the only source of truth regarding both allocations and claims.
+
+💡 **Sector data commitments and their constituent pieces are only stored on chain in the verified registry claims in the case of verified data (pieces) onboarded via any mechanism (DDO and/or builtin market actor).** Piece information for data onboarded that is not verified ("sparkling data") and not mediated through the builtin market actor will only appear in messages and actor events. Messages and actor events may be used as a source of truth for data sector commitments.
+
+## Relevant message contents
+
+Even though chain state is less informative for data onboarding not mediated through the builtin market actor, messages used for chain execution will continue to provide all relevant information and may be used to determine the number and size of pieces within a sector, as well as any DataCap claimed for specific pieces and therefore the client allocating the DataCap.
+
+The most important messages for this purpose are as follows:
+
+At ProveCommit, a [`ProveCommitSectors3`](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0076.md#provecommitsectors3) message will contain a `SectorActivations` property which is a list of `SectorActivationManifest`, one for each sector being activated. Within this per-sector manifest is a list of `Pieces` which details all of the pieces contributing to the sector commitment; each one is a `PieceActivationManifest` of the form:
+
+```ipldsch
+type PieceActivationManifest struct {
+  CID &Any # Piece data commitment (CommP)
+  Size Int # Padded piece size
+  VerifiedAllocationKey nullable VerifiedAllocationKey # Identifies a verified allocation to be claimed
+  Notify DataActivationNotification # Notifications to be sent to other actors after activation
+}
+```
+
+This manifest contains the piece commitment as well as an optional `VerifiedAllocationKey` which lists a client and an allocation to claim from the verified registry actor.
+
+At ReplicaUpdate, the [`ProveReplicaUpdates3`](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0076.md#provereplicaupdates3) message will contain a `SectorUpdates` property which is a list of `SectorUpdateManifest`, one for each sector being updated. This manifest mirrors the `SectorActivationManifest` for ProveCommit, containing a list of `Pieces` which may be similarly inspected for the relevant data.
+
+- **Pieces**: All piece information for each sector's data commitment may be collected from the piece manifests.
+- **Verified data**: Claims may be cross-referenced with the verified registry state to access the details of the allocation, including piece information for the claim. The `StateGetClaim` Lotus API call provides this information.
+
+💡 Making use of the message contents directly is not a trivial activity. Messages need to be filtered by actor and method number, the exit code needs to be checked from the receipt, and the parameters need to be decoded according to the relevant schema for that message. Actor events exist to make this somewhat easier, although they may not be suitable for some workflows.
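+
+As a rough illustration of those steps, the sketch below decodes the parameters of a `ProveCommitSectors3` message and lists the pieces declared for each sector. This is a minimal sketch only: it assumes the Go types in the `go-state-types` `builtin/v13/miner` package mirror the manifest schema above (names such as `ProveCommitSectors3Params`, `SectorActivations`, `Pieces`, `CID`, `Size` and `VerifiedAllocationKey` follow that schema and should be checked against the release in use), and it assumes the message has already been matched against the miner actor, the correct method number and a zero exit code in its receipt.
+
+```go
+package onboarding
+
+import (
+    "bytes"
+    "fmt"
+
+    // Assumed import path; verify against the go-state-types version in use.
+    miner13 "github.com/filecoin-project/go-state-types/builtin/v13/miner"
+)
+
+// ListSectorPieces decodes the CBOR-encoded Params of a ProveCommitSectors3
+// message and prints the piece commitment (CommP), padded size and whether a
+// verified allocation is being claimed for every piece in every activated
+// sector.
+func ListSectorPieces(rawParams []byte) error {
+    var params miner13.ProveCommitSectors3Params
+    if err := params.UnmarshalCBOR(bytes.NewReader(rawParams)); err != nil {
+        return fmt.Errorf("decoding ProveCommitSectors3 params: %w", err)
+    }
+    for _, activation := range params.SectorActivations {
+        for _, piece := range activation.Pieces {
+            fmt.Printf("sector %d: piece %s, padded size %d, verified allocation claimed: %t\n",
+                activation.SectorNumber, piece.CID, piece.Size,
+                piece.VerifiedAllocationKey != nil)
+        }
+    }
+    return nil
+}
+```
+
+A `ProveReplicaUpdates3` message can be inspected in the same way via its `SectorUpdates` list.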
+ +## Relevant builtin actor events + +Depending on usage and data consumption workflow, consuming builtin actor events using the APIs detailed in [Actor Events and Lotus APIs](./actor-events-api.md), may be simpler and more suitable. The following events are relevant to DDO and may be used to determine the number and size of pieces within a sector, as well as any DataCap claimed for specific pieces and therefore the client allocating the DataCap. + +The [`sector-activated`](./actor-events-api.md#sector-activated) and [`sector-updated`](./actor-events-api.md#sector-updated) events are emitted by the miner actor and contain the piece information for each sector. This is submitted to the miner actor by the storage provider in the form of a piece manifest and is summarised as a list of pieces in the events. Both piece CID (CommP) and piece size are available in the event data. + +The [`claim`](./actor-events-api.md#claim) event is emitted by the verified registry actor and contains the client and provider for each claim. This event contains the claim ID which can be used to cross-reference with the verified registry state to access the details of the allocation, including piece information for the claim. The `StateGetClaim` Lotus API call provides this information. diff --git a/documentation/en/default-lotus-config.toml b/documentation/en/default-lotus-config.toml index 420c192bd4f..85ce178ce94 100644 --- a/documentation/en/default-lotus-config.toml +++ b/documentation/en/default-lotus-config.toml @@ -275,68 +275,67 @@ # env var: LOTUS_FEVM_ETHTXHASHMAPPINGLIFETIMEDAYS #EthTxHashMappingLifetimeDays = 0 - [Fevm.Events] - # DisableRealTimeFilterAPI will disable the RealTimeFilterAPI that can create and query filters for actor events as they are emitted. - # The API is enabled when EnableEthRPC or Events.EnableActorEventsAPI is true, but can be disabled selectively with this flag. - # - # type: bool - # env var: LOTUS_FEVM_EVENTS_DISABLEREALTIMEFILTERAPI - #DisableRealTimeFilterAPI = false - - # DisableHistoricFilterAPI will disable the HistoricFilterAPI that can create and query filters for actor events - # that occurred in the past. HistoricFilterAPI maintains a queryable index of events. - # The API is enabled when EnableEthRPC or Events.EnableActorEventsAPI is true, but can be disabled selectively with this flag. - # - # type: bool - # env var: LOTUS_FEVM_EVENTS_DISABLEHISTORICFILTERAPI - #DisableHistoricFilterAPI = false - - # FilterTTL specifies the time to live for actor event filters. Filters that haven't been accessed longer than - # this time become eligible for automatic deletion. - # - # type: Duration - # env var: LOTUS_FEVM_EVENTS_FILTERTTL - #FilterTTL = "24h0m0s" - - # MaxFilters specifies the maximum number of filters that may exist at any one time. - # - # type: int - # env var: LOTUS_FEVM_EVENTS_MAXFILTERS - #MaxFilters = 100 - - # MaxFilterResults specifies the maximum number of results that can be accumulated by an actor event filter. - # - # type: int - # env var: LOTUS_FEVM_EVENTS_MAXFILTERRESULTS - #MaxFilterResults = 10000 - # MaxFilterHeightRange specifies the maximum range of heights that can be used in a filter (to avoid querying - # the entire chain) - # - # type: uint64 - # env var: LOTUS_FEVM_EVENTS_MAXFILTERHEIGHTRANGE - #MaxFilterHeightRange = 2880 - - # DatabasePath is the full path to a sqlite database that will be used to index actor events to - # support the historic filter APIs. If the database does not exist it will be created. 
The directory containing - # the database must already exist and be writeable. If a relative path is provided here, sqlite treats it as - # relative to the CWD (current working directory). - # - # type: string - # env var: LOTUS_FEVM_EVENTS_DATABASEPATH - #DatabasePath = "" +[Events] + # DisableRealTimeFilterAPI will disable the RealTimeFilterAPI that can create and query filters for actor events as they are emitted. + # The API is enabled when Fevm.EnableEthRPC or EnableActorEventsAPI is true, but can be disabled selectively with this flag. + # + # type: bool + # env var: LOTUS_EVENTS_DISABLEREALTIMEFILTERAPI + #DisableRealTimeFilterAPI = false + # DisableHistoricFilterAPI will disable the HistoricFilterAPI that can create and query filters for actor events + # that occurred in the past. HistoricFilterAPI maintains a queryable index of events. + # The API is enabled when Fevm.EnableEthRPC or EnableActorEventsAPI is true, but can be disabled selectively with this flag. + # + # type: bool + # env var: LOTUS_EVENTS_DISABLEHISTORICFILTERAPI + #DisableHistoricFilterAPI = false -[Events] # EnableActorEventsAPI enables the Actor events API that enables clients to consume events # emitted by (smart contracts + built-in Actors). # This will also enable the RealTimeFilterAPI and HistoricFilterAPI by default, but they can be - # disabled by setting their respective Disable* options in Fevm.Events. + # disabled by setting their respective Disable* options. # # type: bool # env var: LOTUS_EVENTS_ENABLEACTOREVENTSAPI #EnableActorEventsAPI = false + # FilterTTL specifies the time to live for actor event filters. Filters that haven't been accessed longer than + # this time become eligible for automatic deletion. + # + # type: Duration + # env var: LOTUS_EVENTS_FILTERTTL + #FilterTTL = "24h0m0s" + + # MaxFilters specifies the maximum number of filters that may exist at any one time. + # + # type: int + # env var: LOTUS_EVENTS_MAXFILTERS + #MaxFilters = 100 + + # MaxFilterResults specifies the maximum number of results that can be accumulated by an actor event filter. + # + # type: int + # env var: LOTUS_EVENTS_MAXFILTERRESULTS + #MaxFilterResults = 10000 + + # MaxFilterHeightRange specifies the maximum range of heights that can be used in a filter (to avoid querying + # the entire chain) + # + # type: uint64 + # env var: LOTUS_EVENTS_MAXFILTERHEIGHTRANGE + #MaxFilterHeightRange = 2880 + + # DatabasePath is the full path to a sqlite database that will be used to index actor events to + # support the historic filter APIs. If the database does not exist it will be created. The directory containing + # the database must already exist and be writeable. If a relative path is provided here, sqlite treats it as + # relative to the CWD (current working directory). + # + # type: string + # env var: LOTUS_EVENTS_DATABASEPATH + #DatabasePath = "" + [Index] # EXPERIMENTAL FEATURE. 
USE WITH CAUTION diff --git a/go.mod b/go.mod index 06b80fc36eb..82f1838bb00 100644 --- a/go.mod +++ b/go.mod @@ -45,14 +45,15 @@ require ( github.com/filecoin-project/go-fil-commcid v0.1.0 github.com/filecoin-project/go-fil-commp-hashhash v0.1.0 github.com/filecoin-project/go-fil-markets v1.28.3 + github.com/filecoin-project/go-hamt-ipld/v3 v3.1.0 github.com/filecoin-project/go-jsonrpc v0.3.1 github.com/filecoin-project/go-padreader v0.0.1 github.com/filecoin-project/go-paramfetch v0.0.4 - github.com/filecoin-project/go-state-types v0.13.0-rc.3 + github.com/filecoin-project/go-state-types v0.13.1 github.com/filecoin-project/go-statemachine v1.0.3 github.com/filecoin-project/go-statestore v0.2.0 github.com/filecoin-project/go-storedcounter v0.1.0 - github.com/filecoin-project/kubo-api-client v0.0.1 + github.com/filecoin-project/kubo-api-client v0.27.0 github.com/filecoin-project/pubsub v1.0.0 github.com/filecoin-project/specs-actors v0.9.15 github.com/filecoin-project/specs-actors/v2 v2.3.6 @@ -68,20 +69,20 @@ require ( github.com/georgysavva/scany/v2 v2.0.0 github.com/go-openapi/spec v0.19.11 github.com/golang/mock v1.6.0 - github.com/google/uuid v1.3.0 - github.com/gorilla/mux v1.8.0 - github.com/gorilla/websocket v1.5.0 + github.com/google/uuid v1.5.0 + github.com/gorilla/mux v1.8.1 + github.com/gorilla/websocket v1.5.1 github.com/gregdhill/go-openrpc v0.0.0-20220114144539-ae6f44720487 github.com/hako/durafmt v0.0.0-20200710122514-c0fb7b4da026 github.com/hannahhoward/go-pubsub v0.0.0-20200423002714-8d62886cc36e github.com/hashicorp/go-multierror v1.1.1 github.com/hashicorp/golang-lru/arc/v2 v2.0.5 - github.com/hashicorp/golang-lru/v2 v2.0.5 + github.com/hashicorp/golang-lru/v2 v2.0.7 github.com/icza/backscanner v0.0.0-20210726202459-ac2ffc679f94 github.com/influxdata/influxdb1-client v0.0.0-20200827194710-b269163b24ab github.com/ipfs/bbloom v0.0.4 - github.com/ipfs/boxo v0.10.1 - github.com/ipfs/go-block-format v0.1.2 + github.com/ipfs/boxo v0.18.0 + github.com/ipfs/go-block-format v0.2.0 github.com/ipfs/go-cid v0.4.1 github.com/ipfs/go-cidutil v0.1.0 github.com/ipfs/go-datastore v0.6.0 @@ -89,18 +90,18 @@ require ( github.com/ipfs/go-ds-leveldb v0.5.0 github.com/ipfs/go-ds-measure v0.2.0 github.com/ipfs/go-fs-lock v0.0.7 - github.com/ipfs/go-graphsync v0.14.6 + github.com/ipfs/go-graphsync v0.16.0 github.com/ipfs/go-ipfs-blocksutil v0.0.1 - github.com/ipfs/go-ipld-cbor v0.0.6 - github.com/ipfs/go-ipld-format v0.5.0 + github.com/ipfs/go-ipld-cbor v0.1.0 + github.com/ipfs/go-ipld-format v0.6.0 github.com/ipfs/go-log/v2 v2.5.1 github.com/ipfs/go-metrics-interface v0.0.1 github.com/ipfs/go-metrics-prometheus v0.0.2 - github.com/ipfs/go-unixfsnode v1.7.1 + github.com/ipfs/go-unixfsnode v1.9.0 github.com/ipld/go-car v0.6.1 - github.com/ipld/go-car/v2 v2.10.1 + github.com/ipld/go-car/v2 v2.13.1 github.com/ipld/go-codec-dagpb v1.6.0 - github.com/ipld/go-ipld-prime v0.20.0 + github.com/ipld/go-ipld-prime v0.21.0 github.com/ipld/go-ipld-selector-text-lite v0.0.1 github.com/ipni/go-libipni v0.0.8 github.com/ipni/index-provider v0.12.0 @@ -109,15 +110,15 @@ require ( github.com/kelseyhightower/envconfig v1.4.0 github.com/koalacxr/quantile v0.0.1 github.com/libp2p/go-buffer-pool v0.1.0 - github.com/libp2p/go-libp2p v0.31.1 - github.com/libp2p/go-libp2p-kad-dht v0.24.0 - github.com/libp2p/go-libp2p-pubsub v0.9.3 + github.com/libp2p/go-libp2p v0.33.1 + github.com/libp2p/go-libp2p-kad-dht v0.25.2 + github.com/libp2p/go-libp2p-pubsub v0.10.0 github.com/libp2p/go-libp2p-record v0.2.0 - 
github.com/libp2p/go-libp2p-routing-helpers v0.7.0 + github.com/libp2p/go-libp2p-routing-helpers v0.7.3 github.com/libp2p/go-maddr-filter v0.1.0 github.com/libp2p/go-msgio v0.3.0 github.com/manifoldco/promptui v0.9.0 - github.com/mattn/go-isatty v0.0.19 + github.com/mattn/go-isatty v0.0.20 github.com/mattn/go-sqlite3 v1.14.16 github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 github.com/minio/sha256-simd v1.0.1 @@ -132,12 +133,12 @@ require ( github.com/open-rpc/meta-schema v0.0.0-20201029221707-1b72ef2ea333 github.com/pkg/errors v0.9.1 github.com/polydawn/refmt v0.89.0 - github.com/prometheus/client_golang v1.16.0 + github.com/prometheus/client_golang v1.18.0 github.com/puzpuzpuz/xsync/v2 v2.4.0 github.com/raulk/clock v1.1.0 github.com/raulk/go-watchdog v1.3.0 - github.com/samber/lo v1.38.1 - github.com/stretchr/testify v1.8.4 + github.com/samber/lo v1.39.0 + github.com/stretchr/testify v1.9.0 github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 github.com/triplewz/poseidon v0.0.0-20230828015038-79d8165c88ed github.com/urfave/cli/v2 v2.25.5 @@ -149,43 +150,45 @@ require ( github.com/zondax/ledger-filecoin-go v0.11.1 github.com/zyedidia/generic v1.2.1 go.opencensus.io v0.24.0 - go.opentelemetry.io/otel v1.16.0 + go.opentelemetry.io/otel v1.21.0 go.opentelemetry.io/otel/bridge/opencensus v0.39.0 go.opentelemetry.io/otel/exporters/jaeger v1.14.0 - go.opentelemetry.io/otel/sdk v1.16.0 + go.opentelemetry.io/otel/sdk v1.21.0 go.uber.org/atomic v1.11.0 - go.uber.org/fx v1.20.0 + go.uber.org/fx v1.20.1 go.uber.org/multierr v1.11.0 - go.uber.org/zap v1.25.0 - golang.org/x/crypto v0.18.0 - golang.org/x/net v0.14.0 - golang.org/x/sync v0.3.0 - golang.org/x/sys v0.16.0 - golang.org/x/term v0.16.0 + go.uber.org/zap v1.27.0 + golang.org/x/crypto v0.19.0 + golang.org/x/net v0.21.0 + golang.org/x/sync v0.6.0 + golang.org/x/sys v0.17.0 + golang.org/x/term v0.17.0 golang.org/x/text v0.14.0 - golang.org/x/time v0.0.0-20220722155302-e5dcc9cfc0b9 - golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846 - golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 + golang.org/x/time v0.5.0 + golang.org/x/tools v0.18.0 + golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 gopkg.in/cheggaaa/pb.v1 v1.0.28 gotest.tools v2.2.0+incompatible ) require ( github.com/GeertJohan/go.incremental v1.0.0 // indirect + github.com/Jorropo/jsync v1.0.1 // indirect github.com/PuerkitoBio/purell v1.1.1 // indirect github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect github.com/StackExchange/wmi v1.2.1 // indirect github.com/akavel/rsrc v0.8.0 // indirect - github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect + github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 // indirect github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect github.com/benbjohnson/clock v1.3.5 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/bep/debounce v1.2.1 // indirect + github.com/blang/semver/v4 v4.0.0 // indirect github.com/cespare/xxhash v1.1.0 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/cilium/ebpf v0.9.1 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect - github.com/crackcomm/go-gitignore v0.0.0-20170627025303-887ab5e44cc3 // indirect + github.com/crackcomm/go-gitignore v0.0.0-20231225121904-e25f5bc08668 // indirect github.com/cskr/pubsub v1.0.2 // indirect github.com/daaku/go.zipexe v1.0.2 // indirect github.com/davecgh/go-spew v1.1.1 // indirect @@ -201,13 +204,12 @@ require ( 
github.com/filecoin-project/go-ds-versioning v0.1.2 // indirect github.com/filecoin-project/go-hamt-ipld v0.1.5 // indirect github.com/filecoin-project/go-hamt-ipld/v2 v2.0.0 // indirect - github.com/filecoin-project/go-hamt-ipld/v3 v3.1.0 // indirect - github.com/flynn/noise v1.0.0 // indirect + github.com/flynn/noise v1.1.0 // indirect github.com/francoispqt/gojay v1.2.13 // indirect github.com/gdamore/encoding v1.0.0 // indirect github.com/go-kit/log v0.2.1 // indirect github.com/go-logfmt/logfmt v0.6.0 // indirect - github.com/go-logr/logr v1.2.4 // indirect + github.com/go-logr/logr v1.4.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-ole/go-ole v1.2.5 // indirect github.com/go-openapi/jsonpointer v0.19.3 // indirect @@ -216,22 +218,22 @@ require ( github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect github.com/godbus/dbus/v5 v5.1.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/glog v1.1.1 // indirect + github.com/golang/glog v1.1.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.3 // indirect github.com/golang/snappy v0.0.4 // indirect - github.com/google/go-cmp v0.5.9 // indirect + github.com/google/go-cmp v0.6.0 // indirect github.com/google/gopacket v1.1.19 // indirect - github.com/google/pprof v0.0.0-20230817174616-7a8ec2ada47b // indirect + github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5 // indirect github.com/hannahhoward/cbor-gen-for v0.0.0-20230214144701-5d17c9d5243c // indirect github.com/hashicorp/errwrap v1.1.0 // indirect - github.com/hashicorp/golang-lru v0.6.0 // indirect - github.com/huin/goupnp v1.2.0 // indirect + github.com/hashicorp/golang-lru v1.0.2 // indirect + github.com/huin/goupnp v1.3.0 // indirect github.com/iancoleman/orderedmap v0.1.0 // indirect github.com/ipfs/go-bitfield v1.1.0 // indirect github.com/ipfs/go-blockservice v0.5.1 // indirect github.com/ipfs/go-ipfs-blockstore v1.3.0 // indirect - github.com/ipfs/go-ipfs-cmds v0.9.0 // indirect + github.com/ipfs/go-ipfs-cmds v0.10.0 // indirect github.com/ipfs/go-ipfs-delay v0.0.1 // indirect github.com/ipfs/go-ipfs-ds-help v1.1.0 // indirect github.com/ipfs/go-ipfs-exchange-interface v0.2.0 // indirect @@ -257,14 +259,14 @@ require ( github.com/josharian/intern v1.0.0 // indirect github.com/jpillora/backoff v1.0.0 // indirect github.com/kilic/bls12-381 v0.1.0 // indirect - github.com/klauspost/compress v1.16.7 // indirect - github.com/klauspost/cpuid/v2 v2.2.6 // indirect + github.com/klauspost/compress v1.17.6 // indirect + github.com/klauspost/cpuid/v2 v2.2.7 // indirect github.com/koron/go-ssdp v0.0.4 // indirect github.com/libp2p/go-cidranger v1.1.0 // indirect github.com/libp2p/go-flow-metrics v0.1.0 // indirect - github.com/libp2p/go-libp2p-asn-util v0.3.0 // indirect + github.com/libp2p/go-libp2p-asn-util v0.4.1 // indirect github.com/libp2p/go-libp2p-gostream v0.6.0 // indirect - github.com/libp2p/go-libp2p-kbucket v0.6.1 // indirect + github.com/libp2p/go-libp2p-kbucket v0.6.3 // indirect github.com/libp2p/go-nat v0.2.0 // indirect github.com/libp2p/go-netroute v0.2.1 // indirect github.com/libp2p/go-reuseport v0.4.0 // indirect @@ -275,8 +277,7 @@ require ( github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-runewidth v0.0.15 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect - github.com/miekg/dns v1.1.55 // 
indirect + github.com/miekg/dns v1.1.58 // indirect github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect github.com/mr-tron/base58 v1.2.0 // indirect @@ -284,23 +285,22 @@ require ( github.com/muesli/termenv v0.15.2 // indirect github.com/multiformats/go-base36 v0.2.0 // indirect github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect - github.com/multiformats/go-multistream v0.4.1 // indirect + github.com/multiformats/go-multistream v0.5.0 // indirect github.com/nikkolasg/hexjson v0.1.0 // indirect github.com/nkovacs/streamquote v1.0.0 // indirect - github.com/onsi/ginkgo/v2 v2.11.0 // indirect - github.com/opencontainers/runtime-spec v1.1.0 // indirect + github.com/onsi/ginkgo/v2 v2.15.0 // indirect + github.com/opencontainers/runtime-spec v1.2.0 // indirect github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/client_model v0.4.0 // indirect - github.com/prometheus/common v0.44.0 // indirect - github.com/prometheus/procfs v0.10.1 // indirect + github.com/prometheus/client_model v0.6.0 // indirect + github.com/prometheus/common v0.47.0 // indirect + github.com/prometheus/procfs v0.12.0 // indirect github.com/prometheus/statsd_exporter v0.22.7 // indirect github.com/quic-go/qpack v0.4.0 // indirect - github.com/quic-go/qtls-go1-20 v0.3.3 // indirect - github.com/quic-go/quic-go v0.38.2 // indirect - github.com/quic-go/webtransport-go v0.5.3 // indirect + github.com/quic-go/quic-go v0.42.0 // indirect + github.com/quic-go/webtransport-go v0.6.0 // indirect github.com/rivo/uniseg v0.2.0 // indirect github.com/rs/cors v1.7.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect @@ -311,6 +311,7 @@ require ( github.com/twmb/murmur3 v1.1.6 // indirect github.com/valyala/bytebufferpool v1.0.0 // indirect github.com/valyala/fasttemplate v1.0.1 // indirect + github.com/whyrusleeping/base32 v0.0.0-20170828182744-c30ac30633cc // indirect github.com/whyrusleeping/cbor v0.0.0-20171005072247-63513f603b11 // indirect github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f // indirect github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 // indirect @@ -319,17 +320,18 @@ require ( github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect github.com/zondax/hid v0.9.2 // indirect github.com/zondax/ledger-go v0.14.3 // indirect - go.opentelemetry.io/otel/metric v1.16.0 // indirect + go.opentelemetry.io/otel/metric v1.21.0 // indirect go.opentelemetry.io/otel/sdk/metric v0.39.0 // indirect - go.opentelemetry.io/otel/trace v1.16.0 // indirect - go.uber.org/dig v1.17.0 // indirect + go.opentelemetry.io/otel/trace v1.21.0 // indirect + go.uber.org/dig v1.17.1 // indirect + go.uber.org/mock v0.4.0 // indirect go4.org v0.0.0-20230225012048-214862532bf5 // indirect - golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63 // indirect - golang.org/x/mod v0.12.0 // indirect - gonum.org/v1/gonum v0.13.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc // indirect - google.golang.org/grpc v1.55.0 // indirect - google.golang.org/protobuf v1.30.0 // indirect + golang.org/x/exp v0.0.0-20240213143201-ec583247a57a // indirect + golang.org/x/mod v0.15.0 // indirect + gonum.org/v1/gonum v0.14.0 // indirect + 
google.golang.org/genproto/googleapis/rpc v0.0.0-20240108191215-35c7eff3a6b1 // indirect + google.golang.org/grpc v1.60.1 // indirect + google.golang.org/protobuf v1.32.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect howett.net/plist v0.0.0-20181124034731-591f970eefbb // indirect diff --git a/go.sum b/go.sum index 5f0b05eec47..edfb8164cf1 100644 --- a/go.sum +++ b/go.sum @@ -58,6 +58,8 @@ github.com/GeertJohan/go.rice v1.0.3 h1:k5viR+xGtIhF61125vCE1cmJ5957RQGXG6dmbaWZ github.com/GeertJohan/go.rice v1.0.3/go.mod h1:XVdrU4pW00M4ikZed5q56tPf1v2KwnIKeIdc9CBYNt4= github.com/Gurpartap/async v0.0.0-20180927173644-4f7f499dd9ee h1:8doiS7ib3zi6/K172oDhSKU0dJ/miJramo9NITOMyZQ= github.com/Gurpartap/async v0.0.0-20180927173644-4f7f499dd9ee/go.mod h1:W0GbEAA4uFNYOGG2cJpmFJ04E6SD1NLELPYZB57/7AY= +github.com/Jorropo/jsync v1.0.1 h1:6HgRolFZnsdfzRUj+ImB9og1JYOxQoReSywkHOGSaUU= +github.com/Jorropo/jsync v1.0.1/go.mod h1:jCOZj3vrBCri3bSU3ErUYvevKlnbssrXeCivybS5ABQ= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= github.com/Kubuxu/go-os-helper v0.0.1/go.mod h1:N8B+I7vPCT80IcP58r50u4+gEEcsZETFUpAzWW2ep1Y= github.com/Kubuxu/imtui v0.0.0-20210401140320-41663d68d0fa h1:1PPxEyGdIGVkX/kqMvLJ95a1dGS1Sz7tpNEgehEYYt0= @@ -91,8 +93,9 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuy github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= -github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAuRjVTiNNhvNRfY2Wxp9nhfyel4rklc= github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= +github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 h1:ez/4by2iGztzR4L0zgAOR8lTQK9VlyBVVd7G4omaOQs= +github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= @@ -119,6 +122,8 @@ github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6r github.com/bep/debounce v1.2.1 h1:v67fRdBA9UQu2NhLFXrSg0Brw7CexQekrBwDMM8bzeY= github.com/bep/debounce v1.2.1/go.mod h1:H8yggRPQKLUhUoqrJC1bO2xNya7vanpDl7xR3ISbCJ0= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= +github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= github.com/btcsuite/btcd v0.0.0-20190213025234-306aecffea32/go.mod h1:DrZx5ec/dmnfpw9KyYoQyYo7d0KEvTkk/5M/vbZjAr8= github.com/btcsuite/btcd v0.0.0-20190523000118-16327141da8c/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= @@ -204,8 +209,8 @@ github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:ma github.com/cpuguy83/go-md2man/v2 
v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/crackcomm/go-gitignore v0.0.0-20170627025303-887ab5e44cc3 h1:HVTnpeuvF6Owjd5mniCL8DEXo7uYXdQEmOP4FJbV5tg= -github.com/crackcomm/go-gitignore v0.0.0-20170627025303-887ab5e44cc3/go.mod h1:p1d6YEZWvFzEh4KLyvBcVSnrfNDDvK2zfK/4x2v/4pE= +github.com/crackcomm/go-gitignore v0.0.0-20231225121904-e25f5bc08668 h1:ZFUue+PNxmHlu7pYv+IYMtqlaO/0VwaGEqKepZf9JpA= +github.com/crackcomm/go-gitignore v0.0.0-20231225121904-e25f5bc08668/go.mod h1:p1d6YEZWvFzEh4KLyvBcVSnrfNDDvK2zfK/4x2v/4pE= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/cskr/pubsub v1.0.2 h1:vlOzMhl6PFn60gRlTQQsIfVwaPB/B/8MziK8FhEPt/0= @@ -348,8 +353,8 @@ github.com/filecoin-project/go-state-types v0.1.0/go.mod h1:ezYnPf0bNkTsDibL/psS github.com/filecoin-project/go-state-types v0.1.6/go.mod h1:UwGVoMsULoCK+bWjEdd/xLCvLAQFBC7EDT477SKml+Q= github.com/filecoin-project/go-state-types v0.1.10/go.mod h1:UwGVoMsULoCK+bWjEdd/xLCvLAQFBC7EDT477SKml+Q= github.com/filecoin-project/go-state-types v0.11.2-0.20230712101859-8f37624fa540/go.mod h1:SyNPwTsU7I22gL2r0OAPcImvLoTVfgRwdK/Y5rR1zz8= -github.com/filecoin-project/go-state-types v0.13.0-rc.3 h1:gs+5uKYo2hDufhMzVfTWRsTp00rEY6nK/gYtTxj79RY= -github.com/filecoin-project/go-state-types v0.13.0-rc.3/go.mod h1:cHpOPup9H1g2T29dKHAjC2sc7/Ef5ypjuW9A3I+e9yY= +github.com/filecoin-project/go-state-types v0.13.1 h1:4CivvlcHAIoAtFFVVlZtokynaMQu5XLXGoTKhQkfG1I= +github.com/filecoin-project/go-state-types v0.13.1/go.mod h1:cHpOPup9H1g2T29dKHAjC2sc7/Ef5ypjuW9A3I+e9yY= github.com/filecoin-project/go-statemachine v0.0.0-20200925024713-05bd7c71fbfe/go.mod h1:FGwQgZAt2Gh5mjlwJUlVB62JeYdo+if0xWxSEfBD9ig= github.com/filecoin-project/go-statemachine v1.0.3 h1:N07o6alys+V1tNoSTi4WuuoeNC4erS/6jE74+NsgQuk= github.com/filecoin-project/go-statemachine v1.0.3/go.mod h1:jZdXXiHa61n4NmgWFG4w8tnqgvZVHYbJ3yW7+y8bF54= @@ -358,8 +363,8 @@ github.com/filecoin-project/go-statestore v0.2.0 h1:cRRO0aPLrxKQCZ2UOQbzFGn4WDNd github.com/filecoin-project/go-statestore v0.2.0/go.mod h1:8sjBYbS35HwPzct7iT4lIXjLlYyPor80aU7t7a/Kspo= github.com/filecoin-project/go-storedcounter v0.1.0 h1:Mui6wSUBC+cQGHbDUBcO7rfh5zQkWJM/CpAZa/uOuus= github.com/filecoin-project/go-storedcounter v0.1.0/go.mod h1:4ceukaXi4vFURIoxYMfKzaRF5Xv/Pinh2oTnoxpv+z8= -github.com/filecoin-project/kubo-api-client v0.0.1 h1:IR1b+sm+VYxSRvbgECVv9SbhIgygcXcSoN1Q7xsHDXg= -github.com/filecoin-project/kubo-api-client v0.0.1/go.mod h1:c36PPMIVOkKfHDwDG5U05gUlPRY9wNuh/BePwo0e+6Y= +github.com/filecoin-project/kubo-api-client v0.27.0 h1:rQNbReJCCQ8L107VIQR0qjAlEqdDQRYOhDKYcKGcnPI= +github.com/filecoin-project/kubo-api-client v0.27.0/go.mod h1:1+geFlaV8oJRJ4IlVTqL3QC3T1f5N0aGSptErrtcMQs= github.com/filecoin-project/pubsub v1.0.0 h1:ZTmT27U07e54qV1mMiQo4HDr0buo8I1LDHBYLXlsNXM= github.com/filecoin-project/pubsub v1.0.0/go.mod h1:GkpB33CcUtUNrLPhJgfdy4FDx4OMNR9k+46DHx/Lqrg= github.com/filecoin-project/specs-actors v0.9.13/go.mod h1:TS1AW/7LbG+615j4NsjMK1qlpAwaFsG9w0V2tg2gSao= @@ -383,8 +388,9 @@ github.com/filecoin-project/specs-actors/v8 v8.0.1/go.mod h1:UYIPg65iPWoFw5NEftR github.com/filecoin-project/test-vectors/schema v0.0.7 h1:hhrcxLnQR2Oe6fjk63hZXG1fWQGyxgCVXOOlAlR/D9A= 
github.com/filecoin-project/test-vectors/schema v0.0.7/go.mod h1:WqdmeJrz0V37wp7DucRR/bvrScZffqaCyIk9G0BGw1o= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= -github.com/flynn/noise v1.0.0 h1:DlTHqmzmvcEiKj+4RYo/imoswx/4r6iBlCMfVtrMXpQ= github.com/flynn/noise v1.0.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag= +github.com/flynn/noise v1.1.0 h1:KjPQoQCEFdZDiP03phOvGi11+SVVhBG2wOWAorLsstg= +github.com/flynn/noise v1.1.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag= github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk= github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY= github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= @@ -392,8 +398,8 @@ github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2 github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= github.com/frankban/quicktest v1.14.0/go.mod h1:NeW+ay9A/U67EYXNFA1nPE8e/tnQv/09mUdL/ijj8og= github.com/frankban/quicktest v1.14.3/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps= -github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY= -github.com/frankban/quicktest v1.14.4/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= @@ -430,8 +436,8 @@ github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= -github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= +github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-ole/go-ole v1.2.5 h1:t4MGB5xEDZvXI+0rMjjsfBsD7yAgp/s9ZDkL1JndXwY= @@ -480,8 +486,8 @@ github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69 github.com/gogo/status v1.1.1 h1:DuHXlSFHNKqTQ+/ACf5Vs6r4X/dH2EgIzR9Vr+H65kg= github.com/gogo/status v1.1.1/go.mod h1:jpG3dM5QPcqu19Hg8lkUhBFBa3TcLs1DG7+2Jqci7oU= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.1.1 h1:jxpi2eWoU84wbX9iIEyAeeoac3FLuifZpY9tcNUD9kw= -github.com/golang/glog v1.1.1/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ= +github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo= +github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ= github.com/golang/groupcache 
v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -539,8 +545,9 @@ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -556,15 +563,15 @@ github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20230817174616-7a8ec2ada47b h1:h9U78+dx9a4BKdQkBBos92HalKpaGKHrp+3Uo6yTodo= -github.com/google/pprof v0.0.0-20230817174616-7a8ec2ada47b/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= +github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5 h1:E/LAvt58di64hlYjx7AsNS6C/ysHWYo+2qPCZKTQhRo= +github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= -github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.5.0 h1:1p67kYwdtXjb0gL0BPiP1Av9wiZPo5A8z2cWkTZ+eyU= +github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= @@ -577,14 +584,14 @@ github.com/gopherjs/gopherjs v1.17.2/go.mod h1:pRRIvn/QzFLrKfvEz3qUuEhtE/zLCWfre github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux 
v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= -github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= +github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= -github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY= +github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY= github.com/gregdhill/go-openrpc v0.0.0-20220114144539-ae6f44720487 h1:NyaWOSkqFK1d9o+HLfnMIGzrHuUUPeBNIZyi5Zoe/lY= github.com/gregdhill/go-openrpc v0.0.0-20220114144539-ae6f44720487/go.mod h1:a1eRkbhd3DYpRH2lnuUsVG+QMTI+v0hGnsis8C9hMrA= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= @@ -626,12 +633,12 @@ github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= -github.com/hashicorp/golang-lru v0.6.0 h1:uL2shRDx7RTrOrTCUZEGP/wJUFiUI8QT6E7z5o8jga4= -github.com/hashicorp/golang-lru v0.6.0/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c= +github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/golang-lru/arc/v2 v2.0.5 h1:l2zaLDubNhW4XO3LnliVj0GXO3+/CGNJAg1dcN2Fpfw= github.com/hashicorp/golang-lru/arc/v2 v2.0.5/go.mod h1:ny6zBSQZi2JxIeYcv7kt2sH2PXJtirBN7RDhRpxPkxU= -github.com/hashicorp/golang-lru/v2 v2.0.5 h1:wW7h1TG88eUIJ2i69gaE3uNVtEPIagzhGvHgwfx2Vm4= -github.com/hashicorp/golang-lru/v2 v2.0.5/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= +github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= +github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= @@ -640,8 +647,8 @@ github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/J github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= github.com/huin/goupnp v1.0.0/go.mod h1:n9v9KO1tAxYH82qOn+UTIFQDmx5n1Zxd/ClZDMX7Bnc= -github.com/huin/goupnp v1.2.0 h1:uOKW26NG1hsSSbXIZ1IR7XP9Gjd1U8pnLaCMgntmkmY= -github.com/huin/goupnp 
v1.2.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= +github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc= +github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o= github.com/iancoleman/orderedmap v0.0.0-20190318233801-ac98e3ecb4b0/go.mod h1:N0Wam8K1arqPXNWjMo21EXnBPOPp36vB07FNRdD2geA= github.com/iancoleman/orderedmap v0.1.0 h1:2orAxZBJsvimgEBmMWfXaFlzSG2fbQil5qzP3F6cCkg= @@ -660,8 +667,8 @@ github.com/influxdata/influxdb1-client v0.0.0-20200827194710-b269163b24ab/go.mod github.com/ipfs/bbloom v0.0.1/go.mod h1:oqo8CVWsJFMOZqTglBG4wydCE4IQA/G2/SEofB0rjUI= github.com/ipfs/bbloom v0.0.4 h1:Gi+8EGJ2y5qiD5FbsbpX/TMNcJw8gSqr7eyjHa4Fhvs= github.com/ipfs/bbloom v0.0.4/go.mod h1:cS9YprKXpoZ9lT0n/Mw/a6/aFV6DTjTLYHeA+gyqMG0= -github.com/ipfs/boxo v0.10.1 h1:q0ZhbyN6iNZLipd6txt1xotCiP/icfvdAQ4YpUi+cL4= -github.com/ipfs/boxo v0.10.1/go.mod h1:1qgKq45mPRCxf4ZPoJV2lnXxyxucigILMJOrQrVivv8= +github.com/ipfs/boxo v0.18.0 h1:MOL9/AgoV3e7jlVMInicaSdbgralfqSsbkc31dZ9tmw= +github.com/ipfs/boxo v0.18.0/go.mod h1:pIZgTWdm3k3pLF9Uq6MB8JEcW07UDwNJjlXW1HELW80= github.com/ipfs/go-bitfield v1.1.0 h1:fh7FIo8bSwaJEh6DdTWbCeZ1eqOaOkKFI74SCnsWbGA= github.com/ipfs/go-bitfield v1.1.0/go.mod h1:paqf1wjq/D2BBmzfTVFlJQ9IlFOZpg422HL0HqsGWHU= github.com/ipfs/go-bitswap v0.1.0/go.mod h1:FFJEf18E9izuCqUtHxbWEvq+reg7o4CW5wSAE1wsxj0= @@ -672,8 +679,8 @@ github.com/ipfs/go-bitswap v0.11.0/go.mod h1:05aE8H3XOU+LXpTedeAS0OZpcO1WFsj5niY github.com/ipfs/go-block-format v0.0.1/go.mod h1:DK/YYcsSUIVAFNwo/KZCdIIbpN0ROH/baNLgayt4pFc= github.com/ipfs/go-block-format v0.0.2/go.mod h1:AWR46JfpcObNfg3ok2JHDUfdiHRgWhJgCQF+KIgOPJY= github.com/ipfs/go-block-format v0.0.3/go.mod h1:4LmD4ZUw0mhO+JSKdpWwrzATiEfM7WWgQ8H5l6P8MVk= -github.com/ipfs/go-block-format v0.1.2 h1:GAjkfhVx1f4YTODS6Esrj1wt2HhrtwTnhEr+DyPUaJo= -github.com/ipfs/go-block-format v0.1.2/go.mod h1:mACVcrxarQKstUU3Yf/RdwbC4DzPV6++rO2a3d+a/KE= +github.com/ipfs/go-block-format v0.2.0 h1:ZqrkxBA2ICbDRbK8KJs/u0O3dlp6gmAuuXUJNiW1Ycs= +github.com/ipfs/go-block-format v0.2.0/go.mod h1:+jpL11nFx5A/SPpsoBn6Bzkra/zaArfSmsknbPMYgzM= github.com/ipfs/go-blockservice v0.1.0/go.mod h1:hzmMScl1kXHg3M2BjTymbVPjv627N7sYcvYaKbop39M= github.com/ipfs/go-blockservice v0.2.1/go.mod h1:k6SiwmgyYgs4M/qt+ww6amPeUH9EISLRBnvUurKJhi8= github.com/ipfs/go-blockservice v0.5.1 h1:9pAtkyKAz/skdHTh0kH8VulzWp+qmSDD0aI17TYP/s0= @@ -727,8 +734,8 @@ github.com/ipfs/go-ds-measure v0.2.0/go.mod h1:SEUD/rE2PwRa4IQEC5FuNAmjJCyYObZr9 github.com/ipfs/go-fs-lock v0.0.6/go.mod h1:OTR+Rj9sHiRubJh3dRhD15Juhd/+w6VPOY28L7zESmM= github.com/ipfs/go-fs-lock v0.0.7 h1:6BR3dajORFrFTkb5EpCUFIAypsoxpGpDSVUdFwzgL9U= github.com/ipfs/go-fs-lock v0.0.7/go.mod h1:Js8ka+FNYmgQRLrRXzU3CB/+Csr1BwrRilEcvYrHhhc= -github.com/ipfs/go-graphsync v0.14.6 h1:NPxvuUy4Z08Mg8dwpBzwgbv/PGLIufSJ1sle6iAX8yo= -github.com/ipfs/go-graphsync v0.14.6/go.mod h1:yT0AfjFgicOoWdAlUJ96tQ5AkuGI4r1taIQX/aHbBQo= +github.com/ipfs/go-graphsync v0.16.0 h1:0BX7whXlV13Y9FZ/jRg+xaGHaGYbtGxGppKD6tncw6k= +github.com/ipfs/go-graphsync v0.16.0/go.mod h1:WfbMW3hhmX5GQEQ+KJxsFzVJVBKgC5szfrYK7Zc7xIM= github.com/ipfs/go-ipfs-blockstore v0.0.1/go.mod h1:d3WClOmRQKFnJ0Jz/jj/zmksX0ma1gROTlovZKBmN08= github.com/ipfs/go-ipfs-blockstore v0.1.0/go.mod h1:5aD0AvHPi7mZc6Ci1WCAhiBQu2IsfTduLl+422H6Rqw= github.com/ipfs/go-ipfs-blockstore v0.2.1/go.mod h1:jGesd8EtCM3/zPgx+qr0/feTXGUeRai6adgwC+Q+JvE= @@ -741,8 +748,8 @@ 
github.com/ipfs/go-ipfs-blocksutil v0.0.1/go.mod h1:Yq4M86uIOmxmGPUHv/uI7uKqZNtL github.com/ipfs/go-ipfs-chunker v0.0.1/go.mod h1:tWewYK0we3+rMbOh7pPFGDyypCtvGcBFymgY4rSDLAw= github.com/ipfs/go-ipfs-chunker v0.0.5 h1:ojCf7HV/m+uS2vhUGWcogIIxiO5ubl5O57Q7NapWLY8= github.com/ipfs/go-ipfs-chunker v0.0.5/go.mod h1:jhgdF8vxRHycr00k13FM8Y0E+6BoalYeobXmUyTreP8= -github.com/ipfs/go-ipfs-cmds v0.9.0 h1:K0VcXg1l1k6aY6sHnoxYcyimyJQbcV1ueXuWgThmK9Q= -github.com/ipfs/go-ipfs-cmds v0.9.0/go.mod h1:SBFHK8WNwC416QWH9Vz1Ql42SSMAOqKpaHUMBu3jpLo= +github.com/ipfs/go-ipfs-cmds v0.10.0 h1:ZB4+RgYaH4UARfJY0uLKl5UXgApqnRjKbuCiJVcErYk= +github.com/ipfs/go-ipfs-cmds v0.10.0/go.mod h1:sX5d7jkCft9XLPnkgEfXY0z2UBOB5g6fh/obBS0enJE= github.com/ipfs/go-ipfs-delay v0.0.0-20181109222059-70721b86a9a8/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= github.com/ipfs/go-ipfs-delay v0.0.1 h1:r/UXYyRcddO6thwOnhiznIAiSvxMECGgtv35Xs1IeRQ= github.com/ipfs/go-ipfs-delay v0.0.1/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= @@ -781,15 +788,16 @@ github.com/ipfs/go-ipld-cbor v0.0.3/go.mod h1:wTBtrQZA3SoFKMVkp6cn6HMRteIB1VsmHA github.com/ipfs/go-ipld-cbor v0.0.4/go.mod h1:BkCduEx3XBCO6t2Sfo5BaHzuok7hbhdMm9Oh8B2Ftq4= github.com/ipfs/go-ipld-cbor v0.0.5/go.mod h1:BkCduEx3XBCO6t2Sfo5BaHzuok7hbhdMm9Oh8B2Ftq4= github.com/ipfs/go-ipld-cbor v0.0.6-0.20211211231443-5d9b9e1f6fa8/go.mod h1:ssdxxaLJPXH7OjF5V4NSjBbcfh+evoR4ukuru0oPXMA= -github.com/ipfs/go-ipld-cbor v0.0.6 h1:pYuWHyvSpIsOOLw4Jy7NbBkCyzLDcl64Bf/LZW7eBQ0= github.com/ipfs/go-ipld-cbor v0.0.6/go.mod h1:ssdxxaLJPXH7OjF5V4NSjBbcfh+evoR4ukuru0oPXMA= +github.com/ipfs/go-ipld-cbor v0.1.0 h1:dx0nS0kILVivGhfWuB6dUpMa/LAwElHPw1yOGYopoYs= +github.com/ipfs/go-ipld-cbor v0.1.0/go.mod h1:U2aYlmVrJr2wsUBU67K4KgepApSZddGRDWBYR0H4sCk= github.com/ipfs/go-ipld-format v0.0.1/go.mod h1:kyJtbkDALmFHv3QR6et67i35QzO3S0dCDnkOJhcZkms= github.com/ipfs/go-ipld-format v0.0.2/go.mod h1:4B6+FM2u9OJ9zCV+kSbgFAZlOrv1Hqbf0INGQgiKf9k= github.com/ipfs/go-ipld-format v0.2.0/go.mod h1:3l3C1uKoadTPbeNfrDi+xMInYKlx2Cvg1BuydPSdzQs= github.com/ipfs/go-ipld-format v0.3.0/go.mod h1:co/SdBE8h99968X0hViiw1MNlh6fvxxnHpvVLnH7jSM= github.com/ipfs/go-ipld-format v0.4.0/go.mod h1:co/SdBE8h99968X0hViiw1MNlh6fvxxnHpvVLnH7jSM= -github.com/ipfs/go-ipld-format v0.5.0 h1:WyEle9K96MSrvr47zZHKKcDxJ/vlpET6PSiQsAFO+Ds= -github.com/ipfs/go-ipld-format v0.5.0/go.mod h1:ImdZqJQaEouMjCvqCe0ORUS+uoBmf7Hf+EO/jh+nk3M= +github.com/ipfs/go-ipld-format v0.6.0 h1:VEJlA2kQ3LqFSIm5Vu6eIlSxD/Ze90xtc4Meten1F5U= +github.com/ipfs/go-ipld-format v0.6.0/go.mod h1:g4QVMTn3marU3qXchwjpKPKgJv+zF+OlaKMyhJ4LHPg= github.com/ipfs/go-ipld-legacy v0.1.0/go.mod h1:86f5P/srAmh9GcIcWQR9lfFLZPrIyyXQeVlOWeeWEuI= github.com/ipfs/go-ipld-legacy v0.2.1 h1:mDFtrBpmU7b//LzLSypVrXsD8QxkEWxu5qVxN99/+tk= github.com/ipfs/go-ipld-legacy v0.2.1/go.mod h1:782MOUghNzMO2DER0FlBR94mllfdCJCkTtDtPM51otM= @@ -832,8 +840,8 @@ github.com/ipfs/go-peertaskqueue v0.8.1/go.mod h1:Oxxd3eaK279FxeydSPPVGHzbwVeHja github.com/ipfs/go-unixfs v0.2.2-0.20190827150610-868af2e9e5cb/go.mod h1:IwAAgul1UQIcNZzKPYZWOCijryFBeCV79cNubPzol+k= github.com/ipfs/go-unixfs v0.4.5 h1:wj8JhxvV1G6CD7swACwSKYa+NgtdWC1RUit+gFnymDU= github.com/ipfs/go-unixfs v0.4.5/go.mod h1:BIznJNvt/gEx/ooRMI4Us9K8+qeGO7vx1ohnbk8gjFg= -github.com/ipfs/go-unixfsnode v1.7.1 h1:RRxO2b6CSr5UQ/kxnGzaChTjp5LWTdf3Y4n8ANZgB/s= -github.com/ipfs/go-unixfsnode v1.7.1/go.mod h1:PVfoyZkX1B34qzT3vJO4nsLUpRCyhnMuHBznRcXirlk= +github.com/ipfs/go-unixfsnode v1.9.0 h1:ubEhQhr22sPAKO2DNsyVBW7YB/zA8Zkif25aBvz8rc8= +github.com/ipfs/go-unixfsnode 
v1.9.0/go.mod h1:HxRu9HYHOjK6HUqFBAi++7DVoWAHn0o4v/nZ/VA+0g8= github.com/ipfs/go-verifcid v0.0.1/go.mod h1:5Hrva5KBeIog4A+UpqlaIU+DEstipcJYQQZc0g37pY0= github.com/ipfs/go-verifcid v0.0.2 h1:XPnUv0XmdH+ZIhLGKg6U2vaPaRDXb9urMyNVCE7uvTs= github.com/ipfs/go-verifcid v0.0.2/go.mod h1:40cD9x1y4OWnFXbLNJYRe7MpNvWlMn3LZAG5Wb4xnPU= @@ -841,8 +849,8 @@ github.com/ipld/go-car v0.1.0/go.mod h1:RCWzaUh2i4mOEkB3W45Vc+9jnS/M6Qay5ooytiBH github.com/ipld/go-car v0.6.1 h1:blWbEHf1j62JMWFIqWE//YR0m7k5ZMw0AuUOU5hjrH8= github.com/ipld/go-car v0.6.1/go.mod h1:oEGXdwp6bmxJCZ+rARSkDliTeYnVzv3++eXajZ+Bmr8= github.com/ipld/go-car/v2 v2.1.1/go.mod h1:+2Yvf0Z3wzkv7NeI69i8tuZ+ft7jyjPYIWZzeVNeFcI= -github.com/ipld/go-car/v2 v2.10.1 h1:MRDqkONNW9WRhB79u+Z3U5b+NoN7lYA5B8n8qI3+BoI= -github.com/ipld/go-car/v2 v2.10.1/go.mod h1:sQEkXVM3csejlb1kCCb+vQ/pWBKX9QtvsrysMQjOgOg= +github.com/ipld/go-car/v2 v2.13.1 h1:KnlrKvEPEzr5IZHKTXLAEub+tPrzeAFQVRlSQvuxBO4= +github.com/ipld/go-car/v2 v2.13.1/go.mod h1:QkdjjFNGit2GIkpQ953KBwowuoukoM75nP/JI1iDJdo= github.com/ipld/go-codec-dagpb v1.2.0/go.mod h1:6nBN7X7h8EOsEejZGqC7tej5drsdBAXbMHyBT+Fne5s= github.com/ipld/go-codec-dagpb v1.3.0/go.mod h1:ga4JTU3abYApDC3pZ00BC2RSvC3qfBb9MSJkMLSwnhA= github.com/ipld/go-codec-dagpb v1.6.0 h1:9nYazfyu9B1p3NAgfVdpRco3Fs2nFC72DqVsMj6rOcc= @@ -856,8 +864,8 @@ github.com/ipld/go-ipld-prime v0.10.0/go.mod h1:KvBLMr4PX1gWptgkzRjVZCrLmSGcZCb/ github.com/ipld/go-ipld-prime v0.11.0/go.mod h1:+WIAkokurHmZ/KwzDOMUuoeJgaRQktHtEaLglS3ZeV8= github.com/ipld/go-ipld-prime v0.14.0/go.mod h1:9ASQLwUFLptCov6lIYc70GRB4V7UTyLD0IJtrDJe6ZM= github.com/ipld/go-ipld-prime v0.19.0/go.mod h1:Q9j3BaVXwaA3o5JUDNvptDDr/x8+F7FG6XJ8WI3ILg4= -github.com/ipld/go-ipld-prime v0.20.0 h1:Ud3VwE9ClxpO2LkCYP7vWPc0Fo+dYdYzgxUJZ3uRG4g= -github.com/ipld/go-ipld-prime v0.20.0/go.mod h1:PzqZ/ZR981eKbgdr3y2DJYeD/8bgMawdGVlJDE8kK+M= +github.com/ipld/go-ipld-prime v0.21.0 h1:n4JmcpOlPDIxBcY037SVfpd1G+Sj1nKZah0m6QH9C2E= +github.com/ipld/go-ipld-prime v0.21.0/go.mod h1:3RLqy//ERg/y5oShXXdx5YIp50cFGOanyMctpPjsvxQ= github.com/ipld/go-ipld-prime-proto v0.0.0-20191113031812-e32bd156a1e5/go.mod h1:gcvzoEDBjwycpXt3LBE061wT9f46szXGHAmj9uoP6fU= github.com/ipld/go-ipld-prime/storage/bsadapter v0.0.0-20211210234204-ce2a1c70cd73/go.mod h1:2PJ0JgxyB08t0b2WKrcuqI3di0V+5n6RS/LTUJhkoxY= github.com/ipld/go-ipld-prime/storage/bsadapter v0.0.0-20230102063945-1a409dc236dd h1:gMlw/MhNr2Wtp5RwGdsW23cs+yCuj9k2ON7i9MiJlRo= @@ -940,13 +948,13 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23 h1:FOOIBWrEkLgmlgGfMuZT83xIwfPDxEI2OHu6xUmJMFE= github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= -github.com/klauspost/compress v1.16.7 h1:2mk3MPGNzKyxErAw8YaohYh69+pa4sIQSC0fPGCFR9I= -github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/klauspost/compress v1.17.6 h1:60eq2E/jlfwQXtvZEeBUYADs+BwKBWURIY+Gj2eRGjI= +github.com/klauspost/compress v1.17.6/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.0.6/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= -github.com/klauspost/cpuid/v2 v2.2.6 
h1:ndNyv040zDGIDh8thGkXYjnFtiN02M1PVVF+JE/48xc= -github.com/klauspost/cpuid/v2 v2.2.6/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= +github.com/klauspost/cpuid/v2 v2.2.7 h1:ZWSB3igEs+d0qvnxR/ZBzXVmxkgt8DdzP6m9pfuVLDM= +github.com/klauspost/cpuid/v2 v2.2.7/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= github.com/koalacxr/quantile v0.0.1 h1:wAW+SQ286Erny9wOjVww96t8ws+x5Zj6AKHDULUK+o0= github.com/koalacxr/quantile v0.0.1/go.mod h1:bGN/mCZLZ4lrSDHRQ6Lglj9chowGux8sGUIND+DQeD0= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -997,10 +1005,10 @@ github.com/libp2p/go-libp2p v0.7.0/go.mod h1:hZJf8txWeCduQRDC/WSqBGMxaTHCOYHt2xS github.com/libp2p/go-libp2p v0.7.4/go.mod h1:oXsBlTLF1q7pxr+9w6lqzS1ILpyHsaBPniVO7zIHGMw= github.com/libp2p/go-libp2p v0.8.1/go.mod h1:QRNH9pwdbEBpx5DTJYg+qxcVaDMAz3Ee/qDKwXujH5o= github.com/libp2p/go-libp2p v0.14.3/go.mod h1:d12V4PdKbpL0T1/gsUNN8DfgMuRPDX8bS2QxCZlwRH0= -github.com/libp2p/go-libp2p v0.31.1 h1:mUiFPwdzC2zMLIATKVddjCuPXVbtC3BsKKVPMs4+jzY= -github.com/libp2p/go-libp2p v0.31.1/go.mod h1:+9TCv+XySSOdaxPF1WIgTK8rXP9jBb8WbemlMCSXGsU= -github.com/libp2p/go-libp2p-asn-util v0.3.0 h1:gMDcMyYiZKkocGXDQ5nsUQyquC9+H+iLEQHwOCZ7s8s= -github.com/libp2p/go-libp2p-asn-util v0.3.0/go.mod h1:B1mcOrKUE35Xq/ASTmQ4tN3LNzVVaMNmq2NACuqyB9w= +github.com/libp2p/go-libp2p v0.33.1 h1:tvJl9b9M6nSLBtZSXSguq+/lRhRj2oLRkyhBmQNMFLA= +github.com/libp2p/go-libp2p v0.33.1/go.mod h1:zOUTMjG4I7TXwMndNyOBn/CNtVBLlvBlnxfi+8xzx+E= +github.com/libp2p/go-libp2p-asn-util v0.4.1 h1:xqL7++IKD9TBFMgnLPZR6/6iYhawHKHl950SO9L6n94= +github.com/libp2p/go-libp2p-asn-util v0.4.1/go.mod h1:d/NI6XZ9qxw67b4e+NgpQexCIiFYJjErASrYW4PFDN8= github.com/libp2p/go-libp2p-autonat v0.1.0/go.mod h1:1tLf2yXxiE/oKGtDwPYWTSYG3PtvYlJmg7NeVtPRqH8= github.com/libp2p/go-libp2p-autonat v0.1.1/go.mod h1:OXqkeGOY2xJVWKAGV2inNF5aKN/djNA3fdpCWloIudE= github.com/libp2p/go-libp2p-autonat v0.2.0/go.mod h1:DX+9teU4pEEoZUqR1PiMlqliONQdNbfzE1C718tcViI= @@ -1044,10 +1052,10 @@ github.com/libp2p/go-libp2p-discovery v0.3.0/go.mod h1:o03drFnz9BVAZdzC/QUQ+NeQO github.com/libp2p/go-libp2p-discovery v0.5.0/go.mod h1:+srtPIU9gDaBNu//UHvcdliKBIcr4SfDcm0/PfPJLug= github.com/libp2p/go-libp2p-gostream v0.6.0 h1:QfAiWeQRce6pqnYfmIVWJFXNdDyfiR/qkCnjyaZUPYU= github.com/libp2p/go-libp2p-gostream v0.6.0/go.mod h1:Nywu0gYZwfj7Jc91PQvbGU8dIpqbQQkjWgDuOrFaRdA= -github.com/libp2p/go-libp2p-kad-dht v0.24.0 h1:nZnFDQEFU4N8GzclnR+IGxIgR7k4PPCDk/GK9A28onk= -github.com/libp2p/go-libp2p-kad-dht v0.24.0/go.mod h1:lfu5T01EH+r6uDZ/8G+ObhwgzVyd0b1nb54AdT8XGhc= -github.com/libp2p/go-libp2p-kbucket v0.6.1 h1:Y/NIvALuY5/fJlOpaJor9Azg4eor15JskGs9Lb2EhH0= -github.com/libp2p/go-libp2p-kbucket v0.6.1/go.mod h1:dvWO707Oq/vhMVuUhyfLkw0QsOrJFETepbNfpVHSELI= +github.com/libp2p/go-libp2p-kad-dht v0.25.2 h1:FOIk9gHoe4YRWXTu8SY9Z1d0RILol0TrtApsMDPjAVQ= +github.com/libp2p/go-libp2p-kad-dht v0.25.2/go.mod h1:6za56ncRHYXX4Nc2vn8z7CZK0P4QiMcrn77acKLM2Oo= +github.com/libp2p/go-libp2p-kbucket v0.6.3 h1:p507271wWzpy2f1XxPzCQG9NiN6R6lHL9GiSErbQQo0= +github.com/libp2p/go-libp2p-kbucket v0.6.3/go.mod h1:RCseT7AH6eJWxxk2ol03xtP9pEHetYSPXOaJnOiD8i0= github.com/libp2p/go-libp2p-loggables v0.1.0/go.mod h1:EyumB2Y6PrYjr55Q3/tiJ/o3xoDasoRYM7nOzEpoa90= github.com/libp2p/go-libp2p-mplex v0.2.0/go.mod h1:Ejl9IyjvXJ0T9iqUTE1jpYATQ9NM3g+OtR+EMMODbKo= github.com/libp2p/go-libp2p-mplex v0.2.1/go.mod h1:SC99Rxs8Vuzrf/6WhmH41kNn13TiYdAWNYHrwImKLnE= @@ -1069,14 +1077,14 @@ github.com/libp2p/go-libp2p-peerstore v0.2.2/go.mod 
h1:NQxhNjWxf1d4w6PihR8btWIRj github.com/libp2p/go-libp2p-peerstore v0.2.6/go.mod h1:ss/TWTgHZTMpsU/oKVVPQCGuDHItOpf2W8RxAi50P2s= github.com/libp2p/go-libp2p-peerstore v0.2.7/go.mod h1:ss/TWTgHZTMpsU/oKVVPQCGuDHItOpf2W8RxAi50P2s= github.com/libp2p/go-libp2p-pnet v0.2.0/go.mod h1:Qqvq6JH/oMZGwqs3N1Fqhv8NVhrdYcO0BW4wssv21LA= -github.com/libp2p/go-libp2p-pubsub v0.9.3 h1:ihcz9oIBMaCK9kcx+yHWm3mLAFBMAUsM4ux42aikDxo= -github.com/libp2p/go-libp2p-pubsub v0.9.3/go.mod h1:RYA7aM9jIic5VV47WXu4GkcRxRhrdElWf8xtyli+Dzc= +github.com/libp2p/go-libp2p-pubsub v0.10.0 h1:wS0S5FlISavMaAbxyQn3dxMOe2eegMfswM471RuHJwA= +github.com/libp2p/go-libp2p-pubsub v0.10.0/go.mod h1:1OxbaT/pFRO5h+Dpze8hdHQ63R0ke55XTs6b6NwLLkw= github.com/libp2p/go-libp2p-quic-transport v0.10.0/go.mod h1:RfJbZ8IqXIhxBRm5hqUEJqjiiY8xmEuq3HUDS993MkA= github.com/libp2p/go-libp2p-record v0.1.0/go.mod h1:ujNc8iuE5dlKWVy6wuL6dd58t0n7xI4hAIl8pE6wu5Q= github.com/libp2p/go-libp2p-record v0.2.0 h1:oiNUOCWno2BFuxt3my4i1frNrt7PerzB3queqa1NkQ0= github.com/libp2p/go-libp2p-record v0.2.0/go.mod h1:I+3zMkvvg5m2OcSdoL0KPljyJyvNDFGKX7QdlpYUcwk= -github.com/libp2p/go-libp2p-routing-helpers v0.7.0 h1:sirOYVD0wGWjkDwHZvinunIpaqPLBXkcnXApVHwZFGA= -github.com/libp2p/go-libp2p-routing-helpers v0.7.0/go.mod h1:R289GUxUMzRXIbWGSuUUTPrlVJZ3Y/pPz495+qgXJX8= +github.com/libp2p/go-libp2p-routing-helpers v0.7.3 h1:u1LGzAMVRK9Nqq5aYDVOiq/HaB93U9WWczBzGyAC5ZY= +github.com/libp2p/go-libp2p-routing-helpers v0.7.3/go.mod h1:cN4mJAD/7zfPKXBcs9ze31JGYAZgzdABEm+q/hkswb8= github.com/libp2p/go-libp2p-secio v0.1.0/go.mod h1:tMJo2w7h3+wN4pgU2LSYeiKPrfqBgkOsdiKK77hE7c8= github.com/libp2p/go-libp2p-secio v0.2.0/go.mod h1:2JdZepB8J5V9mBp79BmwsaPQhRPNN2NrnB2lKQcdy6g= github.com/libp2p/go-libp2p-secio v0.2.1/go.mod h1:cWtZpILJqkqrSkiYcDBh5lA3wbT2Q+hz3rJQq3iftD8= @@ -1217,8 +1225,8 @@ github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hd github.com/mattn/go-isatty v0.0.13/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA= -github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-runewidth v0.0.10/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk= @@ -1228,16 +1236,14 @@ github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwpU1Y= github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= -github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= github.com/microcosm-cc/bluemonday 
v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.12/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.28/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= -github.com/miekg/dns v1.1.55 h1:GoQ4hpsj0nFLYe+bWiCToyrBEJXkQfOOIvFGFy0lEgo= -github.com/miekg/dns v1.1.55/go.mod h1:uInx36IzPl7FYnDcMeVWxj9byh7DutNykX4G9Sj60FY= +github.com/miekg/dns v1.1.58 h1:ca2Hdkz+cDg/7eNF6V56jjzuZ4aCAE+DbVkILdQWG/4= +github.com/miekg/dns v1.1.58/go.mod h1:Ypv+3b/KadlvW9vJfXOTf300O4UqaHFzFCuHz+rPkBY= github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c h1:bzE/A84HN25pxAuk9Eej1Kz9OUelF97nAc82bDquQI8= github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c/go.mod h1:0SQS9kMwD2VsyFEB++InYyBJroV/FRmBgcydeSUcJms= github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b h1:z78hV3sbSMAUoyUMM0I83AUIT6Hu17AWfgjzIbtrYFc= @@ -1339,8 +1345,8 @@ github.com/multiformats/go-multistream v0.1.0/go.mod h1:fJTiDfXJVmItycydCnNx4+wS github.com/multiformats/go-multistream v0.1.1/go.mod h1:KmHZ40hzVxiaiwlj3MEbYgK9JFk2/9UktWZAF54Du38= github.com/multiformats/go-multistream v0.2.1/go.mod h1:5GZPQZbkWOLOn3J2y4Y99vVW7vOfsAflxARk3x14o6k= github.com/multiformats/go-multistream v0.2.2/go.mod h1:UIcnm7Zuo8HKG+HkWgfQsGL+/MIEhyTqbODbIUwSXKs= -github.com/multiformats/go-multistream v0.4.1 h1:rFy0Iiyn3YT0asivDUIR05leAdwZq3de4741sbiSdfo= -github.com/multiformats/go-multistream v0.4.1/go.mod h1:Mz5eykRVAjJWckE2U78c6xqdtyNUEhKSM0Lwar2p77Q= +github.com/multiformats/go-multistream v0.5.0 h1:5htLSLl7lvJk3xx3qT/8Zm9J4K8vEOf/QGkvOGQAyiE= +github.com/multiformats/go-multistream v0.5.0/go.mod h1:n6tMZiwiP2wUsR8DgfDWw1dydlEqV3l6N3/GBsX6ILA= github.com/multiformats/go-varint v0.0.1/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= github.com/multiformats/go-varint v0.0.2/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= github.com/multiformats/go-varint v0.0.5/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= @@ -1373,25 +1379,24 @@ github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA= github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= -github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= -github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= -github.com/onsi/ginkgo/v2 v2.11.0 h1:WgqUCUt/lT6yXoQ8Wef0fsNn5cAuMK7+KT9UFRz2tcU= -github.com/onsi/ginkgo/v2 v2.11.0/go.mod h1:ZhrRA5XmEE3x3rhlzamx/JJvujdZoJ2uvgI7kR0iZvM= +github.com/onsi/ginkgo/v2 v2.15.0 h1:79HwNRBAZHOEwrczrgSOPy+eFTTlIGELKy5as+ClttY= +github.com/onsi/ginkgo/v2 v2.15.0/go.mod h1:HlxMHtYF57y6Dpf+mc5529KKmSq9h2FpCF+/ZkwUxKM= github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega 
v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.27.8 h1:gegWiwZjBsf2DgiSbf5hpokZ98JVDMcWkUiigk6/KXc= -github.com/onsi/gomega v1.27.8/go.mod h1:2J8vzI/s+2shY9XHRApDkdgPo1TKT7P2u6fXeJKFnNQ= +github.com/onsi/gomega v1.30.0 h1:hvMK7xYz4D3HapigLTeGdId/NcfQx1VHMJc60ew99+8= +github.com/onsi/gomega v1.30.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= github.com/open-rpc/meta-schema v0.0.0-20201029221707-1b72ef2ea333 h1:CznVS40zms0Dj5he4ERo+fRPtO0qxUk8lA8Xu3ddet0= github.com/open-rpc/meta-schema v0.0.0-20201029221707-1b72ef2ea333/go.mod h1:Ag6rSXkHIckQmjFBCweJEEt1mrTPBv8b9W4aU/NQWfI= github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/runtime-spec v1.1.0 h1:HHUyrt9mwHUjtasSbXSMvs4cyFxh+Bll4AjJ9odEGpg= -github.com/opencontainers/runtime-spec v1.1.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk= +github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e h1:4cPxUYdgaGzZIT5/j0IfqOrrXmq6bG8AwvwisMXpdrg= github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e/go.mod h1:DYR5Eij8rJl8h7gblRrOZ8g0kW1umSpKqYIBTgeDtLo= github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= @@ -1445,16 +1450,16 @@ github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqr github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= github.com/prometheus/client_golang v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ= -github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= -github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= +github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk= +github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= -github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= +github.com/prometheus/client_model v0.6.0 h1:k1v3CzpSRUTrKMppY35TLwPvxHqBu0bYgxZzqGIgaos= +github.com/prometheus/client_model v0.6.0/go.mod 
h1:NTQHnmxFpouOD0DpvP4XujX3CdOAGQPoaGhyTchlyt8= github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= @@ -1466,8 +1471,8 @@ github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9 github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.35.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= -github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= -github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= +github.com/prometheus/common v0.47.0 h1:p5Cz0FNHo7SnWOmWmoRozVcjEp0bIVU8cV7OShpjL1k= +github.com/prometheus/common v0.47.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc= github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= @@ -1479,20 +1484,18 @@ github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= -github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg= -github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= +github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= +github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= github.com/prometheus/statsd_exporter v0.22.7 h1:7Pji/i2GuhK6Lu7DHrtTkFmNBCudCPT1pX2CziuyQR0= github.com/prometheus/statsd_exporter v0.22.7/go.mod h1:N/TevpjkIh9ccs6nuzY3jQn9dFqnUakOjnEuMPJJJnI= github.com/puzpuzpuz/xsync/v2 v2.4.0 h1:5sXAMHrtx1bg9nbRZTOn8T4MkWe5V+o8yKRH02Eznag= github.com/puzpuzpuz/xsync/v2 v2.4.0/go.mod h1:gD2H2krq/w52MfPLE+Uy64TzJDVY7lP2znR9qmR35kU= github.com/quic-go/qpack v0.4.0 h1:Cr9BXA1sQS2SmDUWjSofMPNKmvF6IiIfDRmgU0w1ZCo= github.com/quic-go/qpack v0.4.0/go.mod h1:UZVnYIfi5GRk+zI9UMaCPsmZ2xKJP7XBUvVyT1Knj9A= -github.com/quic-go/qtls-go1-20 v0.3.3 h1:17/glZSLI9P9fDAeyCHBFSWSqJcwx1byhLwP5eUIDCM= -github.com/quic-go/qtls-go1-20 v0.3.3/go.mod h1:X9Nh97ZL80Z+bX/gUXMbipO6OxdiDi58b/fMC9mAL+k= -github.com/quic-go/quic-go v0.38.2 h1:VWv/6gxIoB8hROQJhx1JEyiegsUQ+zMN3em3kynTGdg= -github.com/quic-go/quic-go v0.38.2/go.mod h1:ijnZM7JsFIkp4cRyjxJNIzdSfCLmUMg9wdyhGmg+SN4= -github.com/quic-go/webtransport-go v0.5.3 h1:5XMlzemqB4qmOlgIus5zB45AcZ2kCgCy2EptUrfOPWU= -github.com/quic-go/webtransport-go v0.5.3/go.mod h1:OhmmgJIzTTqXK5xvtuX0oBpLV2GkLWNDA+UeTGJXErU= +github.com/quic-go/quic-go v0.42.0 h1:uSfdap0eveIl8KXnipv9K7nlwZ5IqLlYOpJ58u5utpM= +github.com/quic-go/quic-go v0.42.0/go.mod h1:132kz4kL3F9vxhW3CtQJLDVwcFe5wdWeJXXijhsO57M= +github.com/quic-go/webtransport-go v0.6.0 
h1:CvNsKqc4W2HljHJnoT+rMmbRJybShZ0YPFDD3NxaZLY= +github.com/quic-go/webtransport-go v0.6.0/go.mod h1:9KjU4AEBqEQidGHNDkZrb8CAa1abRaosM2yGOyiikEc= github.com/raulk/clock v1.1.0 h1:dpb29+UKMbLqiU/jqIJptgLR1nn23HLgMY0sTCDza5Y= github.com/raulk/clock v1.1.0/go.mod h1:3MpVxdZ/ODBQDxbN+kzshf5OSZwPjtMDx6BBXBmOeY0= github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtBsk= @@ -1517,8 +1520,8 @@ github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd/go.mod h1:hPqNNc0+uJM6H+SuU8sEs5K5IQeKccPqeSjfgcKGgPk= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/samber/lo v1.38.1 h1:j2XEAqXKb09Am4ebOg31SpvzUTTs6EN3VfgeLUhPdXM= -github.com/samber/lo v1.38.1/go.mod h1:+m/ZKRl6ClXCE2Lgf3MsQlWfh4bn1bz6CXEOxnEXnEA= +github.com/samber/lo v1.39.0 h1:4gTz1wUhNYLhFSKl6O+8peW0v2F4BCY034GRpU9WnuA= +github.com/samber/lo v1.39.0/go.mod h1:+m/ZKRl6ClXCE2Lgf3MsQlWfh4bn1bz6CXEOxnEXnEA= github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/sercand/kuberesolver/v4 v4.0.0 h1:frL7laPDG/lFm5n98ODmWnn+cvPpzlkf3LhzuPhcHP4= @@ -1594,8 +1597,9 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+ github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.3.1-0.20190311161405-34c6fa2dc709/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= @@ -1606,8 +1610,8 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/stvp/go-udp-testing v0.0.0-20201019212854-469649b16807/go.mod h1:7jxmlfBCDBXRzr0eAQJ48XC1hBu1np4CS5+cHEYfwpc= github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= @@ -1649,8 +1653,8 @@ 
github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49u github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM= github.com/warpfork/go-testmark v0.3.0/go.mod h1:jhEf8FVxd+F17juRubpmut64NEG6I2rgkUhlcqqXwE0= github.com/warpfork/go-testmark v0.10.0/go.mod h1:jhEf8FVxd+F17juRubpmut64NEG6I2rgkUhlcqqXwE0= -github.com/warpfork/go-testmark v0.11.0 h1:J6LnV8KpceDvo7spaNU4+DauH2n1x+6RaO2rJrmpQ9U= -github.com/warpfork/go-testmark v0.11.0/go.mod h1:jhEf8FVxd+F17juRubpmut64NEG6I2rgkUhlcqqXwE0= +github.com/warpfork/go-testmark v0.12.1 h1:rMgCpJfwy1sJ50x0M0NgyphxYYPMOODIJHhsXyEHU0s= +github.com/warpfork/go-testmark v0.12.1/go.mod h1:kHwy7wfvGSPh1rQJYKayD4AbtNaeyZdcGi9tNJTaa5Y= github.com/warpfork/go-wish v0.0.0-20180510122957-5ad1f5abf436/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= github.com/warpfork/go-wish v0.0.0-20190328234359-8b3e70f8e830/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= github.com/warpfork/go-wish v0.0.0-20200122115046-b9ea61034e4a/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= @@ -1660,6 +1664,8 @@ github.com/weaveworks/common v0.0.0-20230531151736-e2613bee6b73 h1:CMM9+/AgM77va github.com/weaveworks/common v0.0.0-20230531151736-e2613bee6b73/go.mod h1:rgbeLfJUtEr+G74cwFPR1k/4N0kDeaeSv/qhUNE4hm8= github.com/weaveworks/promrus v1.2.0 h1:jOLf6pe6/vss4qGHjXmGz4oDJQA+AOCqEL3FvvZGz7M= github.com/weaveworks/promrus v1.2.0/go.mod h1:SaE82+OJ91yqjrE1rsvBWVzNZKcHYFtMUyS1+Ogs/KA= +github.com/whyrusleeping/base32 v0.0.0-20170828182744-c30ac30633cc h1:BCPnHtcboadS0DvysUuJXZ4lWVv5Bh5i7+tbIyi+ck4= +github.com/whyrusleeping/base32 v0.0.0-20170828182744-c30ac30633cc/go.mod h1:r45hJU7yEoA81k6MWNhpMj/kms0n14dkzkxYHoB96UM= github.com/whyrusleeping/bencher v0.0.0-20190829221104-bb6607aa8bba h1:X4n8JG2e2biEZZXdBKt9HX7DN3bYGFUqljqqy0DqgnY= github.com/whyrusleeping/bencher v0.0.0-20190829221104-bb6607aa8bba/go.mod h1:CHQnYnQUEPydYCwuy8lmTHfGmdw9TKrhWV0xLx8l0oM= github.com/whyrusleeping/cbor v0.0.0-20171005072247-63513f603b11 h1:5HZfQkwe0mIfyDmc1Em5GqlNRzcdtlv4HTNmdpt7XH0= @@ -1741,24 +1747,24 @@ go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo= -go.opentelemetry.io/otel v1.16.0 h1:Z7GVAX/UkAXPKsy94IU+i6thsQS4nb7LviLpnaNeW8s= -go.opentelemetry.io/otel v1.16.0/go.mod h1:vl0h9NUa1D5s1nv3A5vZOYWn8av4K8Ml6JDeHrT/bx4= +go.opentelemetry.io/otel v1.21.0 h1:hzLeKBZEL7Okw2mGzZ0cc4k/A7Fta0uoPgaJCr8fsFc= +go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo= go.opentelemetry.io/otel/bridge/opencensus v0.39.0 h1:YHivttTaDhbZIHuPlg1sWsy2P5gj57vzqPfkHItgbwQ= go.opentelemetry.io/otel/bridge/opencensus v0.39.0/go.mod h1:vZ4537pNjFDXEx//WldAR6Ro2LC8wwmFC76njAXwNPE= go.opentelemetry.io/otel/exporters/jaeger v1.14.0 h1:CjbUNd4iN2hHmWekmOqZ+zSCU+dzZppG8XsV+A3oc8Q= go.opentelemetry.io/otel/exporters/jaeger v1.14.0/go.mod h1:4Ay9kk5vELRrbg5z4cpP9EtmQRFap2Wb0woPG4lujZA= go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU= -go.opentelemetry.io/otel/metric v1.16.0 h1:RbrpwVG1Hfv85LgnZ7+txXioPDoh6EdbZHo26Q3hqOo= -go.opentelemetry.io/otel/metric v1.16.0/go.mod h1:QE47cpOmkwipPiefDwo2wDzwJrlfxxNYodqc4xnGCo4= +go.opentelemetry.io/otel/metric v1.21.0 h1:tlYWfeo+Bocx5kLEloTjbcDwBuELRrIFxwdQ36PlJu4= 
+go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM= go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw= go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc= -go.opentelemetry.io/otel/sdk v1.16.0 h1:Z1Ok1YsijYL0CSJpHt4cS3wDDh7p572grzNrBMiMWgE= -go.opentelemetry.io/otel/sdk v1.16.0/go.mod h1:tMsIuKXuuIWPBAOrH+eHtvhTL+SntFtXF9QD68aP6p4= +go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8= +go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E= go.opentelemetry.io/otel/sdk/metric v0.39.0 h1:Kun8i1eYf48kHH83RucG93ffz0zGV1sh46FAScOTuDI= go.opentelemetry.io/otel/sdk/metric v0.39.0/go.mod h1:piDIRgjcK7u0HCL5pCA4e74qpK/jk3NiUoAHATVAmiI= go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw= -go.opentelemetry.io/otel/trace v1.16.0 h1:8JRpaObFoW0pxuVPapkgH8UhHQj+bJW8jJsCZEu5MQs= -go.opentelemetry.io/otel/trace v1.16.0/go.mod h1:Yt9vYq1SdNz3xdjZZK7wcXv1qv2pwLkqr2QVwea0ef0= +go.opentelemetry.io/otel/trace v1.21.0 h1:WD9i5gzvoUPuXIXH24ZNBudiarZDKuekPqi/E8fpfLc= +go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= @@ -1766,14 +1772,16 @@ go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= -go.uber.org/dig v1.17.0 h1:5Chju+tUvcC+N7N6EV08BJz41UZuO3BmHcN4A287ZLI= -go.uber.org/dig v1.17.0/go.mod h1:rTxpf7l5I0eBTlE6/9RL+lDybC7WFwY2QH55ZSjy1mU= -go.uber.org/fx v1.20.0 h1:ZMC/pnRvhsthOZh9MZjMq5U8Or3mA9zBSPaLnzs3ihQ= -go.uber.org/fx v1.20.0/go.mod h1:qCUj0btiR3/JnanEr1TYEePfSw6o/4qYJscgvzQ5Ub0= +go.uber.org/dig v1.17.1 h1:Tga8Lz8PcYNsWsyHMZ1Vm0OQOUaJNDyvPImgbAu9YSc= +go.uber.org/dig v1.17.1/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE= +go.uber.org/fx v1.20.1 h1:zVwVQGS8zYvhh9Xxcu4w1M6ESyeMzebzj2NbSayZ4Mk= +go.uber.org/fx v1.20.1/go.mod h1:iSYNbHf2y55acNCwCXKx7LbWb5WG1Bnue5RDXz1OREg= go.uber.org/goleak v1.0.0/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= -go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= -go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/mock v0.4.0 h1:VcM4ZOtdbR4f6VXfiOpwpVJDL6lCReaZ6mw31wqh7KU= +go.uber.org/mock v0.4.0/go.mod h1:a6FSlNadKUHUa9IP5Vyt1zh4fC7uAwxMutEAscFbkZc= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= @@ -1789,8 +1797,8 @@ go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= go.uber.org/zap v1.15.0/go.mod 
h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ= go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= -go.uber.org/zap v1.25.0 h1:4Hvk6GtkucQ790dqmj7l1eEnRdKm3k3ZUrUMS2d5+5c= -go.uber.org/zap v1.25.0/go.mod h1:JIAUzQIH94IC4fOJQm7gMmBJP5k7wQfdcnYdPoEXJYk= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= go4.org v0.0.0-20200411211856-f5505b9728dd/go.mod h1:CIiUVy99QCPfoE13bO4EZaz5GZMZXMSBGhxRdsvzbkg= go4.org v0.0.0-20230225012048-214862532bf5 h1:nifaUDeh+rPaBCMPMQHZmvJf+QdpLFnuQPwx+LxVmtc= @@ -1831,8 +1839,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= -golang.org/x/crypto v0.18.0 h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc= -golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= +golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo= +golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -1846,8 +1854,8 @@ golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EH golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/exp v0.0.0-20210615023648-acb5c1269671/go.mod h1:DVyR6MI7P4kEQgvZJSj1fQGrWIi2RzIrfYWycwheUAc= golang.org/x/exp v0.0.0-20210714144626-1041f73d31d8/go.mod h1:DVyR6MI7P4kEQgvZJSj1fQGrWIi2RzIrfYWycwheUAc= -golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63 h1:m64FZMko/V45gv0bNmrNYoDEq8U5YUhetc9cBWKS1TQ= -golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63/go.mod h1:0v4NqG35kSWCMzLaMeX+IQrlSnVE/bqGSyC2cz/9Le8= +golang.org/x/exp v0.0.0-20240213143201-ec583247a57a h1:HinSgX1tJRX3KsL//Gxynpw5CTOAIPhgL4W8PNiIpVE= +golang.org/x/exp v0.0.0-20240213143201-ec583247a57a/go.mod h1:CxmFvTBINI24O/j8iY7H1xHzx2i4OsyguNBmN/uPtqc= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -1874,8 +1882,8 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc= -golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.15.0 
h1:SernR4v+D55NyBH2QiEQrlBAnj1ECL6AGrA5+dPaMY8= +golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1939,8 +1947,8 @@ golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= -golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14= -golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= +golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= +golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -1965,8 +1973,8 @@ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= -golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= +golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180202135801-37707fdb30a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -2066,8 +2074,8 @@ golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU= -golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= +golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= @@ -2076,8 +2084,8 @@ golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term 
v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= -golang.org/x/term v0.16.0 h1:m+B6fahuftsE9qjo0VWp2FW0mB3MTJvR0BaMQrq0pmE= -golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= +golang.org/x/term v0.17.0 h1:mkTF7LCd6WGJNL3K1Ad7kwxNfYAW6a8a8QqtMblp/4U= +golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -2094,8 +2102,8 @@ golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20220722155302-e5dcc9cfc0b9 h1:ftMN5LMiBFjbzleLqtoBZk7KdJwhuybIU+FckUHgoyQ= -golang.org/x/time v0.0.0-20220722155302-e5dcc9cfc0b9/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= +golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -2157,16 +2165,16 @@ golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846 h1:Vve/L0v7CXXuxUmaMGIEK/dEeq7uiqb5qBgQrZzIE7E= -golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM= +golang.org/x/tools v0.18.0 h1:k8NLag8AGHnn+PHbl7g43CtqZAwG60vZkLqgyZgIHgQ= +golang.org/x/tools v0.18.0/go.mod h1:GL7B4CwcLLeo59yx/9UWWuNOW1n3VZ4f5axWfML7Lcg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk= -golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -gonum.org/v1/gonum v0.13.0 h1:a0T3bh+7fhRyqeNbiC3qVHYmkiQgit3wnNan/2c0HMM= -gonum.org/v1/gonum v0.13.0/go.mod h1:/WPYRckkfWrhWefxyYTfrTtQR0KH4iyHNuzxqXAKyAU= +golang.org/x/xerrors 
v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU= +golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= +gonum.org/v1/gonum v0.14.0 h1:2NiG67LD1tEH0D7kM+ps2V+fXmsAnpUeec7n8tcr4S0= +gonum.org/v1/gonum v0.14.0/go.mod h1:AoWeoz0becf9QMWtE8iWXNXc27fK4fNeHNf/oMejGfU= google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y= @@ -2229,12 +2237,12 @@ google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7Fc google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20230530153820-e85fd2cbaebc h1:8DyZCyvI8mE1IdLy/60bS+52xfymkE72wv1asokgtao= -google.golang.org/genproto v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:xZnkP7mREFX5MORlOPEzLMr+90PPZQ2QWzrVTWfAq64= -google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc h1:kVKPf/IiYSBWEWtkIn6wZXwWGCnLKcC8oWfZvXjsGnM= -google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc h1:XSJ8Vk1SWuNr8S18z1NZSziL0CPIXLCCMDOEFtHBOFc= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= +google.golang.org/genproto v0.0.0-20240102182953-50ed04b92917 h1:nz5NESFLZbJGPFxDT/HCn+V1mZ8JGNoY4nUpmW/Y2eg= +google.golang.org/genproto v0.0.0-20240102182953-50ed04b92917/go.mod h1:pZqR+glSb11aJ+JQcczCvgf47+duRuzNSKqE8YAQnV0= +google.golang.org/genproto/googleapis/api v0.0.0-20240108191215-35c7eff3a6b1 h1:OPXtXn7fNMaXwO3JvOmF1QyTc00jsSFFz1vXXBOdCDo= +google.golang.org/genproto/googleapis/api v0.0.0-20240108191215-35c7eff3a6b1/go.mod h1:B5xPO//w8qmBDjGReYLpR6UJPnkldGkCSMoH/2vxJeg= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240108191215-35c7eff3a6b1 h1:gphdwh0npgs8elJ4T6J+DQJHPVF7RsuJHCfwztUb4J4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240108191215-35c7eff3a6b1/go.mod h1:daQN87bsDqDoe316QbbvX60nMoJQa4r6Ds0ZuoAe5yA= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= @@ -2257,8 +2265,8 @@ google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.55.0 h1:3Oj82/tFSCeUrRTg/5E/7d/W5A1tj6Ky1ABAuZuv5ag= -google.golang.org/grpc v1.55.0/go.mod h1:iYEXKGkEBhg1PjZQvoYEVPTDkHo1/bjTnfwTeGONTY8= +google.golang.org/grpc v1.60.1 h1:26+wFr+cNqSGFcOXcabYC0lUVJVRa2Sb2ortSK7VrEU= +google.golang.org/grpc v1.60.1/go.mod 
h1:OlCHIeLYqSSsLi6i49B5QGdzaMZK9+M7LXN2FKz4eGM= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -2274,8 +2282,8 @@ google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQ google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= -google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I= +google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/itests/cli_test.go b/itests/cli_test.go index a323c0863d4..d2a0876356b 100644 --- a/itests/cli_test.go +++ b/itests/cli_test.go @@ -6,7 +6,7 @@ import ( "testing" "time" - "github.com/filecoin-project/lotus/cli" + "github.com/filecoin-project/lotus/cli/clicommands" "github.com/filecoin-project/lotus/itests/kit" ) @@ -23,5 +23,5 @@ func TestClient(t *testing.T) { blockTime := 5 * time.Millisecond client, _, ens := kit.EnsembleMinimal(t, kit.MockProofs(), kit.ThroughRPC()) ens.InterconnectAll().BeginMining(blockTime) - kit.RunClientTest(t, cli.Commands, client) + kit.RunClientTest(t, clicommands.Commands, client) } diff --git a/itests/direct_data_onboard_verified_test.go b/itests/direct_data_onboard_verified_test.go index 2ac6142c0db..7415570a352 100644 --- a/itests/direct_data_onboard_verified_test.go +++ b/itests/direct_data_onboard_verified_test.go @@ -43,6 +43,8 @@ import ( "github.com/filecoin-project/lotus/storage/pipeline/piece" ) +var bogusPieceCid = cid.MustParse("baga6ea4seaaqa") + func TestOnboardRawPieceVerified_WithActorEvents(t *testing.T) { kit.QuietMiningLogs() @@ -122,11 +124,13 @@ func TestOnboardRawPieceVerified_WithActorEvents(t *testing.T) { require.NoError(t, err) /* --- Allocate datacap for the piece by the verified client --- */ - - clientId, allocationId := ddoVerifiedSetupAllocations(ctx, t, client, minerId, dc, verifiedClientAddr, true, 0) - head, err := client.ChainHead(ctx) require.NoError(t, err) + bogusAllocationExpiry := head.Height() + 100 + clientId, allocationId := ddoVerifiedSetupAllocations(ctx, t, client, minerId, dc, verifiedClientAddr, bogusAllocationExpiry, 0) + + head, err = client.ChainHead(ctx) + require.NoError(t, err) // subscribe to actor events up until the current head initialEventsChan, err := miner.FullNode.SubscribeActorEventsRaw(ctx, &types.ActorEventFilter{ @@ -212,8 +216,13 @@ func TestOnboardRawPieceVerified_WithActorEvents(t *testing.T) { require.NoError(t, err) verifierEntry := types.EventEntry{Flags: 0x03, Codec: uint64(multicodec.Cbor), Key: "verifier", Value: must.One(ipld.Encode(basicnode.NewInt(int64(verifierId)), 
dagcbor.Encode))} + require.Len(t, verifierBalanceEvents[0].Entries, 3) // $type, "verifier", "balance" require.Contains(t, verifierBalanceEvents[0].Entries, verifierEntry) + + require.Len(t, verifierBalanceEvents[1].Entries, 4) // $type, "verifier", "balance", "client" require.Contains(t, verifierBalanceEvents[1].Entries, verifierEntry) + clientEntry := types.EventEntry{Flags: 0x03, Codec: uint64(multicodec.Cbor), Key: "client", Value: must.One(ipld.Encode(basicnode.NewInt(int64(clientId)), dagcbor.Encode))} + require.Contains(t, verifierBalanceEvents[1].Entries, clientEntry) } { @@ -226,11 +235,18 @@ func TestOnboardRawPieceVerified_WithActorEvents(t *testing.T) { {Flags: 0x03, Codec: uint64(multicodec.Cbor), Key: "id", Value: must.One(ipld.Encode(basicnode.NewInt(int64(allocationId)-1), dagcbor.Encode))}, {Flags: 0x03, Codec: uint64(multicodec.Cbor), Key: "provider", Value: must.One(ipld.Encode(basicnode.NewInt(int64(minerId)), dagcbor.Encode))}, {Flags: 0x03, Codec: uint64(multicodec.Cbor), Key: "client", Value: must.One(ipld.Encode(basicnode.NewInt(int64(clientId)), dagcbor.Encode))}, + {Flags: 0x03, Codec: uint64(multicodec.Cbor), Key: "piece-cid", Value: must.One(ipld.Encode(basicnode.NewLink(cidlink.Link{Cid: bogusPieceCid}), dagcbor.Encode))}, + {Flags: 0x01, Codec: uint64(multicodec.Cbor), Key: "piece-size", Value: must.One(ipld.Encode(basicnode.NewInt(int64(pieceSize.Padded())), dagcbor.Encode))}, + {Flags: 0x01, Codec: uint64(multicodec.Cbor), Key: "term-min", Value: must.One(ipld.Encode(basicnode.NewInt(verifregtypes13.MinimumVerifiedAllocationTerm), dagcbor.Encode))}, + {Flags: 0x01, Codec: uint64(multicodec.Cbor), Key: "term-max", Value: must.One(ipld.Encode(basicnode.NewInt(verifregtypes13.MaximumVerifiedAllocationTerm), dagcbor.Encode))}, + {Flags: 0x01, Codec: uint64(multicodec.Cbor), Key: "expiration", Value: must.One(ipld.Encode(basicnode.NewInt(int64(bogusAllocationExpiry)), dagcbor.Encode))}, } require.ElementsMatch(t, expectedEntries, allocationEvents[0].Entries) // the second, real allocation - expectedEntries[1].Value = must.One(ipld.Encode(basicnode.NewInt(int64(allocationId)), dagcbor.Encode)) + expectedEntries[1].Value = must.One(ipld.Encode(basicnode.NewInt(int64(allocationId)), dagcbor.Encode)) // "id" + expectedEntries[4].Value = must.One(ipld.Encode(basicnode.NewLink(cidlink.Link{Cid: dc.PieceCID}), dagcbor.Encode)) // "piece-cid" + expectedEntries[8].Value = must.One(ipld.Encode(basicnode.NewInt(verifregtypes13.MaximumVerifiedAllocationExpiration), dagcbor.Encode)) // "expiration" require.ElementsMatch(t, expectedEntries, allocationEvents[1].Entries) } @@ -244,6 +260,11 @@ func TestOnboardRawPieceVerified_WithActorEvents(t *testing.T) { {Flags: 0x03, Codec: uint64(multicodec.Cbor), Key: "id", Value: must.One(ipld.Encode(basicnode.NewInt(int64(allocationId)-1), dagcbor.Encode))}, {Flags: 0x03, Codec: uint64(multicodec.Cbor), Key: "provider", Value: must.One(ipld.Encode(basicnode.NewInt(int64(minerId)), dagcbor.Encode))}, {Flags: 0x03, Codec: uint64(multicodec.Cbor), Key: "client", Value: must.One(ipld.Encode(basicnode.NewInt(int64(clientId)), dagcbor.Encode))}, + {Flags: 0x03, Codec: uint64(multicodec.Cbor), Key: "piece-cid", Value: must.One(ipld.Encode(basicnode.NewLink(cidlink.Link{Cid: bogusPieceCid}), dagcbor.Encode))}, + {Flags: 0x01, Codec: uint64(multicodec.Cbor), Key: "piece-size", Value: must.One(ipld.Encode(basicnode.NewInt(int64(pieceSize.Padded())), dagcbor.Encode))}, + {Flags: 0x01, Codec: uint64(multicodec.Cbor), Key: "term-min", Value: 
must.One(ipld.Encode(basicnode.NewInt(verifregtypes13.MinimumVerifiedAllocationTerm), dagcbor.Encode))}, + {Flags: 0x01, Codec: uint64(multicodec.Cbor), Key: "term-max", Value: must.One(ipld.Encode(basicnode.NewInt(verifregtypes13.MaximumVerifiedAllocationTerm), dagcbor.Encode))}, + {Flags: 0x01, Codec: uint64(multicodec.Cbor), Key: "expiration", Value: must.One(ipld.Encode(basicnode.NewInt(int64(bogusAllocationExpiry)), dagcbor.Encode))}, } require.ElementsMatch(t, expectedEntries, allocationEvents[0].Entries) } @@ -258,6 +279,12 @@ func TestOnboardRawPieceVerified_WithActorEvents(t *testing.T) { {Flags: 0x03, Codec: uint64(multicodec.Cbor), Key: "id", Value: must.One(ipld.Encode(basicnode.NewInt(int64(allocationId)), dagcbor.Encode))}, {Flags: 0x03, Codec: uint64(multicodec.Cbor), Key: "provider", Value: must.One(ipld.Encode(basicnode.NewInt(int64(minerId)), dagcbor.Encode))}, {Flags: 0x03, Codec: uint64(multicodec.Cbor), Key: "client", Value: must.One(ipld.Encode(basicnode.NewInt(int64(clientId)), dagcbor.Encode))}, + {Flags: 0x03, Codec: uint64(multicodec.Cbor), Key: "piece-cid", Value: must.One(ipld.Encode(basicnode.NewLink(cidlink.Link{Cid: dc.PieceCID}), dagcbor.Encode))}, + {Flags: 0x01, Codec: uint64(multicodec.Cbor), Key: "piece-size", Value: must.One(ipld.Encode(basicnode.NewInt(int64(pieceSize.Padded())), dagcbor.Encode))}, + {Flags: 0x01, Codec: uint64(multicodec.Cbor), Key: "term-min", Value: must.One(ipld.Encode(basicnode.NewInt(verifregtypes13.MinimumVerifiedAllocationTerm), dagcbor.Encode))}, + {Flags: 0x01, Codec: uint64(multicodec.Cbor), Key: "term-max", Value: must.One(ipld.Encode(basicnode.NewInt(verifregtypes13.MaximumVerifiedAllocationTerm), dagcbor.Encode))}, + {Flags: 0x01, Codec: uint64(multicodec.Cbor), Key: "term-start", Value: must.One(ipld.Encode(basicnode.NewInt(int64(claimEvents[0].Height)), dagcbor.Encode))}, + {Flags: 0x03, Codec: uint64(multicodec.Cbor), Key: "sector", Value: must.One(ipld.Encode(basicnode.NewInt(int64(si.SectorID)), dagcbor.Encode))}, } require.ElementsMatch(t, expectedEntries, claimEvents[0].Entries) } @@ -398,7 +425,7 @@ func ddoVerifiedSetupAllocations( minerId uint64, dc abi.PieceInfo, verifiedClientAddr address.Address, - setupBorkAlloc bool, + bogusAllocExpiration abi.ChainEpoch, // zero if we don't want to make one tmax abi.ChainEpoch, ) (clientID abi.ActorID, allocationID verifregtypes13.AllocationId) { if tmax == 0 { @@ -407,20 +434,17 @@ func ddoVerifiedSetupAllocations( var requests []verifregtypes13.AllocationRequest - // design this one to expire so we can observe allocation-removed - if setupBorkAlloc { - head, err := node.ChainHead(ctx) - require.NoError(t, err) - expiringAllocationHeight := head.Height() + 100 - allocationRequestBork := verifregtypes13.AllocationRequest{ + if bogusAllocExpiration != 0 { + // design this one to expire so we can observe allocation-removed + allocationRequestBogus := verifregtypes13.AllocationRequest{ Provider: abi.ActorID(minerId), - Data: cid.MustParse("baga6ea4seaaqa"), + Data: bogusPieceCid, Size: dc.Size, TermMin: verifregtypes13.MinimumVerifiedAllocationTerm, TermMax: tmax, - Expiration: expiringAllocationHeight, + Expiration: bogusAllocExpiration, } - requests = append(requests, allocationRequestBork) + requests = append(requests, allocationRequestBogus) } allocationRequest := verifregtypes13.AllocationRequest{ @@ -442,7 +466,7 @@ func ddoVerifiedSetupAllocations( var amt abi.TokenAmount amt = big.Mul(big.NewInt(int64(dc.Size)), builtin.TokenPrecision) - if setupBorkAlloc { + if 
bogusAllocExpiration != 0 { amt = big.Mul(big.NewInt(int64(dc.Size*2)), builtin.TokenPrecision) } @@ -471,7 +495,7 @@ func ddoVerifiedSetupAllocations( // check that we have an allocation allocations, err := node.StateGetAllocations(ctx, verifiedClientAddr, types.EmptyTSK) require.NoError(t, err) - if setupBorkAlloc { + if bogusAllocExpiration != 0 { require.Len(t, allocations, 2) // allocation waiting to be claimed } else { require.Len(t, allocations, 1) // allocation waiting to be claimed @@ -567,13 +591,13 @@ func ddoVerifiedBuildClaimsFromMessages(ctx context.Context, t *testing.T, event require.NoError(t, err) providerId = *bindnode.Unwrap(nd).(*int64) } - if isClaim && claimId != -1 && providerId != -1 { - provider, err := address.NewIDAddress(uint64(providerId)) - require.NoError(t, err) - claim, err := node.StateGetClaim(ctx, provider, verifregtypes9.ClaimId(claimId), types.EmptyTSK) - require.NoError(t, err) - claims = append(claims, claim) - } + } + if isClaim && claimId != -1 && providerId != -1 { + provider, err := address.NewIDAddress(uint64(providerId)) + require.NoError(t, err) + claim, err := node.StateGetClaim(ctx, provider, verifregtypes9.ClaimId(claimId), types.EmptyTSK) + require.NoError(t, err) + claims = append(claims, claim) } } return claims @@ -797,7 +821,7 @@ func TestVerifiedDDOExtendClaim(t *testing.T) { require.NoError(t, err) /* --- Allocate datacap for the piece by the verified client --- */ - clientId, allocationId := ddoVerifiedSetupAllocations(ctx, t, client, minerId, dc, verifiedClientAddr1, false, builtin.EpochsInYear*3) + clientId, allocationId := ddoVerifiedSetupAllocations(ctx, t, client, minerId, dc, verifiedClientAddr1, 0, builtin.EpochsInYear*3) /* --- Onboard the piece --- */ @@ -816,12 +840,11 @@ func TestVerifiedDDOExtendClaim(t *testing.T) { pcm[verifregtypes13.ClaimId(allocationId)] = prov // Extend claim with same client - msgs, err := cli.CreateExtendClaimMsg(ctx, client.FullNode, pcm, []string{}, verifiedClientAddr1, (builtin.EpochsInYear*3)+3000, false, true) + msgs, err := cli.CreateExtendClaimMsg(ctx, client.FullNode, pcm, []string{}, verifiedClientAddr1, (builtin.EpochsInYear*3)+3000, false, true, 100) require.NoError(t, err) require.NotNil(t, msgs) require.Len(t, msgs, 1) - // MpoolBatchPushMessage method will take care of gas estimation and funds check smsg, err := client.MpoolPushMessage(ctx, msgs[0], nil) require.NoError(t, err) @@ -835,11 +858,11 @@ func TestVerifiedDDOExtendClaim(t *testing.T) { require.EqualValues(t, newclaim.TermMax-oldclaim.TermMax, 3000) // Extend claim with non-verified client | should fail - _, err = cli.CreateExtendClaimMsg(ctx, client.FullNode, pcm, []string{}, unverifiedClient.Address, verifregtypes13.MaximumVerifiedAllocationTerm, false, true) + _, err = cli.CreateExtendClaimMsg(ctx, client.FullNode, pcm, []string{}, unverifiedClient.Address, verifregtypes13.MaximumVerifiedAllocationTerm, false, true, 100) require.ErrorContains(t, err, "does not have any datacap") // Extend all claim with verified client - msgs, err = cli.CreateExtendClaimMsg(ctx, client.FullNode, nil, []string{miner.ActorAddr.String()}, verifiedClientAddr2, verifregtypes13.MaximumVerifiedAllocationTerm, true, true) + msgs, err = cli.CreateExtendClaimMsg(ctx, client.FullNode, nil, []string{miner.ActorAddr.String()}, verifiedClientAddr2, verifregtypes13.MaximumVerifiedAllocationTerm, true, true, 100) require.NoError(t, err) require.Len(t, msgs, 1) smsg, err = client.MpoolPushMessage(ctx, msgs[0], nil) @@ -849,12 +872,15 @@ func 
TestVerifiedDDOExtendClaim(t *testing.T) { require.True(t, wait.Receipt.ExitCode.IsSuccess()) // Extend all claims with lower TermMax - msgs, err = cli.CreateExtendClaimMsg(ctx, client.FullNode, pcm, []string{}, verifiedClientAddr2, builtin.EpochsInYear*4, false, true) + msgs, err = cli.CreateExtendClaimMsg(ctx, client.FullNode, pcm, []string{}, verifiedClientAddr2, builtin.EpochsInYear*4, false, true, 100) require.NoError(t, err) require.Nil(t, msgs) newclaim, err = client.StateGetClaim(ctx, miner.ActorAddr, verifreg.ClaimId(allocationId), types.EmptyTSK) require.NoError(t, err) require.NotNil(t, newclaim) - require.EqualValues(t, newclaim.TermMax, verifregtypes13.MaximumVerifiedAllocationTerm) + + // TODO: check "claim-updated" event + // New TermMax should be more than 5 years + require.Greater(t, int(newclaim.TermMax), verifregtypes13.MaximumVerifiedAllocationTerm) } diff --git a/itests/gateway_test.go b/itests/gateway_test.go index d20b3bd1a09..2dc4e1034d5 100644 --- a/itests/gateway_test.go +++ b/itests/gateway_test.go @@ -24,7 +24,7 @@ import ( "github.com/filecoin-project/lotus/api/client" "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/cli" + "github.com/filecoin-project/lotus/cli/clicommands" "github.com/filecoin-project/lotus/gateway" "github.com/filecoin-project/lotus/itests/kit" "github.com/filecoin-project/lotus/itests/multisig" @@ -231,7 +231,7 @@ func TestGatewayCLIDealFlow(t *testing.T) { ctx := context.Background() nodes := startNodesWithFunds(ctx, t, blocktime, maxLookbackCap, maxStateWaitLookbackLimit) - kit.RunClientTest(t, cli.Commands, nodes.lite) + kit.RunClientTest(t, clicommands.Commands, nodes.lite) } type testNodes struct { diff --git a/itests/harmonytask_test.go b/itests/harmonytask_test.go index 463f131d8fc..beef04c8d88 100644 --- a/itests/harmonytask_test.go +++ b/itests/harmonytask_test.go @@ -90,7 +90,7 @@ func TestHarmonyTasks(t *testing.T) { e, err := harmonytask.New(cdb, []harmonytask.TaskInterface{t1}, "test:1") require.NoError(t, err) time.Sleep(time.Second) // do the work. FLAKYNESS RISK HERE. - e.GracefullyTerminate(time.Minute) + e.GracefullyTerminate() expected := []string{"taskResult56", "taskResult73"} sort.Strings(t1.WorkCompleted) require.Equal(t, expected, t1.WorkCompleted, "unexpected results") @@ -173,8 +173,8 @@ func TestHarmonyTasksWith2PartiesPolling(t *testing.T) { worker, err := harmonytask.New(cdb, []harmonytask.TaskInterface{workerParty}, "test:2") require.NoError(t, err) time.Sleep(time.Second) // do the work. FLAKYNESS RISK HERE. - sender.GracefullyTerminate(time.Second * 5) - worker.GracefullyTerminate(time.Second * 5) + sender.GracefullyTerminate() + worker.GracefullyTerminate() sort.Strings(dest) require.Equal(t, []string{"A", "B"}, dest) }) @@ -204,7 +204,7 @@ func TestWorkStealing(t *testing.T) { worker, err := harmonytask.New(cdb, []harmonytask.TaskInterface{fooLetterSaver(t, cdb, &dest)}, "test:2") require.ErrorIs(t, err, nil) time.Sleep(time.Second) // do the work. FLAKYNESS RISK HERE. 
- worker.GracefullyTerminate(time.Second * 5) + worker.GracefullyTerminate() require.Equal(t, []string{"M"}, dest) }) } @@ -243,8 +243,8 @@ func TestTaskRetry(t *testing.T) { rcv, err := harmonytask.New(cdb, []harmonytask.TaskInterface{fails2xPerMsg}, "test:2") require.NoError(t, err) time.Sleep(time.Second) - sender.GracefullyTerminate(time.Hour) - rcv.GracefullyTerminate(time.Hour) + sender.GracefullyTerminate() + rcv.GracefullyTerminate() sort.Strings(dest) require.Equal(t, []string{"A", "B"}, dest) type hist struct { diff --git a/itests/kit/ensemble.go b/itests/kit/ensemble.go index 9588d252695..03a36dc4525 100644 --- a/itests/kit/ensemble.go +++ b/itests/kit/ensemble.go @@ -913,7 +913,7 @@ func (n *Ensemble) Start() *Ensemble { if err != nil { return nil } - defer taskEngine.GracefullyTerminate(time.Hour) + defer taskEngine.GracefullyTerminate() err = rpc.ListenAndServe(ctx, p.Deps, shutdownChan) // Monitor for shutdown. require.NoError(n.t, err) diff --git a/itests/kit/node_opts.go b/itests/kit/node_opts.go index 09e78995147..1f4f9f6a4db 100644 --- a/itests/kit/node_opts.go +++ b/itests/kit/node_opts.go @@ -65,7 +65,7 @@ var DefaultNodeOpts = nodeOpts{ // test defaults cfg.Fevm.EnableEthRPC = true - cfg.Fevm.Events.MaxFilterHeightRange = math.MaxInt64 + cfg.Events.MaxFilterHeightRange = math.MaxInt64 cfg.Events.EnableActorEventsAPI = true return nil }, diff --git a/itests/multisig/suite.go b/itests/multisig/suite.go index 9a81d0bf99d..61ca68d5e9e 100644 --- a/itests/multisig/suite.go +++ b/itests/multisig/suite.go @@ -13,14 +13,14 @@ import ( "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/cli" + "github.com/filecoin-project/lotus/cli/clicommands" "github.com/filecoin-project/lotus/itests/kit" ) func RunMultisigTests(t *testing.T, client *kit.TestFullNode) { // Create mock CLI ctx := context.Background() - mockCLI := kit.NewMockCLI(ctx, t, cli.Commands, api.NodeFull) + mockCLI := kit.NewMockCLI(ctx, t, clicommands.Commands, api.NodeFull) clientCLI := mockCLI.Client(client.ListenAddr) // Create some wallets on the node to use for testing multisig diff --git a/itests/path_type_filters_test.go b/itests/path_type_filters_test.go index c668976ac2d..a2e2049323b 100644 --- a/itests/path_type_filters_test.go +++ b/itests/path_type_filters_test.go @@ -15,6 +15,7 @@ import ( ) func TestPathTypeFilters(t *testing.T) { + kit.QuietMiningLogs() runTest := func(t *testing.T, name string, asserts func(t *testing.T, ctx context.Context, miner *kit.TestMiner, run func())) { t.Run(name, func(t *testing.T) { diff --git a/itests/paych_cli_test.go b/itests/paych_cli_test.go index f86f5d8deca..1079aade9fb 100644 --- a/itests/paych_cli_test.go +++ b/itests/paych_cli_test.go @@ -25,7 +25,7 @@ import ( "github.com/filecoin-project/lotus/chain/actors/builtin/paych" "github.com/filecoin-project/lotus/chain/events" "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/cli" + "github.com/filecoin-project/lotus/cli/clicommands" "github.com/filecoin-project/lotus/itests/kit" ) @@ -51,7 +51,7 @@ func TestPaymentChannelsBasic(t *testing.T) { creatorAddr, receiverAddr := startPaychCreatorReceiverMiner(ctx, t, &paymentCreator, &paymentReceiver, blocktime) // Create mock CLI - mockCLI := kit.NewMockCLI(ctx, t, cli.Commands, api.NodeFull) + mockCLI := kit.NewMockCLI(ctx, t, clicommands.Commands, api.NodeFull) creatorCLI := mockCLI.Client(paymentCreator.ListenAddr) receiverCLI := 
mockCLI.Client(paymentReceiver.ListenAddr) @@ -126,7 +126,7 @@ func TestPaymentChannelStatus(t *testing.T) { creatorAddr, receiverAddr := startPaychCreatorReceiverMiner(ctx, t, &paymentCreator, &paymentReceiver, blocktime) // Create mock CLI - mockCLI := kit.NewMockCLI(ctx, t, cli.Commands, api.NodeFull) + mockCLI := kit.NewMockCLI(ctx, t, clicommands.Commands, api.NodeFull) creatorCLI := mockCLI.Client(paymentCreator.ListenAddr) // creator: paych status-by-from-to @@ -212,7 +212,7 @@ func TestPaymentChannelVouchers(t *testing.T) { creatorAddr, receiverAddr := startPaychCreatorReceiverMiner(ctx, t, &paymentCreator, &paymentReceiver, blocktime) // Create mock CLI - mockCLI := kit.NewMockCLI(ctx, t, cli.Commands, api.NodeFull) + mockCLI := kit.NewMockCLI(ctx, t, clicommands.Commands, api.NodeFull) creatorCLI := mockCLI.Client(paymentCreator.ListenAddr) receiverCLI := mockCLI.Client(paymentReceiver.ListenAddr) @@ -350,7 +350,7 @@ func TestPaymentChannelVoucherCreateShortfall(t *testing.T) { creatorAddr, receiverAddr := startPaychCreatorReceiverMiner(ctx, t, &paymentCreator, &paymentReceiver, blocktime) // Create mock CLI - mockCLI := kit.NewMockCLI(ctx, t, cli.Commands, api.NodeFull) + mockCLI := kit.NewMockCLI(ctx, t, clicommands.Commands, api.NodeFull) creatorCLI := mockCLI.Client(paymentCreator.ListenAddr) // creator: paych add-funds diff --git a/lib/harmony/harmonytask/harmonytask.go b/lib/harmony/harmonytask/harmonytask.go index 2d8036e2fc8..dc71b299c2d 100644 --- a/lib/harmony/harmonytask/harmonytask.go +++ b/lib/harmony/harmonytask/harmonytask.go @@ -193,22 +193,56 @@ func New( // GracefullyTerminate hangs until all present tasks have completed. // Call this to cleanly exit the process. As some processes are long-running, // passing a deadline will ignore those still running (to be picked-up later). -func (e *TaskEngine) GracefullyTerminate(deadline time.Duration) { +func (e *TaskEngine) GracefullyTerminate() { + + // call the cancel func to avoid picking up any new tasks. Running tasks have context.Background() + // Call shutdown to stop posting heartbeat to DB. 
e.grace() e.reg.Shutdown() - deadlineChan := time.NewTimer(deadline).C -top: - for _, h := range e.handlers { - if h.Count.Load() > 0 { - select { - case <-deadlineChan: - return - default: - time.Sleep(time.Millisecond) - goto top + + // If there are any Post tasks then wait till Timeout and check again + // When no Post tasks are active, break out of loop and call the shutdown function + for { + timeout := time.Millisecond + for _, h := range e.handlers { + if h.TaskTypeDetails.Name == "WinPost" && h.Count.Load() > 0 { + timeout = time.Second + log.Infof("node shutdown deferred for %f seconds", timeout.Seconds()) + continue + } + if h.TaskTypeDetails.Name == "WdPost" && h.Count.Load() > 0 { + timeout = time.Second * 3 + log.Infof("node shutdown deferred for %f seconds due to running WdPost task", timeout.Seconds()) + continue + } + + if h.TaskTypeDetails.Name == "WdPostSubmit" && h.Count.Load() > 0 { + timeout = time.Second + log.Infof("node shutdown deferred for %f seconds due to running WdPostSubmit task", timeout.Seconds()) + continue + } + + if h.TaskTypeDetails.Name == "WdPostRecover" && h.Count.Load() > 0 { + timeout = time.Second + log.Infof("node shutdown deferred for %f seconds due to running WdPostRecover task", timeout.Seconds()) + continue + } + + // Test tasks for itest + if h.TaskTypeDetails.Name == "ThingOne" && h.Count.Load() > 0 { + timeout = time.Second + log.Infof("node shutdown deferred for %f seconds due to running itest task", timeout.Seconds()) + continue } } + if timeout > time.Millisecond { + time.Sleep(timeout) + continue + } + break } + + return } func (e *TaskEngine) poller() { diff --git a/node/builder_chain.go b/node/builder_chain.go index a4a0babf736..73c9cfb1831 100644 --- a/node/builder_chain.go +++ b/node/builder_chain.go @@ -253,13 +253,13 @@ func ConfigFullNode(c interface{}) Option { // Actor event filtering support Override(new(events.EventHelperAPI), From(new(modules.EventHelperAPI))), - Override(new(*filter.EventFilterManager), modules.EventFilterManager(cfg.Fevm)), + Override(new(*filter.EventFilterManager), modules.EventFilterManager(cfg.Events)), // in lite-mode Eth api is provided by gateway ApplyIf(isFullNode, If(cfg.Fevm.EnableEthRPC, Override(new(full.EthModuleAPI), modules.EthModuleAPI(cfg.Fevm)), - Override(new(full.EthEventAPI), modules.EthEventHandler(cfg.Fevm)), + Override(new(full.EthEventAPI), modules.EthEventHandler(cfg.Events, cfg.Fevm.EnableEthRPC)), ), If(!cfg.Fevm.EnableEthRPC, Override(new(full.EthModuleAPI), &full.EthModuleDummy{}), @@ -269,7 +269,7 @@ func ConfigFullNode(c interface{}) Option { ApplyIf(isFullNode, If(cfg.Events.EnableActorEventsAPI, - Override(new(full.ActorEventAPI), modules.ActorEventHandler(cfg.Events.EnableActorEventsAPI, cfg.Fevm)), + Override(new(full.ActorEventAPI), modules.ActorEventHandler(cfg.Events)), ), If(!cfg.Events.EnableActorEventsAPI, Override(new(full.ActorEventAPI), &full.ActorEventDummy{}), diff --git a/node/config/cfgdocgen/gen.go b/node/config/cfgdocgen/gen.go index b13b7d799d0..6c7371a4082 100644 --- a/node/config/cfgdocgen/gen.go +++ b/node/config/cfgdocgen/gen.go @@ -74,6 +74,11 @@ func run() error { name := f[0] typ := f[1] + if len(comment) > 0 && strings.HasPrefix(comment[0], fmt.Sprintf("%s is DEPRECATED", name)) { + // don't document deprecated fields + continue + } + out[currentType] = append(out[currentType], field{ Name: name, Type: typ, diff --git a/node/config/def.go b/node/config/def.go index 2e5fb80327e..f725f60d36d 100644 --- a/node/config/def.go +++ b/node/config/def.go 
@@ -109,17 +109,15 @@ func DefaultFullNode() *FullNode { Fevm: FevmConfig{ EnableEthRPC: false, EthTxHashMappingLifetimeDays: 0, - Events: Events{ - DisableRealTimeFilterAPI: false, - DisableHistoricFilterAPI: false, - FilterTTL: Duration(time.Hour * 24), - MaxFilters: 100, - MaxFilterResults: 10000, - MaxFilterHeightRange: 2880, // conservative limit of one day - }, }, Events: EventsConfig{ - EnableActorEventsAPI: false, + DisableRealTimeFilterAPI: false, + DisableHistoricFilterAPI: false, + EnableActorEventsAPI: false, + FilterTTL: Duration(time.Hour * 24), + MaxFilters: 100, + MaxFilterResults: 10000, + MaxFilterHeightRange: 2880, // conservative limit of one day }, } } diff --git a/node/config/doc_gen.go b/node/config/doc_gen.go index dcb832976dd..b1b91d9c99c 100644 --- a/node/config/doc_gen.go +++ b/node/config/doc_gen.go @@ -665,13 +665,13 @@ see https://lotus.filecoin.io/storage-providers/advanced-configurations/market/# Comment: ``, }, }, - "Events": { + "EventsConfig": { { Name: "DisableRealTimeFilterAPI", Type: "bool", Comment: `DisableRealTimeFilterAPI will disable the RealTimeFilterAPI that can create and query filters for actor events as they are emitted. -The API is enabled when EnableEthRPC or Events.EnableActorEventsAPI is true, but can be disabled selectively with this flag.`, +The API is enabled when Fevm.EnableEthRPC or EnableActorEventsAPI is true, but can be disabled selectively with this flag.`, }, { Name: "DisableHistoricFilterAPI", @@ -679,7 +679,16 @@ The API is enabled when EnableEthRPC or Events.EnableActorEventsAPI is true, but Comment: `DisableHistoricFilterAPI will disable the HistoricFilterAPI that can create and query filters for actor events that occurred in the past. HistoricFilterAPI maintains a queryable index of events. -The API is enabled when EnableEthRPC or Events.EnableActorEventsAPI is true, but can be disabled selectively with this flag.`, +The API is enabled when Fevm.EnableEthRPC or EnableActorEventsAPI is true, but can be disabled selectively with this flag.`, + }, + { + Name: "EnableActorEventsAPI", + Type: "bool", + + Comment: `EnableActorEventsAPI enables the Actor events API that enables clients to consume events +emitted by (smart contracts + built-in Actors). +This will also enable the RealTimeFilterAPI and HistoricFilterAPI by default, but they can be +disabled by setting their respective Disable* options.`, }, { Name: "FilterTTL", @@ -717,17 +726,6 @@ the database must already exist and be writeable. If a relative path is provided relative to the CWD (current working directory).`, }, }, - "EventsConfig": { - { - Name: "EnableActorEventsAPI", - Type: "bool", - - Comment: `EnableActorEventsAPI enables the Actor events API that enables clients to consume events -emitted by (smart contracts + built-in Actors). -This will also enable the RealTimeFilterAPI and HistoricFilterAPI by default, but they can be -disabled by setting their respective Disable* options in Fevm.Events.`, - }, - }, "FaultReporterConfig": { { Name: "EnableConsensusFaultReporter", @@ -782,7 +780,7 @@ Set to 0 to keep all mappings`, }, { Name: "Events", - Type: "Events", + Type: "DeprecatedEvents", Comment: ``, }, diff --git a/node/config/load.go b/node/config/load.go index 96a0429410f..1b8df169645 100644 --- a/node/config/load.go +++ b/node/config/load.go @@ -18,12 +18,9 @@ import ( // FromFile loads config from a specified file overriding defaults specified in // the def parameter. If file does not exist or is empty defaults are assumed. 
func FromFile(path string, opts ...LoadCfgOpt) (interface{}, error) { - var loadOpts cfgLoadOpts - var err error - for _, opt := range opts { - if err = opt(&loadOpts); err != nil { - return nil, xerrors.Errorf("failed to apply load cfg option: %w", err) - } + loadOpts, err := applyOpts(opts...) + if err != nil { + return nil, err } var def interface{} if loadOpts.defaultCfg != nil { @@ -56,17 +53,44 @@ func FromFile(path string, opts ...LoadCfgOpt) (interface{}, error) { return nil, xerrors.Errorf("config failed validation: %w", err) } } - return FromReader(buf, def) + return FromReader(buf, def, opts...) } // FromReader loads config from a reader instance. -func FromReader(reader io.Reader, def interface{}) (interface{}, error) { +func FromReader(reader io.Reader, def interface{}, opts ...LoadCfgOpt) (interface{}, error) { + loadOpts, err := applyOpts(opts...) + if err != nil { + return nil, err + } cfg := def - _, err := toml.NewDecoder(reader).Decode(cfg) + md, err := toml.NewDecoder(reader).Decode(cfg) if err != nil { return nil, err } + // find any fields with a tag: `moved:"New.Config.Location"` and move any set values there over to + // the new location if they are not already set there. + movedFields := findMovedFields(nil, cfg) + var warningOut io.Writer = os.Stderr + if loadOpts.warningWriter != nil { + warningOut = loadOpts.warningWriter + } + for _, d := range movedFields { + if md.IsDefined(d.Field...) { + fmt.Fprintf( + warningOut, + "WARNING: Use of deprecated configuration option '%s' will be removed in a future release, use '%s' instead\n", + strings.Join(d.Field, "."), + strings.Join(d.NewField, ".")) + if !md.IsDefined(d.NewField...) { + // new value isn't set but old is, we should move what the user set there + if err := moveFieldValue(cfg, d.Field, d.NewField); err != nil { + return nil, fmt.Errorf("failed to move field value: %w", err) + } + } + } + } + err = envconfig.Process("LOTUS", cfg) if err != nil { return nil, fmt.Errorf("processing env vars overrides: %s", err) @@ -75,14 +99,105 @@ func FromReader(reader io.Reader, def interface{}) (interface{}, error) { return cfg, nil } +// move a value from the location in the valPtr struct specified by oldPath, to the location +// specified by newPath; where the path is an array of nested field names. 
+func moveFieldValue(valPtr interface{}, oldPath []string, newPath []string) error { + oldValue, err := getFieldValue(valPtr, oldPath) + if err != nil { + return err + } + val := reflect.ValueOf(valPtr).Elem() + for { + field := val.FieldByName(newPath[0]) + if !field.IsValid() { + return fmt.Errorf("unexpected error fetching field value") + } + if len(newPath) == 1 { + if field.Kind() != oldValue.Kind() { + return fmt.Errorf("unexpected error, old kind != new kind") + } + // set field on val to be the new one, and we're done + field.Set(oldValue) + return nil + } + if field.Kind() != reflect.Struct { + return fmt.Errorf("unexpected error fetching field value, is not a struct") + } + newPath = newPath[1:] + val = field + } +} + +// recursively iterate into `path` to find the terminal value +func getFieldValue(val interface{}, path []string) (reflect.Value, error) { + if reflect.ValueOf(val).Kind() == reflect.Ptr { + val = reflect.ValueOf(val).Elem().Interface() + } + field := reflect.ValueOf(val).FieldByName(path[0]) + if !field.IsValid() { + return reflect.Value{}, fmt.Errorf("unexpected error fetching field value") + } + if len(path) > 1 { + if field.Kind() != reflect.Struct { + return reflect.Value{}, fmt.Errorf("unexpected error fetching field value, is not a struct") + } + return getFieldValue(field.Interface(), path[1:]) + } + return field, nil +} + +type movedField struct { + Field []string + NewField []string +} + +// inspect the fields recursively within a struct and find any with "moved" tags +func findMovedFields(path []string, val interface{}) []movedField { + dep := make([]movedField, 0) + if reflect.ValueOf(val).Kind() == reflect.Ptr { + val = reflect.ValueOf(val).Elem().Interface() + } + t := reflect.TypeOf(val) + if t.Kind() != reflect.Struct { + return nil + } + for i := 0; i < t.NumField(); i++ { + field := t.Field(i) + // could also do a "deprecated" in here + if idx := field.Tag.Get("moved"); idx != "" && idx != "-" { + dep = append(dep, movedField{ + Field: append(path, field.Name), + NewField: strings.Split(idx, "."), + }) + } + if field.Type.Kind() == reflect.Struct && reflect.ValueOf(val).FieldByName(field.Name).IsValid() { + deps := findMovedFields(append(path, field.Name), reflect.ValueOf(val).FieldByName(field.Name).Interface()) + dep = append(dep, deps...) + } + } + return dep +} + type cfgLoadOpts struct { defaultCfg func() (interface{}, error) canFallbackOnDefault func() error validate func(string) error + warningWriter io.Writer } type LoadCfgOpt func(opts *cfgLoadOpts) error +func applyOpts(opts ...LoadCfgOpt) (cfgLoadOpts, error) { + var loadOpts cfgLoadOpts + var err error + for _, opt := range opts { + if err = opt(&loadOpts); err != nil { + return loadOpts, fmt.Errorf("failed to apply load cfg option: %w", err) + } + } + return loadOpts, nil +} + func SetDefault(f func() (interface{}, error)) LoadCfgOpt { return func(opts *cfgLoadOpts) error { opts.defaultCfg = f @@ -104,6 +219,13 @@ func SetValidate(f func(string) error) LoadCfgOpt { } } +func SetWarningWriter(w io.Writer) LoadCfgOpt { + return func(opts *cfgLoadOpts) error { + opts.warningWriter = w + return nil + } +} + func NoDefaultForSplitstoreTransition() error { return xerrors.Errorf("FullNode config not found and fallback to default disallowed while we transition to splitstore discard default. Use `lotus config default` to set this repo up with a default config. 
Be sure to set `EnableSplitstore` to `false` if you are running a full archive node") } diff --git a/node/config/load_test.go b/node/config/load_test.go index e17660c19f9..2edef259bc6 100644 --- a/node/config/load_test.go +++ b/node/config/load_test.go @@ -8,6 +8,7 @@ import ( "time" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func fullNodeDefault() (interface{}, error) { return DefaultFullNode(), nil } @@ -138,3 +139,77 @@ func TestFailToFallbackToDefault(t *testing.T) { _, err = FromFile(nonExistantFileName, SetDefault(fullNodeDefault), SetCanFallbackOnDefault(NoDefaultForSplitstoreTransition)) assert.Error(t, err) } + +func TestPrintDeprecated(t *testing.T) { + type ChildCfg struct { + Field string `moved:"Bang"` + NewField string + } + type Old struct { + Thing1 int `moved:"New.Thing1"` + Thing2 int `moved:"New.Thing2"` + } + type New struct { + Thing1 int + Thing2 int + } + type ParentCfg struct { + Child ChildCfg + Old Old + New New + Foo int + Baz string `moved:"Child.NewField"` + Boom int `moved:"Foo"` + Bang string + } + + t.Run("warning output", func(t *testing.T) { + cfg := ` + Baz = "baz" + Foo = 100 + [Child] + Field = "bip" + NewField = "bop" + ` + + warningWriter := bytes.NewBuffer(nil) + + v, err := FromReader(bytes.NewReader([]byte(cfg)), &ParentCfg{Boom: 200, Bang: "300"}, SetWarningWriter(warningWriter)) + + require.NoError(t, err) + require.Equal(t, &ParentCfg{ + Child: ChildCfg{ + Field: "bip", + NewField: "bop", + }, + Baz: "baz", + Foo: 100, + Boom: 200, + Bang: "bip", + }, v) + require.Regexp(t, `\WChild\.Field\W.+use 'Bang' instead`, warningWriter.String()) + require.Regexp(t, `\WBaz\W.+use 'Child\.NewField' instead`, warningWriter.String()) + require.NotContains(t, warningWriter.String(), "don't use this at all") + require.NotContains(t, warningWriter.String(), "Boom") + }) + + defaultNew := New{Thing1: 42, Thing2: 800} + testCases := []struct { + name string + cfg string + expected New + }{ + {"simple", ``, defaultNew}, + {"set new", "[New]\nThing1 = 101\nThing2 = 102\n", New{Thing1: 101, Thing2: 102}}, + // should move old to new fields if new isn't set + {"set old", "[Old]\nThing1 = 101\nThing2 = 102\n", New{Thing1: 101, Thing2: 102}}, + } + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + v, err := FromReader(bytes.NewReader([]byte(tc.cfg)), &ParentCfg{New: defaultNew}) + require.NoError(t, err) + require.Equal(t, tc.expected, v.(*ParentCfg).New) + }) + } +} diff --git a/node/config/types.go b/node/config/types.go index b86da656933..3872bffde5d 100644 --- a/node/config/types.go +++ b/node/config/types.go @@ -873,19 +873,48 @@ type FevmConfig struct { // Set to 0 to keep all mappings EthTxHashMappingLifetimeDays int - Events Events + Events DeprecatedEvents `toml:"Events,omitempty"` } -type Events struct { +type DeprecatedEvents struct { + // DisableRealTimeFilterAPI is DEPRECATED and will be removed in a future release. Use Events.DisableRealTimeFilterAPI instead. + DisableRealTimeFilterAPI bool `moved:"Events.DisableRealTimeFilterAPI" toml:"DisableRealTimeFilterAPI,omitempty"` + + // DisableHistoricFilterAPI is DEPRECATED and will be removed in a future release. Use Events.DisableHistoricFilterAPI instead. + DisableHistoricFilterAPI bool `moved:"Events.DisableHistoricFilterAPI" toml:"DisableHistoricFilterAPI,omitempty"` + + // FilterTTL is DEPRECATED and will be removed in a future release. Use Events.FilterTTL instead. 
+ FilterTTL Duration `moved:"Events.FilterTTL" toml:"FilterTTL,omitzero"` + + // MaxFilters is DEPRECATED and will be removed in a future release. Use Events.MaxFilters instead. + MaxFilters int `moved:"Events.MaxFilters" toml:"MaxFilters,omitzero"` + + // MaxFilterResults is DEPRECATED and will be removed in a future release. Use Events.MaxFilterResults instead. + MaxFilterResults int `moved:"Events.MaxFilterResults" toml:"MaxFilterResults,omitzero"` + + // MaxFilterHeightRange is DEPRECATED and will be removed in a future release. Use Events.MaxFilterHeightRange instead. + MaxFilterHeightRange uint64 `moved:"Events.MaxFilterHeightRange" toml:"MaxFilterHeightRange,omitzero"` + + // DatabasePath is DEPRECATED and will be removed in a future release. Use Events.DatabasePath instead. + DatabasePath string `moved:"Events.DatabasePath" toml:"DatabasePath,omitempty"` +} + +type EventsConfig struct { // DisableRealTimeFilterAPI will disable the RealTimeFilterAPI that can create and query filters for actor events as they are emitted. - // The API is enabled when EnableEthRPC or Events.EnableActorEventsAPI is true, but can be disabled selectively with this flag. + // The API is enabled when Fevm.EnableEthRPC or EnableActorEventsAPI is true, but can be disabled selectively with this flag. DisableRealTimeFilterAPI bool // DisableHistoricFilterAPI will disable the HistoricFilterAPI that can create and query filters for actor events // that occurred in the past. HistoricFilterAPI maintains a queryable index of events. - // The API is enabled when EnableEthRPC or Events.EnableActorEventsAPI is true, but can be disabled selectively with this flag. + // The API is enabled when Fevm.EnableEthRPC or EnableActorEventsAPI is true, but can be disabled selectively with this flag. DisableHistoricFilterAPI bool + // EnableActorEventsAPI enables the Actor events API that enables clients to consume events + // emitted by (smart contracts + built-in Actors). + // This will also enable the RealTimeFilterAPI and HistoricFilterAPI by default, but they can be + // disabled by setting their respective Disable* options. + EnableActorEventsAPI bool + // FilterTTL specifies the time to live for actor event filters. Filters that haven't been accessed longer than // this time become eligible for automatic deletion. FilterTTL Duration @@ -912,14 +941,6 @@ type Events struct { // Set upper bound on index size } -type EventsConfig struct { - // EnableActorEventsAPI enables the Actor events API that enables clients to consume events - // emitted by (smart contracts + built-in Actors). - // This will also enable the RealTimeFilterAPI and HistoricFilterAPI by default, but they can be - // disabled by setting their respective Disable* options in Fevm.Events. - EnableActorEventsAPI bool -} - type IndexConfig struct { // EXPERIMENTAL FEATURE. USE WITH CAUTION // EnableMsgIndex enables indexing of messages on chain. 
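
The node/config hunks above deprecate `Fevm.Events` in favour of a top-level `Events` section, with `moved:"..."` struct tags and `toml.MetaData` used to warn about, and carry over, values still set at the old location. Below is a minimal standalone sketch of that pattern, not the Lotus implementation: the struct layout and the single hard-coded field path are illustrative assumptions, and the real code discovers moved fields generically via reflection over the `moved` tags (see `findMovedFields`/`moveFieldValue` in the load.go hunk).

```go
// Standalone sketch of the "moved config field" migration pattern (assumed layout).
package main

import (
	"fmt"
	"os"

	"github.com/BurntSushi/toml"
)

type DeprecatedEvents struct {
	// Deprecated: use Events.MaxFilterHeightRange instead.
	MaxFilterHeightRange uint64 `moved:"Events.MaxFilterHeightRange" toml:"MaxFilterHeightRange,omitzero"`
}

type EventsConfig struct {
	MaxFilterHeightRange uint64
}

type FullNode struct {
	Fevm struct {
		Events DeprecatedEvents `toml:"Events,omitempty"`
	}
	Events EventsConfig
}

func main() {
	// A config file that still uses the old location.
	src := `
[Fevm.Events]
MaxFilterHeightRange = 1440
`
	cfg := FullNode{Events: EventsConfig{MaxFilterHeightRange: 2880}} // default
	md, err := toml.Decode(src, &cfg)
	if err != nil {
		panic(err)
	}
	// Old path set explicitly, new path not: warn and carry the value over.
	if md.IsDefined("Fevm", "Events", "MaxFilterHeightRange") &&
		!md.IsDefined("Events", "MaxFilterHeightRange") {
		fmt.Fprintln(os.Stderr,
			"WARNING: 'Fevm.Events.MaxFilterHeightRange' is deprecated, use 'Events.MaxFilterHeightRange' instead")
		cfg.Events.MaxFilterHeightRange = cfg.Fevm.Events.MaxFilterHeightRange
	}
	fmt.Println(cfg.Events.MaxFilterHeightRange) // prints 1440
}
```

The same idea is exercised by `TestPrintDeprecated` in the load_test.go hunk: a value decoded into a deprecated field is copied to its new home only when the new field was not set, and a warning naming both paths is written to the configured warning writer.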
diff --git a/node/impl/storminer.go b/node/impl/storminer.go index 90248a355a4..bd482494017 100644 --- a/node/impl/storminer.go +++ b/node/impl/storminer.go @@ -1377,15 +1377,15 @@ func (sm *StorageMinerAPI) RuntimeSubsystems(context.Context) (res api.MinerSubs } func (sm *StorageMinerAPI) ActorWithdrawBalance(ctx context.Context, amount abi.TokenAmount) (cid.Cid, error) { - return sm.withdrawBalance(ctx, amount, true) + return WithdrawBalance(ctx, sm.Full, sm.Miner.Address(), amount, true) } func (sm *StorageMinerAPI) BeneficiaryWithdrawBalance(ctx context.Context, amount abi.TokenAmount) (cid.Cid, error) { - return sm.withdrawBalance(ctx, amount, false) + return WithdrawBalance(ctx, sm.Full, sm.Miner.Address(), amount, false) } -func (sm *StorageMinerAPI) withdrawBalance(ctx context.Context, amount abi.TokenAmount, fromOwner bool) (cid.Cid, error) { - available, err := sm.Full.StateMinerAvailableBalance(ctx, sm.Miner.Address(), types.EmptyTSK) +func WithdrawBalance(ctx context.Context, full api.FullNode, maddr address.Address, amount abi.TokenAmount, fromOwner bool) (cid.Cid, error) { + available, err := full.StateMinerAvailableBalance(ctx, maddr, types.EmptyTSK) if err != nil { return cid.Undef, xerrors.Errorf("Error getting miner balance: %w", err) } @@ -1405,7 +1405,7 @@ func (sm *StorageMinerAPI) withdrawBalance(ctx context.Context, amount abi.Token return cid.Undef, err } - mi, err := sm.Full.StateMinerInfo(ctx, sm.Miner.Address(), types.EmptyTSK) + mi, err := full.StateMinerInfo(ctx, maddr, types.EmptyTSK) if err != nil { return cid.Undef, xerrors.Errorf("Error getting miner's owner address: %w", err) } @@ -1417,8 +1417,8 @@ func (sm *StorageMinerAPI) withdrawBalance(ctx context.Context, amount abi.Token sender = mi.Beneficiary } - smsg, err := sm.Full.MpoolPushMessage(ctx, &types.Message{ - To: sm.Miner.Address(), + smsg, err := full.MpoolPushMessage(ctx, &types.Message{ + To: maddr, From: sender, Value: types.NewInt(0), Method: builtintypes.MethodsMiner.WithdrawBalance, diff --git a/node/modules/actorevent.go b/node/modules/actorevent.go index 135a34e5be7..d92da1940a9 100644 --- a/node/modules/actorevent.go +++ b/node/modules/actorevent.go @@ -32,17 +32,17 @@ type EventHelperAPI struct { var _ events.EventHelperAPI = &EventHelperAPI{} -func EthEventHandler(cfg config.FevmConfig) func(helpers.MetricsCtx, repo.LockedRepo, fx.Lifecycle, *filter.EventFilterManager, *store.ChainStore, *stmgr.StateManager, EventHelperAPI, *messagepool.MessagePool, full.StateAPI, full.ChainAPI) (*full.EthEventHandler, error) { +func EthEventHandler(cfg config.EventsConfig, enableEthRPC bool) func(helpers.MetricsCtx, repo.LockedRepo, fx.Lifecycle, *filter.EventFilterManager, *store.ChainStore, *stmgr.StateManager, EventHelperAPI, *messagepool.MessagePool, full.StateAPI, full.ChainAPI) (*full.EthEventHandler, error) { return func(mctx helpers.MetricsCtx, r repo.LockedRepo, lc fx.Lifecycle, fm *filter.EventFilterManager, cs *store.ChainStore, sm *stmgr.StateManager, evapi EventHelperAPI, mp *messagepool.MessagePool, stateapi full.StateAPI, chainapi full.ChainAPI) (*full.EthEventHandler, error) { ctx := helpers.LifecycleCtx(mctx, lc) ee := &full.EthEventHandler{ Chain: cs, - MaxFilterHeightRange: abi.ChainEpoch(cfg.Events.MaxFilterHeightRange), + MaxFilterHeightRange: abi.ChainEpoch(cfg.MaxFilterHeightRange), SubscribtionCtx: ctx, } - if !cfg.EnableEthRPC || cfg.Events.DisableRealTimeFilterAPI { + if !enableEthRPC || cfg.DisableRealTimeFilterAPI { // all event functionality is disabled // the historic 
filter API relies on the real time one return ee, nil @@ -53,21 +53,21 @@ func EthEventHandler(cfg config.FevmConfig) func(helpers.MetricsCtx, repo.Locked StateAPI: stateapi, ChainAPI: chainapi, } - ee.FilterStore = filter.NewMemFilterStore(cfg.Events.MaxFilters) + ee.FilterStore = filter.NewMemFilterStore(cfg.MaxFilters) // Start garbage collection for filters lc.Append(fx.Hook{ OnStart: func(context.Context) error { - go ee.GC(ctx, time.Duration(cfg.Events.FilterTTL)) + go ee.GC(ctx, time.Duration(cfg.FilterTTL)) return nil }, }) ee.TipSetFilterManager = &filter.TipSetFilterManager{ - MaxFilterResults: cfg.Events.MaxFilterResults, + MaxFilterResults: cfg.MaxFilterResults, } ee.MemPoolFilterManager = &filter.MemPoolFilterManager{ - MaxFilterResults: cfg.Events.MaxFilterResults, + MaxFilterResults: cfg.MaxFilterResults, } ee.EventFilterManager = fm @@ -94,22 +94,22 @@ func EthEventHandler(cfg config.FevmConfig) func(helpers.MetricsCtx, repo.Locked } } -func EventFilterManager(cfg config.FevmConfig) func(helpers.MetricsCtx, repo.LockedRepo, fx.Lifecycle, *store.ChainStore, *stmgr.StateManager, EventHelperAPI, full.ChainAPI) (*filter.EventFilterManager, error) { +func EventFilterManager(cfg config.EventsConfig) func(helpers.MetricsCtx, repo.LockedRepo, fx.Lifecycle, *store.ChainStore, *stmgr.StateManager, EventHelperAPI, full.ChainAPI) (*filter.EventFilterManager, error) { return func(mctx helpers.MetricsCtx, r repo.LockedRepo, lc fx.Lifecycle, cs *store.ChainStore, sm *stmgr.StateManager, evapi EventHelperAPI, chainapi full.ChainAPI) (*filter.EventFilterManager, error) { ctx := helpers.LifecycleCtx(mctx, lc) // Enable indexing of actor events var eventIndex *filter.EventIndex - if !cfg.Events.DisableHistoricFilterAPI { + if !cfg.DisableHistoricFilterAPI { var dbPath string - if cfg.Events.DatabasePath == "" { + if cfg.DatabasePath == "" { sqlitePath, err := r.SqlitePath() if err != nil { return nil, err } dbPath = filepath.Join(sqlitePath, "events.db") } else { - dbPath = cfg.Events.DatabasePath + dbPath = cfg.DatabasePath } var err error @@ -144,7 +144,7 @@ func EventFilterManager(cfg config.FevmConfig) func(helpers.MetricsCtx, repo.Loc return *actor.Address, true }, - MaxFilterResults: cfg.Events.MaxFilterResults, + MaxFilterResults: cfg.MaxFilterResults, } lc.Append(fx.Hook{ @@ -162,18 +162,22 @@ func EventFilterManager(cfg config.FevmConfig) func(helpers.MetricsCtx, repo.Loc } } -func ActorEventHandler(enable bool, fevmCfg config.FevmConfig) func(helpers.MetricsCtx, repo.LockedRepo, fx.Lifecycle, *filter.EventFilterManager, *store.ChainStore, *stmgr.StateManager, EventHelperAPI, *messagepool.MessagePool, full.StateAPI, full.ChainAPI) (*full.ActorEventHandler, error) { +func ActorEventHandler(cfg config.EventsConfig) func(helpers.MetricsCtx, repo.LockedRepo, fx.Lifecycle, *filter.EventFilterManager, *store.ChainStore, *stmgr.StateManager, EventHelperAPI, *messagepool.MessagePool, full.StateAPI, full.ChainAPI) (*full.ActorEventHandler, error) { return func(mctx helpers.MetricsCtx, r repo.LockedRepo, lc fx.Lifecycle, fm *filter.EventFilterManager, cs *store.ChainStore, sm *stmgr.StateManager, evapi EventHelperAPI, mp *messagepool.MessagePool, stateapi full.StateAPI, chainapi full.ChainAPI) (*full.ActorEventHandler, error) { - - if !enable || fevmCfg.Events.DisableRealTimeFilterAPI { - fm = nil + if !cfg.EnableActorEventsAPI || cfg.DisableRealTimeFilterAPI { + return full.NewActorEventHandler( + cs, + nil, // no EventFilterManager disables API calls + 
time.Duration(build.BlockDelaySecs)*time.Second, + abi.ChainEpoch(cfg.MaxFilterHeightRange), + ), nil } return full.NewActorEventHandler( cs, fm, time.Duration(build.BlockDelaySecs)*time.Second, - abi.ChainEpoch(fevmCfg.Events.MaxFilterHeightRange), + abi.ChainEpoch(cfg.MaxFilterHeightRange), ), nil } } diff --git a/scripts/generate-lotus-cli.py b/scripts/generate-lotus-cli.py index 305716b0905..c06e2f2a5f6 100644 --- a/scripts/generate-lotus-cli.py +++ b/scripts/generate-lotus-cli.py @@ -51,8 +51,12 @@ def get_cmd_recursively(cur_cmd): for e in [ "LOTUS_PATH", "LOTUS_MARKETS_PATH", "LOTUS_MINER_PATH", "LOTUS_STORAGE_PATH", "LOTUS_WORKER_PATH", "WORKER_PATH", "LOTUS_PANIC_REPORT_PATH", "WALLET_PATH" ]: os.environ.pop(e, None) + # Set env var telling the binaries that we're generating docs + os.putenv("LOTUS_DOCS_GENERATION", "1") + os.putenv("LOTUS_VERSION_IGNORE_COMMIT", "1") generate_lotus_cli('lotus') generate_lotus_cli('lotus-miner') generate_lotus_cli('lotus-worker') - generate_lotus_cli('curio') \ No newline at end of file + generate_lotus_cli('curio') + generate_lotus_cli('sptool') \ No newline at end of file diff --git a/storage/paths/interface.go b/storage/paths/interface.go index 4ff206c6d4d..27d6ee54160 100644 --- a/storage/paths/interface.go +++ b/storage/paths/interface.go @@ -35,7 +35,7 @@ type PartialFileHandler interface { //go:generate go run github.com/golang/mock/mockgen -destination=mocks/store.go -package=mocks . Store type Store interface { - AcquireSector(ctx context.Context, s storiface.SectorRef, existing storiface.SectorFileType, allocate storiface.SectorFileType, sealing storiface.PathType, op storiface.AcquireMode) (paths storiface.SectorPaths, stores storiface.SectorPaths, err error) + AcquireSector(ctx context.Context, s storiface.SectorRef, existing storiface.SectorFileType, allocate storiface.SectorFileType, sealing storiface.PathType, op storiface.AcquireMode, opts ...storiface.AcquireOption) (paths storiface.SectorPaths, stores storiface.SectorPaths, err error) Remove(ctx context.Context, s abi.SectorID, types storiface.SectorFileType, force bool, keepIn []storiface.ID) error // like remove, but doesn't remove the primary sector copy, nor the last diff --git a/storage/paths/local.go b/storage/paths/local.go index 7dd7c12562e..68999940f23 100644 --- a/storage/paths/local.go +++ b/storage/paths/local.go @@ -53,7 +53,15 @@ type path struct { reservations map[abi.SectorID]storiface.SectorFileType } -func (p *path) stat(ls LocalStorage) (fsutil.FsStat, error) { +// statExistingSectorForReservation is optional parameter for stat method +// which will make it take into account existing sectors when calculating +// available space for new reservations +type statExistingSectorForReservation struct { + id abi.SectorID + ft storiface.SectorFileType +} + +func (p *path) stat(ls LocalStorage, newReserve ...statExistingSectorForReservation) (fsutil.FsStat, error) { start := time.Now() stat, err := ls.Stat(p.local) @@ -63,34 +71,49 @@ func (p *path) stat(ls LocalStorage) (fsutil.FsStat, error) { stat.Reserved = p.reserved - for id, ft := range p.reservations { - for _, fileType := range storiface.PathTypes { - if fileType&ft == 0 { - continue + accountExistingFiles := func(id abi.SectorID, fileType storiface.SectorFileType) error { + sp := p.sectorPath(id, fileType) + + used, err := ls.DiskUsage(sp) + if err == os.ErrNotExist { + p, ferr := tempFetchDest(sp, false) + if ferr != nil { + return ferr } - sp := p.sectorPath(id, fileType) + used, err = ls.DiskUsage(p) + } + 
if err != nil { + // we don't care about 'not exist' errors, as storage can be + // reserved before any files are written, so this error is not + // unexpected + if !os.IsNotExist(err) { + log.Warnf("getting disk usage of '%s': %+v", p.sectorPath(id, fileType), err) + } + return nil + } - used, err := ls.DiskUsage(sp) - if err == os.ErrNotExist { - p, ferr := tempFetchDest(sp, false) - if ferr != nil { - return fsutil.FsStat{}, ferr - } + stat.Reserved -= used + return nil + } - used, err = ls.DiskUsage(p) + for id, ft := range p.reservations { + for _, fileType := range ft.AllSet() { + if err := accountExistingFiles(id, fileType); err != nil { + return fsutil.FsStat{}, err } - if err != nil { - // we don't care about 'not exist' errors, as storage can be - // reserved before any files are written, so this error is not - // unexpected - if !os.IsNotExist(err) { - log.Warnf("getting disk usage of '%s': %+v", p.sectorPath(id, fileType), err) - } + } + } + for _, reservation := range newReserve { + for _, fileType := range reservation.ft.AllSet() { + if p.reservations[reservation.id]&fileType != 0 { + // already accounted for continue } - stat.Reserved -= used + if err := accountExistingFiles(reservation.id, fileType); err != nil { + return fsutil.FsStat{}, err + } } } @@ -414,11 +437,7 @@ func (st *Local) Reserve(ctx context.Context, sid storiface.SectorRef, ft storif deferredDone() }() - for _, fileType := range storiface.PathTypes { - if fileType&ft == 0 { - continue - } - + for _, fileType := range ft.AllSet() { id := storiface.ID(storiface.PathByType(storageIDs, fileType)) p, ok := st.paths[id] @@ -426,7 +445,7 @@ func (st *Local) Reserve(ctx context.Context, sid storiface.SectorRef, ft storif return nil, errPathNotFound } - stat, err := p.stat(st.localStorage) + stat, err := p.stat(st.localStorage, statExistingSectorForReservation{sid.ID, fileType}) if err != nil { return nil, xerrors.Errorf("getting local storage stat: %w", err) } @@ -460,7 +479,7 @@ func (st *Local) Reserve(ctx context.Context, sid storiface.SectorRef, ft storif return done, nil } -func (st *Local) AcquireSector(ctx context.Context, sid storiface.SectorRef, existing storiface.SectorFileType, allocate storiface.SectorFileType, pathType storiface.PathType, op storiface.AcquireMode) (storiface.SectorPaths, storiface.SectorPaths, error) { +func (st *Local) AcquireSector(ctx context.Context, sid storiface.SectorRef, existing storiface.SectorFileType, allocate storiface.SectorFileType, pathType storiface.PathType, op storiface.AcquireMode, opts ...storiface.AcquireOption) (storiface.SectorPaths, storiface.SectorPaths, error) { if existing|allocate != existing^allocate { return storiface.SectorPaths{}, storiface.SectorPaths{}, xerrors.New("can't both find and allocate a sector") } @@ -476,6 +495,22 @@ func (st *Local) AcquireSector(ctx context.Context, sid storiface.SectorRef, exi var out storiface.SectorPaths var storageIDs storiface.SectorPaths + allocPathOk := func(canSeal, canStore bool, allowTypes, denyTypes []string, fileType storiface.SectorFileType) bool { + if (pathType == storiface.PathSealing) && !canSeal { + return false + } + + if (pathType == storiface.PathStorage) && !canStore { + return false + } + + if !fileType.Allowed(allowTypes, denyTypes) { + return false + } + + return true + } + // First find existing files for _, fileType := range storiface.PathTypes { // also try to find existing sectors if we're allocating @@ -501,6 +536,10 @@ func (st *Local) AcquireSector(ctx context.Context, sid 
storiface.SectorRef, exi continue } + if allocate.Has(fileType) && !allocPathOk(info.CanSeal, info.CanStore, info.AllowTypes, info.DenyTypes, fileType) { + continue // allocate request for a path of different type + } + spath := p.sectorPath(sid.ID, fileType) storiface.SetPathByType(&out, fileType, spath) storiface.SetPathByType(&storageIDs, fileType, string(info.ID)) @@ -535,15 +574,7 @@ func (st *Local) AcquireSector(ctx context.Context, sid storiface.SectorRef, exi continue } - if (pathType == storiface.PathSealing) && !si.CanSeal { - continue - } - - if (pathType == storiface.PathStorage) && !si.CanStore { - continue - } - - if !fileType.Allowed(si.AllowTypes, si.DenyTypes) { + if !allocPathOk(si.CanSeal, si.CanStore, si.AllowTypes, si.DenyTypes, fileType) { continue } diff --git a/storage/paths/mocks/store.go b/storage/paths/mocks/store.go index 244b4fc910f..d7fa226e645 100644 --- a/storage/paths/mocks/store.go +++ b/storage/paths/mocks/store.go @@ -41,9 +41,13 @@ func (m *MockStore) EXPECT() *MockStoreMockRecorder { } // AcquireSector mocks base method. -func (m *MockStore) AcquireSector(arg0 context.Context, arg1 storiface.SectorRef, arg2, arg3 storiface.SectorFileType, arg4 storiface.PathType, arg5 storiface.AcquireMode) (storiface.SectorPaths, storiface.SectorPaths, error) { +func (m *MockStore) AcquireSector(arg0 context.Context, arg1 storiface.SectorRef, arg2, arg3 storiface.SectorFileType, arg4 storiface.PathType, arg5 storiface.AcquireMode, arg6 ...storiface.AcquireOption) (storiface.SectorPaths, storiface.SectorPaths, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "AcquireSector", arg0, arg1, arg2, arg3, arg4, arg5) + varargs := []interface{}{arg0, arg1, arg2, arg3, arg4, arg5} + for _, a := range arg6 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "AcquireSector", varargs...) ret0, _ := ret[0].(storiface.SectorPaths) ret1, _ := ret[1].(storiface.SectorPaths) ret2, _ := ret[2].(error) @@ -51,9 +55,10 @@ func (m *MockStore) AcquireSector(arg0 context.Context, arg1 storiface.SectorRef } // AcquireSector indicates an expected call of AcquireSector. -func (mr *MockStoreMockRecorder) AcquireSector(arg0, arg1, arg2, arg3, arg4, arg5 interface{}) *gomock.Call { +func (mr *MockStoreMockRecorder) AcquireSector(arg0, arg1, arg2, arg3, arg4, arg5 interface{}, arg6 ...interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AcquireSector", reflect.TypeOf((*MockStore)(nil).AcquireSector), arg0, arg1, arg2, arg3, arg4, arg5) + varargs := append([]interface{}{arg0, arg1, arg2, arg3, arg4, arg5}, arg6...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AcquireSector", reflect.TypeOf((*MockStore)(nil).AcquireSector), varargs...) } // FsStat mocks base method. 
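The `Reserve` change above is the behavioural heart of the local-path edits: `stat` can now be told which sector a new reservation targets (via `statExistingSectorForReservation`), and bytes that sector already has on disk are subtracted from the reserved total, so re-reserving a partially fetched sector no longer double-counts space. A minimal, self-contained sketch of that accounting idea, assuming sector-level granularity; the `reservation` type, the `available` helper, and the byte figures are illustrative and not Lotus APIs:

```go
package main

import "fmt"

// reservation is space promised to a sector file that may not be fully
// written to disk yet.
type reservation struct {
	sector string
	bytes  int64
}

// available mirrors the idea behind the patched stat(): outstanding
// reservations reduce free space, but bytes a reserved sector already has
// on disk were already counted in "used", so they are credited back to
// avoid double-counting. The sector behind the new reservation gets the
// same credit unless an existing reservation already covers it.
func available(capacity, used int64, existing []reservation, onDisk map[string]int64, newRes reservation) int64 {
	reserved := newRes.bytes
	covered := map[string]bool{}
	for _, r := range existing {
		reserved += r.bytes - onDisk[r.sector]
		covered[r.sector] = true
	}
	if !covered[newRes.sector] {
		reserved -= onDisk[newRes.sector] // partially fetched already
	}
	if reserved < 0 {
		reserved = 0
	}
	return capacity - used - reserved
}

func main() {
	onDisk := map[string]int64{"s-t01000-2": 16 << 20} // 16 MiB of this sector is already local
	existing := []reservation{{sector: "s-t01000-1", bytes: 32 << 20}}
	newRes := reservation{sector: "s-t01000-2", bytes: 32 << 20}

	free := available(1<<30, 100<<20, existing, onDisk, newRes)
	fmt.Printf("%d MiB free after accounting for the new reservation\n", free/(1<<20))
}
```

The patch applies the same credit per file type rather than per sector, and skips file types already covered by an outstanding reservation, but the net effect is the same: space that is both reserved and already written is only counted once.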
diff --git a/storage/paths/remote.go b/storage/paths/remote.go index 9ff719954bb..8532357b460 100644 --- a/storage/paths/remote.go +++ b/storage/paths/remote.go @@ -93,11 +93,28 @@ func NewRemote(local Store, index SectorIndex, auth http.Header, fetchLimit int, } } -func (r *Remote) AcquireSector(ctx context.Context, s storiface.SectorRef, existing storiface.SectorFileType, allocate storiface.SectorFileType, pathType storiface.PathType, op storiface.AcquireMode) (storiface.SectorPaths, storiface.SectorPaths, error) { +func (r *Remote) AcquireSector(ctx context.Context, s storiface.SectorRef, existing storiface.SectorFileType, allocate storiface.SectorFileType, pathType storiface.PathType, op storiface.AcquireMode, opts ...storiface.AcquireOption) (storiface.SectorPaths, storiface.SectorPaths, error) { if existing|allocate != existing^allocate { return storiface.SectorPaths{}, storiface.SectorPaths{}, xerrors.New("can't both find and allocate a sector") } + settings := storiface.AcquireSettings{ + // Into will tell us which paths things should be fetched into or allocated in. + Into: nil, + } + for _, o := range opts { + o(&settings) + } + + if settings.Into != nil { + if !allocate.IsNone() { + return storiface.SectorPaths{}, storiface.SectorPaths{}, xerrors.New("cannot specify Into with allocate") + } + if !settings.Into.HasAllSet(existing) { + return storiface.SectorPaths{}, storiface.SectorPaths{}, xerrors.New("Into has to have all existing paths") + } + } + // First make sure that no other goroutines are trying to fetch this sector; // wait if there are any. for { @@ -134,47 +151,47 @@ func (r *Remote) AcquireSector(ctx context.Context, s storiface.SectorRef, exist } var toFetch storiface.SectorFileType - for _, fileType := range storiface.PathTypes { - if fileType&existing == 0 { - continue - } - + for _, fileType := range existing.AllSet() { if storiface.PathByType(paths, fileType) == "" { toFetch |= fileType } } // get a list of paths to fetch data into. Note: file type filters will apply inside this call. 
- fetchPaths, ids, err := r.local.AcquireSector(ctx, s, storiface.FTNone, toFetch, pathType, op) - if err != nil { - return storiface.SectorPaths{}, storiface.SectorPaths{}, xerrors.Errorf("allocate local sector for fetching: %w", err) - } + var fetchPaths, fetchIDs storiface.SectorPaths - overheadTable := storiface.FSOverheadSeal - if pathType == storiface.PathStorage { - overheadTable = storiface.FsOverheadFinalized - } - - // If any path types weren't found in local storage, try fetching them + if settings.Into == nil { + // fetching without existing reservation, so allocate paths and create a reservation + fetchPaths, fetchIDs, err = r.local.AcquireSector(ctx, s, storiface.FTNone, toFetch, pathType, op) + if err != nil { + return storiface.SectorPaths{}, storiface.SectorPaths{}, xerrors.Errorf("allocate local sector for fetching: %w", err) + } - // First reserve storage - releaseStorage, err := r.local.Reserve(ctx, s, toFetch, ids, overheadTable) - if err != nil { - return storiface.SectorPaths{}, storiface.SectorPaths{}, xerrors.Errorf("reserving storage space: %w", err) - } - defer releaseStorage() + log.Debugw("Fetching sector data without existing reservation", "sector", s, "toFetch", toFetch, "fetchPaths", fetchPaths, "fetchIDs", fetchIDs) - for _, fileType := range storiface.PathTypes { - if fileType&existing == 0 { - continue + overheadTable := storiface.FSOverheadSeal + if pathType == storiface.PathStorage { + overheadTable = storiface.FsOverheadFinalized } - if storiface.PathByType(paths, fileType) != "" { - continue + // If any path types weren't found in local storage, try fetching them + + // First reserve storage + releaseStorage, err := r.local.Reserve(ctx, s, toFetch, fetchIDs, overheadTable) + if err != nil { + return storiface.SectorPaths{}, storiface.SectorPaths{}, xerrors.Errorf("reserving storage space: %w", err) } + defer releaseStorage() + } else { + fetchPaths = settings.Into.Paths + fetchIDs = settings.Into.IDs + + log.Debugw("Fetching sector data with existing reservation", "sector", s, "toFetch", toFetch, "fetchPaths", fetchPaths, "fetchIDs", fetchIDs) + } + for _, fileType := range toFetch.AllSet() { dest := storiface.PathByType(fetchPaths, fileType) - storageID := storiface.PathByType(ids, fileType) + storageID := storiface.PathByType(fetchIDs, fileType) url, err := r.acquireFromRemote(ctx, s.ID, fileType, dest) if err != nil { diff --git a/storage/sealer/storiface/filetype.go b/storage/sealer/storiface/filetype.go index 109e494a85a..422f87cf30d 100644 --- a/storage/sealer/storiface/filetype.go +++ b/storage/sealer/storiface/filetype.go @@ -214,6 +214,10 @@ func (t SectorFileType) All() [FileTypes]bool { return out } +func (t SectorFileType) IsNone() bool { + return t == 0 +} + type SectorPaths struct { ID abi.SectorID @@ -225,6 +229,28 @@ type SectorPaths struct { Piece string } +func (sp SectorPaths) HasAllSet(ft SectorFileType) bool { + for _, fileType := range ft.AllSet() { + if PathByType(sp, fileType) == "" { + return false + } + } + + return true +} + +func (sp SectorPaths) Subset(filter SectorFileType) SectorPaths { + var out SectorPaths + + for _, fileType := range filter.AllSet() { + SetPathByType(&out, fileType, PathByType(sp, fileType)) + } + + out.ID = sp.ID + + return out +} + func ParseSectorID(baseName string) (abi.SectorID, error) { var n abi.SectorNumber var mid abi.ActorID @@ -282,3 +308,12 @@ func SetPathByType(sps *SectorPaths, fileType SectorFileType, p string) { sps.Piece = p } } + +type PathsWithIDs struct { + Paths SectorPaths + IDs 
SectorPaths +} + +func (p PathsWithIDs) HasAllSet(ft SectorFileType) bool { + return p.Paths.HasAllSet(ft) && p.IDs.HasAllSet(ft) +} diff --git a/storage/sealer/storiface/paths.go b/storage/sealer/storiface/paths.go index 2cb4f34d30b..0f0eaeadffd 100644 --- a/storage/sealer/storiface/paths.go +++ b/storage/sealer/storiface/paths.go @@ -25,3 +25,15 @@ type SectorLock struct { type SectorLocks struct { Locks []SectorLock } + +type AcquireSettings struct { + Into *PathsWithIDs +} + +type AcquireOption func(*AcquireSettings) + +func AcquireInto(pathIDs PathsWithIDs) AcquireOption { + return func(settings *AcquireSettings) { + settings.Into = &pathIDs + } +}
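Taken together, the new `storiface.AcquireOption` plumbing lets a caller that has already allocated and reserved local destinations (for example through `Local.AcquireSector` plus `Local.Reserve`) hand those exact paths to `Remote.AcquireSector`, so the fetch lands in the pre-reserved locations instead of triggering a second allocation and reservation inside the fetch path. A hedged caller-side sketch: `fetchIntoReserved` and its error messages are illustrative; only the `AcquireInto`/`PathsWithIDs` types and the `AcquireSector` signature come from this patch.

```go
package example

import (
	"context"

	"golang.org/x/xerrors"

	"github.com/filecoin-project/lotus/storage/paths"
	"github.com/filecoin-project/lotus/storage/sealer/storiface"
)

// fetchIntoReserved fetches a sector's sealed and cache files into
// destinations the caller has already acquired and reserved locally.
// Because AcquireInto is passed, Remote.AcquireSector skips its own local
// allocation and reservation and only streams the missing files into dest.
func fetchIntoReserved(ctx context.Context, remote *paths.Remote, sector storiface.SectorRef, dest, destIDs storiface.SectorPaths) (storiface.SectorPaths, error) {
	want := storiface.FTSealed | storiface.FTCache

	// The patch requires Into to cover every requested "existing" file type
	// and forbids combining Into with an allocate request.
	into := storiface.PathsWithIDs{Paths: dest, IDs: destIDs}
	if !into.HasAllSet(want) {
		return storiface.SectorPaths{}, xerrors.New("destination paths missing for a requested file type")
	}

	got, _, err := remote.AcquireSector(ctx, sector, want, storiface.FTNone, storiface.PathSealing, storiface.AcquireMove,
		storiface.AcquireInto(into))
	if err != nil {
		return storiface.SectorPaths{}, xerrors.Errorf("fetching into reserved paths: %w", err)
	}
	return got, nil
}
```

The design choice is that the reservation's lifetime stays with the caller: when `Into` is set, `Remote.AcquireSector` neither creates nor releases a reservation, which is visible in the hunk above where the `Reserve`/`releaseStorage` branch is skipped.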