From dcc903c65d921911a297998b04c7fc9d77f5f78c Mon Sep 17 00:00:00 2001 From: Aarsh Shah Date: Thu, 31 Oct 2024 13:58:19 +0400 Subject: [PATCH] feat: a new ChainIndexer to index tipsets, messages and events (#12421) * chain index complete for msgs and txns * don't need observer changes for now * changes * fix tests * fix tests * use the right context * index empty tipsets correctly * implement automated backfilling * add event indexing and remove all old indices * fix test * revert deployment test changes * revert test changes and better error handling for eth tx index lookups * fix sql statements naming convention * address review for Index GC * more changes as per review * changes as per review * fix config * mark events as reverted during reconciliation * better reconciliation; pens down and code complete; also reconcile events * fix tests * improve config and docs * improve docs and error handling * improve read logic * improve docs * better logging and handle enabling event storage * improve logs and index init proc * better logging * fix bugs based on calibnet testing * create sqlite Indices * gc should be based on epochs * fix event query * foreign keys should be enabled on the DB * reverted tipsets should be removed as part of GC * release read lock * make it easy to backfill an empty index using reconciliation * better docs for reconciliation * fix conflicts with master * Apply suggestions from code review Co-authored-by: Rod Vagg * fix go mod * fix formatting * revert config changes * address changes in observer * remove top level chainindex package * changes as per review * changes as per review * changes as per review * handle index with reverted tipsets during reconciliation * changes as per review * fix type of max reconcile epoch * changes to reconciliation as per review * log ipld error * better logging of progress * disable chain indexer hydrate from snapshot based on config * always populate index * make config easy to reason about * fix config * fix messaging * revert config changes * Apply suggestions from code review Co-authored-by: Rod Vagg * changes as per review * make error messages homogeneous * fix indentation * changes as per review * feat: recompute tipset to generate missing events if event indexing is enabled (#12463) * auto repair events * make jen * fix leaky abstraction * better docs for gc retention epoch * improve DB handling (#12485) * fix conflict * fix lite node config for indexer * exclude reverted events from eth get logs if client queries by epoch * Simplify addressing for event lookups in the index. 
simplify addressing for event lookups * Apply suggestions from code review Co-authored-by: Rod Vagg * Apply suggestions from code review Co-authored-by: Rod Vagg * fix tests * Apply suggestions from code review Co-authored-by: Rod Vagg * feat: migration("re-indexing"), backfilling and diagnostics tooling for the `ChainIndexer` (#12450) * fix conflicts with chain indexer * feat: chain indexer todos [skip changelog] (#12462) * feat: finish todos of validation api * feat: add indexed data verification with chain store * feat: address comments and finish TODO * fix: build issue * address comments * fix: ci issue * Apply suggestions from code review Co-authored-by: Rod Vagg * changes to Index Validation API based on Rod's first review * build chain indexer API * improve error handling * feat: lotus-shed tooling for chain indexer (#12474) * feat: add lotus-shed command for backfilling chain indexer * feat: add lotus-shed command for inspecting the chain indexer * feat: use single lotus-shed command to inspect and backfill * fix: remove the unused queries * small changes * add change log * backfilling improvements and fixes * finish chain index validation and backfill tooling * user documentation for the * validate from epoch * Apply suggestions from code review Suggestions from Steve's read of the user doc. Co-authored-by: Steve Loeppky * changes to user doc as per review * Apply suggestions from code review Co-authored-by: Steve Loeppky * changes to user doc as per review * Apply suggestions from code review Co-authored-by: Steve Loeppky * changes as per review * feat: add event entries count in validation API (#12506) * feat: add event entry count in validation API * address comments * use sqlite defaults (#12504) * Apply suggestions from code review Co-authored-by: Steve Loeppky * write chain index to a different dir * Apply suggestions from code review Co-authored-by: Steve Loeppky * fix conflicts * UX improvements to backfilling * feat: tests for the chain indexer (#12521) * ddl tests * tests for the chain indexer * finish unit tests for chain indexer * fix formatting * cleanup reverted tipsets to avoid db bloat * fix logging * test for filter by address * test gc cascade delete * fix db locked error during backfilling * fix var name * increase db locked timeout * fix db locked issue * reduce db lock timeout * no lock in gc * reconcile does not need lock * improved error handling * Update chain-indexing-overview-for-rpc-providers.md Doc updates based on @jennijuju feedback. 
* Update chain-indexing-overview-for-rpc-providers.MD Fixes after reviewing https://github.com/filecoin-project/lotus/pull/12450/commits/33c1ca1831ca65695b0f268b319f0c5539d14109 * better metrics for backfilling * Update chain/index/chain-indexing-overview-for-rpc-providers.MD Co-authored-by: Rod Vagg * Update chain/index/chain-indexing-overview-for-rpc-providers.MD Co-authored-by: Rod Vagg * Update chain/index/chain-indexing-overview-for-rpc-providers.MD Co-authored-by: Rod Vagg * Update chain/index/chain-indexing-overview-for-rpc-providers.MD Co-authored-by: Rod Vagg * Update chain/index/chain-indexing-overview-for-rpc-providers.MD Co-authored-by: Rod Vagg * Update chain/index/chain-indexing-overview-for-rpc-providers.MD Co-authored-by: Rod Vagg * Update chain/index/chain-indexing-overview-for-rpc-providers.MD Co-authored-by: Rod Vagg * tests for changes to event addressing * Apply suggestions from code review Co-authored-by: Rod Vagg * changes as per review -> round 1 * Apply suggestions from code review Co-authored-by: Rod Vagg * Apply suggestions from code review Co-authored-by: Rod Vagg * log tipset key cid * Apply suggestions from code review Co-authored-by: Rod Vagg * Apply suggestions from code review Co-authored-by: Rod Vagg * fix docs * Apply suggestions from code review Co-authored-by: Rod Vagg * fix tests * fix tests * make jen * fix conflicts --------- Co-authored-by: Aryan Tikarya Co-authored-by: Rod Vagg Co-authored-by: Steve Loeppky * fix lint * Apply suggestions from code review Co-authored-by: Rod Vagg * Apply suggestions from code review Co-authored-by: Rod Vagg * remove reverted flag from RPC * Apply suggestions from code review Co-authored-by: Rod Vagg * fix testing of events and dummy chain store * remove lotus shed commands for old Indices * change type of event counts to uint64 * only recompute events if they're not found * short-circuit empty events path for older tipsets * chain indexer must be enabled if ETH RPC is enabled * change name of message_id column to id in tipset_message table * only expose SetRecomputeTipSetStateFunc * don't block on head indexing for reading messages * document why we're only checking for missing events for a single tipset * document when we query for reverted events * simplify event collection * Apply suggestions from code review Co-authored-by: Rod Vagg * fix test * change event_id to id in the event table * change head indexed timeout * remove deprecated config options * fail ETH RPC calls if ChainIndexer is disabled * fix docs * remove the tipset key cid func from lotus shed * address review comments * Apply suggestions from code review Co-authored-by: Rod Vagg * chore(events): remove unnecessary DisableRealTimeFilterAPI (#12610) * feat(cli): add --quiet to chainindex validate-backfill + cleanups (#12611) * fix tests * Apply suggestions from code review Co-authored-by: Rod Vagg * error type for disabled chainindexer * fix(chainindex): recompute tipset when we find no receipts * fix(chainindexer): backfilling should halt when chain state data is missing and not backfill parents (#12619) * fix backfilling UX * Update chain/index/api.go Co-authored-by: Rod Vagg * address review --------- Co-authored-by: Rod Vagg * reduce log noise * make jen * make jen * docs: finishing chain-indexer-overview-for-operators.md (#12600) * Followup to PR #12450 for doc updates This is being used to resolve the unresolved items in https://github.com/filecoin-project/lotus/pull/12450 since that PR is unwieldy at this point. 
* Incorporated some items and added TODOs based on unresolved items from https://github.com/filecoin-project/lotus/pull/12450 * Incorporating more feedback * Pointing to issue to learn about benefits * Formatting fixes * Apply most of the suggestions from @rvagg code review Co-authored-by: Rod Vagg * Incorporating feedback from https://github.com/filecoin-project/lotus/pull/12600#discussion_r1802519453 * Addressing https://github.com/filecoin-project/lotus/pull/12600#discussion_r1802540042 and more * Moved chain-indexer docs to documentation Renamed Added ToC We can move to lotus-docs later * Update documentation/en/chain-indexer-overview-for-operators.md Co-authored-by: Rod Vagg * Update documentation/en/chain-indexer-overview-for-operators.md Co-authored-by: Rod Vagg * Added upgrade path when importing chain state from a snapshot. * Typo fixes * Update documentation/en/chain-indexer-overview-for-operators.md Co-authored-by: Rod Vagg * chore(doc): "regular checks" section for chainindexer docs (#12612) * Apply suggestions from @rvagg code review Co-authored-by: Rod Vagg * Incorporating @aarshkshah1992 feedback * Update documentation/en/chain-indexer-overview-for-operators.md Co-authored-by: Rod Vagg --------- Co-authored-by: Rod Vagg Co-authored-by: Aarsh Shah * remove go mod replace * remove unnecessary changes from CHANGELOG * fix test * compare events AMT root (#12632) * fix(chainindex): retry transaction if database connection is lost (#12657) * retry database lost connection * log context cancellation * address review * fix gateway itest: no chainindexer for lite nodes * fix changelog --------- Co-authored-by: Rod Vagg Co-authored-by: Aryan Tikarya Co-authored-by: Steve Loeppky --- CHANGELOG.md | 1 + api/api_full.go | 31 + api/mocks/mock_full.go | 15 + api/proxy_gen.go | 13 + build/openrpc/full.json | 572 +++++---- build/openrpc/gateway.json | 208 ++-- build/openrpc/miner.json | 176 +-- build/openrpc/worker.json | 74 +- .../eth_transaction_hash_lookup.go | 150 --- chain/events/filter/event.go | 62 +- chain/events/filter/event_test.go | 7 +- chain/events/filter/index.go | 672 ---------- chain/events/filter/index_migrations.go | 260 ---- chain/events/filter/index_test.go | 1046 ---------------- chain/events/observer.go | 55 +- chain/gen/gen.go | 3 +- chain/index/api.go | 444 +++++++ chain/index/api_test.go | 522 ++++++++ chain/index/ddls.go | 111 ++ chain/index/ddls_test.go | 866 +++++++++++++ chain/index/events.go | 602 +++++++++ chain/index/events_test.go | 441 +++++++ chain/index/gc.go | 96 ++ chain/index/gc_test.go | 123 ++ chain/index/helpers.go | 155 +++ chain/index/indexer.go | 419 +++++++ chain/index/indexer_test.go | 56 + chain/index/interface.go | 72 +- chain/index/msgindex.go | 499 -------- chain/index/msgindex_test.go | 307 ----- chain/index/pub_sub.go | 59 + chain/index/read.go | 133 ++ chain/index/read_test.go | 292 +++++ chain/index/reconcile.go | 276 +++++ chain/stmgr/forks_test.go | 13 +- chain/stmgr/searchwait.go | 5 +- chain/stmgr/stmgr.go | 11 +- chain/store/store_test.go | 3 +- chain/types/index.go | 23 + cmd/lotus-bench/import.go | 4 +- cmd/lotus-shed/balances.go | 6 +- cmd/lotus-shed/chain_index.go | 213 ++++ cmd/lotus-shed/gas-estimation.go | 7 +- cmd/lotus-shed/indexes.go | 1081 ----------------- cmd/lotus-shed/invariants.go | 3 +- cmd/lotus-shed/main.go | 2 +- cmd/lotus-shed/migrations.go | 4 +- cmd/lotus-shed/state-stats.go | 3 +- cmd/lotus-sim/simulation/node.go | 7 +- cmd/lotus-sim/simulation/simulation.go | 4 +- cmd/lotus/daemon.go | 31 +- 
conformance/driver.go | 3 +- documentation/en/api-v1-unstable-methods.md | 60 + .../chain-indexer-overview-for-operators.md | 418 +++++++ documentation/en/default-lotus-config.toml | 94 +- itests/eth_config_test.go | 2 +- itests/eth_filter_test.go | 50 + itests/eth_transactions_test.go | 6 - itests/kit/node_opts.go | 3 + itests/msgindex_test.go | 124 -- lib/sqlite/sqlite.go | 27 +- lib/sqlite/sqlite_test.go | 13 +- node/builder.go | 4 +- node/builder_chain.go | 32 +- node/config/def.go | 25 +- node/config/doc_gen.go | 116 +- node/config/types.go | 105 +- node/impl/full.go | 1 + node/impl/full/actor_events.go | 3 +- node/impl/full/actor_events_test.go | 21 +- node/impl/full/chain_index.go | 46 + node/impl/full/eth.go | 200 ++- node/impl/full/eth_events.go | 11 +- node/impl/full/txhashmanager.go | 201 --- node/modules/actorevent.go | 42 +- node/modules/chain.go | 3 +- node/modules/chainindex.go | 134 ++ node/modules/ethmodule.go | 78 +- node/modules/msgindex.go | 37 - node/modules/stmgr.go | 4 +- node/repo/fsrepo.go | 20 +- node/repo/interface.go | 4 +- node/repo/memrepo.go | 8 +- 83 files changed, 6724 insertions(+), 5409 deletions(-) delete mode 100644 chain/ethhashlookup/eth_transaction_hash_lookup.go delete mode 100644 chain/events/filter/index.go delete mode 100644 chain/events/filter/index_migrations.go delete mode 100644 chain/events/filter/index_test.go create mode 100644 chain/index/api.go create mode 100644 chain/index/api_test.go create mode 100644 chain/index/ddls.go create mode 100644 chain/index/ddls_test.go create mode 100644 chain/index/events.go create mode 100644 chain/index/events_test.go create mode 100644 chain/index/gc.go create mode 100644 chain/index/gc_test.go create mode 100644 chain/index/helpers.go create mode 100644 chain/index/indexer.go create mode 100644 chain/index/indexer_test.go delete mode 100644 chain/index/msgindex.go delete mode 100644 chain/index/msgindex_test.go create mode 100644 chain/index/pub_sub.go create mode 100644 chain/index/read.go create mode 100644 chain/index/read_test.go create mode 100644 chain/index/reconcile.go create mode 100644 chain/types/index.go create mode 100644 cmd/lotus-shed/chain_index.go delete mode 100644 cmd/lotus-shed/indexes.go create mode 100644 documentation/en/chain-indexer-overview-for-operators.md delete mode 100644 itests/msgindex_test.go create mode 100644 node/impl/full/chain_index.go delete mode 100644 node/impl/full/txhashmanager.go create mode 100644 node/modules/chainindex.go delete mode 100644 node/modules/msgindex.go diff --git a/CHANGELOG.md b/CHANGELOG.md index 350048fcb8d..009c2197243 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,7 @@ # UNRELEASED ## New features +- New ChainIndexer subsystem to index Filecoin chain state such as tipsets, messages, events and ETH transactions for accurate and faster RPC responses. The `ChainIndexer` replaces the existing `MsgIndex`, `EthTxHashLookup` and `EventIndex` implementations in Lotus, which [suffer from a multitude of known problems](https://github.com/filecoin-project/lotus/issues/12293). If you are an RPC provider or a node operator who uses or exposes Ethereum and/or events APIs, please refer to the [ChainIndexer documentation for operators](./documentation/en/chain-indexer-overview-for-operators.md) for information on how to enable, configure and use the new Indexer. 
While there is no automated data migration and one can upgrade and downgrade without backups, there are manual steps that need to be taken to backfill data when upgrading to this Lotus version, or downgrading to the previous version without ChainIndexer. Please be aware that this feature removes some options from the Lotus configuration file; if these have been set, Lotus will report an error when starting. See the documentation for more information. - Return a consistent error when encountering null rounds in ETH RPC method calls. ([filecoin-project/lotus#12655](https://github.com/filecoin-project/lotus/pull/12655)) - Reduce size of embedded genesis CAR files by removing WASM actor blocks and compressing with zstd. This reduces the `lotus` binary size by approximately 10 MiB. ([filecoin-project/lotus#12439](https://github.com/filecoin-project/lotus/pull/12439)) - Add ChainSafe operated Calibration archival node to the bootstrap list ([filecoin-project/lotus#12517](https://github.com/filecoin-project/lotus/pull/12517)) diff --git a/api/api_full.go b/api/api_full.go index d48200f4143..857bd98024e 100644 --- a/api/api_full.go +++ b/api/api_full.go @@ -63,6 +63,37 @@ type FullNode interface { Common Net + // MethodGroup: ChainIndexer + // The ChainIndexer method group contains methods for interacting with the chain indexer. + + // ChainValidateIndex validates the integrity of and optionally backfills + // the chain index at a specific epoch. + // + // It can be used to: + // + // 1. Validate the chain index at a specific epoch: + // - Ensures consistency between indexed data and actual chain state + // - Reports any errors found during validation (e.g. the indexed data does not match the actual chain state, missing data, etc.) + // + // 2. Optionally backfill missing data: + // - Backfills data if the index is missing information for the specified epoch + // - Backfilling only occurs when the `backfill` parameter is set to `true` + // + // 3. Detect "holes" in the index: + // - If `backfill` is `false` and the index lacks data for the specified epoch, the API returns an error indicating missing data + // + // Parameters: + // - epoch: The specific chain epoch for which to validate/backfill the index. + // - backfill: A boolean flag indicating whether to attempt backfilling of missing data if the index does not have data for the + // specified epoch. + // + // Returns: + // - *types.IndexValidation: A pointer to an IndexValidation struct containing the results of the validation/backfill. + // - error: An error object if the validation/backfill fails. The error message will contain details about the index + // corruption if the call fails because of an inconsistency between indexed data and the actual chain state. + // Note: The API returns an error if the index does not have data for the specified epoch and backfill is set to false. + ChainValidateIndex(ctx context.Context, epoch abi.ChainEpoch, backfill bool) (*types.IndexValidation, error) //perm:write + // MethodGroup: Chain + // The Chain method group contains methods for interacting with the + // blockchain, but that do not require any form of state computation. 
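The hunk above is the entire public surface of the new API. As a quick orientation for reviewers, here is a minimal sketch of driving `ChainValidateIndex` from the Go JSON-RPC client. It is illustrative only and not part of this patch; the endpoint, the nil auth header, and the chosen epoch are assumptions, and a real node needs a token with `write` permission in the request header since the method is declared `perm:write`.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/filecoin-project/go-state-types/abi"

	"github.com/filecoin-project/lotus/api/client"
)

func main() {
	ctx := context.Background()

	// Endpoint and (nil) auth header are placeholders; ChainValidateIndex is
	// perm:write, so a real call needs an authorized token in the header.
	full, closer, err := client.NewFullNodeRPCV1(ctx, "ws://127.0.0.1:1234/rpc/v1", nil)
	if err != nil {
		log.Fatalf("connecting to node: %v", err)
	}
	defer closer()

	// Validate the index at epoch 10101 and backfill it if data is missing.
	// With backfill=false the call would instead error on a missing epoch.
	v, err := full.ChainValidateIndex(ctx, abi.ChainEpoch(10101), true)
	if err != nil {
		log.Fatalf("validation failed (missing data or index inconsistency): %v", err)
	}
	fmt.Printf("height=%d null=%v backfilled=%v msgs=%d events=%d entries=%d\n",
		v.Height, v.IsNullRound, v.Backfilled,
		v.IndexedMessagesCount, v.IndexedEventsCount, v.IndexedEventEntriesCount)
}
```

Operators who prefer a CLI can reach the same validate/backfill behaviour through the `lotus-shed chainindex validate-backfill` command referenced in the commit log above.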
diff --git a/api/mocks/mock_full.go b/api/mocks/mock_full.go index dea365d334d..9e7f4403446 100644 --- a/api/mocks/mock_full.go +++ b/api/mocks/mock_full.go @@ -511,6 +511,21 @@ func (mr *MockFullNodeMockRecorder) ChainTipSetWeight(arg0, arg1 interface{}) *g return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainTipSetWeight", reflect.TypeOf((*MockFullNode)(nil).ChainTipSetWeight), arg0, arg1) } +// ChainValidateIndex mocks base method. +func (m *MockFullNode) ChainValidateIndex(arg0 context.Context, arg1 abi.ChainEpoch, arg2 bool) (*types.IndexValidation, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainValidateIndex", arg0, arg1, arg2) + ret0, _ := ret[0].(*types.IndexValidation) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainValidateIndex indicates an expected call of ChainValidateIndex. +func (mr *MockFullNodeMockRecorder) ChainValidateIndex(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainValidateIndex", reflect.TypeOf((*MockFullNode)(nil).ChainValidateIndex), arg0, arg1, arg2) +} + // Closing mocks base method. func (m *MockFullNode) Closing(arg0 context.Context) (<-chan struct{}, error) { m.ctrl.T.Helper() diff --git a/api/proxy_gen.go b/api/proxy_gen.go index f8c186cd6b9..f8aa37f87ce 100644 --- a/api/proxy_gen.go +++ b/api/proxy_gen.go @@ -170,6 +170,8 @@ type FullNodeMethods struct { ChainTipSetWeight func(p0 context.Context, p1 types.TipSetKey) (types.BigInt, error) `perm:"read"` + ChainValidateIndex func(p0 context.Context, p1 abi.ChainEpoch, p2 bool) (*types.IndexValidation, error) `perm:"write"` + CreateBackup func(p0 context.Context, p1 string) error `perm:"admin"` EthAccounts func(p0 context.Context) ([]ethtypes.EthAddress, error) `perm:"read"` @@ -1655,6 +1657,17 @@ func (s *FullNodeStub) ChainTipSetWeight(p0 context.Context, p1 types.TipSetKey) return *new(types.BigInt), ErrNotSupported } +func (s *FullNodeStruct) ChainValidateIndex(p0 context.Context, p1 abi.ChainEpoch, p2 bool) (*types.IndexValidation, error) { + if s.Internal.ChainValidateIndex == nil { + return nil, ErrNotSupported + } + return s.Internal.ChainValidateIndex(p0, p1, p2) +} + +func (s *FullNodeStub) ChainValidateIndex(p0 context.Context, p1 abi.ChainEpoch, p2 bool) (*types.IndexValidation, error) { + return nil, ErrNotSupported +} + func (s *FullNodeStruct) CreateBackup(p0 context.Context, p1 string) error { if s.Internal.CreateBackup == nil { return ErrNotSupported diff --git a/build/openrpc/full.json b/build/openrpc/full.json index 2656d2dc955..7dc3a43e8d9 100644 --- a/build/openrpc/full.json +++ b/build/openrpc/full.json @@ -37,7 +37,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1350" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1352" } }, { @@ -60,7 +60,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1361" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1363" } }, { @@ -103,7 +103,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1372" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1374" } }, { @@ -214,7 +214,7 @@ "deprecated": false, 
"externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1394" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1396" } }, { @@ -454,7 +454,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1405" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1407" } }, { @@ -685,7 +685,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1416" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1418" } }, { @@ -784,7 +784,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1427" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1429" } }, { @@ -816,7 +816,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1438" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1440" } }, { @@ -922,7 +922,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1449" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1451" } }, { @@ -1019,7 +1019,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1460" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1462" } }, { @@ -1078,7 +1078,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1471" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1473" } }, { @@ -1171,7 +1171,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1482" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1484" } }, { @@ -1255,7 +1255,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1493" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1495" } }, { @@ -1355,7 +1355,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1504" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1506" } }, { @@ -1411,7 +1411,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1515" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1517" } }, { @@ -1484,7 +1484,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1526" + "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1528" } }, { @@ -1557,7 +1557,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1537" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1539" } }, { @@ -1604,7 +1604,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1548" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1550" } }, { @@ -1636,7 +1636,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1559" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1561" } }, { @@ -1691,7 +1691,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1570" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1572" } }, { @@ -1743,7 +1743,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1592" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1594" } }, { @@ -1780,7 +1780,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1603" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1605" } }, { @@ -1827,7 +1827,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1614" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1616" } }, { @@ -1874,7 +1874,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1625" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1627" } }, { @@ -1954,7 +1954,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1636" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1638" } }, { @@ -2006,7 +2006,111 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1647" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1649" + } + }, + { + "name": "Filecoin.ChainValidateIndex", + "description": "```go\nfunc (s *FullNodeStruct) ChainValidateIndex(p0 context.Context, p1 abi.ChainEpoch, p2 bool) (*types.IndexValidation, error) {\n\tif s.Internal.ChainValidateIndex == nil {\n\t\treturn nil, ErrNotSupported\n\t}\n\treturn s.Internal.ChainValidateIndex(p0, p1, p2)\n}\n```", + "summary": "ChainValidateIndex validates the integrity of and optionally backfills\nthe chain index at a specific epoch.\n\nIt can be used to:\n\n1. 
Validate the chain index at a specific epoch:\n - Ensures consistency between indexed data and actual chain state\n - Reports any errors found during validation (e.g. the indexed data does not match the actual chain state, missing data, etc.)\n\n2. Optionally backfill missing data:\n - Backfills data if the index is missing information for the specified epoch\n - Backfilling only occurs when the `backfill` parameter is set to `true`\n\n3. Detect \"holes\" in the index:\n - If `backfill` is `false` and the index lacks data for the specified epoch, the API returns an error indicating missing data\n\nParameters:\n - epoch: The specific chain epoch for which to validate/backfill the index.\n - backfill: A boolean flag indicating whether to attempt backfilling of missing data if the index does not have data for the\n specified epoch.\n\nReturns:\n - *types.IndexValidation: A pointer to an IndexValidation struct containing the results of the validation/backfill.\n - error: An error object if the validation/backfill fails. The error message will contain details about the index\n corruption if the call fails because of an inconsistency between indexed data and the actual chain state.\n Note: The API returns an error if the index does not have data for the specified epoch and backfill is set to false.\n", + "paramStructure": "by-position", + "params": [ + { + "name": "p1", + "description": "abi.ChainEpoch", + "summary": "", + "schema": { + "title": "number", + "description": "Number is a number", + "examples": [ + 10101 + ], + "type": [ + "number" + ] + }, + "required": true, + "deprecated": false + }, + { + "name": "p2", + "description": "bool", + "summary": "", + "schema": { + "examples": [ + true + ], + "type": [ + "boolean" + ] + }, + "required": true, + "deprecated": false + } + ], + "result": { + "name": "*types.IndexValidation", + "description": "*types.IndexValidation", + "summary": "", + "schema": { + "examples": [ + { + "TipSetKey": [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ], + "Height": 10101, + "IndexedMessagesCount": 42, + "IndexedEventsCount": 42, + "IndexedEventEntriesCount": 42, + "Backfilled": true, + "IsNullRound": true + } + ], + "additionalProperties": false, + "properties": { + "Backfilled": { + "type": "boolean" + }, + "Height": { + "title": "number", + "type": "number" + }, + "IndexedEventEntriesCount": { + "title": "number", + "type": "number" + }, + "IndexedEventsCount": { + "title": "number", + "type": "number" + }, + "IndexedMessagesCount": { + "title": "number", + "type": "number" + }, + "IsNullRound": { + "type": "boolean" + }, + "TipSetKey": { + "additionalProperties": false, + "type": "object" + } + }, + "type": [ + "object" + ] + }, + "required": true, + "deprecated": false + }, + "deprecated": false, + "externalDocs": { + "description": "Github remote link", + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1660" + } }, { @@ -2045,7 +2149,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1658" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1671" } }, { @@ -2092,7 +2196,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1682" } }, { @@ -2147,7 +2251,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1680" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1693" } }, { @@ -2176,7 +2280,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1691" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1704" } }, { @@ -2313,7 +2417,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1702" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1715" } }, { @@ -2342,7 +2446,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1713" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1726" } }, { @@ -2396,7 +2500,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1724" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1737" } }, { @@ -2487,7 +2591,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1735" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1748" } }, { @@ -2515,7 +2619,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1746" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1759" } }, { @@ -2605,7 +2709,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1757" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1770" } }, { @@ -2861,7 +2965,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1768" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1781" } }, { @@ -3106,7 +3210,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1779" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1792" } }, { @@ -3382,7 +3486,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1790" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1803" } }, { @@ -3675,7 +3779,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1801" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1814" } }, { @@ -3731,7 +3835,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote 
link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1812" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1825" } }, { @@ -3778,7 +3882,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1823" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1836" } }, { @@ -3876,7 +3980,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1834" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1847" } }, { @@ -3942,7 +4046,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1845" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1858" } }, { @@ -4008,7 +4112,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1856" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1869" } }, { @@ -4117,7 +4221,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1867" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1880" } }, { @@ -4175,7 +4279,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1878" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1891" } }, { @@ -4297,7 +4401,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1889" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1902" } }, { @@ -4506,7 +4610,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1900" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1913" } }, { @@ -4704,7 +4808,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1911" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1924" } }, { @@ -4896,7 +5000,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1922" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1935" } }, { @@ -5105,7 +5209,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1933" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1946" } }, { @@ -5196,7 +5300,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1944" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1957" 
} }, { @@ -5254,7 +5358,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1955" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1968" } }, { @@ -5512,7 +5616,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1966" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1979" } }, { @@ -5787,7 +5891,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1977" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1990" } }, { @@ -5815,7 +5919,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1988" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2001" } }, { @@ -5853,7 +5957,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1999" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2012" } }, { @@ -5961,7 +6065,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2010" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2023" } }, { @@ -5999,7 +6103,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2021" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2034" } }, { @@ -6028,7 +6132,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2032" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2045" } }, { @@ -6091,7 +6195,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2043" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2056" } }, { @@ -6154,7 +6258,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2054" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2067" } }, { @@ -6217,7 +6321,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2065" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2078" } }, { @@ -6262,7 +6366,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2076" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2089" } }, { @@ -6384,7 +6488,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2087" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2100" } }, { @@ -6560,7 +6664,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2098" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2111" } }, { @@ -6715,7 +6819,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2109" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2122" } }, { @@ -6837,7 +6941,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2120" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2133" } }, { @@ -6891,7 +6995,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2131" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2144" } }, { @@ -6945,7 +7049,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2142" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2155" } }, { @@ -7130,7 +7234,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2153" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2166" } }, { @@ -7213,7 +7317,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2164" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2177" } }, { @@ -7296,7 +7400,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2175" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2188" } }, { @@ -7463,7 +7567,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2186" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2199" } }, { @@ -7668,7 +7772,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2197" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2210" } }, { @@ -7762,7 +7866,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2208" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2221" } }, { @@ -7808,7 +7912,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2219" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2232" } }, { @@ 
-7835,7 +7939,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2230" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2243" } }, { @@ -7890,7 +7994,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2241" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2254" } }, { @@ -7969,7 +8073,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2252" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2265" } }, { @@ -8032,7 +8136,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2263" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2276" } }, { @@ -8175,7 +8279,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2274" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2287" } }, { @@ -8302,7 +8406,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2285" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2298" } }, { @@ -8404,7 +8508,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2296" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2309" } }, { @@ -8627,7 +8731,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2307" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2320" } }, { @@ -8810,7 +8914,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2318" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2331" } }, { @@ -8890,7 +8994,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2329" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2342" } }, { @@ -8935,7 +9039,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2340" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2353" } }, { @@ -8991,7 +9095,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2351" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2364" } }, { @@ -9071,7 +9175,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2362" 
+ "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2375" } }, { @@ -9151,7 +9255,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2373" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2386" } }, { @@ -9636,7 +9740,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2384" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2397" } }, { @@ -9830,7 +9934,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2395" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2408" } }, { @@ -9985,7 +10089,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2406" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2419" } }, { @@ -10234,7 +10338,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2417" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2430" } }, { @@ -10389,7 +10493,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2428" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2441" } }, { @@ -10566,7 +10670,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2439" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2452" } }, { @@ -10664,7 +10768,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2450" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2463" } }, { @@ -10829,7 +10933,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2461" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2474" } }, { @@ -10868,7 +10972,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2472" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2485" } }, { @@ -10933,7 +11037,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2483" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2496" } }, { @@ -10979,7 +11083,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2494" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2507" } }, { @@ -11129,7 +11233,7 @@ "deprecated": false, "externalDocs": { 
"description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2505" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2518" } }, { @@ -11266,7 +11370,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2516" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2529" } }, { @@ -11497,7 +11601,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2527" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2540" } }, { @@ -11634,7 +11738,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2538" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2551" } }, { @@ -11799,7 +11903,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2549" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2562" } }, { @@ -11876,7 +11980,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2560" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2573" } }, { @@ -12071,7 +12175,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2582" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2595" } }, { @@ -12250,7 +12354,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2593" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2606" } }, { @@ -12412,7 +12516,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2604" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2617" } }, { @@ -12560,7 +12664,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2615" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2628" } }, { @@ -12788,7 +12892,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2626" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2639" } }, { @@ -12936,7 +13040,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2637" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2650" } }, { @@ -13148,7 +13252,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2648" + "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2661" } }, { @@ -13354,7 +13458,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2659" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2672" } }, { @@ -13422,7 +13526,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2670" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2683" } }, { @@ -13539,7 +13643,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2681" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2694" } }, { @@ -13630,7 +13734,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2692" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2705" } }, { @@ -13716,7 +13820,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2703" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2716" } }, { @@ -13911,7 +14015,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2714" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2727" } }, { @@ -14073,7 +14177,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2725" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2738" } }, { @@ -14269,7 +14373,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2736" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2749" } }, { @@ -14449,7 +14553,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2747" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2760" } }, { @@ -14612,7 +14716,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2758" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2771" } }, { @@ -14639,7 +14743,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2769" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2782" } }, { @@ -14666,7 +14770,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2780" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2793" } }, { @@ -14765,7 +14869,7 @@ "deprecated": false, "externalDocs": { 
"description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2791" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2804" } }, { @@ -14811,7 +14915,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2802" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2815" } }, { @@ -14911,7 +15015,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2813" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2826" } }, { @@ -15027,7 +15131,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2824" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2837" } }, { @@ -15075,7 +15179,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2835" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2848" } }, { @@ -15167,7 +15271,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2846" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2859" } }, { @@ -15282,7 +15386,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2857" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2870" } }, { @@ -15330,7 +15434,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2868" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2881" } }, { @@ -15367,7 +15471,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2879" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2892" } }, { @@ -15639,7 +15743,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2890" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2903" } }, { @@ -15687,7 +15791,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2901" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2914" } }, { @@ -15745,7 +15849,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2912" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2925" } }, { @@ -15950,7 +16054,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2923" + "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2936" } }, { @@ -16153,7 +16257,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2934" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2947" } }, { @@ -16322,7 +16426,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2945" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2958" } }, { @@ -16526,7 +16630,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2956" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2969" } }, { @@ -16693,7 +16797,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2967" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2980" } }, { @@ -16900,7 +17004,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2978" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2991" } }, { @@ -16968,7 +17072,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2989" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3002" } }, { @@ -17020,7 +17124,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3000" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3013" } }, { @@ -17069,7 +17173,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3011" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3024" } }, { @@ -17160,7 +17264,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3022" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3035" } }, { @@ -17666,7 +17770,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3033" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3046" } }, { @@ -17772,7 +17876,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3044" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3057" } }, { @@ -17824,7 +17928,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3055" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3068" } }, { @@ -18376,7 +18480,7 @@ "deprecated": false, "externalDocs": { 
"description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3066" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3079" } }, { @@ -18490,7 +18594,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3077" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3090" } }, { @@ -18587,7 +18691,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3088" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3101" } }, { @@ -18687,7 +18791,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3099" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3112" } }, { @@ -18775,7 +18879,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3110" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3123" } }, { @@ -18875,7 +18979,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3121" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3134" } }, { @@ -18962,7 +19066,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3132" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3145" } }, { @@ -19053,7 +19157,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3143" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3156" } }, { @@ -19178,7 +19282,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3154" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3167" } }, { @@ -19287,7 +19391,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3165" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3178" } }, { @@ -19357,7 +19461,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3176" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3189" } }, { @@ -19460,7 +19564,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3187" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3200" } }, { @@ -19521,7 +19625,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3198" + "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3211" } }, { @@ -19651,7 +19755,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3209" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3222" } }, { @@ -19758,7 +19862,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3220" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3233" } }, { @@ -19977,7 +20081,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3231" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3244" } }, { @@ -20054,7 +20158,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3242" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3255" } }, { @@ -20131,7 +20235,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3253" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3266" } }, { @@ -20240,7 +20344,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3264" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3277" } }, { @@ -20349,7 +20453,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3275" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3288" } }, { @@ -20410,7 +20514,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3286" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3299" } }, { @@ -20520,7 +20624,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3297" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3310" } }, { @@ -20581,7 +20685,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3308" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3321" } }, { @@ -20649,7 +20753,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3319" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3332" } }, { @@ -20717,7 +20821,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3330" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3343" } }, { @@ -20798,7 +20902,7 @@ "deprecated": false, "externalDocs": { 
"description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3341" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3354" } }, { @@ -20952,7 +21056,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3352" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3365" } }, { @@ -21024,7 +21128,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3363" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3376" } }, { @@ -21188,7 +21292,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3374" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3387" } }, { @@ -21353,7 +21457,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3385" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3398" } }, { @@ -21423,7 +21527,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3396" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3409" } }, { @@ -21491,7 +21595,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3407" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3420" } }, { @@ -21584,7 +21688,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3418" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3431" } }, { @@ -21655,7 +21759,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3429" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3442" } }, { @@ -21856,7 +21960,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3440" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3453" } }, { @@ -21988,7 +22092,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3451" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3464" } }, { @@ -22091,7 +22195,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3462" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3475" } }, { @@ -22228,7 +22332,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3473" + "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3486" } }, { @@ -22339,7 +22443,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3484" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3497" } }, { @@ -22471,7 +22575,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3495" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3508" } }, { @@ -22602,7 +22706,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3506" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3519" } }, { @@ -22673,7 +22777,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3517" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3530" } }, { @@ -22757,7 +22861,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3528" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3541" } }, { @@ -22843,7 +22947,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3539" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3552" } }, { @@ -23026,7 +23130,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3550" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3563" } }, { @@ -23053,7 +23157,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3561" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3574" } }, { @@ -23106,7 +23210,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3572" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3585" } }, { @@ -23194,7 +23298,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3583" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3596" } }, { @@ -23645,7 +23749,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3594" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3607" } }, { @@ -23812,7 +23916,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3605" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3618" } }, { @@ -23910,7 +24014,7 @@ "deprecated": false, "externalDocs": { 
"description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3616" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3629" } }, { @@ -24083,7 +24187,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3627" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3640" } }, { @@ -24181,7 +24285,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3638" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3651" } }, { @@ -24332,7 +24436,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3649" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3662" } }, { @@ -24417,7 +24521,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3660" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3673" } }, { @@ -24485,7 +24589,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3671" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3684" } }, { @@ -24537,7 +24641,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3682" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3695" } }, { @@ -24605,7 +24709,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3693" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3706" } }, { @@ -24766,7 +24870,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3704" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3717" } }, { @@ -24813,7 +24917,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3726" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3739" } }, { @@ -24860,7 +24964,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3737" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3750" } }, { @@ -24903,7 +25007,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3759" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3772" } }, { @@ -24999,7 +25103,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3770" + "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3783" } }, { @@ -25265,7 +25369,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3781" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3794" } }, { @@ -25288,7 +25392,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3792" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3805" } }, { @@ -25331,7 +25435,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3803" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3816" } }, { @@ -25382,7 +25486,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3814" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3827" } }, { @@ -25427,7 +25531,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3825" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3838" } }, { @@ -25455,7 +25559,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3836" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3849" } }, { @@ -25495,7 +25599,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3847" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3860" } }, { @@ -25554,7 +25658,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3858" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3871" } }, { @@ -25598,7 +25702,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3869" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3882" } }, { @@ -25657,7 +25761,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3880" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3893" } }, { @@ -25694,7 +25798,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3891" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3904" } }, { @@ -25738,7 +25842,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3902" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3915" } }, { @@ -25778,7 +25882,7 @@ "deprecated": false, "externalDocs": { 
"description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3913" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3926" } }, { @@ -25853,7 +25957,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3924" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3937" } }, { @@ -26061,7 +26165,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3935" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3948" } }, { @@ -26105,7 +26209,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3946" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3959" } }, { @@ -26195,7 +26299,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3957" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3970" } }, { @@ -26222,7 +26326,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3968" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3981" } } ] diff --git a/build/openrpc/gateway.json b/build/openrpc/gateway.json index 2b8e807bbb2..29e14386a11 100644 --- a/build/openrpc/gateway.json +++ b/build/openrpc/gateway.json @@ -242,7 +242,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3979" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3992" } }, { @@ -473,7 +473,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3990" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4003" } }, { @@ -572,7 +572,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4001" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4014" } }, { @@ -604,7 +604,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4012" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4025" } }, { @@ -710,7 +710,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4023" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4036" } }, { @@ -803,7 +803,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4034" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4047" } }, { @@ -887,7 +887,7 @@ "deprecated": false, "externalDocs": { "description": 
"Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4045" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4058" } }, { @@ -987,7 +987,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4056" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4069" } }, { @@ -1043,7 +1043,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4067" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4080" } }, { @@ -1116,7 +1116,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4078" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4091" } }, { @@ -1189,7 +1189,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4089" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4102" } }, { @@ -1236,7 +1236,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4100" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4113" } }, { @@ -1268,7 +1268,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4111" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4124" } }, { @@ -1305,7 +1305,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4133" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4146" } }, { @@ -1352,7 +1352,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4144" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4157" } }, { @@ -1392,7 +1392,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4155" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4168" } }, { @@ -1439,7 +1439,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4166" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4179" } }, { @@ -1494,7 +1494,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4177" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4190" } }, { @@ -1523,7 +1523,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4188" + "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4201" } }, { @@ -1660,7 +1660,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4199" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4212" } }, { @@ -1689,7 +1689,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4210" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4223" } }, { @@ -1743,7 +1743,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4221" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4234" } }, { @@ -1834,7 +1834,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4232" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4245" } }, { @@ -1862,7 +1862,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4243" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4256" } }, { @@ -1952,7 +1952,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4254" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4267" } }, { @@ -2208,7 +2208,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4265" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4278" } }, { @@ -2453,7 +2453,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4276" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4289" } }, { @@ -2729,7 +2729,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4287" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4300" } }, { @@ -3022,7 +3022,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4298" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4311" } }, { @@ -3078,7 +3078,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4309" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4322" } }, { @@ -3125,7 +3125,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4320" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4333" } }, { @@ -3223,7 +3223,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote 
link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4331" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4344" } }, { @@ -3289,7 +3289,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4342" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4355" } }, { @@ -3355,7 +3355,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4353" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4366" } }, { @@ -3464,7 +3464,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4364" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4377" } }, { @@ -3522,7 +3522,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4375" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4388" } }, { @@ -3644,7 +3644,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4386" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4399" } }, { @@ -3853,7 +3853,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4397" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4410" } }, { @@ -4051,7 +4051,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4408" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4421" } }, { @@ -4243,7 +4243,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4419" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4432" } }, { @@ -4452,7 +4452,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4430" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4443" } }, { @@ -4543,7 +4543,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4441" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4454" } }, { @@ -4601,7 +4601,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4452" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4465" } }, { @@ -4859,7 +4859,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4463" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4476" 
} }, { @@ -5134,7 +5134,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4474" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4487" } }, { @@ -5162,7 +5162,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4485" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4498" } }, { @@ -5200,7 +5200,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4496" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4509" } }, { @@ -5308,7 +5308,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4507" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4520" } }, { @@ -5346,7 +5346,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4518" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4531" } }, { @@ -5375,7 +5375,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4529" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4542" } }, { @@ -5438,7 +5438,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4540" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4553" } }, { @@ -5501,7 +5501,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4551" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4564" } }, { @@ -5546,7 +5546,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4562" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4575" } }, { @@ -5668,7 +5668,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4573" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4586" } }, { @@ -5844,7 +5844,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4584" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4597" } }, { @@ -5999,7 +5999,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4595" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4608" } }, { @@ -6121,7 +6121,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4606" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4619" } }, { @@ -6175,7 +6175,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4617" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4630" } }, { @@ -6229,7 +6229,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4628" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4641" } }, { @@ -6292,7 +6292,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4639" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4652" } }, { @@ -6394,7 +6394,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4650" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4663" } }, { @@ -6617,7 +6617,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4661" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4674" } }, { @@ -6800,7 +6800,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4672" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4685" } }, { @@ -6994,7 +6994,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4683" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4696" } }, { @@ -7040,7 +7040,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4694" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4707" } }, { @@ -7190,7 +7190,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4705" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4718" } }, { @@ -7327,7 +7327,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4716" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4729" } }, { @@ -7395,7 +7395,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4727" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4740" } }, { @@ -7512,7 +7512,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4738" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4751" } }, { @@ 
-7603,7 +7603,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4749" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4762" } }, { @@ -7689,7 +7689,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4760" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4773" } }, { @@ -7716,7 +7716,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4771" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4784" } }, { @@ -7743,7 +7743,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4782" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4795" } }, { @@ -7811,7 +7811,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4793" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4806" } }, { @@ -8317,7 +8317,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4804" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4817" } }, { @@ -8414,7 +8414,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4815" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4828" } }, { @@ -8514,7 +8514,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4826" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4839" } }, { @@ -8614,7 +8614,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4837" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4850" } }, { @@ -8739,7 +8739,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4848" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4861" } }, { @@ -8848,7 +8848,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4859" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4872" } }, { @@ -8951,7 +8951,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4870" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4883" } }, { @@ -9081,7 +9081,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4881" 
+ "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4894" } }, { @@ -9188,7 +9188,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4892" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4905" } }, { @@ -9249,7 +9249,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4903" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4916" } }, { @@ -9317,7 +9317,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4914" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4927" } }, { @@ -9398,7 +9398,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4925" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4938" } }, { @@ -9562,7 +9562,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4936" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4949" } }, { @@ -9655,7 +9655,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4947" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4960" } }, { @@ -9856,7 +9856,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4958" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4971" } }, { @@ -9967,7 +9967,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4969" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4982" } }, { @@ -10098,7 +10098,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4980" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4993" } }, { @@ -10184,7 +10184,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4991" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5004" } }, { @@ -10211,7 +10211,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5002" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5015" } }, { @@ -10264,7 +10264,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5013" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5026" } }, { @@ -10352,7 +10352,7 @@ "deprecated": false, "externalDocs": { "description": 
"Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5024" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5037" } }, { @@ -10803,7 +10803,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5035" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5048" } }, { @@ -10970,7 +10970,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5046" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5059" } }, { @@ -11143,7 +11143,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5057" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5070" } }, { @@ -11211,7 +11211,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5068" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5081" } }, { @@ -11279,7 +11279,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5079" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5092" } }, { @@ -11440,7 +11440,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5090" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5103" } }, { @@ -11485,7 +11485,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5112" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5125" } }, { @@ -11530,7 +11530,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5123" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5136" } }, { @@ -11557,7 +11557,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5134" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5147" } } ] diff --git a/build/openrpc/miner.json b/build/openrpc/miner.json index 6e06356cc55..9bffe335a8e 100644 --- a/build/openrpc/miner.json +++ b/build/openrpc/miner.json @@ -30,7 +30,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5420" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5433" } }, { @@ -109,7 +109,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5431" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5444" } }, { @@ -155,7 +155,7 @@ "deprecated": false, "externalDocs": { "description": "Github 
remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5442" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5455" } }, { @@ -203,7 +203,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5453" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5466" } }, { @@ -251,7 +251,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5464" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5477" } }, { @@ -354,7 +354,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5475" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5488" } }, { @@ -428,7 +428,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5486" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5499" } }, { @@ -591,7 +591,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5497" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5510" } }, { @@ -742,7 +742,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5508" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5521" } }, { @@ -781,7 +781,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5519" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5532" } }, { @@ -913,7 +913,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5530" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5543" } }, { @@ -945,7 +945,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5541" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5554" } }, { @@ -986,7 +986,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5552" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5565" } }, { @@ -1054,7 +1054,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5563" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5576" } }, { @@ -1185,7 +1185,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5574" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5587" } }, { @@ 
-1316,7 +1316,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5585" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5598" } }, { @@ -1416,7 +1416,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5596" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5609" } }, { @@ -1516,7 +1516,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5607" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5620" } }, { @@ -1616,7 +1616,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5618" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5631" } }, { @@ -1716,7 +1716,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5629" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5642" } }, { @@ -1816,7 +1816,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5640" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5653" } }, { @@ -1916,7 +1916,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5651" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5664" } }, { @@ -2040,7 +2040,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5662" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5675" } }, { @@ -2164,7 +2164,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5673" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5686" } }, { @@ -2279,7 +2279,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5684" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5697" } }, { @@ -2379,7 +2379,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5695" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5708" } }, { @@ -2512,7 +2512,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5706" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5719" } }, { @@ -2636,7 +2636,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5717" 
+ "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5730" } }, { @@ -2760,7 +2760,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5728" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5741" } }, { @@ -2884,7 +2884,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5739" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5752" } }, { @@ -3017,7 +3017,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5750" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5763" } }, { @@ -3117,7 +3117,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5761" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5774" } }, { @@ -3157,7 +3157,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5772" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5785" } }, { @@ -3229,7 +3229,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5783" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5796" } }, { @@ -3279,7 +3279,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5794" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5807" } }, { @@ -3323,7 +3323,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5805" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5818" } }, { @@ -3364,7 +3364,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5816" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5829" } }, { @@ -3608,7 +3608,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5827" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5840" } }, { @@ -3682,7 +3682,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5838" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5851" } }, { @@ -3732,7 +3732,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5849" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5862" } }, { @@ -3761,7 +3761,7 @@ "deprecated": false, "externalDocs": { "description": "Github 
remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5860" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5873" } }, { @@ -3790,7 +3790,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5871" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5884" } }, { @@ -3846,7 +3846,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5882" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5895" } }, { @@ -3869,7 +3869,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5893" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5906" } }, { @@ -3929,7 +3929,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5904" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5917" } }, { @@ -3968,7 +3968,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5915" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5928" } }, { @@ -4008,7 +4008,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5926" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5939" } }, { @@ -4081,7 +4081,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5937" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5950" } }, { @@ -4145,7 +4145,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5948" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5961" } }, { @@ -4208,7 +4208,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5959" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5972" } }, { @@ -4258,7 +4258,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5970" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5983" } }, { @@ -4817,7 +4817,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5981" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5994" } }, { @@ -4858,7 +4858,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5992" + "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6005" } }, { @@ -4899,7 +4899,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6003" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6016" } }, { @@ -4940,7 +4940,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6014" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6027" } }, { @@ -4981,7 +4981,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6025" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6038" } }, { @@ -5022,7 +5022,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6036" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6049" } }, { @@ -5053,7 +5053,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6047" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6060" } }, { @@ -5103,7 +5103,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6058" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6071" } }, { @@ -5144,7 +5144,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6069" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6082" } }, { @@ -5183,7 +5183,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6080" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6093" } }, { @@ -5247,7 +5247,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6091" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6104" } }, { @@ -5305,7 +5305,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6102" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6115" } }, { @@ -5752,7 +5752,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6113" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6126" } }, { @@ -5788,7 +5788,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6124" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6137" } }, { @@ -5931,7 +5931,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote 
link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6135" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6148" } }, { @@ -5987,7 +5987,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6146" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6159" } }, { @@ -6026,7 +6026,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6157" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6170" } }, { @@ -6203,7 +6203,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6168" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6181" } }, { @@ -6255,7 +6255,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6179" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6192" } }, { @@ -6447,7 +6447,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6190" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6203" } }, { @@ -6547,7 +6547,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6201" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6214" } }, { @@ -6601,7 +6601,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6212" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6225" } }, { @@ -6640,7 +6640,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6223" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6236" } }, { @@ -6725,7 +6725,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6234" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6247" } }, { @@ -6919,7 +6919,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6245" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6258" } }, { @@ -7017,7 +7017,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6256" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6269" } }, { @@ -7149,7 +7149,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6267" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6280" 
} }, { @@ -7203,7 +7203,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6278" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6291" } }, { @@ -7237,7 +7237,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6289" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6302" } }, { @@ -7324,7 +7324,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6300" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6313" } }, { @@ -7378,7 +7378,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6311" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6324" } }, { @@ -7478,7 +7478,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6322" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6335" } }, { @@ -7555,7 +7555,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6333" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6346" } }, { @@ -7646,7 +7646,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6344" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6357" } }, { @@ -7685,7 +7685,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6355" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6368" } }, { @@ -7801,7 +7801,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6366" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6379" } }, { @@ -9901,7 +9901,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6377" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6390" } } ] diff --git a/build/openrpc/worker.json b/build/openrpc/worker.json index 6e0c472a07e..2ee6fcfd3a9 100644 --- a/build/openrpc/worker.json +++ b/build/openrpc/worker.json @@ -161,7 +161,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6465" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6478" } }, { @@ -252,7 +252,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6476" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6489" } }, { @@ -420,7 
+420,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6487" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6500" } }, { @@ -447,7 +447,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6498" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6511" } }, { @@ -597,7 +597,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6509" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6522" } }, { @@ -700,7 +700,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6520" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6533" } }, { @@ -803,7 +803,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6531" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6544" } }, { @@ -925,7 +925,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6542" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6555" } }, { @@ -1135,7 +1135,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6553" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6566" } }, { @@ -1306,7 +1306,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6564" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6577" } }, { @@ -3350,7 +3350,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6575" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6588" } }, { @@ -3470,7 +3470,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6586" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6599" } }, { @@ -3531,7 +3531,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6597" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6610" } }, { @@ -3569,7 +3569,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6608" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6621" } }, { @@ -3729,7 +3729,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6619" + "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6632" } }, { @@ -3913,7 +3913,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6630" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6643" } }, { @@ -4054,7 +4054,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6641" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6654" } }, { @@ -4107,7 +4107,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6652" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6665" } }, { @@ -4250,7 +4250,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6663" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6676" } }, { @@ -4474,7 +4474,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6674" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6687" } }, { @@ -4601,7 +4601,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6685" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6698" } }, { @@ -4768,7 +4768,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6696" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6709" } }, { @@ -4895,7 +4895,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6707" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6720" } }, { @@ -4933,7 +4933,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6718" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6731" } }, { @@ -4972,7 +4972,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6729" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6742" } }, { @@ -4995,7 +4995,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6740" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6753" } }, { @@ -5034,7 +5034,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6751" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6764" } }, { @@ -5057,7 +5057,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote 
link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6762" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6775" } }, { @@ -5096,7 +5096,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6773" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6786" } }, { @@ -5130,7 +5130,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6784" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6797" } }, { @@ -5184,7 +5184,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6795" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6808" } }, { @@ -5223,7 +5223,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6806" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6819" } }, { @@ -5262,7 +5262,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6817" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6830" } }, { @@ -5297,7 +5297,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6828" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6841" } }, { @@ -5477,7 +5477,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6839" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6852" } }, { @@ -5506,7 +5506,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6850" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6863" } }, { @@ -5529,7 +5529,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6861" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6874" } } ] diff --git a/chain/ethhashlookup/eth_transaction_hash_lookup.go b/chain/ethhashlookup/eth_transaction_hash_lookup.go deleted file mode 100644 index 2a34e37aa03..00000000000 --- a/chain/ethhashlookup/eth_transaction_hash_lookup.go +++ /dev/null @@ -1,150 +0,0 @@ -package ethhashlookup - -import ( - "context" - "database/sql" - "errors" - "strconv" - - "github.com/ipfs/go-cid" - _ "github.com/mattn/go-sqlite3" - "golang.org/x/xerrors" - - "github.com/filecoin-project/lotus/chain/types/ethtypes" - "github.com/filecoin-project/lotus/lib/sqlite" -) - -const DefaultDbFilename = "txhash.db" - -var ErrNotFound = errors.New("not found") - -var ddls = []string{ - `CREATE TABLE IF NOT EXISTS eth_tx_hashes ( - hash TEXT PRIMARY KEY NOT NULL, - cid TEXT NOT NULL UNIQUE, - insertion_time TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL - )`, 
- - `CREATE INDEX IF NOT EXISTS insertion_time_index ON eth_tx_hashes (insertion_time)`, -} - -const ( - insertTxHash = `INSERT INTO eth_tx_hashes (hash, cid) VALUES(?, ?) ON CONFLICT (hash) DO UPDATE SET insertion_time = CURRENT_TIMESTAMP` - getCidFromHash = `SELECT cid FROM eth_tx_hashes WHERE hash = ?` - getHashFromCid = `SELECT hash FROM eth_tx_hashes WHERE cid = ?` - deleteOlderThan = `DELETE FROM eth_tx_hashes WHERE insertion_time < datetime('now', ?);` -) - -type EthTxHashLookup struct { - db *sql.DB - - stmtInsertTxHash *sql.Stmt - stmtGetCidFromHash *sql.Stmt - stmtGetHashFromCid *sql.Stmt - stmtDeleteOlderThan *sql.Stmt -} - -func NewTransactionHashLookup(ctx context.Context, path string) (*EthTxHashLookup, error) { - db, _, err := sqlite.Open(path) - if err != nil { - return nil, xerrors.Errorf("failed to setup eth transaction hash lookup db: %w", err) - } - - if err := sqlite.InitDb(ctx, "eth transaction hash lookup", db, ddls, []sqlite.MigrationFunc{}); err != nil { - _ = db.Close() - return nil, xerrors.Errorf("failed to init eth transaction hash lookup db: %w", err) - } - - ei := &EthTxHashLookup{db: db} - - if err = ei.initStatements(); err != nil { - _ = ei.Close() - return nil, xerrors.Errorf("error preparing eth transaction hash lookup db statements: %w", err) - } - - return ei, nil -} - -func (ei *EthTxHashLookup) initStatements() (err error) { - ei.stmtInsertTxHash, err = ei.db.Prepare(insertTxHash) - if err != nil { - return xerrors.Errorf("prepare stmtInsertTxHash: %w", err) - } - ei.stmtGetCidFromHash, err = ei.db.Prepare(getCidFromHash) - if err != nil { - return xerrors.Errorf("prepare stmtGetCidFromHash: %w", err) - } - ei.stmtGetHashFromCid, err = ei.db.Prepare(getHashFromCid) - if err != nil { - return xerrors.Errorf("prepare stmtGetHashFromCid: %w", err) - } - ei.stmtDeleteOlderThan, err = ei.db.Prepare(deleteOlderThan) - if err != nil { - return xerrors.Errorf("prepare stmtDeleteOlderThan: %w", err) - } - return nil -} - -func (ei *EthTxHashLookup) UpsertHash(txHash ethtypes.EthHash, c cid.Cid) error { - if ei.db == nil { - return xerrors.New("db closed") - } - - _, err := ei.stmtInsertTxHash.Exec(txHash.String(), c.String()) - return err -} - -func (ei *EthTxHashLookup) GetCidFromHash(txHash ethtypes.EthHash) (cid.Cid, error) { - if ei.db == nil { - return cid.Undef, xerrors.New("db closed") - } - - row := ei.stmtGetCidFromHash.QueryRow(txHash.String()) - var c string - err := row.Scan(&c) - if err != nil { - if err == sql.ErrNoRows { - return cid.Undef, ErrNotFound - } - return cid.Undef, err - } - return cid.Decode(c) -} - -func (ei *EthTxHashLookup) GetHashFromCid(c cid.Cid) (ethtypes.EthHash, error) { - if ei.db == nil { - return ethtypes.EmptyEthHash, xerrors.New("db closed") - } - - row := ei.stmtGetHashFromCid.QueryRow(c.String()) - var hashString string - err := row.Scan(&hashString) - if err != nil { - if err == sql.ErrNoRows { - return ethtypes.EmptyEthHash, ErrNotFound - } - return ethtypes.EmptyEthHash, err - } - return ethtypes.ParseEthHash(hashString) -} - -func (ei *EthTxHashLookup) DeleteEntriesOlderThan(days int) (int64, error) { - if ei.db == nil { - return 0, xerrors.New("db closed") - } - - res, err := ei.stmtDeleteOlderThan.Exec("-" + strconv.Itoa(days) + " day") - if err != nil { - return 0, err - } - return res.RowsAffected() -} - -func (ei *EthTxHashLookup) Close() (err error) { - if ei.db == nil { - return nil - } - db := ei.db - ei.db = nil - return db.Close() -}
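For context on what this removal takes with it: the deleted lookup's retention mechanics hinge on `deleteOlderThan` binding the string "-N day" as a parameter so that SQLite's own datetime() computes the cutoff, with insertion_time defaulting to CURRENT_TIMESTAMP on upsert. A minimal, self-contained sketch of that pattern, illustrative only and not part of the patch (the table name and in-memory DSN are stand-ins):

package main

import (
	"database/sql"
	"fmt"
	"strconv"

	_ "github.com/mattn/go-sqlite3"
)

func main() {
	db, err := sql.Open("sqlite3", ":memory:") // stand-in for the real txhash.db path
	if err != nil {
		panic(err)
	}
	defer func() { _ = db.Close() }()

	// Rows record their own insertion time, as in the deleted eth_tx_hashes DDL.
	if _, err := db.Exec(`CREATE TABLE demo_hashes (
		hash TEXT PRIMARY KEY NOT NULL,
		insertion_time TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL
	)`); err != nil {
		panic(err)
	}

	// No time arithmetic happens in Go: SQLite evaluates datetime('now', '-7 day')
	// itself, mirroring the deleted DeleteEntriesOlderThan implementation.
	days := 7
	res, err := db.Exec(
		`DELETE FROM demo_hashes WHERE insertion_time < datetime('now', ?)`,
		"-"+strconv.Itoa(days)+" day",
	)
	if err != nil {
		panic(err)
	}
	n, _ := res.RowsAffected()
	fmt.Printf("deleted %d rows older than %d days\n", n, days)
}

diff --git a/chain/events/filter/event.go b/chain/events/filter/event.go index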
8592dfb423e..ccf3bd0c941 100644 --- a/chain/events/filter/event.go +++ b/chain/events/filter/event.go @@ -16,6 +16,7 @@ import ( "github.com/filecoin-project/go-state-types/abi" blockadt "github.com/filecoin-project/specs-actors/actors/util/adt" + "github.com/filecoin-project/lotus/chain/index" cstore "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" ) @@ -32,7 +33,7 @@ type AddressResolver func(context.Context, abi.ActorID, *types.TipSet) (address. type EventFilter interface { Filter - TakeCollectedEvents(context.Context) []*CollectedEvent + TakeCollectedEvents(context.Context) []*index.CollectedEvent CollectEvents(context.Context, *TipSetEvents, bool, AddressResolver) error } @@ -47,24 +48,13 @@ type eventFilter struct { maxResults int // maximum number of results to collect, 0 is unlimited mu sync.Mutex - collected []*CollectedEvent + collected []*index.CollectedEvent lastTaken time.Time ch chan<- interface{} } var _ Filter = (*eventFilter)(nil) -type CollectedEvent struct { - Entries []types.EventEntry - EmitterAddr address.Address // address of emitter - EventIdx int // index of the event within the list of emitted events in a given tipset - Reverted bool - Height abi.ChainEpoch - TipSetKey types.TipSetKey // tipset that contained the message - MsgIdx int // index of the message in the tipset - MsgCid cid.Cid // cid of message that produced event -} - func (f *eventFilter) ID() types.FilterID { return f.id } @@ -119,7 +109,7 @@ func (f *eventFilter) CollectEvents(ctx context.Context, te *TipSetEvents, rever } // event matches filter, so record it - cev := &CollectedEvent{ + cev := &index.CollectedEvent{ Entries: ev.Entries, EmitterAddr: addr, EventIdx: eventCount, @@ -151,13 +141,13 @@ func (f *eventFilter) CollectEvents(ctx context.Context, te *TipSetEvents, rever return nil } -func (f *eventFilter) setCollectedEvents(ces []*CollectedEvent) { +func (f *eventFilter) setCollectedEvents(ces []*index.CollectedEvent) { f.mu.Lock() f.collected = ces f.mu.Unlock() } -func (f *eventFilter) TakeCollectedEvents(ctx context.Context) []*CollectedEvent { +func (f *eventFilter) TakeCollectedEvents(ctx context.Context) []*index.CollectedEvent { f.mu.Lock() collected := f.collected f.collected = nil @@ -307,7 +297,7 @@ type EventFilterManager struct { ChainStore *cstore.ChainStore AddressResolver func(ctx context.Context, emitter abi.ActorID, ts *types.TipSet) (address.Address, bool) MaxFilterResults int - EventIndex *EventIndex + ChainIndexer index.Indexer mu sync.Mutex // guards mutations to filters filters map[types.FilterID]EventFilter @@ -319,7 +309,7 @@ func (m *EventFilterManager) Apply(ctx context.Context, from, to *types.TipSet) defer m.mu.Unlock() m.currentHeight = to.Height() - if len(m.filters) == 0 && m.EventIndex == nil { + if len(m.filters) == 0 { return nil } @@ -329,12 +319,6 @@ func (m *EventFilterManager) Apply(ctx context.Context, from, to *types.TipSet) load: m.loadExecutedMessages, } - if m.EventIndex != nil { - if err := m.EventIndex.CollectEvents(ctx, tse, false, m.AddressResolver); err != nil { - return err - } - } - // TODO: could run this loop in parallel with errgroup if there are many filters for _, f := range m.filters { if err := f.CollectEvents(ctx, tse, false, m.AddressResolver); err != nil { @@ -350,7 +334,7 @@ func (m *EventFilterManager) Revert(ctx context.Context, from, to *types.TipSet) defer m.mu.Unlock() m.currentHeight = to.Height() - if len(m.filters) == 0 && m.EventIndex == nil { + if len(m.filters) == 0 { 
return nil } @@ -360,12 +344,6 @@ func (m *EventFilterManager) Revert(ctx context.Context, from, to *types.TipSet) load: m.loadExecutedMessages, } - if m.EventIndex != nil { - if err := m.EventIndex.CollectEvents(ctx, tse, true, m.AddressResolver); err != nil { - return err - } - } - // TODO: could run this loop in parallel with errgroup if there are many filters for _, f := range m.filters { if err := f.CollectEvents(ctx, tse, true, m.AddressResolver); err != nil { @@ -392,7 +370,7 @@ func (m *EventFilterManager) Fill( currentHeight := m.currentHeight m.mu.Unlock() - if m.EventIndex == nil && minHeight != -1 && minHeight < currentHeight { + if m.ChainIndexer == nil && minHeight != -1 && minHeight < currentHeight { return nil, xerrors.Errorf("historic event index disabled") } @@ -411,12 +389,22 @@ func (m *EventFilterManager) Fill( maxResults: m.MaxFilterResults, } - if m.EventIndex != nil && minHeight != -1 && minHeight < currentHeight { - // Filter needs historic events - excludeReverted := tipsetCid == cid.Undef - if err := m.EventIndex.prefillFilter(ctx, f, excludeReverted); err != nil { - return nil, err + if m.ChainIndexer != nil && minHeight != -1 && minHeight < currentHeight { + ef := &index.EventFilter{ + MinHeight: minHeight, + MaxHeight: maxHeight, + TipsetCid: tipsetCid, + Addresses: addresses, + KeysWithCodec: keysWithCodec, + MaxResults: m.MaxFilterResults, } + + ces, err := m.ChainIndexer.GetEventsForFilter(ctx, ef) + if err != nil { + return nil, xerrors.Errorf("get events for filter: %w", err) + } + + f.setCollectedEvents(ces) } return f, nil diff --git a/chain/events/filter/event_test.go b/chain/events/filter/event_test.go index c650b71eb6f..5ffb678c65e 100644 --- a/chain/events/filter/event_test.go +++ b/chain/events/filter/event_test.go @@ -19,6 +19,7 @@ import ( "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/index" "github.com/filecoin-project/lotus/chain/types" ) @@ -70,8 +71,8 @@ func TestEventFilterCollectEvents(t *testing.T) { cid14000, err := events14000.msgTs.Key().Cid() require.NoError(t, err, "tipset cid") - noCollectedEvents := []*CollectedEvent{} - oneCollectedEvent := []*CollectedEvent{ + noCollectedEvents := []*index.CollectedEvent{} + oneCollectedEvent := []*index.CollectedEvent{ { Entries: ev1.Entries, EmitterAddr: a1, @@ -88,7 +89,7 @@ func TestEventFilterCollectEvents(t *testing.T) { name string filter *eventFilter te *TipSetEvents - want []*CollectedEvent + want []*index.CollectedEvent }{ { name: "nomatch tipset min height", diff --git a/chain/events/filter/index.go b/chain/events/filter/index.go deleted file mode 100644 index ff7f1aeaa7e..00000000000 --- a/chain/events/filter/index.go +++ /dev/null @@ -1,672 +0,0 @@ -package filter - -import ( - "context" - "database/sql" - "errors" - "fmt" - "sort" - "strings" - "sync" - - "github.com/ipfs/go-cid" - logging "github.com/ipfs/go-log/v2" - _ "github.com/mattn/go-sqlite3" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-state-types/abi" - - "github.com/filecoin-project/lotus/chain/store" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/lib/sqlite" -) - -const DefaultDbFilename = "events.db" - -// Any changes to this schema should be matched for the `lotus-shed indexes backfill-events` command - -var ddls = []string{ - `CREATE TABLE IF NOT EXISTS event ( - id INTEGER PRIMARY KEY, - height INTEGER NOT NULL, - tipset_key 
BLOB NOT NULL, - tipset_key_cid BLOB NOT NULL, - emitter_addr BLOB NOT NULL, - event_index INTEGER NOT NULL, - message_cid BLOB NOT NULL, - message_index INTEGER NOT NULL, - reverted INTEGER NOT NULL - )`, - - createIndexEventTipsetKeyCid, - createIndexEventHeight, - - `CREATE TABLE IF NOT EXISTS event_entry ( - event_id INTEGER, - indexed INTEGER NOT NULL, - flags BLOB NOT NULL, - key TEXT NOT NULL, - codec INTEGER, - value BLOB NOT NULL - )`, - - createTableEventsSeen, - - createIndexEventEntryEventId, - createIndexEventsSeenHeight, - createIndexEventsSeenTipsetKeyCid, -} - -var ( - log = logging.Logger("filter") -) - -const ( - createTableEventsSeen = `CREATE TABLE IF NOT EXISTS events_seen ( - id INTEGER PRIMARY KEY, - height INTEGER NOT NULL, - tipset_key_cid BLOB NOT NULL, - reverted INTEGER NOT NULL, - UNIQUE(height, tipset_key_cid) - )` - - // When modifying indexes in this file, it is critical to test the query plan (EXPLAIN QUERY PLAN) - // of all the variations of queries built by prefillFilter to ensure that the query first hits - // an index that narrows down results to an epoch or a reasonable range of epochs. Specifically, - // event_tipset_key_cid or event_height should be the first index. Then further narrowing can take - // place within the small subset of results. - // Unfortunately SQLite has some quirks in index selection that mean that certain query types will - // bypass these indexes if alternatives are available. This has been observed specifically on - // queries with height ranges: `height>=X AND height<=Y`. - // - // e.g. we want to see that `event_height` is the first index used in this query: - // - // EXPLAIN QUERY PLAN - // SELECT - // event.height, event.tipset_key_cid, event_entry.indexed, event_entry.codec, event_entry.key, event_entry.value - // FROM event - // JOIN - // event_entry ON event.id=event_entry.event_id, - // event_entry ee2 ON event.id=ee2.event_id - // WHERE event.height>=? AND event.height<=? AND event.reverted=? AND event.emitter_addr=? AND ee2.indexed=1 AND ee2.key=? - // ORDER BY event.height DESC, event_entry._rowid_ ASC - // - // -> - // - // QUERY PLAN - // |--SEARCH event USING INDEX event_height (height>? AND height 0 FROM events_seen WHERE tipset_key_cid=?`, // QUERY PLAN: SEARCH events_seen USING COVERING INDEX events_seen_tipset_key_cid (tipset_key_cid=?) - &ps.getMaxHeightInIndex: `SELECT MAX(height) FROM events_seen`, // QUERY PLAN: SEARCH events_seen USING COVERING INDEX events_seen_height - &ps.isHeightProcessed: `SELECT COUNT(*) > 0 FROM events_seen WHERE height=?`, // QUERY PLAN: SEARCH events_seen USING COVERING INDEX events_seen_height (height=?) 
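The comment above is explicit that any change to these indexes should be re-checked with EXPLAIN QUERY PLAN. A self-contained sketch of automating that check, illustrative only and not part of the patch: the table and index names follow the DDL in this file, but the schema is trimmed to the columns the assertion needs, and the expected plan fragment follows the example in the comment.

package main

import (
	"database/sql"
	"fmt"
	"strings"

	_ "github.com/mattn/go-sqlite3"
)

func main() {
	db, err := sql.Open("sqlite3", ":memory:")
	if err != nil {
		panic(err)
	}
	defer func() { _ = db.Close() }()

	// Trimmed stand-in for the event/event_entry schema defined above.
	for _, ddl := range []string{
		`CREATE TABLE event (id INTEGER PRIMARY KEY, height INTEGER NOT NULL, reverted INTEGER NOT NULL)`,
		`CREATE INDEX event_height ON event (height)`,
		`CREATE TABLE event_entry (event_id INTEGER)`,
		`CREATE INDEX event_entry_event_id ON event_entry (event_id)`,
	} {
		if _, err := db.Exec(ddl); err != nil {
			panic(err)
		}
	}

	// Height-range queries are the shape the comment flags as prone to
	// mis-planning, so that is the shape to assert on.
	rows, err := db.Query(`EXPLAIN QUERY PLAN
		SELECT event.height FROM event
		JOIN event_entry ON event.id = event_entry.event_id
		WHERE event.height >= ? AND event.height <= ? AND event.reverted = ?`,
		100, 200, 0)
	if err != nil {
		panic(err)
	}
	defer func() { _ = rows.Close() }()

	// Recent SQLite emits one row per plan node: (id, parent, notused, detail).
	if !rows.Next() {
		panic("empty query plan")
	}
	var id, parent, notused int
	var detail string
	if err := rows.Scan(&id, &parent, &notused, &detail); err != nil {
		panic(err)
	}
	if !strings.Contains(detail, "USING INDEX event_height") {
		panic(fmt.Sprintf("expected event_height to lead the plan, got: %s", detail))
	}
	fmt.Println("plan leads with:", detail)
}

Wiring a check like this into a test would keep the index-selection property from regressing silently when the DDL changes.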
- - } -} - -type preparedStatements struct { - insertEvent *sql.Stmt - insertEntry *sql.Stmt - revertEventsInTipset *sql.Stmt - restoreEvent *sql.Stmt - upsertEventsSeen *sql.Stmt - revertEventSeen *sql.Stmt - restoreEventSeen *sql.Stmt - eventExists *sql.Stmt - isTipsetProcessed *sql.Stmt - getMaxHeightInIndex *sql.Stmt - isHeightProcessed *sql.Stmt -} - -type EventIndex struct { - db *sql.DB - - stmt *preparedStatements - - mu sync.Mutex - subIdCounter uint64 - updateSubs map[uint64]*updateSub -} - -type updateSub struct { - ctx context.Context - ch chan EventIndexUpdated - cancel context.CancelFunc -} - -type EventIndexUpdated struct{} - -func NewEventIndex(ctx context.Context, path string, chainStore *store.ChainStore) (*EventIndex, error) { - db, _, err := sqlite.Open(path) - if err != nil { - return nil, xerrors.Errorf("failed to setup event index db: %w", err) - } - - err = sqlite.InitDb(ctx, "event index", db, ddls, []sqlite.MigrationFunc{ - migrationVersion2(db, chainStore), - migrationVersion3, - migrationVersion4, - migrationVersion5, - migrationVersion6, - migrationVersion7, - }) - if err != nil { - _ = db.Close() - return nil, xerrors.Errorf("failed to setup event index db: %w", err) - } - - eventIndex := EventIndex{ - db: db, - stmt: &preparedStatements{}, - } - - if err = eventIndex.initStatements(); err != nil { - _ = db.Close() - return nil, xerrors.Errorf("error preparing eventIndex database statements: %w", err) - } - - eventIndex.updateSubs = make(map[uint64]*updateSub) - - return &eventIndex, nil -} - -func (ei *EventIndex) initStatements() error { - stmtMapping := preparedStatementMapping(ei.stmt) - for stmtPointer, query := range stmtMapping { - var err error - *stmtPointer, err = ei.db.Prepare(query) - if err != nil { - return xerrors.Errorf("prepare statement [%s]: %w", query, err) - } - } - - return nil -} - -func (ei *EventIndex) Close() error { - if ei.db == nil { - return nil - } - return ei.db.Close() -} - -func (ei *EventIndex) SubscribeUpdates() (chan EventIndexUpdated, func()) { - subCtx, subCancel := context.WithCancel(context.Background()) - ch := make(chan EventIndexUpdated) - - tSub := &updateSub{ - ctx: subCtx, - cancel: subCancel, - ch: ch, - } - - ei.mu.Lock() - subId := ei.subIdCounter - ei.subIdCounter++ - ei.updateSubs[subId] = tSub - ei.mu.Unlock() - - unSubscribeF := func() { - ei.mu.Lock() - tSub, ok := ei.updateSubs[subId] - if !ok { - ei.mu.Unlock() - return - } - delete(ei.updateSubs, subId) - ei.mu.Unlock() - - // cancel the subscription - tSub.cancel() - } - - return tSub.ch, unSubscribeF -} - -func (ei *EventIndex) GetMaxHeightInIndex(ctx context.Context) (uint64, error) { - row := ei.stmt.getMaxHeightInIndex.QueryRowContext(ctx) - var maxHeight uint64 - err := row.Scan(&maxHeight) - return maxHeight, err -} - -func (ei *EventIndex) IsHeightPast(ctx context.Context, height uint64) (bool, error) { - maxHeight, err := ei.GetMaxHeightInIndex(ctx) - if err != nil { - return false, err - } - return height <= maxHeight, nil -} - -func (ei *EventIndex) IsTipsetProcessed(ctx context.Context, tipsetKeyCid []byte) (bool, error) { - row := ei.stmt.isTipsetProcessed.QueryRowContext(ctx, tipsetKeyCid) - var exists bool - err := row.Scan(&exists) - return exists, err -} - -func (ei *EventIndex) CollectEvents(ctx context.Context, te *TipSetEvents, revert bool, resolver func(ctx context.Context, emitter abi.ActorID, ts *types.TipSet) (address.Address, bool)) error { - tx, err := ei.db.BeginTx(ctx, nil) - if err != nil { - return xerrors.Errorf("begin 
transaction: %w", err) - } - // rollback the transaction (a no-op if the transaction was already committed) - defer func() { _ = tx.Rollback() }() - - tsKeyCid, err := te.msgTs.Key().Cid() - if err != nil { - return xerrors.Errorf("tipset key cid: %w", err) - } - - // lets handle the revert case first, since its simpler and we can simply mark all events in this tipset as reverted and return - if revert { - _, err = tx.Stmt(ei.stmt.revertEventsInTipset).Exec(te.msgTs.Height(), te.msgTs.Key().Bytes()) - if err != nil { - return xerrors.Errorf("revert event: %w", err) - } - - _, err = tx.Stmt(ei.stmt.revertEventSeen).Exec(te.msgTs.Height(), tsKeyCid.Bytes()) - if err != nil { - return xerrors.Errorf("revert event seen: %w", err) - } - - err = tx.Commit() - if err != nil { - return xerrors.Errorf("commit transaction: %w", err) - } - - ei.mu.Lock() - tSubs := make([]*updateSub, 0, len(ei.updateSubs)) - for _, tSub := range ei.updateSubs { - tSubs = append(tSubs, tSub) - } - ei.mu.Unlock() - - for _, tSub := range tSubs { - tSub := tSub - select { - case tSub.ch <- EventIndexUpdated{}: - case <-tSub.ctx.Done(): - // subscription was cancelled, ignore - case <-ctx.Done(): - return ctx.Err() - } - } - - return nil - } - - // cache of lookups between actor id and f4 address - addressLookups := make(map[abi.ActorID]address.Address) - - ems, err := te.messages(ctx) - if err != nil { - return xerrors.Errorf("load executed messages: %w", err) - } - - eventCount := 0 - // iterate over all executed messages in this tipset and insert them into the database if they - // don't exist, otherwise mark them as not reverted - for msgIdx, em := range ems { - for _, ev := range em.Events() { - addr, found := addressLookups[ev.Emitter] - if !found { - var ok bool - addr, ok = resolver(ctx, ev.Emitter, te.rctTs) - if !ok { - // not an address we will be able to match against - continue - } - addressLookups[ev.Emitter] = addr - } - - // check if this event already exists in the database - var entryID sql.NullInt64 - err = tx.Stmt(ei.stmt.eventExists).QueryRow( - te.msgTs.Height(), // height - te.msgTs.Key().Bytes(), // tipset_key - tsKeyCid.Bytes(), // tipset_key_cid - addr.Bytes(), // emitter_addr - eventCount, // event_index - em.Message().Cid().Bytes(), // message_cid - msgIdx, // message_index - ).Scan(&entryID) - if err != nil { - return xerrors.Errorf("error checking if event exists: %w", err) - } - - if !entryID.Valid { - // event does not exist, lets insert it - res, err := tx.Stmt(ei.stmt.insertEvent).Exec( - te.msgTs.Height(), // height - te.msgTs.Key().Bytes(), // tipset_key - tsKeyCid.Bytes(), // tipset_key_cid - addr.Bytes(), // emitter_addr - eventCount, // event_index - em.Message().Cid().Bytes(), // message_cid - msgIdx, // message_index - false, // reverted - ) - if err != nil { - return xerrors.Errorf("exec insert event: %w", err) - } - - entryID.Int64, err = res.LastInsertId() - if err != nil { - return xerrors.Errorf("get last row id: %w", err) - } - - // insert all the entries for this event - for _, entry := range ev.Entries { - _, err = tx.Stmt(ei.stmt.insertEntry).Exec( - entryID.Int64, // event_id - isIndexedValue(entry.Flags), // indexed - []byte{entry.Flags}, // flags - entry.Key, // key - entry.Codec, // codec - entry.Value, // value - ) - if err != nil { - return xerrors.Errorf("exec insert entry: %w", err) - } - } - } else { - // event already exists, lets mark it as not reverted - res, err := tx.Stmt(ei.stmt.restoreEvent).Exec( - te.msgTs.Height(), // height - te.msgTs.Key().Bytes(), // 
tipset_key - tsKeyCid.Bytes(), // tipset_key_cid - addr.Bytes(), // emitter_addr - eventCount, // event_index - em.Message().Cid().Bytes(), // message_cid - msgIdx, // message_index - ) - if err != nil { - return xerrors.Errorf("exec restore event: %w", err) - } - - rowsAffected, err := res.RowsAffected() - if err != nil { - return xerrors.Errorf("error getting rows affected: %w", err) - } - - // this is a sanity check as we should only ever be updating one event - if rowsAffected != 1 { - log.Warnf("restored %d events but expected only one to exist", rowsAffected) - } - } - eventCount++ - } - } - - // this statement will mark the tipset as processed and will insert a new row if it doesn't exist - // or update the reverted field to false if it does - _, err = tx.Stmt(ei.stmt.upsertEventsSeen).Exec( - te.msgTs.Height(), - tsKeyCid.Bytes(), - ) - if err != nil { - return xerrors.Errorf("exec upsert events seen: %w", err) - } - - err = tx.Commit() - if err != nil { - return xerrors.Errorf("commit transaction: %w", err) - } - - ei.mu.Lock() - tSubs := make([]*updateSub, 0, len(ei.updateSubs)) - for _, tSub := range ei.updateSubs { - tSubs = append(tSubs, tSub) - } - ei.mu.Unlock() - - for _, tSub := range tSubs { - tSub := tSub - select { - case tSub.ch <- EventIndexUpdated{}: - case <-tSub.ctx.Done(): - // subscription was cancelled, ignore - case <-ctx.Done(): - return ctx.Err() - } - } - - return nil -} - -// prefillFilter fills a filter's collection of events from the historic index -func (ei *EventIndex) prefillFilter(ctx context.Context, f *eventFilter, excludeReverted bool) error { - values, query := makePrefillFilterQuery(f, excludeReverted) - - stmt, err := ei.db.Prepare(query) - if err != nil { - return xerrors.Errorf("prepare prefill query: %w", err) - } - defer func() { _ = stmt.Close() }() - - q, err := stmt.QueryContext(ctx, values...) 
- if err != nil { - if errors.Is(err, sql.ErrNoRows) { - return nil - } - return xerrors.Errorf("exec prefill query: %w", err) - } - defer func() { _ = q.Close() }() - - var ces []*CollectedEvent - var currentID int64 = -1 - var ce *CollectedEvent - - for q.Next() { - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - var row struct { - id int64 - height uint64 - tipsetKey []byte - tipsetKeyCid []byte - emitterAddr []byte - eventIndex int - messageCid []byte - messageIndex int - reverted bool - flags []byte - key string - codec uint64 - value []byte - } - - if err := q.Scan( - &row.id, - &row.height, - &row.tipsetKey, - &row.tipsetKeyCid, - &row.emitterAddr, - &row.eventIndex, - &row.messageCid, - &row.messageIndex, - &row.reverted, - &row.flags, - &row.key, - &row.codec, - &row.value, - ); err != nil { - return xerrors.Errorf("read prefill row: %w", err) - } - - if row.id != currentID { - if ce != nil { - ces = append(ces, ce) - ce = nil - // Unfortunately we can't easily incorporate the max results limit into the query due to the - // unpredictable number of rows caused by joins - // Break here to stop collecting rows - if f.maxResults > 0 && len(ces) >= f.maxResults { - break - } - } - - currentID = row.id - ce = &CollectedEvent{ - EventIdx: row.eventIndex, - Reverted: row.reverted, - Height: abi.ChainEpoch(row.height), - MsgIdx: row.messageIndex, - } - - ce.EmitterAddr, err = address.NewFromBytes(row.emitterAddr) - if err != nil { - return xerrors.Errorf("parse emitter addr: %w", err) - } - - ce.TipSetKey, err = types.TipSetKeyFromBytes(row.tipsetKey) - if err != nil { - return xerrors.Errorf("parse tipsetkey: %w", err) - } - - ce.MsgCid, err = cid.Cast(row.messageCid) - if err != nil { - return xerrors.Errorf("parse message cid: %w", err) - } - } - - ce.Entries = append(ce.Entries, types.EventEntry{ - Flags: row.flags[0], - Key: row.key, - Codec: row.codec, - Value: row.value, - }) - } - - if ce != nil { - ces = append(ces, ce) - } - - if len(ces) == 0 { - return nil - } - - // collected event list is in inverted order since we selected only the most recent events - // sort it into height order - sort.Slice(ces, func(i, j int) bool { return ces[i].Height < ces[j].Height }) - f.setCollectedEvents(ces) - - return nil -} - -func makePrefillFilterQuery(f *eventFilter, excludeReverted bool) ([]any, string) { - clauses := []string{} - values := []any{} - joins := []string{} - - if f.tipsetCid != cid.Undef { - clauses = append(clauses, "event.tipset_key_cid=?") - values = append(values, f.tipsetCid.Bytes()) - } else { - if f.minHeight >= 0 && f.minHeight == f.maxHeight { - clauses = append(clauses, "event.height=?") - values = append(values, f.minHeight) - } else { - if f.maxHeight >= 0 && f.minHeight >= 0 { - clauses = append(clauses, "event.height BETWEEN ? 
AND ?") - values = append(values, f.minHeight, f.maxHeight) - } else if f.minHeight >= 0 { - clauses = append(clauses, "event.height >= ?") - values = append(values, f.minHeight) - } else if f.maxHeight >= 0 { - clauses = append(clauses, "event.height <= ?") - values = append(values, f.maxHeight) - } - } - } - - if excludeReverted { - clauses = append(clauses, "event.reverted=?") - values = append(values, false) - } - - if len(f.addresses) > 0 { - for _, addr := range f.addresses { - values = append(values, addr.Bytes()) - } - clauses = append(clauses, "event.emitter_addr IN ("+strings.Repeat("?,", len(f.addresses)-1)+"?)") - } - - if len(f.keysWithCodec) > 0 { - join := 0 - for key, vals := range f.keysWithCodec { - if len(vals) > 0 { - join++ - joinAlias := fmt.Sprintf("ee%d", join) - joins = append(joins, fmt.Sprintf("event_entry %s ON event.id=%[1]s.event_id", joinAlias)) - clauses = append(clauses, fmt.Sprintf("%s.indexed=1 AND %[1]s.key=?", joinAlias)) - values = append(values, key) - subclauses := make([]string, 0, len(vals)) - for _, val := range vals { - subclauses = append(subclauses, fmt.Sprintf("(%s.value=? AND %[1]s.codec=?)", joinAlias)) - values = append(values, val.Value, val.Codec) - } - clauses = append(clauses, "("+strings.Join(subclauses, " OR ")+")") - } - } - } - - s := `SELECT - event.id, - event.height, - event.tipset_key, - event.tipset_key_cid, - event.emitter_addr, - event.event_index, - event.message_cid, - event.message_index, - event.reverted, - event_entry.flags, - event_entry.key, - event_entry.codec, - event_entry.value - FROM event JOIN event_entry ON event.id=event_entry.event_id` - - if len(joins) > 0 { - s = s + ", " + strings.Join(joins, ", ") - } - - if len(clauses) > 0 { - s = s + " WHERE " + strings.Join(clauses, " AND ") - } - - // retain insertion order of event_entry rows with the implicit _rowid_ column - s += " ORDER BY event.height DESC, event_entry._rowid_ ASC" - return values, s -} diff --git a/chain/events/filter/index_migrations.go b/chain/events/filter/index_migrations.go deleted file mode 100644 index bf8fd2f943c..00000000000 --- a/chain/events/filter/index_migrations.go +++ /dev/null @@ -1,260 +0,0 @@ -package filter - -import ( - "context" - "database/sql" - "errors" - "fmt" - - "golang.org/x/xerrors" - - "github.com/filecoin-project/lotus/chain/store" - "github.com/filecoin-project/lotus/lib/sqlite" -) - -func migrationVersion2(db *sql.DB, chainStore *store.ChainStore) sqlite.MigrationFunc { - return func(ctx context.Context, tx *sql.Tx) error { - // create some temporary indices to help speed up the migration - _, err := tx.ExecContext(ctx, "CREATE INDEX IF NOT EXISTS tmp_height_tipset_key_cid ON event (height,tipset_key_cid)") - if err != nil { - return xerrors.Errorf("create index tmp_height_tipset_key_cid: %w", err) - } - _, err = tx.ExecContext(ctx, "CREATE INDEX IF NOT EXISTS tmp_tipset_key_cid ON event (tipset_key_cid)") - if err != nil { - return xerrors.Errorf("create index tmp_tipset_key_cid: %w", err) - } - - stmtDeleteOffChainEvent, err := tx.PrepareContext(ctx, "DELETE FROM event WHERE tipset_key_cid!=? and height=?") - if err != nil { - return xerrors.Errorf("prepare stmtDeleteOffChainEvent: %w", err) - } - - stmtSelectEvent, err := tx.PrepareContext(ctx, "SELECT id FROM event WHERE tipset_key_cid=? 
ORDER BY message_index ASC, event_index ASC, id DESC LIMIT 1") - if err != nil { - return xerrors.Errorf("prepare stmtSelectEvent: %w", err) - } - - stmtDeleteEvent, err := tx.PrepareContext(ctx, "DELETE FROM event WHERE tipset_key_cid=? AND id<?") - if err != nil { - return xerrors.Errorf("prepare stmtDeleteEvent: %w", err) - } - - // find the minimum height in the index; the migration walks the chain from head back down to it - var minHeight sql.NullInt64 - err = tx.QueryRowContext(ctx, "SELECT MIN(height) FROM event").Scan(&minHeight) - if err != nil { - if errors.Is(err, sql.ErrNoRows) { - return nil - } - return xerrors.Errorf("query min height: %w", err) - } - log.Infof("Migrating events from head to %d", minHeight.Int64) - - currTs := chainStore.GetHeaviestTipSet() - - for int64(currTs.Height()) >= minHeight.Int64 { - if currTs.Height()%1000 == 0 { - log.Infof("Migrating height %d (remaining %d)", currTs.Height(), int64(currTs.Height())-minHeight.Int64) - } - - tsKey := currTs.Parents() - currTs, err = chainStore.GetTipSetFromKey(ctx, tsKey) - if err != nil { - return xerrors.Errorf("get tipset from key: %w", err) - } - log.Debugf("Migrating height %d", currTs.Height()) - - tsKeyCid, err := currTs.Key().Cid() - if err != nil { - return xerrors.Errorf("tipset key cid: %w", err) - } - - // delete all events that are not in the canonical chain - _, err = stmtDeleteOffChainEvent.Exec(tsKeyCid.Bytes(), currTs.Height()) - if err != nil { - return xerrors.Errorf("delete off chain event: %w", err) - } - - // find the first eventId from the last time the tipset was applied - var eventId sql.NullInt64 - err = stmtSelectEvent.QueryRow(tsKeyCid.Bytes()).Scan(&eventId) - if err != nil { - if errors.Is(err, sql.ErrNoRows) { - continue - } - return xerrors.Errorf("select event: %w", err) - } - - // this tipset might not have any events which is ok - if !eventId.Valid { - continue - } - log.Debugf("Deleting all events with id < %d at height %d", eventId.Int64, currTs.Height()) - - res, err := stmtDeleteEvent.Exec(tsKeyCid.Bytes(), eventId.Int64) - if err != nil { - return xerrors.Errorf("delete event: %w", err) - } - - nrRowsAffected, err := res.RowsAffected() - if err != nil { - return xerrors.Errorf("rows affected: %w", err) - } - log.Debugf("deleted %d events from tipset %s", nrRowsAffected, tsKeyCid.String()) - } - - // delete all entries that have an event_id that doesn't exist (since we don't have a foreign - // key constraint that gives us cascading deletes) - res, err := tx.ExecContext(ctx, "DELETE FROM event_entry WHERE event_id NOT IN (SELECT id FROM event)") - if err != nil { - return xerrors.Errorf("delete event_entry: %w", err) - } - - nrRowsAffected, err := res.RowsAffected() - if err != nil { - return xerrors.Errorf("rows affected: %w", err) - } - log.Infof("Cleaned up %d entries that had deleted events\n", nrRowsAffected) - - // drop the temporary indices after the migration - _, err = tx.ExecContext(ctx, "DROP INDEX IF EXISTS tmp_tipset_key_cid") - if err != nil { - return xerrors.Errorf("drop index tmp_tipset_key_cid: %w", err) - } - _, err = tx.ExecContext(ctx, "DROP INDEX IF EXISTS tmp_height_tipset_key_cid") - if err != nil { - return xerrors.Errorf("drop index tmp_height_tipset_key_cid: %w", err) - } - - // original v2 migration introduced an index: - // CREATE INDEX IF NOT EXISTS height_tipset_key ON event (height,tipset_key) - // which has subsequently been removed in v4, so it's omitted here - - return nil - } -} - -// migrationVersion3 migrates the schema from version 2 to version 3 by creating two indices: -// 1) an index on the event.emitter_addr column, and 2) an index on the event_entry.key column. -// -// As of version 7, these indices have been removed as they were found to be a performance -// hindrance. This migration is now a no-op. -func migrationVersion3(ctx context.Context, tx *sql.Tx) error { - return nil -} - -// migrationVersion4 migrates the schema from version 3 to version 4 by adjusting indexes to match -// the query patterns of the event filter. -// -// First it drops indexes introduced in previous migrations: -// 1. 
the index on the event.height and event.tipset_key columns -// 2. the index on the event_entry.key column -// -// And then creating the following indices: -// 1. an index on the event.tipset_key_cid column -// 2. an index on the event.height column -// 3. an index on the event.reverted column (removed in version 7) -// 4. a composite index on the event_entry.indexed and event_entry.key columns (removed in version 7) -// 5. a composite index on the event_entry.codec and event_entry.value columns (removed in version 7) -// 6. an index on the event_entry.event_id column -// -// Indexes 3, 4, and 5 were removed in version 7 as they were found to be a performance hindrance so -// are omitted here. -func migrationVersion4(ctx context.Context, tx *sql.Tx) error { - for _, create := range []struct { - desc string - query string - }{ - {"drop index height_tipset_key", "DROP INDEX IF EXISTS height_tipset_key;"}, - {"drop index event_entry_key_index", "DROP INDEX IF EXISTS event_entry_key_index;"}, - {"create index event_tipset_key_cid", createIndexEventTipsetKeyCid}, - {"create index event_height", createIndexEventHeight}, - {"create index event_entry_event_id", createIndexEventEntryEventId}, - } { - if _, err := tx.ExecContext(ctx, create.query); err != nil { - return xerrors.Errorf("%s: %w", create.desc, err) - } - } - - return nil -} - -// migrationVersion5 migrates the schema from version 4 to version 5 by updating the event_index -// to be 0-indexed within a tipset. -func migrationVersion5(ctx context.Context, tx *sql.Tx) error { - stmtEventIndexUpdate, err := tx.PrepareContext(ctx, "UPDATE event SET event_index = (SELECT COUNT(*) FROM event e2 WHERE e2.tipset_key_cid = event.tipset_key_cid AND e2.id <= event.id) - 1") - if err != nil { - return xerrors.Errorf("prepare stmtEventIndexUpdate: %w", err) - } - - _, err = stmtEventIndexUpdate.ExecContext(ctx) - if err != nil { - return xerrors.Errorf("update event index: %w", err) - } - - return nil -} - -// migrationVersion6 migrates the schema from version 5 to version 6 by creating a new table -// events_seen that tracks the tipsets that have been seen by the event filter and populating it -// with the tipsets that have events in the event table. -func migrationVersion6(ctx context.Context, tx *sql.Tx) error { - stmtCreateTableEventsSeen, err := tx.PrepareContext(ctx, createTableEventsSeen) - if err != nil { - return xerrors.Errorf("prepare stmtCreateTableEventsSeen: %w", err) - } - _, err = stmtCreateTableEventsSeen.ExecContext(ctx) - if err != nil { - return xerrors.Errorf("create table events_seen: %w", err) - } - - _, err = tx.ExecContext(ctx, createIndexEventsSeenHeight) - if err != nil { - return xerrors.Errorf("create index events_seen_height: %w", err) - } - _, err = tx.ExecContext(ctx, createIndexEventsSeenTipsetKeyCid) - if err != nil { - return xerrors.Errorf("create index events_seen_tipset_key_cid: %w", err) - } - - // INSERT an entry in the events_seen table for all epochs we do have events for in our DB - _, err = tx.ExecContext(ctx, ` - INSERT OR IGNORE INTO events_seen (height, tipset_key_cid, reverted) - SELECT DISTINCT height, tipset_key_cid, reverted FROM event -`) - if err != nil { - return xerrors.Errorf("insert events into events_seen: %w", err) - } - - return nil -} - -// migrationVersion7 migrates the schema from version 6 to version 7 by dropping the following -// indices: -// 1. the index on the event.emitter_addr column -// 2. the index on the event.reverted column -// 3. 
the composite index on the event_entry.indexed and event_entry.key columns -// 4. the composite index on the event_entry.codec and event_entry.value columns -// -// These indices were found to be a performance hindrance as they prevent SQLite from using the -// intended initial indexes on height or tipset_key_cid in many query variations. Without additional -// indices to fall-back on, SQLite is forced to narrow down each query via height or tipset_key_cid -// which is the desired behavior. -func migrationVersion7(ctx context.Context, tx *sql.Tx) error { - for _, drop := range []struct { - desc string - query string - }{ - {"drop index event_emitter_addr", "DROP INDEX IF EXISTS event_emitter_addr;"}, - {"drop index event_reverted", "DROP INDEX IF EXISTS event_reverted;"}, - {"drop index event_entry_indexed_key", "DROP INDEX IF EXISTS event_entry_indexed_key;"}, - {"drop index event_entry_codec_value", "DROP INDEX IF EXISTS event_entry_codec_value;"}, - } { - if _, err := tx.ExecContext(ctx, drop.query); err != nil { - return xerrors.Errorf("%s: %w", drop.desc, err) - } - } - - return nil -} diff --git a/chain/events/filter/index_test.go b/chain/events/filter/index_test.go deleted file mode 100644 index 008b5697130..00000000000 --- a/chain/events/filter/index_test.go +++ /dev/null @@ -1,1046 +0,0 @@ -package filter - -import ( - "context" - pseudo "math/rand" - "os" - "path/filepath" - "regexp" - "strings" - "testing" - - "github.com/ipfs/go-cid" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-state-types/abi" - - "github.com/filecoin-project/lotus/chain/types" -) - -func TestEventIndexPrefillFilter(t *testing.T) { - rng := pseudo.New(pseudo.NewSource(299792458)) - a1 := randomF4Addr(t, rng) - a2 := randomF4Addr(t, rng) - - a1ID := abi.ActorID(1) - a2ID := abi.ActorID(2) - - addrMap := addressMap{} - addrMap.add(a1ID, a1) - addrMap.add(a2ID, a2) - - ev1 := fakeEvent( - a1ID, - []kv{ - {k: "type", v: []byte("approval")}, - {k: "signer", v: []byte("addr1")}, - }, - []kv{ - {k: "amount", v: []byte("2988181")}, - }, - ) - - st := newStore() - events := []*types.Event{ev1} - em := executedMessage{ - msg: fakeMessage(randomF4Addr(t, rng), randomF4Addr(t, rng)), - rct: fakeReceipt(t, rng, st, events), - evs: events, - } - - events14000 := buildTipSetEvents(t, rng, 14000, em) - cid14000, err := events14000.msgTs.Key().Cid() - require.NoError(t, err, "tipset cid") - - noCollectedEvents := []*CollectedEvent{} - oneCollectedEvent := []*CollectedEvent{ - { - Entries: ev1.Entries, - EmitterAddr: a1, - EventIdx: 0, - Reverted: false, - Height: 14000, - TipSetKey: events14000.msgTs.Key(), - MsgIdx: 0, - MsgCid: em.msg.Cid(), - }, - } - - workDir, err := os.MkdirTemp("", "lotusevents") - require.NoError(t, err, "create temporary work directory") - - defer func() { - _ = os.RemoveAll(workDir) - }() - t.Logf("using work dir %q", workDir) - - dbPath := filepath.Join(workDir, "actorevents.db") - - ei, err := NewEventIndex(context.Background(), dbPath, nil) - require.NoError(t, err, "create event index") - - subCh, unSubscribe := ei.SubscribeUpdates() - defer unSubscribe() - - out := make(chan EventIndexUpdated, 1) - go func() { - tu := <-subCh - out <- tu - }() - - if err := ei.CollectEvents(context.Background(), events14000, false, addrMap.ResolveAddress); err != nil { - require.NoError(t, err, "collect events") - } - - mh, err := ei.GetMaxHeightInIndex(context.Background()) - require.NoError(t, err) - require.Equal(t, uint64(14000), mh) - 
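// The assertions below pin down the IsHeightPast semantics this test relies on: it
// reports true for the indexed head itself (14000) and for anything below it, and
// false for heights that have not been indexed yet (14001).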
- b, err := ei.IsHeightPast(context.Background(), 14000) - require.NoError(t, err) - require.True(t, b) - - b, err = ei.IsHeightPast(context.Background(), 14001) - require.NoError(t, err) - require.False(t, b) - - b, err = ei.IsHeightPast(context.Background(), 13000) - require.NoError(t, err) - require.True(t, b) - - tsKey := events14000.msgTs.Key() - tsKeyCid, err := tsKey.Cid() - require.NoError(t, err, "tipset key cid") - - seen, err := ei.IsTipsetProcessed(context.Background(), tsKeyCid.Bytes()) - require.NoError(t, err) - require.True(t, seen, "tipset key should be seen") - - seen, err = ei.IsTipsetProcessed(context.Background(), []byte{1}) - require.NoError(t, err) - require.False(t, seen, "tipset key should not be seen") - - _ = <-out - - testCases := []struct { - name string - filter *eventFilter - te *TipSetEvents - want []*CollectedEvent - }{ - { - name: "nomatch tipset min height", - filter: &eventFilter{ - minHeight: 14001, - maxHeight: -1, - }, - te: events14000, - want: noCollectedEvents, - }, - { - name: "nomatch tipset max height", - filter: &eventFilter{ - minHeight: -1, - maxHeight: 13999, - }, - te: events14000, - want: noCollectedEvents, - }, - { - name: "match tipset min height", - filter: &eventFilter{ - minHeight: 14000, - maxHeight: -1, - }, - te: events14000, - want: oneCollectedEvent, - }, - { - name: "match tipset cid", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - tipsetCid: cid14000, - }, - te: events14000, - want: oneCollectedEvent, - }, - { - name: "nomatch address", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - addresses: []address.Address{a2}, - }, - te: events14000, - want: noCollectedEvents, - }, - { - name: "match address", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - addresses: []address.Address{a1}, - }, - te: events14000, - want: oneCollectedEvent, - }, - { - name: "match one entry", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ - "type": { - []byte("approval"), - }, - }), - }, - te: events14000, - want: oneCollectedEvent, - }, - { - name: "match one entry with alternate values", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ - "type": { - []byte("cancel"), - []byte("propose"), - []byte("approval"), - }, - }), - }, - te: events14000, - want: oneCollectedEvent, - }, - { - name: "nomatch one entry by missing value", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ - "type": { - []byte("cancel"), - []byte("propose"), - }, - }), - }, - te: events14000, - want: noCollectedEvents, - }, - { - name: "nomatch one entry by missing key", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ - "method": { - []byte("approval"), - }, - }), - }, - te: events14000, - want: noCollectedEvents, - }, - { - name: "match one entry with multiple keys", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ - "type": { - []byte("approval"), - }, - "signer": { - []byte("addr1"), - }, - }), - }, - te: events14000, - want: oneCollectedEvent, - }, - { - name: "nomatch one entry with one mismatching key", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ - "type": { - []byte("approval"), - }, - "approver": { - []byte("addr1"), - }, - }), - }, - te: 
events14000, - want: noCollectedEvents, - }, - { - name: "nomatch one entry with one mismatching value", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ - "type": { - []byte("approval"), - }, - "signer": { - []byte("addr2"), - }, - }), - }, - te: events14000, - want: noCollectedEvents, - }, - { - name: "nomatch one entry with one unindexed key", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ - "amount": { - []byte("2988181"), - }, - }), - }, - te: events14000, - want: noCollectedEvents, - }, - } - - for _, tc := range testCases { - tc := tc // appease lint - t.Run(tc.name, func(t *testing.T) { - if err := ei.prefillFilter(context.Background(), tc.filter, false); err != nil { - require.NoError(t, err, "prefill filter events") - } - - coll := tc.filter.TakeCollectedEvents(context.Background()) - require.ElementsMatch(t, coll, tc.want) - }) - } -} - -func TestEventIndexPrefillFilterExcludeReverted(t *testing.T) { - rng := pseudo.New(pseudo.NewSource(299792458)) - a1 := randomF4Addr(t, rng) - a2 := randomF4Addr(t, rng) - a3 := randomF4Addr(t, rng) - - a1ID := abi.ActorID(1) - a2ID := abi.ActorID(2) - - addrMap := addressMap{} - addrMap.add(a1ID, a1) - addrMap.add(a2ID, a2) - - ev1 := fakeEvent( - a1ID, - []kv{ - {k: "type", v: []byte("approval")}, - {k: "signer", v: []byte("addr1")}, - }, - []kv{ - {k: "amount", v: []byte("2988181")}, - }, - ) - ev2 := fakeEvent( - a2ID, - []kv{ - {k: "type", v: []byte("approval")}, - {k: "signer", v: []byte("addr2")}, - }, - []kv{ - {k: "amount", v: []byte("2988182")}, - }, - ) - - st := newStore() - events := []*types.Event{ev1} - revertedEvents := []*types.Event{ev2} - em := executedMessage{ - msg: fakeMessage(randomF4Addr(t, rng), randomF4Addr(t, rng)), - rct: fakeReceipt(t, rng, st, events), - evs: events, - } - revertedEm := executedMessage{ - msg: fakeMessage(randomF4Addr(t, rng), randomF4Addr(t, rng)), - rct: fakeReceipt(t, rng, st, revertedEvents), - evs: revertedEvents, - } - - events14000 := buildTipSetEvents(t, rng, 14000, em) - revertedEvents14000 := buildTipSetEvents(t, rng, 14000, revertedEm) - cid14000, err := events14000.msgTs.Key().Cid() - require.NoError(t, err, "tipset cid") - reveredCID14000, err := revertedEvents14000.msgTs.Key().Cid() - require.NoError(t, err, "tipset cid") - - noCollectedEvents := []*CollectedEvent{} - oneCollectedEvent := []*CollectedEvent{ - { - Entries: ev1.Entries, - EmitterAddr: a1, - EventIdx: 0, - Reverted: false, - Height: 14000, - TipSetKey: events14000.msgTs.Key(), - MsgIdx: 0, - MsgCid: em.msg.Cid(), - }, - } - twoCollectedEvent := []*CollectedEvent{ - { - Entries: ev1.Entries, - EmitterAddr: a1, - EventIdx: 0, - Reverted: false, - Height: 14000, - TipSetKey: events14000.msgTs.Key(), - MsgIdx: 0, - MsgCid: em.msg.Cid(), - }, - { - Entries: ev2.Entries, - EmitterAddr: a2, - EventIdx: 0, - Reverted: true, - Height: 14000, - TipSetKey: revertedEvents14000.msgTs.Key(), - MsgIdx: 0, - MsgCid: revertedEm.msg.Cid(), - }, - } - oneCollectedRevertedEvent := []*CollectedEvent{ - { - Entries: ev2.Entries, - EmitterAddr: a2, - EventIdx: 0, - Reverted: true, - Height: 14000, - TipSetKey: revertedEvents14000.msgTs.Key(), - MsgIdx: 0, - MsgCid: revertedEm.msg.Cid(), - }, - } - - workDir, err := os.MkdirTemp("", "lotusevents") - require.NoError(t, err, "create temporary work directory") - - defer func() { - _ = os.RemoveAll(workDir) - }() - t.Logf("using work dir %q", workDir) - - dbPath := 
filepath.Join(workDir, "actorevents.db") - - ei, err := NewEventIndex(context.Background(), dbPath, nil) - require.NoError(t, err, "create event index") - - tCh := make(chan EventIndexUpdated, 3) - subCh, unSubscribe := ei.SubscribeUpdates() - defer unSubscribe() - go func() { - cnt := 0 - for tu := range subCh { - tCh <- tu - cnt++ - if cnt == 3 { - close(tCh) - return - } - } - }() - - if err := ei.CollectEvents(context.Background(), revertedEvents14000, false, addrMap.ResolveAddress); err != nil { - require.NoError(t, err, "collect reverted events") - } - if err := ei.CollectEvents(context.Background(), revertedEvents14000, true, addrMap.ResolveAddress); err != nil { - require.NoError(t, err, "revert reverted events") - } - if err := ei.CollectEvents(context.Background(), events14000, false, addrMap.ResolveAddress); err != nil { - require.NoError(t, err, "collect events") - } - - _ = <-tCh - _ = <-tCh - _ = <-tCh - - inclusiveTestCases := []struct { - name string - filter *eventFilter - te *TipSetEvents - want []*CollectedEvent - }{ - { - name: "nomatch tipset min height", - filter: &eventFilter{ - minHeight: 14001, - maxHeight: -1, - }, - te: events14000, - want: noCollectedEvents, - }, - { - name: "nomatch tipset max height", - filter: &eventFilter{ - minHeight: -1, - maxHeight: 13999, - }, - te: events14000, - want: noCollectedEvents, - }, - { - name: "match tipset min height", - filter: &eventFilter{ - minHeight: 14000, - maxHeight: -1, - }, - te: events14000, - want: twoCollectedEvent, - }, - { - name: "match tipset cid", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - tipsetCid: cid14000, - }, - te: events14000, - want: oneCollectedEvent, - }, - { - name: "match tipset cid", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - tipsetCid: reveredCID14000, - }, - te: revertedEvents14000, - want: oneCollectedRevertedEvent, - }, - { - name: "nomatch address", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - addresses: []address.Address{a3}, - }, - te: events14000, - want: noCollectedEvents, - }, - { - name: "match address 2", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - addresses: []address.Address{a2}, - }, - te: revertedEvents14000, - want: oneCollectedRevertedEvent, - }, - { - name: "match address 1", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - addresses: []address.Address{a1}, - }, - te: events14000, - want: oneCollectedEvent, - }, - { - name: "match one entry", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ - "type": { - []byte("approval"), - }, - }), - }, - te: events14000, - want: twoCollectedEvent, - }, - { - name: "match one entry with alternate values", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ - "type": { - []byte("cancel"), - []byte("propose"), - []byte("approval"), - }, - }), - }, - te: events14000, - want: twoCollectedEvent, - }, - { - name: "nomatch one entry by missing value", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ - "type": { - []byte("cancel"), - []byte("propose"), - }, - }), - }, - te: events14000, - want: noCollectedEvents, - }, - { - name: "nomatch one entry by missing key", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ - "method": { - []byte("approval"), - }, - }), - }, - te: events14000, - want: noCollectedEvents, - 
}, - { - name: "match one entry with multiple keys", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ - "type": { - []byte("approval"), - }, - "signer": { - []byte("addr1"), - }, - }), - }, - te: events14000, - want: oneCollectedEvent, - }, - { - name: "match one entry with multiple keys", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ - "type": { - []byte("approval"), - }, - "signer": { - []byte("addr2"), - }, - }), - }, - te: revertedEvents14000, - want: oneCollectedRevertedEvent, - }, - { - name: "nomatch one entry with one mismatching key", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ - "type": { - []byte("approval"), - }, - "approver": { - []byte("addr1"), - }, - }), - }, - te: events14000, - want: noCollectedEvents, - }, - { - name: "nomatch one entry with one mismatching value", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ - "type": { - []byte("approval"), - }, - "signer": { - []byte("addr3"), - }, - }), - }, - te: events14000, - want: noCollectedEvents, - }, - { - name: "nomatch one entry with one unindexed key", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ - "amount": { - []byte("2988181"), - }, - }), - }, - te: events14000, - want: noCollectedEvents, - }, - { - name: "nomatch one entry with one unindexed key", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ - "amount": { - []byte("2988182"), - }, - }), - }, - te: events14000, - want: noCollectedEvents, - }, - } - - exclusiveTestCases := []struct { - name string - filter *eventFilter - te *TipSetEvents - want []*CollectedEvent - }{ - { - name: "nomatch tipset min height", - filter: &eventFilter{ - minHeight: 14001, - maxHeight: -1, - }, - te: events14000, - want: noCollectedEvents, - }, - { - name: "nomatch tipset max height", - filter: &eventFilter{ - minHeight: -1, - maxHeight: 13999, - }, - te: events14000, - want: noCollectedEvents, - }, - { - name: "match tipset min height", - filter: &eventFilter{ - minHeight: 14000, - maxHeight: -1, - }, - te: events14000, - want: oneCollectedEvent, - }, - { - name: "match tipset cid", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - tipsetCid: cid14000, - }, - te: events14000, - want: oneCollectedEvent, - }, - { - name: "match tipset cid but reverted", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - tipsetCid: reveredCID14000, - }, - te: revertedEvents14000, - want: noCollectedEvents, - }, - { - name: "nomatch address", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - addresses: []address.Address{a3}, - }, - te: events14000, - want: noCollectedEvents, - }, - { - name: "nomatch address 2 but reverted", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - addresses: []address.Address{a2}, - }, - te: revertedEvents14000, - want: noCollectedEvents, - }, - { - name: "match address", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - addresses: []address.Address{a1}, - }, - te: events14000, - want: oneCollectedEvent, - }, - { - name: "match one entry", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ - "type": { - []byte("approval"), - }, - }), - }, - te: events14000, - 
want: oneCollectedEvent, - }, - { - name: "match one entry with alternate values", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ - "type": { - []byte("cancel"), - []byte("propose"), - []byte("approval"), - }, - }), - }, - te: events14000, - want: oneCollectedEvent, - }, - { - name: "nomatch one entry by missing value", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ - "type": { - []byte("cancel"), - []byte("propose"), - }, - }), - }, - te: events14000, - want: noCollectedEvents, - }, - { - name: "nomatch one entry by missing key", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ - "method": { - []byte("approval"), - }, - }), - }, - te: events14000, - want: noCollectedEvents, - }, - { - name: "match one entry with multiple keys", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ - "type": { - []byte("approval"), - }, - "signer": { - []byte("addr1"), - }, - }), - }, - te: events14000, - want: oneCollectedEvent, - }, - { - name: "nomatch one entry with one mismatching key", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ - "type": { - []byte("approval"), - }, - "approver": { - []byte("addr1"), - }, - }), - }, - te: events14000, - want: noCollectedEvents, - }, - { - name: "nomatch one entry with matching reverted value", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ - "type": { - []byte("approval"), - }, - "signer": { - []byte("addr2"), - }, - }), - }, - te: events14000, - want: noCollectedEvents, - }, - { - name: "nomatch one entry with one mismatching value", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ - "type": { - []byte("approval"), - }, - "signer": { - []byte("addr3"), - }, - }), - }, - te: events14000, - want: noCollectedEvents, - }, - { - name: "nomatch one entry with one unindexed key", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ - "amount": { - []byte("2988181"), - }, - }), - }, - te: events14000, - want: noCollectedEvents, - }, - } - - for _, tc := range inclusiveTestCases { - tc := tc // appease lint - t.Run(tc.name, func(t *testing.T) { - if err := ei.prefillFilter(context.Background(), tc.filter, false); err != nil { - require.NoError(t, err, "prefill filter events") - } - - coll := tc.filter.TakeCollectedEvents(context.Background()) - require.ElementsMatch(t, coll, tc.want, tc.name) - }) - } - - for _, tc := range exclusiveTestCases { - tc := tc // appease lint - t.Run(tc.name, func(t *testing.T) { - if err := ei.prefillFilter(context.Background(), tc.filter, true); err != nil { - require.NoError(t, err, "prefill filter events") - } - - coll := tc.filter.TakeCollectedEvents(context.Background()) - require.ElementsMatch(t, coll, tc.want, tc.name) - }) - } -} - -// TestQueryPlan is to ensure that future modifications to the db schema, or future upgrades to -// sqlite, do not change the query plan of the prepared statements used by the event index such that -// queries hit undesirable indexes which are likely to slow down the query. 
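// For orientation (illustrative only; exact EXPLAIN QUERY PLAN output varies by
// SQLite version), a healthy plan for a height-constrained query begins with an
// index search such as:
//
//	SEARCH event USING INDEX event_height (height=?)
//
// whereas a plan that begins with "SCAN event" indicates a full-table scan and
// fails the index check performed below.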
-// Changes that break this test need to be sure that the query plan is still efficient for the -// expected query patterns. -func TestQueryPlan(t *testing.T) { - ei, err := NewEventIndex(context.Background(), filepath.Join(t.TempDir(), "actorevents.db"), nil) - require.NoError(t, err, "create event index") - - verifyQueryPlan := func(stmt string) { - rows, err := ei.db.Query("EXPLAIN QUERY PLAN " + strings.Replace(stmt, "?", "1", -1)) - require.NoError(t, err, "explain query plan for query: "+stmt) - defer func() { - require.NoError(t, rows.Close()) - }() - // First response to EXPLAIN QUERY PLAN should show us the use of an index that we want to - // encounter first to narrow down the search space - either a height or tipset_key_cid index - // - sqlite_autoindex_events_seen_1 is for the UNIQUE constraint on events_seen - // - events_seen_height and events_seen_tipset_key_cid are explicit indexes on events_seen - // - event_height and event_tipset_key_cid are explicit indexes on event - rows.Next() - var id, parent, notused, detail string - require.NoError(t, rows.Scan(&id, &parent, ¬used, &detail), "scan explain query plan for query: "+stmt) - detail = strings.TrimSpace(detail) - var expectedIndexes = []string{ - "sqlite_autoindex_events_seen_1", - "events_seen_height", - "events_seen_tipset_key_cid", - "event_height", - "event_tipset_key_cid", - } - indexUsed := false - for _, index := range expectedIndexes { - if strings.Contains(detail, " INDEX "+index) { - indexUsed = true - break - } - } - require.True(t, indexUsed, "index used for query: "+stmt+" detail: "+detail) - - stmt = regexp.MustCompile(`(?m)^\s+`).ReplaceAllString(stmt, " ") // remove all leading whitespace from the statement - stmt = strings.Replace(stmt, "\n", "", -1) // remove all newlines from the statement - t.Logf("[%s] has plan start: %s", stmt, detail) - } - - // Test the hard-coded select and update queries - stmtMap := preparedStatementMapping(&preparedStatements{}) - for _, stmt := range stmtMap { - if strings.HasPrefix(strings.TrimSpace(strings.ToLower(stmt)), "insert") { - continue - } - verifyQueryPlan(stmt) - } - - // Test the dynamic prefillFilter queries - prefillCases := []*eventFilter{ - {}, - {minHeight: 14000, maxHeight: 14000}, - {minHeight: 14000, maxHeight: 15000}, - {tipsetCid: cid.MustParse("bafkqaaa")}, - {minHeight: 14000, maxHeight: 14000, addresses: []address.Address{address.TestAddress}}, - {minHeight: 14000, maxHeight: 15000, addresses: []address.Address{address.TestAddress}}, - {tipsetCid: cid.MustParse("bafkqaaa"), addresses: []address.Address{address.TestAddress}}, - {minHeight: 14000, maxHeight: 14000, addresses: []address.Address{address.TestAddress, address.TestAddress}}, - {minHeight: 14000, maxHeight: 15000, addresses: []address.Address{address.TestAddress, address.TestAddress}}, - {tipsetCid: cid.MustParse("bafkqaaa"), addresses: []address.Address{address.TestAddress, address.TestAddress}}, - {minHeight: 14000, maxHeight: 14000, keysWithCodec: keysToKeysWithCodec(map[string][][]byte{"type": {[]byte("approval")}})}, - {minHeight: 14000, maxHeight: 15000, keysWithCodec: keysToKeysWithCodec(map[string][][]byte{"type": {[]byte("approval")}})}, - {tipsetCid: cid.MustParse("bafkqaaa"), keysWithCodec: keysToKeysWithCodec(map[string][][]byte{"type": {[]byte("approval")}})}, - {minHeight: 14000, maxHeight: 14000, keysWithCodec: keysToKeysWithCodec(map[string][][]byte{"type": {[]byte("approval")}, "signer": {[]byte("addr1")}})}, - {minHeight: 14000, maxHeight: 15000, keysWithCodec: 
keysToKeysWithCodec(map[string][][]byte{"type": {[]byte("approval")}, "signer": {[]byte("addr1")}})}, - {tipsetCid: cid.MustParse("bafkqaaa"), keysWithCodec: keysToKeysWithCodec(map[string][][]byte{"type": {[]byte("approval")}, "signer": {[]byte("addr1")}})}, - {minHeight: 14000, maxHeight: 14000, addresses: []address.Address{address.TestAddress, address.TestAddress}, keysWithCodec: keysToKeysWithCodec(map[string][][]byte{"type": {[]byte("approval")}, "signer": {[]byte("addr1")}})}, - {minHeight: 14000, maxHeight: 15000, addresses: []address.Address{address.TestAddress, address.TestAddress}, keysWithCodec: keysToKeysWithCodec(map[string][][]byte{"type": {[]byte("approval")}, "signer": {[]byte("addr1")}})}, - {tipsetCid: cid.MustParse("bafkqaaa"), addresses: []address.Address{address.TestAddress, address.TestAddress}, keysWithCodec: keysToKeysWithCodec(map[string][][]byte{"type": {[]byte("approval")}, "signer": {[]byte("addr1")}})}, - } - for _, filter := range prefillCases { - _, query := makePrefillFilterQuery(filter, true) - verifyQueryPlan(query) - _, query = makePrefillFilterQuery(filter, false) - verifyQueryPlan(query) - } -} diff --git a/chain/events/observer.go b/chain/events/observer.go index 0b021f9965b..896440eacbc 100644 --- a/chain/events/observer.go +++ b/chain/events/observer.go @@ -157,13 +157,14 @@ func (o *observer) applyChanges(ctx context.Context, changes []*api.HeadChange) } func (o *observer) headChange(ctx context.Context, rev, app []*types.TipSet) error { + o.lk.Lock() + defer o.lk.Unlock() + ctx, span := trace.StartSpan(ctx, "events.HeadChange") span.AddAttributes(trace.Int64Attribute("reverts", int64(len(rev)))) span.AddAttributes(trace.Int64Attribute("applies", int64(len(app)))) - o.lk.Lock() head := o.head - o.lk.Unlock() defer func() { span.AddAttributes(trace.Int64Attribute("endHeight", int64(head.Height()))) @@ -199,14 +200,12 @@ func (o *observer) headChange(ctx context.Context, rev, app []*types.TipSet) err // 1. We need to get the observers every time in case some registered/deregistered. // 2. We need to atomically set the head so new observers don't see events twice or // skip them. - o.lk.Lock() - observers := o.observers + o.head = to - o.lk.Unlock() - for _, obs := range observers { + for _, obs := range o.observers { if err := obs.Revert(ctx, from, to); err != nil { - log.Errorf("observer %T failed to apply tipset %s (%d) with: %s", obs, from.Key(), from.Height(), err) + log.Errorf("observer %T failed to revert tipset %s (%d) with: %s", obs, from.Key(), from.Height(), err) } } @@ -225,14 +224,11 @@ func (o *observer) headChange(ctx context.Context, rev, app []*types.TipSet) err ) } - o.lk.Lock() - observers := o.observers o.head = to - o.lk.Unlock() - for _, obs := range observers { + for _, obs := range o.observers { if err := obs.Apply(ctx, head, to); err != nil { - log.Errorf("observer %T failed to revert tipset %s (%d) with: %s", obs, to.Key(), to.Height(), err) + log.Errorf("observer %T failed to apply tipset %s (%d) with: %s", obs, to.Key(), to.Height(), err) } } if to.Height() > o.maxHeight { @@ -244,6 +240,41 @@ func (o *observer) headChange(ctx context.Context, rev, app []*types.TipSet) err return nil } +// ObserveAndBlock registers the observer and returns the current tipset along with an unlock function. +// +// This method guarantees that the observer will receive tipset updates starting from the returned tipset. +// It blocks all tipset updates for all clients until the returned unlock function is called. 
+// +// The typical usage pattern is: +// 1. Call ObserveAndBlock to register the observer +// 2. Perform any necessary initialization using the returned current tipset +// 3. Call the unlock function to start receiving updates +// +// Important notes: +// - This method should only be called after the observer has been started +// - The unlock function must be called to prevent blocking of tipset updates for all registered observers +// - This method returns an error if the observer hasn't started yet +// +// Returns: +// - *types.TipSet: The current tipset at the time of registration +// - func(): An unlock function that must be called to start receiving updates +// - error: An error if the observer hasn't started yet +func (o *observer) ObserveAndBlock(obs TipSetObserver) (*types.TipSet, func(), error) { + o.lk.Lock() + currentHead := o.head + if currentHead == nil { + o.lk.Unlock() + return nil, func() {}, xerrors.New("observer not started") + } + + o.observers = append(o.observers, obs) + unlockHandle := func() { + o.lk.Unlock() + } + + return currentHead, unlockHandle, nil +} + // Observe registers the observer, and returns the current tipset. The observer is guaranteed to // observe events starting at this tipset. // diff --git a/chain/gen/gen.go b/chain/gen/gen.go index 753e0824bd4..6747ff4098a 100644 --- a/chain/gen/gen.go +++ b/chain/gen/gen.go @@ -35,7 +35,6 @@ import ( "github.com/filecoin-project/lotus/chain/consensus" "github.com/filecoin-project/lotus/chain/consensus/filcns" genesis2 "github.com/filecoin-project/lotus/chain/gen/genesis" - "github.com/filecoin-project/lotus/chain/index" "github.com/filecoin-project/lotus/chain/proofs" proofsffi "github.com/filecoin-project/lotus/chain/proofs/ffi" "github.com/filecoin-project/lotus/chain/rand" @@ -262,7 +261,7 @@ func NewGeneratorWithSectorsAndUpgradeSchedule(numSectors int, us stmgr.UpgradeS //return nil, xerrors.Errorf("creating drand beacon: %w", err) //} - sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), sys, us, beac, ds, index.DummyMsgIndex) + sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), sys, us, beac, ds, nil) if err != nil { return nil, xerrors.Errorf("initing stmgr: %w", err) } diff --git a/chain/index/api.go b/chain/index/api.go new file mode 100644 index 00000000000..1bb19de7a67 --- /dev/null +++ b/chain/index/api.go @@ -0,0 +1,444 @@ +package index + +import ( + "context" + "database/sql" + "errors" + + "github.com/ipfs/go-cid" + cbor "github.com/ipfs/go-ipld-cbor" + ipld "github.com/ipfs/go-ipld-format" + cbg "github.com/whyrusleeping/cbor-gen" + "golang.org/x/xerrors" + + amt4 "github.com/filecoin-project/go-amt-ipld/v4" + "github.com/filecoin-project/go-state-types/abi" + + bstore "github.com/filecoin-project/lotus/blockstore" + "github.com/filecoin-project/lotus/chain/types" +) + +var ErrChainForked = xerrors.New("chain forked") + +func (si *SqliteIndexer) ChainValidateIndex(ctx context.Context, epoch abi.ChainEpoch, backfill bool) (*types.IndexValidation, error) { + // return an error if the indexer is not started + if !si.started { + return nil, errors.New("ChainValidateIndex called before indexer start") + } + + // return an error if the indexer is closed + if si.isClosed() { + return nil, errors.New("ChainValidateIndex called on closed indexer") + } + + // this API only works for epoch < head because of deferred execution in Filecoin + head := si.cs.GetHeaviestTipSet() + if epoch >= head.Height() { + return nil, 
xerrors.Errorf("cannot validate index at epoch %d, can only validate at an epoch less than chain head epoch %d", epoch, head.Height()) + } + + // fetch the tipset at the given epoch on the canonical chain + expectedTs, err := si.cs.GetTipsetByHeight(ctx, epoch, head, true) + if err != nil { + return nil, xerrors.Errorf("failed to get tipset at height %d: %w", epoch, err) + } + + // we need to take a write lock here so that back-filling does not race with real-time chain indexing + if backfill { + si.writerLk.Lock() + defer si.writerLk.Unlock() + } + + var isIndexEmpty bool + if err := si.stmts.isIndexEmptyStmt.QueryRowContext(ctx).Scan(&isIndexEmpty); err != nil { + return nil, xerrors.Errorf("failed to check if index is empty: %w", err) + } + + // Canonical chain has a null round at the epoch -> return if index is empty otherwise validate that index also + // has a null round at this epoch i.e. it does not have anything indexed at all for this epoch + if expectedTs.Height() != epoch { + if isIndexEmpty { + return &types.IndexValidation{ + Height: epoch, + IsNullRound: true, + }, nil + } + // validate the db has a hole here and error if not, we don't attempt to repair because something must be very wrong for this to fail + return si.validateIsNullRound(ctx, epoch) + } + + // if the index is empty -> short-circuit and simply backfill if applicable + if isIndexEmpty { + if !backfill { + return nil, makeBackfillRequiredErr(epoch) + } + return si.backfillMissingTipset(ctx, expectedTs) + } + // see if the tipset at this epoch is already indexed or if we need to backfill + revertedCount, nonRevertedCount, err := si.getTipsetCountsAtHeight(ctx, epoch) + if err != nil { + if err == sql.ErrNoRows { + if !backfill { + return nil, makeBackfillRequiredErr(epoch) + } + return si.backfillMissingTipset(ctx, expectedTs) + } + return nil, xerrors.Errorf("failed to get tipset counts at height %d: %w", epoch, err) + } + + switch { + case revertedCount == 0 && nonRevertedCount == 0: + // no tipsets at this epoch in the index, backfill + if !backfill { + return nil, makeBackfillRequiredErr(epoch) + } + return si.backfillMissingTipset(ctx, expectedTs) + + case revertedCount > 0 && nonRevertedCount == 0: + return nil, xerrors.Errorf("index corruption: height %d only has reverted tipsets", epoch) + + case nonRevertedCount > 1: + return nil, xerrors.Errorf("index corruption: height %d has multiple non-reverted tipsets", epoch) + } + + // fetch the non-reverted tipset at this epoch + var indexedTsKeyCidBytes []byte + err = si.stmts.getNonRevertedTipsetAtHeightStmt.QueryRowContext(ctx, epoch).Scan(&indexedTsKeyCidBytes) + if err != nil { + return nil, xerrors.Errorf("failed to get non-reverted tipset at height %d: %w", epoch, err) + } + + indexedTsKeyCid, err := cid.Cast(indexedTsKeyCidBytes) + if err != nil { + return nil, xerrors.Errorf("failed to cast tipset key cid: %w", err) + } + expectedTsKeyCid, err := expectedTs.Key().Cid() + if err != nil { + return nil, xerrors.Errorf("failed to get tipset key cid: %w", err) + } + if !indexedTsKeyCid.Equals(expectedTsKeyCid) { + return nil, xerrors.Errorf("index corruption: indexed tipset at height %d has key %s, but canonical chain has %s", epoch, indexedTsKeyCid, expectedTsKeyCid) + } + + getAndVerifyIndexedData := func() (*indexedTipSetData, error) { + indexedData, err := si.getIndexedTipSetData(ctx, expectedTs) + if err != nil { + return nil, xerrors.Errorf("failed to get indexed data for tipset at height %d: %w", expectedTs.Height(), err) + } + if indexedData == nil 
{ + return nil, xerrors.Errorf("nil indexed data for tipset at height %d", expectedTs.Height()) + } + if err = si.verifyIndexedData(ctx, expectedTs, indexedData); err != nil { + return nil, err + } + return indexedData, nil + } + + indexedData, err := getAndVerifyIndexedData() + var bf bool + if err != nil { + if !backfill { + return nil, xerrors.Errorf("failed to verify indexed data at height %d: %w", expectedTs.Height(), err) + } + + log.Warnf("failed to verify indexed data at height %d; err:%s; backfilling once and validating again", expectedTs.Height(), err) + if _, err := si.backfillMissingTipset(ctx, expectedTs); err != nil { + return nil, xerrors.Errorf("failed to backfill missing tipset at height %d during validation; err: %w", expectedTs.Height(), err) + } + + indexedData, err = getAndVerifyIndexedData() + if err != nil { + return nil, xerrors.Errorf("failed to verify indexed data at height %d after backfill: %w", expectedTs.Height(), err) + } + bf = true + } + + return &types.IndexValidation{ + TipSetKey: expectedTs.Key(), + Height: expectedTs.Height(), + IndexedMessagesCount: indexedData.nonRevertedMessageCount, + IndexedEventsCount: indexedData.nonRevertedEventCount, + IndexedEventEntriesCount: indexedData.nonRevertedEventEntriesCount, + Backfilled: bf, + }, nil +} + +func (si *SqliteIndexer) validateIsNullRound(ctx context.Context, epoch abi.ChainEpoch) (*types.IndexValidation, error) { + // make sure we do not have tipset(reverted or non-reverted) indexed at this epoch + var isNullRound bool + err := si.stmts.hasNullRoundAtHeightStmt.QueryRowContext(ctx, epoch).Scan(&isNullRound) + if err != nil { + return nil, xerrors.Errorf("failed to check if null round exists at height %d: %w", epoch, err) + } + if !isNullRound { + return nil, xerrors.Errorf("index corruption: height %d should be a null round but is not", epoch) + } + + return &types.IndexValidation{ + Height: epoch, + IsNullRound: true, + }, nil +} + +func (si *SqliteIndexer) getTipsetCountsAtHeight(ctx context.Context, height abi.ChainEpoch) (revertedCount, nonRevertedCount int, err error) { + err = si.stmts.countTipsetsAtHeightStmt.QueryRowContext(ctx, height).Scan(&revertedCount, &nonRevertedCount) + if err != nil { + if err == sql.ErrNoRows { + // No tipsets found at this height + return 0, 0, nil + } + return 0, 0, xerrors.Errorf("failed to query tipset counts at height %d: %w", height, err) + } + + return revertedCount, nonRevertedCount, nil +} + +type indexedTipSetData struct { + nonRevertedMessageCount uint64 + nonRevertedEventCount uint64 + nonRevertedEventEntriesCount uint64 +} + +// getIndexedTipSetData fetches the indexed tipset data for a tipset +func (si *SqliteIndexer) getIndexedTipSetData(ctx context.Context, ts *types.TipSet) (*indexedTipSetData, error) { + tsKeyCidBytes, err := toTipsetKeyCidBytes(ts) + if err != nil { + return nil, xerrors.Errorf("failed to get tipset key cid: %w", err) + } + + var data indexedTipSetData + err = withTx(ctx, si.db, func(tx *sql.Tx) error { + if err = tx.Stmt(si.stmts.getNonRevertedTipsetMessageCountStmt).QueryRowContext(ctx, tsKeyCidBytes).Scan(&data.nonRevertedMessageCount); err != nil { + return xerrors.Errorf("failed to query non reverted message count: %w", err) + } + + if err = tx.Stmt(si.stmts.getNonRevertedTipsetEventCountStmt).QueryRowContext(ctx, tsKeyCidBytes).Scan(&data.nonRevertedEventCount); err != nil { + return xerrors.Errorf("failed to query non reverted event count: %w", err) + } + + if err = 
tx.Stmt(si.stmts.getNonRevertedTipsetEventEntriesCountStmt).QueryRowContext(ctx, tsKeyCidBytes).Scan(&data.nonRevertedEventEntriesCount); err != nil { + return xerrors.Errorf("failed to query non reverted event entries count: %w", err) + } + + return nil + }) + + return &data, err +} + +// verifyIndexedData verifies that the indexed data for a tipset is correct +// by comparing the number of messages and events in the chainstore to the number of messages and events indexed. +// +// Notes: +// +// - Events are loaded from the executed messages of the tipset at the next epoch (ts.Height() + 1). +// - This is not a comprehensive verification because we only compare counts, assuming that a match +// means that the entries are correct. A future iteration may compare message and event details to +// confirm that they are what is expected. +func (si *SqliteIndexer) verifyIndexedData(ctx context.Context, ts *types.TipSet, indexedData *indexedTipSetData) (err error) { + tsKeyCid, err := ts.Key().Cid() + if err != nil { + return xerrors.Errorf("failed to get tipset key cid at height %d: %w", ts.Height(), err) + } + + executionTs, err := si.getNextTipset(ctx, ts) + if err != nil { + return xerrors.Errorf("failed to get next tipset for height %d: %w", ts.Height(), err) + } + + // given that `ts` is on the canonical chain and `executionTs` is the next tipset in the chain + // `ts` can not have reverted events + var hasRevertedEventsInTipset bool + err = si.stmts.hasRevertedEventsInTipsetStmt.QueryRowContext(ctx, tsKeyCid.Bytes()).Scan(&hasRevertedEventsInTipset) + if err != nil { + return xerrors.Errorf("failed to check if there are reverted events in tipset for height %d: %w", ts.Height(), err) + } + if hasRevertedEventsInTipset { + return xerrors.Errorf("index corruption: reverted events found for an executed tipset %s at height %d", tsKeyCid, ts.Height()) + } + + executedMsgs, err := si.executedMessagesLoaderFunc(ctx, si.cs, ts, executionTs) + if err != nil { + return xerrors.Errorf("failed to load executed messages for height %d: %w", ts.Height(), err) + } + + var ( + totalEventsCount = uint64(0) + totalEventEntriesCount = uint64(0) + ) + for _, emsg := range executedMsgs { + totalEventsCount += uint64(len(emsg.evs)) + for _, ev := range emsg.evs { + totalEventEntriesCount += uint64(len(ev.Entries)) + } + } + + if totalEventsCount != indexedData.nonRevertedEventCount { + return xerrors.Errorf("event count mismatch for height %d: chainstore has %d, index has %d", ts.Height(), totalEventsCount, indexedData.nonRevertedEventCount) + } + + totalExecutedMsgCount := uint64(len(executedMsgs)) + if totalExecutedMsgCount != indexedData.nonRevertedMessageCount { + return xerrors.Errorf("message count mismatch for height %d: chainstore has %d, index has %d", ts.Height(), totalExecutedMsgCount, indexedData.nonRevertedMessageCount) + } + + if indexedData.nonRevertedEventEntriesCount != totalEventEntriesCount { + return xerrors.Errorf("event entries count mismatch for height %d: chainstore has %d, index has %d", ts.Height(), totalEventEntriesCount, indexedData.nonRevertedEventEntriesCount) + } + + // compare the events AMT root between the indexed events and the events in the chain state + for _, emsg := range executedMsgs { + indexedRoot, hasEvents, err := si.amtRootForEvents(ctx, tsKeyCid, emsg.msg.Cid()) + if err != nil { + return xerrors.Errorf("failed to generate AMT root for indexed events of message %s at height %d: %w", emsg.msg.Cid(), ts.Height(), err) + } + + if !hasEvents && emsg.rct.EventsRoot == nil 
{ + // No events in index and no events in receipt, this is fine + continue + } + + if hasEvents && emsg.rct.EventsRoot == nil { + return xerrors.Errorf("index corruption: events found in index for message %s at height %d, but message receipt has no events root", emsg.msg.Cid(), ts.Height()) + } + + if !hasEvents && emsg.rct.EventsRoot != nil { + return xerrors.Errorf("index corruption: no events found in index for message %s at height %d, but message receipt has events root %s", emsg.msg.Cid(), ts.Height(), emsg.rct.EventsRoot) + } + + // Both index and receipt have events, compare the roots + if !indexedRoot.Equals(*emsg.rct.EventsRoot) { + return xerrors.Errorf("index corruption: events AMT root mismatch for message %s at height %d. Index root: %s, Receipt root: %s", emsg.msg.Cid(), ts.Height(), indexedRoot, emsg.rct.EventsRoot) + } + } + + return nil +} + +func (si *SqliteIndexer) backfillMissingTipset(ctx context.Context, ts *types.TipSet) (*types.IndexValidation, error) { + executionTs, err := si.getNextTipset(ctx, ts) + if err != nil { + return nil, xerrors.Errorf("failed to get next tipset at height %d: %w", ts.Height(), err) + } + + backfillFunc := func() error { + return withTx(ctx, si.db, func(tx *sql.Tx) error { + return si.indexTipsetWithParentEvents(ctx, tx, ts, executionTs) + }) + } + + if err := backfillFunc(); err != nil { + if ipld.IsNotFound(err) { + return nil, xerrors.Errorf("failed to backfill tipset at epoch %d: chain store does not contain data: %w", ts.Height(), err) + } + if ctx.Err() != nil { + log.Errorf("failed to backfill tipset at epoch %d due to context cancellation: %s", ts.Height(), err) + } + return nil, xerrors.Errorf("failed to backfill tipset at epoch %d; err: %w", ts.Height(), err) + } + + indexedData, err := si.getIndexedTipSetData(ctx, ts) + if err != nil { + return nil, xerrors.Errorf("failed to get indexed tipset data: %w", err) + } + + return &types.IndexValidation{ + TipSetKey: ts.Key(), + Height: ts.Height(), + Backfilled: true, + IndexedMessagesCount: indexedData.nonRevertedMessageCount, + IndexedEventsCount: indexedData.nonRevertedEventCount, + IndexedEventEntriesCount: indexedData.nonRevertedEventEntriesCount, + }, nil +} + +func (si *SqliteIndexer) getNextTipset(ctx context.Context, ts *types.TipSet) (*types.TipSet, error) { + nextEpochTs, err := si.cs.GetTipsetByHeight(ctx, ts.Height()+1, nil, false) + if err != nil { + return nil, xerrors.Errorf("failed to get tipset at height %d: %w", ts.Height()+1, err) + } + + if nextEpochTs.Parents() != ts.Key() { + return nil, xerrors.Errorf("chain forked at height %d; please retry your request; err: %w", ts.Height(), ErrChainForked) + } + + return nextEpochTs, nil +} + +func makeBackfillRequiredErr(height abi.ChainEpoch) error { + return xerrors.Errorf("missing tipset at height %d in the chain index, set backfill flag to true to fix", height) +} + +// amtRootForEvents generates the events AMT root CID for a given message's events, and returns +// whether the message has events and a fatal error if one occurred. 
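+// The root is recomputed by loading the indexed events into an in-memory AMT with the same bit
+// width as the on-chain events AMT, so it can be compared directly against the EventsRoot in the
+// message receipt.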
+func (si *SqliteIndexer) amtRootForEvents( + ctx context.Context, + tsKeyCid cid.Cid, + msgCid cid.Cid, +) (cid.Cid, bool, error) { + events := make([]cbg.CBORMarshaler, 0) + + err := withTx(ctx, si.db, func(tx *sql.Tx) error { + rows, err := tx.Stmt(si.stmts.getEventIdAndEmitterIdStmt).QueryContext(ctx, tsKeyCid.Bytes(), msgCid.Bytes()) + if err != nil { + return xerrors.Errorf("failed to query events: %w", err) + } + defer func() { + _ = rows.Close() + }() + + for rows.Next() { + var eventId int + var actorId int64 + if err := rows.Scan(&eventId, &actorId); err != nil { + return xerrors.Errorf("failed to scan row: %w", err) + } + + event := types.Event{ + Emitter: abi.ActorID(actorId), + Entries: make([]types.EventEntry, 0), + } + + rows2, err := tx.Stmt(si.stmts.getEventEntriesStmt).QueryContext(ctx, eventId) + if err != nil { + return xerrors.Errorf("failed to query event entries: %w", err) + } + defer func() { + _ = rows2.Close() + }() + + for rows2.Next() { + var flags []byte + var key string + var codec uint64 + var value []byte + if err := rows2.Scan(&flags, &key, &codec, &value); err != nil { + return xerrors.Errorf("failed to scan row: %w", err) + } + entry := types.EventEntry{ + Flags: flags[0], + Key: key, + Codec: codec, + Value: value, + } + event.Entries = append(event.Entries, entry) + } + + events = append(events, &event) + } + + return nil + }) + + if err != nil { + return cid.Undef, false, xerrors.Errorf("failed to retrieve events for message %s in tipset %s: %w", msgCid, tsKeyCid, err) + } + + // construct the AMT from our slice to an in-memory IPLD store just so we can get the root, + // we don't need the blocks themselves + root, err := amt4.FromArray(ctx, cbor.NewCborStore(bstore.NewMemory()), events, amt4.UseTreeBitWidth(types.EventAMTBitwidth)) + if err != nil { + return cid.Undef, false, xerrors.Errorf("failed to create AMT: %w", err) + } + return root, len(events) > 0, nil +} diff --git a/chain/index/api_test.go b/chain/index/api_test.go new file mode 100644 index 00000000000..93af0eeee9f --- /dev/null +++ b/chain/index/api_test.go @@ -0,0 +1,522 @@ +package index + +import ( + "context" + pseudo "math/rand" + "testing" + "time" + + "github.com/ipfs/go-cid" + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/lotus/chain/types" +) + +func TestValidateIsNullRoundSimple(t *testing.T) { + ctx := context.Background() + seed := time.Now().UnixNano() + t.Logf("seed: %d", seed) + rng := pseudo.New(pseudo.NewSource(seed)) + headHeight := abi.ChainEpoch(100) + + tests := []struct { + name string + epoch abi.ChainEpoch + setupFunc func(*SqliteIndexer) + expectedResult bool + expectError bool + errorContains string + }{ + { + name: "happy path - null round", + epoch: 50, + expectedResult: true, + }, + { + name: "failure - non-null round", + epoch: 50, + setupFunc: func(si *SqliteIndexer) { + insertTipsetMessage(t, si, tipsetMessage{ + tipsetKeyCid: randomCid(t, rng).Bytes(), + height: 50, + reverted: false, + messageCid: randomCid(t, rng).Bytes(), + messageIndex: 0, + }) + }, + expectError: true, + errorContains: "index corruption", + }, + { + name: "edge case - epoch 0", + epoch: 0, + expectedResult: true, + }, + { + name: "edge case - epoch above head", + epoch: headHeight + 1, + expectedResult: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + si, _, _ := setupWithHeadIndexed(t, headHeight, rng) + t.Cleanup(func() { _ = 
si.Close() }) + + if tt.setupFunc != nil { + tt.setupFunc(si) + } + + res, err := si.validateIsNullRound(ctx, tt.epoch) + + if tt.expectError { + require.Error(t, err) + if tt.errorContains != "" { + require.ErrorContains(t, err, tt.errorContains) + } + } else { + require.NoError(t, err) + require.NotNil(t, res) + require.Equal(t, tt.expectedResult, res.IsNullRound) + require.Equal(t, tt.epoch, res.Height) + } + }) + } +} + +func TestFailureHeadHeight(t *testing.T) { + ctx := context.Background() + seed := time.Now().UnixNano() + t.Logf("seed: %d", seed) + rng := pseudo.New(pseudo.NewSource(seed)) + headHeight := abi.ChainEpoch(100) + + si, head, _ := setupWithHeadIndexed(t, headHeight, rng) + t.Cleanup(func() { _ = si.Close() }) + si.Start() + + _, err := si.ChainValidateIndex(ctx, head.Height(), false) + require.Error(t, err) + require.Contains(t, err.Error(), "cannot validate index at epoch") +} + +func TestBackfillNullRound(t *testing.T) { + ctx := context.Background() + seed := time.Now().UnixNano() + t.Logf("seed: %d", seed) + rng := pseudo.New(pseudo.NewSource(seed)) + headHeight := abi.ChainEpoch(100) + + si, _, cs := setupWithHeadIndexed(t, headHeight, rng) + t.Cleanup(func() { _ = si.Close() }) + si.Start() + + nullRoundEpoch := abi.ChainEpoch(50) + nonNullRoundEpoch := abi.ChainEpoch(51) + + // Create a tipset with a height different from the requested epoch + nonNullTs := fakeTipSet(t, rng, nonNullRoundEpoch, []cid.Cid{}) + + // Set up the chainstore to return the non-null tipset for the null round epoch + cs.SetTipsetByHeightAndKey(nullRoundEpoch, nonNullTs.Key(), nonNullTs) + + // Attempt to validate the null round epoch + result, err := si.ChainValidateIndex(ctx, nullRoundEpoch, true) + require.NoError(t, err) + require.NotNil(t, result) + require.False(t, result.Backfilled) + require.True(t, result.IsNullRound) +} + +func TestBackfillReturnsError(t *testing.T) { + ctx := context.Background() + seed := time.Now().UnixNano() + t.Logf("seed: %d", seed) + rng := pseudo.New(pseudo.NewSource(seed)) + headHeight := abi.ChainEpoch(100) + + si, _, cs := setupWithHeadIndexed(t, headHeight, rng) + t.Cleanup(func() { _ = si.Close() }) + si.Start() + + missingEpoch := abi.ChainEpoch(50) + + // Create a tipset for the missing epoch, but don't index it + missingTs := fakeTipSet(t, rng, missingEpoch, []cid.Cid{}) + cs.SetTipsetByHeightAndKey(missingEpoch, missingTs.Key(), missingTs) + + // Attempt to validate the missing epoch with backfill flag set to false + _, err := si.ChainValidateIndex(ctx, missingEpoch, false) + require.Error(t, err) + require.ErrorContains(t, err, "missing tipset at height 50 in the chain index") +} + +func TestBackfillMissingEpoch(t *testing.T) { + ctx := context.Background() + seed := time.Now().UnixNano() + t.Logf("seed: %d", seed) + rng := pseudo.New(pseudo.NewSource(seed)) + headHeight := abi.ChainEpoch(100) + + si, _, cs := setupWithHeadIndexed(t, headHeight, rng) + t.Cleanup(func() { _ = si.Close() }) + si.Start() + + // Initialize address resolver + si.SetActorToDelegatedAddresFunc(func(ctx context.Context, emitter abi.ActorID, ts *types.TipSet) (address.Address, bool) { + idAddr, err := address.NewIDAddress(uint64(emitter)) + if err != nil { + return address.Undef, false + } + return idAddr, true + }) + + missingEpoch := abi.ChainEpoch(50) + + parentTs := fakeTipSet(t, rng, missingEpoch-1, []cid.Cid{}) + cs.SetTipsetByHeightAndKey(missingEpoch-1, parentTs.Key(), parentTs) + + missingTs := fakeTipSet(t, rng, missingEpoch, parentTs.Cids()) + 
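// events for a tipset at epoch N are executed (and their receipts stored) at epoch N+1, so backfilling
+ // requires the parent, the missing tipset and the execution tipset to all be resolvable from the chain store
+ 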
cs.SetTipsetByHeightAndKey(missingEpoch, missingTs.Key(), missingTs) + + executionTs := fakeTipSet(t, rng, missingEpoch+1, missingTs.Key().Cids()) + cs.SetTipsetByHeightAndKey(missingEpoch+1, executionTs.Key(), executionTs) + + // Create fake messages and events + fakeMsg := fakeMessage(randomIDAddr(t, rng), randomIDAddr(t, rng)) + fakeEvent := fakeEvent(1, []kv{{k: "test", v: []byte("value")}, {k: "test2", v: []byte("value2")}}, nil) + + ec := randomCid(t, rng) + executedMsg := executedMessage{ + msg: fakeMsg, + evs: []types.Event{*fakeEvent}, + rct: types.MessageReceipt{ + EventsRoot: &ec, + }, + } + + cs.SetMessagesForTipset(missingTs, []types.ChainMsg{fakeMsg}) + si.setExecutedMessagesLoaderFunc(func(ctx context.Context, cs ChainStore, msgTs, rctTs *types.TipSet) ([]executedMessage, error) { + if msgTs.Height() == missingTs.Height() { + return []executedMessage{executedMsg}, nil + } + return nil, nil + }) + + // Attempt to validate and backfill the missing epoch + result, err := si.ChainValidateIndex(ctx, missingEpoch, true) + require.NoError(t, err) + require.NotNil(t, result) + require.True(t, result.Backfilled) + require.EqualValues(t, missingEpoch, result.Height) + require.Equal(t, uint64(1), result.IndexedMessagesCount) + require.Equal(t, uint64(1), result.IndexedEventsCount) + require.Equal(t, uint64(2), result.IndexedEventEntriesCount) + + // Verify that the epoch is now indexed; validation still fails here + // because the events AMT roots don't match + verificationResult, err := si.ChainValidateIndex(ctx, missingEpoch, false) + require.ErrorContains(t, err, "events AMT root mismatch") + require.Nil(t, verificationResult) + + tsKeyCid, err := missingTs.Key().Cid() + require.NoError(t, err) + + root, b, err := si.amtRootForEvents(ctx, tsKeyCid, fakeMsg.Cid()) + require.NoError(t, err) + require.True(t, b) + executedMsg.rct.EventsRoot = &root + si.setExecutedMessagesLoaderFunc(func(ctx context.Context, cs ChainStore, msgTs, rctTs *types.TipSet) ([]executedMessage, error) { + if msgTs.Height() == missingTs.Height() { + return []executedMessage{executedMsg}, nil + } + return nil, nil + }) + + verificationResult, err = si.ChainValidateIndex(ctx, missingEpoch, false) + require.NoError(t, err) + require.NotNil(t, verificationResult) + require.False(t, verificationResult.Backfilled) + require.Equal(t, result.IndexedMessagesCount, verificationResult.IndexedMessagesCount) + require.Equal(t, result.IndexedEventsCount, verificationResult.IndexedEventsCount) + require.Equal(t, result.IndexedEventEntriesCount, verificationResult.IndexedEventEntriesCount) +} + +func TestIndexCorruption(t *testing.T) { + ctx := context.Background() + seed := time.Now().UnixNano() + t.Logf("seed: %d", seed) + rng := pseudo.New(pseudo.NewSource(seed)) + headHeight := abi.ChainEpoch(100) + + tests := []struct { + name string + setupFunc func(*testing.T, *SqliteIndexer, *dummyChainStore) + epoch abi.ChainEpoch + errorContains string + }{ + { + name: "only reverted tipsets", + setupFunc: func(t *testing.T, si *SqliteIndexer, cs *dummyChainStore) { + epoch := abi.ChainEpoch(50) + ts := fakeTipSet(t, rng, epoch, []cid.Cid{}) + cs.SetTipsetByHeightAndKey(epoch, ts.Key(), ts) + keyBz, err := ts.Key().Cid() + require.NoError(t, err) + + insertTipsetMessage(t, si, tipsetMessage{ + tipsetKeyCid: keyBz.Bytes(), + height: uint64(epoch), + reverted: true, + messageCid: randomCid(t, rng).Bytes(), + messageIndex: 0, + }) + }, + epoch: 50, + errorContains: "index corruption: height 50 only has reverted tipsets", + }, + { + name: "multiple non-reverted tipsets",
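+ // a height should never have more than one non-reverted tipset on the canonical chain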
+ setupFunc: func(t *testing.T, si *SqliteIndexer, cs *dummyChainStore) { + epoch := abi.ChainEpoch(50) + ts1 := fakeTipSet(t, rng, epoch, []cid.Cid{}) + ts2 := fakeTipSet(t, rng, epoch, []cid.Cid{}) + cs.SetTipsetByHeightAndKey(epoch, ts1.Key(), ts1) + + t1Bz, err := toTipsetKeyCidBytes(ts1) + require.NoError(t, err) + t2Bz, err := toTipsetKeyCidBytes(ts2) + require.NoError(t, err) + + insertTipsetMessage(t, si, tipsetMessage{ + tipsetKeyCid: t1Bz, + height: uint64(epoch), + reverted: false, + messageCid: randomCid(t, rng).Bytes(), + messageIndex: 0, + }) + insertTipsetMessage(t, si, tipsetMessage{ + tipsetKeyCid: t2Bz, + height: uint64(epoch), + reverted: false, + messageCid: randomCid(t, rng).Bytes(), + messageIndex: 0, + }) + }, + epoch: 50, + errorContains: "index corruption: height 50 has multiple non-reverted tipsets", + }, + { + name: "tipset key mismatch", + setupFunc: func(_ *testing.T, si *SqliteIndexer, cs *dummyChainStore) { + epoch := abi.ChainEpoch(50) + ts1 := fakeTipSet(t, rng, epoch, []cid.Cid{}) + ts2 := fakeTipSet(t, rng, epoch, []cid.Cid{}) + cs.SetTipsetByHeightAndKey(epoch, ts1.Key(), ts1) + insertTipsetMessage(t, si, tipsetMessage{ + tipsetKeyCid: ts2.Key().Cids()[0].Bytes(), + height: uint64(epoch), + reverted: false, + messageCid: randomCid(t, rng).Bytes(), + messageIndex: 0, + }) + }, + epoch: 50, + errorContains: "index corruption: indexed tipset at height 50 has key", + }, + { + name: "reverted events for executed tipset", + setupFunc: func(_ *testing.T, si *SqliteIndexer, cs *dummyChainStore) { + epoch := abi.ChainEpoch(50) + ts := fakeTipSet(t, rng, epoch, []cid.Cid{}) + cs.SetTipsetByHeightAndKey(epoch, ts.Key(), ts) + keyBz, err := ts.Key().Cid() + require.NoError(t, err) + + messageID := insertTipsetMessage(t, si, tipsetMessage{ + tipsetKeyCid: keyBz.Bytes(), + height: uint64(epoch), + reverted: false, + messageCid: randomCid(t, rng).Bytes(), + messageIndex: 0, + }) + insertEvent(t, si, event{ + messageID: messageID, + eventIndex: 0, + emitterId: 1, + emitterAddr: randomIDAddr(t, rng).Bytes(), + reverted: true, + }) + cs.SetTipsetByHeightAndKey(epoch+1, fakeTipSet(t, rng, epoch+1, ts.Key().Cids()).Key(), fakeTipSet(t, rng, epoch+1, ts.Key().Cids())) + }, + epoch: 50, + errorContains: "index corruption: reverted events found for an executed tipset", + }, + { + name: "message count mismatch", + setupFunc: func(_ *testing.T, si *SqliteIndexer, cs *dummyChainStore) { + epoch := abi.ChainEpoch(50) + ts := fakeTipSet(t, rng, epoch, []cid.Cid{}) + cs.SetTipsetByHeightAndKey(epoch, ts.Key(), ts) + keyBz, err := ts.Key().Cid() + require.NoError(t, err) + + // Insert two messages in the index + insertTipsetMessage(t, si, tipsetMessage{ + tipsetKeyCid: keyBz.Bytes(), + height: uint64(epoch), + reverted: false, + messageCid: randomCid(t, rng).Bytes(), + messageIndex: 0, + }) + insertTipsetMessage(t, si, tipsetMessage{ + tipsetKeyCid: keyBz.Bytes(), + height: uint64(epoch), + reverted: false, + messageCid: randomCid(t, rng).Bytes(), + messageIndex: 1, + }) + + // Setup dummy event loader + si.setExecutedMessagesLoaderFunc(func(ctx context.Context, cs ChainStore, msgTs, rctTs *types.TipSet) ([]executedMessage, error) { + return []executedMessage{{msg: fakeMessage(randomIDAddr(t, rng), randomIDAddr(t, rng))}}, nil + }) + + // Set up the next tipset for event execution + nextTs := fakeTipSet(t, rng, epoch+1, ts.Key().Cids()) + cs.SetTipsetByHeightAndKey(epoch+1, nextTs.Key(), nextTs) + }, + epoch: 50, + errorContains: "failed to verify indexed data at height 50: message 
count mismatch for height 50: chainstore has 1, index has 2", + }, + { + name: "event count mismatch", + setupFunc: func(_ *testing.T, si *SqliteIndexer, cs *dummyChainStore) { + epoch := abi.ChainEpoch(50) + ts := fakeTipSet(t, rng, epoch, []cid.Cid{}) + cs.SetTipsetByHeightAndKey(epoch, ts.Key(), ts) + keyBz, err := ts.Key().Cid() + require.NoError(t, err) + + // Insert one message in the index + messageID := insertTipsetMessage(t, si, tipsetMessage{ + tipsetKeyCid: keyBz.Bytes(), + height: uint64(epoch), + reverted: false, + messageCid: randomCid(t, rng).Bytes(), + messageIndex: 0, + }) + + // Insert two events for the message + insertEvent(t, si, event{ + messageID: messageID, + eventIndex: 0, + emitterId: 2, + emitterAddr: randomIDAddr(t, rng).Bytes(), + reverted: false, + }) + insertEvent(t, si, event{ + messageID: messageID, + eventIndex: 1, + emitterId: 3, + emitterAddr: randomIDAddr(t, rng).Bytes(), + reverted: false, + }) + + // Setup dummy event loader to return only one event + si.setExecutedMessagesLoaderFunc(func(ctx context.Context, cs ChainStore, msgTs, rctTs *types.TipSet) ([]executedMessage, error) { + return []executedMessage{ + { + msg: fakeMessage(randomIDAddr(t, rng), randomIDAddr(t, rng)), + evs: []types.Event{*fakeEvent(1, []kv{{k: "test", v: []byte("value")}}, nil)}, + }, + }, nil + }) + + // Set up the next tipset for event execution + nextTs := fakeTipSet(t, rng, epoch+1, ts.Key().Cids()) + cs.SetTipsetByHeightAndKey(epoch+1, nextTs.Key(), nextTs) + }, + epoch: 50, + errorContains: "failed to verify indexed data at height 50: event count mismatch for height 50: chainstore has 1, index has 2", + }, + { + name: "event entries count mismatch", + setupFunc: func(_ *testing.T, si *SqliteIndexer, cs *dummyChainStore) { + epoch := abi.ChainEpoch(50) + ts := fakeTipSet(t, rng, epoch, []cid.Cid{}) + cs.SetTipsetByHeightAndKey(epoch, ts.Key(), ts) + keyBz, err := ts.Key().Cid() + require.NoError(t, err) + + // Insert one message in the index + messageID := insertTipsetMessage(t, si, tipsetMessage{ + tipsetKeyCid: keyBz.Bytes(), + height: uint64(epoch), + reverted: false, + messageCid: randomCid(t, rng).Bytes(), + messageIndex: 0, + }) + + // Insert one event with two entries for the message + eventID := insertEvent(t, si, event{ + messageID: messageID, + eventIndex: 0, + emitterId: 4, + emitterAddr: randomIDAddr(t, rng).Bytes(), + reverted: false, + }) + insertEventEntry(t, si, eventEntry{ + eventID: eventID, + indexed: true, + flags: []byte{0x01}, + key: "key1", + codec: 1, + value: []byte("value1"), + }) + insertEventEntry(t, si, eventEntry{ + eventID: eventID, + indexed: true, + flags: []byte{0x00}, + key: "key2", + codec: 2, + value: []byte("value2"), + }) + + // Setup dummy event loader to return one event with only one entry + si.setExecutedMessagesLoaderFunc(func(ctx context.Context, cs ChainStore, msgTs, rctTs *types.TipSet) ([]executedMessage, error) { + return []executedMessage{ + { + msg: fakeMessage(randomIDAddr(t, rng), randomIDAddr(t, rng)), + evs: []types.Event{*fakeEvent(1, []kv{{k: "key1", v: []byte("value1")}}, nil)}, + }, + }, nil + }) + + // Set up the next tipset for event execution + nextTs := fakeTipSet(t, rng, epoch+1, ts.Key().Cids()) + cs.SetTipsetByHeightAndKey(epoch+1, nextTs.Key(), nextTs) + }, + epoch: 50, + errorContains: "failed to verify indexed data at height 50: event entries count mismatch for height 50: chainstore has 1, index has 2", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + si, _, cs := 
setupWithHeadIndexed(t, headHeight, rng) + t.Cleanup(func() { _ = si.Close() }) + si.Start() + + tt.setupFunc(t, si, cs) + + _, err := si.ChainValidateIndex(ctx, tt.epoch, false) + require.Error(t, err) + require.Contains(t, err.Error(), tt.errorContains) + }) + } +} diff --git a/chain/index/ddls.go b/chain/index/ddls.go new file mode 100644 index 00000000000..056f45c5921 --- /dev/null +++ b/chain/index/ddls.go @@ -0,0 +1,111 @@ +package index + +import "database/sql" + +const DefaultDbFilename = "chainindex.db" + +var ddls = []string{ + `CREATE TABLE IF NOT EXISTS tipset_message ( + id INTEGER PRIMARY KEY, + tipset_key_cid BLOB NOT NULL, + height INTEGER NOT NULL, + reverted INTEGER NOT NULL, + message_cid BLOB, + message_index INTEGER, + UNIQUE (tipset_key_cid, message_cid) + )`, + + `CREATE TABLE IF NOT EXISTS eth_tx_hash ( + tx_hash TEXT PRIMARY KEY, + message_cid BLOB NOT NULL, + inserted_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP + )`, + + `CREATE TABLE IF NOT EXISTS event ( + id INTEGER PRIMARY KEY, + message_id INTEGER NOT NULL, + event_index INTEGER NOT NULL, + emitter_id INTEGER NOT NULL, + emitter_addr BLOB, + reverted INTEGER NOT NULL, + FOREIGN KEY (message_id) REFERENCES tipset_message(id) ON DELETE CASCADE, + UNIQUE (message_id, event_index) + )`, + + `CREATE TABLE IF NOT EXISTS event_entry ( + event_id INTEGER NOT NULL, + indexed INTEGER NOT NULL, + flags BLOB NOT NULL, + key TEXT NOT NULL, + codec INTEGER, + value BLOB NOT NULL, + FOREIGN KEY (event_id) REFERENCES event(id) ON DELETE CASCADE + )`, + + `CREATE INDEX IF NOT EXISTS insertion_time_index ON eth_tx_hash (inserted_at)`, + + `CREATE INDEX IF NOT EXISTS idx_message_cid ON tipset_message (message_cid)`, + + `CREATE INDEX IF NOT EXISTS idx_tipset_key_cid ON tipset_message (tipset_key_cid)`, + + `CREATE INDEX IF NOT EXISTS idx_event_message_id ON event (message_id)`, + + `CREATE INDEX IF NOT EXISTS idx_height ON tipset_message (height)`, + + `CREATE INDEX IF NOT EXISTS event_entry_event_id ON event_entry(event_id)`, +} + +// preparedStatementMapping returns a map of fields of the preparedStatements struct to the SQL +// query that should be prepared for that field. This is used to prepare all the statements in +// the preparedStatements struct. +func preparedStatementMapping(ps *preparedStatements) map[**sql.Stmt]string { + return map[**sql.Stmt]string{ + &ps.getNonRevertedMsgInfoStmt: "SELECT tipset_key_cid, height FROM tipset_message WHERE message_cid = ? AND reverted = 0 LIMIT 1", + &ps.getMsgCidFromEthHashStmt: "SELECT message_cid FROM eth_tx_hash WHERE tx_hash = ? LIMIT 1", + &ps.insertEthTxHashStmt: "INSERT INTO eth_tx_hash (tx_hash, message_cid) VALUES (?, ?) ON CONFLICT (tx_hash) DO UPDATE SET inserted_at = CURRENT_TIMESTAMP", + &ps.insertTipsetMessageStmt: "INSERT INTO tipset_message (tipset_key_cid, height, reverted, message_cid, message_index) VALUES (?, ?, ?, ?, ?) 
ON CONFLICT (tipset_key_cid, message_cid) DO UPDATE SET reverted = 0", + &ps.hasTipsetStmt: "SELECT EXISTS(SELECT 1 FROM tipset_message WHERE tipset_key_cid = ?)", + &ps.updateTipsetToNonRevertedStmt: "UPDATE tipset_message SET reverted = 0 WHERE tipset_key_cid = ?", + &ps.updateTipsetToRevertedStmt: "UPDATE tipset_message SET reverted = 1 WHERE tipset_key_cid = ?", + &ps.removeTipsetsBeforeHeightStmt: "DELETE FROM tipset_message WHERE height < ?", + &ps.removeEthHashesOlderThanStmt: "DELETE FROM eth_tx_hash WHERE inserted_at < datetime('now', ?)", + &ps.updateTipsetsToRevertedFromHeightStmt: "UPDATE tipset_message SET reverted = 1 WHERE height >= ?", + &ps.updateEventsToRevertedFromHeightStmt: "UPDATE event SET reverted = 1 WHERE message_id IN (SELECT id FROM tipset_message WHERE height >= ?)", + &ps.isIndexEmptyStmt: "SELECT NOT EXISTS(SELECT 1 FROM tipset_message LIMIT 1)", + &ps.getMinNonRevertedHeightStmt: "SELECT MIN(height) FROM tipset_message WHERE reverted = 0", + &ps.hasNonRevertedTipsetStmt: "SELECT EXISTS(SELECT 1 FROM tipset_message WHERE tipset_key_cid = ? AND reverted = 0)", + &ps.updateEventsToRevertedStmt: "UPDATE event SET reverted = 1 WHERE message_id IN (SELECT id FROM tipset_message WHERE tipset_key_cid = ?)", + &ps.updateEventsToNonRevertedStmt: "UPDATE event SET reverted = 0 WHERE message_id IN (SELECT id FROM tipset_message WHERE tipset_key_cid = ?)", + &ps.getMsgIdForMsgCidAndTipsetStmt: "SELECT id FROM tipset_message WHERE tipset_key_cid = ? AND message_cid = ? AND reverted = 0 LIMIT 1", + &ps.insertEventStmt: "INSERT INTO event (message_id, event_index, emitter_id, emitter_addr, reverted) VALUES (?, ?, ?, ?, ?) ON CONFLICT (message_id, event_index) DO UPDATE SET reverted = 0", + &ps.insertEventEntryStmt: "INSERT INTO event_entry (event_id, indexed, flags, key, codec, value) VALUES (?, ?, ?, ?, ?, ?)", + &ps.hasNullRoundAtHeightStmt: "SELECT NOT EXISTS(SELECT 1 FROM tipset_message WHERE height = ?)", + &ps.getNonRevertedTipsetAtHeightStmt: "SELECT tipset_key_cid FROM tipset_message WHERE height = ? AND reverted = 0 LIMIT 1", + &ps.countTipsetsAtHeightStmt: "SELECT COUNT(CASE WHEN reverted = 1 THEN 1 END) AS reverted_count, COUNT(CASE WHEN reverted = 0 THEN 1 END) AS non_reverted_count FROM (SELECT tipset_key_cid, MAX(reverted) AS reverted FROM tipset_message WHERE height = ? GROUP BY tipset_key_cid) AS unique_tipsets", + &ps.getNonRevertedTipsetMessageCountStmt: "SELECT COUNT(*) FROM tipset_message WHERE tipset_key_cid = ? AND reverted = 0 AND message_cid IS NOT NULL", + &ps.getNonRevertedTipsetEventCountStmt: "SELECT COUNT(*) FROM event WHERE reverted = 0 AND message_id IN (SELECT id FROM tipset_message WHERE tipset_key_cid = ? AND reverted = 0)", + &ps.hasRevertedEventsInTipsetStmt: "SELECT EXISTS(SELECT 1 FROM event WHERE reverted = 1 AND message_id IN (SELECT id FROM tipset_message WHERE tipset_key_cid = ?))", + &ps.getNonRevertedTipsetEventEntriesCountStmt: "SELECT COUNT(ee.event_id) AS entry_count FROM event_entry ee JOIN event e ON ee.event_id = e.id JOIN tipset_message tm ON e.message_id = tm.id WHERE tm.tipset_key_cid = ? AND tm.reverted = 0", + &ps.removeRevertedTipsetsBeforeHeightStmt: "DELETE FROM tipset_message WHERE reverted = 1 AND height < ?", + &ps.getEventEntriesStmt: "SELECT flags, key, codec, value FROM event_entry WHERE event_id=? ORDER BY _rowid_ ASC", + &ps.getEventIdAndEmitterIdStmt: "SELECT e.id, e.emitter_id FROM event e JOIN tipset_message tm ON e.message_id = tm.id WHERE tm.tipset_key_cid = ? AND tm.message_cid = ?
ORDER BY e.event_index ASC", + } +} diff --git a/chain/index/ddls_test.go b/chain/index/ddls_test.go new file mode 100644 index 00000000000..e71db1a8cdf --- /dev/null +++ b/chain/index/ddls_test.go @@ -0,0 +1,866 @@ +package index + +import ( + "database/sql" + "testing" + + "github.com/stretchr/testify/require" +) + +const ( + tipsetKeyCid1 = "test_tipset_key" + tipsetKeyCid2 = "test_tipset_key_2" + messageCid1 = "test_message_cid" + messageCid2 = "test_message_cid_2" + emitterAddr1 = "test_emitter_addr" +) + +func TestHasRevertedEventsInTipsetStmt(t *testing.T) { + s, err := NewSqliteIndexer(":memory:", nil, 0, false, 0) + require.NoError(t, err) + + // running on empty DB should return false + verifyHasRevertedEventsInTipsetStmt(t, s, []byte(tipsetKeyCid1), false) + + // Insert tipset with a reverted event + ts := tipsetMessage{ + tipsetKeyCid: []byte(tipsetKeyCid1), + height: 1, + reverted: false, + messageCid: []byte(messageCid1), + messageIndex: 0, + } + messageID := insertTipsetMessage(t, s, ts) + + // this event will be un-reverted later + insertEvent(t, s, event{ + messageID: messageID, + eventIndex: 0, + emitterId: 1, + emitterAddr: []byte(emitterAddr1), + reverted: true, + }) + + // this event should not be un-reverted + ts = tipsetMessage{ + tipsetKeyCid: []byte(tipsetKeyCid2), + height: 1, + reverted: false, + messageCid: []byte(messageCid2), + messageIndex: 0, + } + messageID2 := insertTipsetMessage(t, s, ts) + insertEvent(t, s, event{ + messageID: messageID2, + eventIndex: 0, + emitterId: 2, + emitterAddr: []byte(emitterAddr1), + reverted: true, + }) + + // Verify `hasRevertedEventsInTipset` returns true + verifyHasRevertedEventsInTipsetStmt(t, s, []byte(tipsetKeyCid1), true) + verifyHasRevertedEventsInTipsetStmt(t, s, []byte(tipsetKeyCid2), true) + + // change event to non-reverted + updateEventsToNonReverted(t, s, []byte(tipsetKeyCid1)) + + // Verify `hasRevertedEventsInTipset` returns false + verifyHasRevertedEventsInTipsetStmt(t, s, []byte(tipsetKeyCid1), false) + verifyHasRevertedEventsInTipsetStmt(t, s, []byte(tipsetKeyCid2), true) +} + +func TestGetNonRevertedTipsetCountStmts(t *testing.T) { + s, err := NewSqliteIndexer(":memory:", nil, 0, false, 0) + require.NoError(t, err) + + // running on empty DB should return 0 + verifyNonRevertedEventEntriesCount(t, s, []byte(tipsetKeyCid1), 0) + verifyNonRevertedEventCount(t, s, []byte(tipsetKeyCid1), 0) + verifyNonRevertedMessageCount(t, s, []byte(tipsetKeyCid1), 0) + + // Insert non-reverted tipset + messageID := insertTipsetMessage(t, s, tipsetMessage{ + tipsetKeyCid: []byte(tipsetKeyCid1), + height: 1, + reverted: false, + messageCid: []byte(messageCid1), + messageIndex: 0, + }) + + // Insert event + eventID1 := insertEvent(t, s, event{ + messageID: messageID, + eventIndex: 0, + emitterId: 1, + emitterAddr: []byte(emitterAddr1), + reverted: false, + }) + eventID2 := insertEvent(t, s, event{ + messageID: messageID, + eventIndex: 1, + emitterId: 2, + emitterAddr: []byte(emitterAddr1), + reverted: false, + }) + + // Insert event entry + insertEventEntry(t, s, eventEntry{ + eventID: eventID1, + indexed: true, + flags: []byte("test_flags"), + key: "test_key", + codec: 1, + value: []byte("test_value"), + }) + insertEventEntry(t, s, eventEntry{ + eventID: eventID2, + indexed: true, + flags: []byte("test_flags2"), + key: "test_key2", + codec: 2, + value: []byte("test_value2"), + }) + + // verify 2 event entries + verifyNonRevertedEventEntriesCount(t, s, []byte(tipsetKeyCid1), 2) + + // Verify event count + 
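// both events inserted above are non-reverted, so the expected count is 2
+ 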
verifyNonRevertedEventCount(t, s, []byte(tipsetKeyCid1), 2) + + // verify message count is 1 + verifyNonRevertedMessageCount(t, s, []byte(tipsetKeyCid1), 1) + + // mark tipset as reverted + revertTipset(t, s, []byte(tipsetKeyCid1)) + + // Verify `getNonRevertedTipsetEventEntriesCountStmt` returns 0 + verifyNonRevertedEventEntriesCount(t, s, []byte(tipsetKeyCid1), 0) + + // verify event count is 0 + verifyNonRevertedEventCount(t, s, []byte(tipsetKeyCid1), 0) + + // verify message count is 0 + verifyNonRevertedMessageCount(t, s, []byte(tipsetKeyCid1), 0) +} + +func TestGetEventIdAndEmitterIdStmtAndGetEventEntriesStmt(t *testing.T) { + s, err := NewSqliteIndexer(":memory:", nil, 0, false, 0) + require.NoError(t, err) + + // Insert a tipset message + tsKeyCid := []byte("test_tipset_key") + msgCid := []byte("test_message_cid") + messageID := insertTipsetMessage(t, s, tipsetMessage{ + tipsetKeyCid: tsKeyCid, + height: 1, + reverted: false, + messageCid: msgCid, + messageIndex: 0, + }) + + // Insert events + event1ID := insertEvent(t, s, event{ + messageID: messageID, + eventIndex: 0, + emitterId: 1, + emitterAddr: []byte("emitter_addr_1"), + reverted: false, + }) + event2ID := insertEvent(t, s, event{ + messageID: messageID, + eventIndex: 1, + emitterId: 2, + emitterAddr: []byte("emitter_addr_2"), + reverted: false, + }) + + // Insert event entries + insertEventEntry(t, s, eventEntry{ + eventID: event1ID, + indexed: true, + flags: []byte{0x01}, + key: "key1", + codec: 1, + value: []byte("value1"), + }) + insertEventEntry(t, s, eventEntry{ + eventID: event1ID, + indexed: false, + flags: []byte{0x00}, + key: "key2", + codec: 2, + value: []byte("value2"), + }) + insertEventEntry(t, s, eventEntry{ + eventID: event2ID, + indexed: true, + flags: []byte{0x01}, + key: "key3", + codec: 3, + value: []byte("value3"), + }) + + // Test getEventIdAndEmitterIdStmt + rows, err := s.stmts.getEventIdAndEmitterIdStmt.Query(tsKeyCid, msgCid) + require.NoError(t, err) + defer func() { + _ = rows.Close() + }() + var eventIDs []int64 + var emitterIDs []uint64 + for rows.Next() { + var eventID int64 + var emitterID uint64 + err := rows.Scan(&eventID, &emitterID) + require.NoError(t, err) + eventIDs = append(eventIDs, eventID) + emitterIDs = append(emitterIDs, emitterID) + } + require.NoError(t, rows.Err()) + require.Equal(t, []int64{event1ID, event2ID}, eventIDs) + require.Equal(t, []uint64{1, 2}, emitterIDs) + + // Test getEventEntriesStmt for event1 + rows, err = s.stmts.getEventEntriesStmt.Query(event1ID) + require.NoError(t, err) + defer func() { + _ = rows.Close() + }() + + var entries []eventEntry + for rows.Next() { + var entry eventEntry + err := rows.Scan(&entry.flags, &entry.key, &entry.codec, &entry.value) + require.NoError(t, err) + entries = append(entries, entry) + } + require.NoError(t, rows.Err()) + require.Len(t, entries, 2) + require.Equal(t, []byte{0x01}, entries[0].flags) + require.Equal(t, "key1", entries[0].key) + require.Equal(t, 1, entries[0].codec) + require.Equal(t, []byte("value1"), entries[0].value) + require.Equal(t, []byte{0x00}, entries[1].flags) + require.Equal(t, "key2", entries[1].key) + require.Equal(t, 2, entries[1].codec) + require.Equal(t, []byte("value2"), entries[1].value) + + // Test getEventEntriesStmt for event2 + rows, err = s.stmts.getEventEntriesStmt.Query(event2ID) + require.NoError(t, err) + defer func() { + _ = rows.Close() + }() + + entries = nil + for rows.Next() { + var entry eventEntry + err := rows.Scan(&entry.flags, &entry.key, &entry.codec, &entry.value) + 
require.NoError(t, err) + entries = append(entries, entry) + } + require.NoError(t, rows.Err()) + require.Len(t, entries, 1) + require.Equal(t, []byte{0x01}, entries[0].flags) + require.Equal(t, "key3", entries[0].key) + require.Equal(t, 3, entries[0].codec) + require.Equal(t, []byte("value3"), entries[0].value) +} +func TestUpdateTipsetToNonRevertedStmt(t *testing.T) { + s, err := NewSqliteIndexer(":memory:", nil, 0, false, 0) + require.NoError(t, err) + + // insert a reverted tipset + ts := tipsetMessage{ + tipsetKeyCid: []byte(tipsetKeyCid1), + height: 1, + reverted: true, + messageCid: []byte(messageCid1), + messageIndex: 0, + } + + // Insert tipset + messageId := insertTipsetMessage(t, s, ts) + + res, err := s.stmts.updateTipsetToNonRevertedStmt.Exec([]byte(tipsetKeyCid1)) + require.NoError(t, err) + + rowsAffected, err := res.RowsAffected() + require.NoError(t, err) + require.Equal(t, int64(1), rowsAffected) + + // verify the tipset is not reverted + ts.reverted = false + verifyTipsetMessage(t, s, messageId, ts) +} + +func TestHasNullRoundAtHeightStmt(t *testing.T) { + s, err := NewSqliteIndexer(":memory:", nil, 0, false, 0) + require.NoError(t, err) + + // running on empty DB should return true + verifyHasNullRoundAtHeightStmt(t, s, 1, true) + verifyHasNullRoundAtHeightStmt(t, s, 0, true) + + // insert tipset + insertTipsetMessage(t, s, tipsetMessage{ + tipsetKeyCid: []byte(tipsetKeyCid1), + height: 1, + reverted: false, + messageCid: []byte(messageCid1), + messageIndex: 0, + }) + + // verify not a null round + verifyHasNullRoundAtHeightStmt(t, s, 1, false) +} + +func TestHasTipsetStmt(t *testing.T) { + s, err := NewSqliteIndexer(":memory:", nil, 0, false, 0) + require.NoError(t, err) + + // running on empty DB should return false + verifyHasTipsetStmt(t, s, []byte(tipsetKeyCid1), false) + + // insert tipset + insertTipsetMessage(t, s, tipsetMessage{ + tipsetKeyCid: []byte(tipsetKeyCid1), + height: 1, + reverted: false, + messageCid: []byte(messageCid1), + messageIndex: 0, + }) + + // verify tipset exists + verifyHasTipsetStmt(t, s, []byte(tipsetKeyCid1), true) + + // verify non-existent tipset + verifyHasTipsetStmt(t, s, []byte("non_existent_tipset_key"), false) +} + +func TestUpdateEventsToRevertedStmt(t *testing.T) { + s, err := NewSqliteIndexer(":memory:", nil, 0, false, 0) + require.NoError(t, err) + + // Insert a non-reverted tipset + messageID := insertTipsetMessage(t, s, tipsetMessage{ + tipsetKeyCid: []byte(tipsetKeyCid1), + height: 1, + reverted: false, + messageCid: []byte(messageCid1), + messageIndex: 0, + }) + + // Insert non-reverted events + insertEvent(t, s, event{ + messageID: messageID, + eventIndex: 0, + emitterId: 1, + emitterAddr: []byte(emitterAddr1), + reverted: false, + }) + insertEvent(t, s, event{ + messageID: messageID, + eventIndex: 1, + emitterId: 2, + emitterAddr: []byte(emitterAddr1), + reverted: false, + }) + + // Verify events are not reverted + var count int + err = s.db.QueryRow("SELECT COUNT(*) FROM event WHERE reverted = 0 AND message_id = ?", messageID).Scan(&count) + require.NoError(t, err) + require.Equal(t, 2, count) + + // Execute updateEventsToRevertedStmt + _, err = s.stmts.updateEventsToRevertedStmt.Exec([]byte(tipsetKeyCid1)) + require.NoError(t, err) + + // Verify events are now reverted + err = s.db.QueryRow("SELECT COUNT(*) FROM event WHERE reverted = 1 AND message_id = ?", messageID).Scan(&count) + require.NoError(t, err) + require.Equal(t, 2, count) + + // Verify no non-reverted events remain + err = s.db.QueryRow("SELECT COUNT(*) 
FROM event WHERE reverted = 0 AND message_id = ?", messageID).Scan(&count) + require.NoError(t, err) + require.Equal(t, 0, count) +} + +func TestCountTipsetsAtHeightStmt(t *testing.T) { + s, err := NewSqliteIndexer(":memory:", nil, 0, false, 0) + require.NoError(t, err) + + // Test empty DB + verifyCountTipsetsAtHeightStmt(t, s, 1, 0, 0) + + // Test 0,1 case + insertTipsetMessage(t, s, tipsetMessage{ + tipsetKeyCid: []byte("test_tipset_key_1"), + height: 1, + reverted: false, + messageCid: []byte("test_message_cid_1"), + messageIndex: 0, + }) + verifyCountTipsetsAtHeightStmt(t, s, 1, 0, 1) + + // Test 0,2 case + insertTipsetMessage(t, s, tipsetMessage{ + tipsetKeyCid: []byte("test_tipset_key_2"), + height: 1, + reverted: false, + messageCid: []byte("test_message_cid_2"), + messageIndex: 0, + }) + verifyCountTipsetsAtHeightStmt(t, s, 1, 0, 2) + + // Test 1,2 case + insertTipsetMessage(t, s, tipsetMessage{ + tipsetKeyCid: []byte("test_tipset_key_3"), + height: 1, + reverted: true, + messageCid: []byte("test_message_cid_3"), + messageIndex: 0, + }) + verifyCountTipsetsAtHeightStmt(t, s, 1, 1, 2) + + // Test 2,2 case + insertTipsetMessage(t, s, tipsetMessage{ + tipsetKeyCid: []byte("test_tipset_key_4"), + height: 1, + reverted: true, + messageCid: []byte("test_message_cid_4"), + messageIndex: 0, + }) + verifyCountTipsetsAtHeightStmt(t, s, 1, 2, 2) +} + +func TestNonRevertedTipsetAtHeightStmt(t *testing.T) { + s, err := NewSqliteIndexer(":memory:", nil, 0, false, 0) + require.NoError(t, err) + + // Test empty DB + var et []byte + err = s.stmts.getNonRevertedTipsetAtHeightStmt.QueryRow(10).Scan(&et) + require.Equal(t, sql.ErrNoRows, err) + + // Insert non-reverted tipset + insertTipsetMessage(t, s, tipsetMessage{ + tipsetKeyCid: []byte("test_tipset_key_1"), + height: 10, + reverted: false, + messageCid: []byte("test_message_cid_1"), + messageIndex: 0, + }) + + // Insert reverted tipset at same height + insertTipsetMessage(t, s, tipsetMessage{ + tipsetKeyCid: []byte("test_tipset_key_2"), + height: 10, + reverted: true, + messageCid: []byte("test_message_cid_2"), + messageIndex: 0, + }) + + // Verify getNonRevertedTipsetAtHeightStmt returns the non-reverted tipset + var tipsetKeyCid []byte + err = s.stmts.getNonRevertedTipsetAtHeightStmt.QueryRow(10).Scan(&tipsetKeyCid) + require.NoError(t, err) + require.Equal(t, []byte("test_tipset_key_1"), tipsetKeyCid) + + // Insert another non-reverted tipset at a different height + insertTipsetMessage(t, s, tipsetMessage{ + tipsetKeyCid: []byte("test_tipset_key_3"), + height: 20, + reverted: false, + messageCid: []byte("test_message_cid_3"), + messageIndex: 0, + }) + + // Verify getNonRevertedTipsetAtHeightStmt returns the correct tipset for the new height + err = s.stmts.getNonRevertedTipsetAtHeightStmt.QueryRow(20).Scan(&tipsetKeyCid) + require.NoError(t, err) + require.Equal(t, []byte("test_tipset_key_3"), tipsetKeyCid) + + // Test with a height that has no tipset + err = s.stmts.getNonRevertedTipsetAtHeightStmt.QueryRow(30).Scan(&tipsetKeyCid) + require.Equal(t, sql.ErrNoRows, err) + + // Revert all tipsets at height 10 + _, err = s.db.Exec("UPDATE tipset_message SET reverted = 1 WHERE height = 10") + require.NoError(t, err) + + // Verify getNonRevertedTipsetAtHeightStmt returns no rows for the reverted height + err = s.stmts.getNonRevertedTipsetAtHeightStmt.QueryRow(10).Scan(&tipsetKeyCid) + require.Equal(t, sql.ErrNoRows, err) +} + +func TestMinNonRevertedHeightStmt(t *testing.T) { + s, err := NewSqliteIndexer(":memory:", nil, 0, false, 0) + 
require.NoError(t, err) + + // Test empty DB + var minHeight sql.NullInt64 + err = s.stmts.getMinNonRevertedHeightStmt.QueryRow().Scan(&minHeight) + require.NoError(t, err) + require.False(t, minHeight.Valid) + + // Insert non-reverted tipsets + insertTipsetMessage(t, s, tipsetMessage{ + tipsetKeyCid: []byte("test_tipset_key_1"), + height: 10, + reverted: false, + messageCid: []byte("test_message_cid_1"), + messageIndex: 0, + }) + insertTipsetMessage(t, s, tipsetMessage{ + tipsetKeyCid: []byte("test_tipset_key_2"), + height: 20, + reverted: false, + messageCid: []byte("test_message_cid_2"), + messageIndex: 0, + }) + + // Verify minimum non-reverted height + verifyMinNonRevertedHeightStmt(t, s, 10) + + // Insert reverted tipset with lower height + insertTipsetMessage(t, s, tipsetMessage{ + tipsetKeyCid: []byte("test_tipset_key_4"), + height: 5, + reverted: true, + messageCid: []byte("test_message_cid_4"), + messageIndex: 0, + }) + + // Verify minimum non-reverted height hasn't changed + verifyMinNonRevertedHeightStmt(t, s, 10) + + // Revert all tipsets + _, err = s.db.Exec("UPDATE tipset_message SET reverted = 1") + require.NoError(t, err) + + // Verify no minimum non-reverted height + err = s.stmts.getMinNonRevertedHeightStmt.QueryRow().Scan(&minHeight) + require.NoError(t, err) + require.False(t, minHeight.Valid) +} + +func verifyMinNonRevertedHeightStmt(t *testing.T, s *SqliteIndexer, expectedMinHeight int64) { + var minHeight sql.NullInt64 + err := s.stmts.getMinNonRevertedHeightStmt.QueryRow().Scan(&minHeight) + require.NoError(t, err) + require.True(t, minHeight.Valid) + require.Equal(t, expectedMinHeight, minHeight.Int64) +} + +func TestGetMsgIdForMsgCidAndTipsetStmt(t *testing.T) { + s, err := NewSqliteIndexer(":memory:", nil, 0, false, 0) + require.NoError(t, err) + + // Insert a non-reverted tipset + tipsetKeyCid := []byte(tipsetKeyCid1) + messageCid := []byte(messageCid1) + insertTipsetMessage(t, s, tipsetMessage{ + tipsetKeyCid: tipsetKeyCid, + height: 1, + reverted: false, + messageCid: messageCid, + messageIndex: 0, + }) + + // Verify getMsgIdForMsgCidAndTipset returns the correct message ID + var messageID int64 + err = s.stmts.getMsgIdForMsgCidAndTipsetStmt.QueryRow(tipsetKeyCid, messageCid).Scan(&messageID) + require.NoError(t, err) + require.Equal(t, int64(1), messageID) + + // Test with non-existent message CID + nonExistentMessageCid := []byte("non_existent_message_cid") + err = s.stmts.getMsgIdForMsgCidAndTipsetStmt.QueryRow(tipsetKeyCid, nonExistentMessageCid).Scan(&messageID) + require.Equal(t, sql.ErrNoRows, err) + + // Test with non-existent tipset key + nonExistentTipsetKeyCid := []byte("non_existent_tipset_key") + err = s.stmts.getMsgIdForMsgCidAndTipsetStmt.QueryRow(nonExistentTipsetKeyCid, messageCid).Scan(&messageID) + require.Equal(t, sql.ErrNoRows, err) + + // Insert a reverted tipset + revertedTipsetKeyCid := []byte("reverted_tipset_key") + insertTipsetMessage(t, s, tipsetMessage{ + tipsetKeyCid: revertedTipsetKeyCid, + height: 2, + reverted: true, + messageCid: messageCid, + messageIndex: 0, + }) + + // Verify getMsgIdForMsgCidAndTipset doesn't return the message ID for a reverted tipset + err = s.stmts.getMsgIdForMsgCidAndTipsetStmt.QueryRow(revertedTipsetKeyCid, messageCid).Scan(&messageID) + require.Equal(t, sql.ErrNoRows, err) +} + +func TestForeignKeyCascadeDelete(t *testing.T) { + s, err := NewSqliteIndexer(":memory:", nil, 0, false, 0) + require.NoError(t, err) + + // Insert a tipset + messageID := insertTipsetMessage(t, s, tipsetMessage{ + 
tipsetKeyCid: []byte("test_tipset_key"), + height: 1, + reverted: false, + messageCid: []byte(messageCid1), + messageIndex: 0, + }) + + // Insert an event for the tipset + eventID := insertEvent(t, s, event{ + messageID: messageID, + eventIndex: 0, + emitterId: 2, + emitterAddr: []byte("test_emitter_addr"), + reverted: false, + }) + + // Insert an event entry for the event + insertEventEntry(t, s, eventEntry{ + eventID: eventID, + indexed: true, + flags: []byte("test_flags"), + key: "test_key", + codec: 1, + value: []byte("test_value"), + }) + + // Delete the tipset + res, err := s.db.Exec("DELETE FROM tipset_message WHERE tipset_key_cid = ?", []byte("test_tipset_key")) + require.NoError(t, err) + rowsAffected, err := res.RowsAffected() + require.NoError(t, err) + require.Equal(t, int64(1), rowsAffected) + + // verify event is deleted + verifyEventAbsent(t, s, eventID) + verifyEventEntryAbsent(t, s, eventID) +} + +func TestInsertTipsetMessage(t *testing.T) { + s, err := NewSqliteIndexer(":memory:", nil, 0, false, 0) + require.NoError(t, err) + + ts := tipsetMessage{ + tipsetKeyCid: []byte("test_tipset_key"), + height: 1, + reverted: false, + messageCid: []byte(messageCid1), + messageIndex: 0, + } + + // Insert a tipset + messageID := insertTipsetMessage(t, s, ts) + + // revert the tipset + revertTipset(t, s, []byte("test_tipset_key")) + ts.reverted = true + verifyTipsetMessage(t, s, messageID, ts) + + // inserting with the same (tipset, message) should overwrite the reverted flag + res, err := s.stmts.insertTipsetMessageStmt.Exec(ts.tipsetKeyCid, ts.height, true, ts.messageCid, ts.messageIndex) + require.NoError(t, err) + + rowsAffected, err := res.RowsAffected() + require.NoError(t, err) + require.Equal(t, int64(1), rowsAffected) + + ts.reverted = false + verifyTipsetMessage(t, s, messageID, ts) +} + +type tipsetMessage struct { + tipsetKeyCid []byte + height uint64 + reverted bool + messageCid []byte + messageIndex int64 +} + +type event struct { + eventIndex uint64 + emitterId uint64 + emitterAddr []byte + reverted bool + messageID int64 +} + +type eventEntry struct { + eventID int64 + indexed bool + flags []byte + key string + codec int + value []byte +} + +func updateEventsToNonReverted(t *testing.T, s *SqliteIndexer, tsKeyCid []byte) { + res, err := s.stmts.updateEventsToNonRevertedStmt.Exec(tsKeyCid) + require.NoError(t, err) + + rowsAffected, err := res.RowsAffected() + require.NoError(t, err) + require.Equal(t, int64(1), rowsAffected) + + // read all events for this tipset and verify they are not reverted using a COUNT query + var count int + err = s.db.QueryRow("SELECT COUNT(*) FROM event e JOIN tipset_message tm ON e.message_id = tm.id WHERE tm.tipset_key_cid = ? 
AND e.reverted = 1", tsKeyCid).Scan(&count) + require.NoError(t, err) + require.Equal(t, 0, count, "Expected no reverted events for this tipset") +} + +func revertTipset(t *testing.T, s *SqliteIndexer, tipsetKeyCid []byte) { + res, err := s.stmts.updateTipsetToRevertedStmt.Exec(tipsetKeyCid) + require.NoError(t, err) + + rowsAffected, err := res.RowsAffected() + require.NoError(t, err) + require.Equal(t, int64(1), rowsAffected) + + var reverted bool + err = s.db.QueryRow("SELECT reverted FROM tipset_message WHERE tipset_key_cid = ?", tipsetKeyCid).Scan(&reverted) + require.NoError(t, err) + require.True(t, reverted) +} + +func verifyTipsetMessage(t *testing.T, s *SqliteIndexer, messageID int64, expectedTipsetMessage tipsetMessage) { + var tipsetKeyCid []byte + var height uint64 + var reverted bool + var messageCid []byte + var messageIndex int64 + err := s.db.QueryRow("SELECT tipset_key_cid, height, reverted, message_cid, message_index FROM tipset_message WHERE id = ?", messageID).Scan(&tipsetKeyCid, &height, &reverted, &messageCid, &messageIndex) + require.NoError(t, err) + require.Equal(t, expectedTipsetMessage.tipsetKeyCid, tipsetKeyCid) + require.Equal(t, expectedTipsetMessage.height, height) + require.Equal(t, expectedTipsetMessage.reverted, reverted) + require.Equal(t, expectedTipsetMessage.messageCid, messageCid) + require.Equal(t, expectedTipsetMessage.messageIndex, messageIndex) +} + +func verifyEventEntryAbsent(t *testing.T, s *SqliteIndexer, eventID int64) { + err := s.db.QueryRow("SELECT event_id FROM event_entry WHERE event_id = ?", eventID).Scan(&eventID) + require.Equal(t, sql.ErrNoRows, err) +} + +func verifyEventAbsent(t *testing.T, s *SqliteIndexer, eventID int64) { + var eventIndex uint64 + err := s.db.QueryRow("SELECT event_index FROM event WHERE id = ?", eventID).Scan(&eventIndex) + require.Equal(t, sql.ErrNoRows, err) +} + +func verifyEvent(t *testing.T, s *SqliteIndexer, eventID int64, expectedEvent event) { + var eventIndex uint64 + var emitterAddr []byte + var reverted bool + var messageID int64 + err := s.db.QueryRow("SELECT event_index, emitter_addr, reverted, message_id FROM event WHERE id = ?", eventID).Scan(&eventIndex, &emitterAddr, &reverted, &messageID) + require.NoError(t, err) + require.Equal(t, expectedEvent.eventIndex, eventIndex) + require.Equal(t, expectedEvent.emitterAddr, emitterAddr) + require.Equal(t, expectedEvent.reverted, reverted) + require.Equal(t, expectedEvent.messageID, messageID) +} + +func verifyCountTipsetsAtHeightStmt(t *testing.T, s *SqliteIndexer, height uint64, expectedRevertedCount, expectedNonRevertedCount int) { + var revertedCount, nonRevertedCount int + err := s.stmts.countTipsetsAtHeightStmt.QueryRow(height).Scan(&revertedCount, &nonRevertedCount) + require.NoError(t, err) + require.Equal(t, expectedRevertedCount, revertedCount) + require.Equal(t, expectedNonRevertedCount, nonRevertedCount) +} + +func verifyHasTipsetStmt(t *testing.T, s *SqliteIndexer, tipsetKeyCid []byte, expectedHas bool) { + var has bool + err := s.stmts.hasTipsetStmt.QueryRow(tipsetKeyCid).Scan(&has) + require.NoError(t, err) + require.Equal(t, expectedHas, has) +} + +func verifyHasRevertedEventsInTipsetStmt(t *testing.T, s *SqliteIndexer, tipsetKeyCid []byte, expectedHas bool) { + var hasRevertedEventsInTipset bool + err := s.stmts.hasRevertedEventsInTipsetStmt.QueryRow(tipsetKeyCid).Scan(&hasRevertedEventsInTipset) + require.NoError(t, err) + require.Equal(t, expectedHas, hasRevertedEventsInTipset) +} + +func verifyHasNullRoundAtHeightStmt(t *testing.T, 
s *SqliteIndexer, height uint64, expectedHasNullRound bool) { + var hasNullRound bool + err := s.stmts.hasNullRoundAtHeightStmt.QueryRow(height).Scan(&hasNullRound) + require.NoError(t, err) + require.Equal(t, expectedHasNullRound, hasNullRound) +} + +func verifyNonRevertedMessageCount(t *testing.T, s *SqliteIndexer, tipsetKeyCid []byte, expectedCount int) { + var count int + err := s.stmts.getNonRevertedTipsetMessageCountStmt.QueryRow(tipsetKeyCid).Scan(&count) + require.NoError(t, err) + require.Equal(t, expectedCount, count) +} + +func verifyNonRevertedEventCount(t *testing.T, s *SqliteIndexer, tipsetKeyCid []byte, expectedCount int) { + var count int + err := s.stmts.getNonRevertedTipsetEventCountStmt.QueryRow(tipsetKeyCid).Scan(&count) + require.NoError(t, err) + require.Equal(t, expectedCount, count) +} + +func verifyNonRevertedEventEntriesCount(t *testing.T, s *SqliteIndexer, tipsetKeyCid []byte, expectedCount int) { + var count int + err := s.stmts.getNonRevertedTipsetEventEntriesCountStmt.QueryRow(tipsetKeyCid).Scan(&count) + require.NoError(t, err) + require.Equal(t, expectedCount, count) +} + +func insertTipsetMessage(t *testing.T, s *SqliteIndexer, ts tipsetMessage) int64 { + res, err := s.stmts.insertTipsetMessageStmt.Exec(ts.tipsetKeyCid, ts.height, ts.reverted, ts.messageCid, ts.messageIndex) + require.NoError(t, err) + + rowsAffected, err := res.RowsAffected() + require.NoError(t, err) + require.Equal(t, int64(1), rowsAffected) + + messageID, err := res.LastInsertId() + require.NoError(t, err) + require.NotEqual(t, int64(0), messageID) + + // read back the message to verify it was inserted correctly + verifyTipsetMessage(t, s, messageID, ts) + + return messageID +} + +func insertEvent(t *testing.T, s *SqliteIndexer, e event) int64 { + res, err := s.stmts.insertEventStmt.Exec(e.messageID, e.eventIndex, e.emitterId, e.emitterAddr, e.reverted) + require.NoError(t, err) + + rowsAffected, err := res.RowsAffected() + require.NoError(t, err) + require.Equal(t, int64(1), rowsAffected) + + eventID, err := res.LastInsertId() + require.NoError(t, err) + require.NotEqual(t, int64(0), eventID) + + verifyEvent(t, s, eventID, e) + + return eventID +} + +func insertEventEntry(t *testing.T, s *SqliteIndexer, ee eventEntry) { + res, err := s.stmts.insertEventEntryStmt.Exec(ee.eventID, ee.indexed, ee.flags, ee.key, ee.codec, ee.value) + require.NoError(t, err) + + rowsAffected, err := res.RowsAffected() + require.NoError(t, err) + require.Equal(t, int64(1), rowsAffected) +} diff --git a/chain/index/events.go b/chain/index/events.go new file mode 100644 index 00000000000..0a1836f7b96 --- /dev/null +++ b/chain/index/events.go @@ -0,0 +1,602 @@ +package index + +import ( + "bytes" + "context" + "database/sql" + "errors" + "fmt" + "math" + "sort" + "strings" + + "github.com/ipfs/go-cid" + ipld "github.com/ipfs/go-ipld-format" + cbg "github.com/whyrusleeping/cbor-gen" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + amt4 "github.com/filecoin-project/go-amt-ipld/v4" + "github.com/filecoin-project/go-state-types/abi" + blockadt "github.com/filecoin-project/specs-actors/actors/util/adt" + + "github.com/filecoin-project/lotus/chain/types" +) + +const maxLookBackForWait = 120 // one hour of tipsets + +type executedMessage struct { + msg types.ChainMsg + rct types.MessageReceipt + // events extracted from receipt + evs []types.Event +} + +// events are indexed against their inclusion/message tipset when we get the corresponding execution tipset +func (si *SqliteIndexer) 
indexEvents(ctx context.Context, tx *sql.Tx, msgTs *types.TipSet, executionTs *types.TipSet) error {
+	if si.actorToDelegatedAddresFunc == nil {
+		return xerrors.Errorf("indexer cannot index events without an address resolver")
+	}
+	if si.executedMessagesLoaderFunc == nil {
+		return xerrors.Errorf("indexer cannot index events without an event loader")
+	}
+
+	// check if we have an event indexed for any message in the `msgTs` tipset -> if so, there's nothing to do here
+	// this makes event inserts idempotent
+	msgTsKeyCidBytes, err := toTipsetKeyCidBytes(msgTs)
+	if err != nil {
+		return xerrors.Errorf("failed to get tipset key cid: %w", err)
+	}
+
+	// if we've already indexed events for this tipset, mark them as unreverted and return
+	res, err := tx.Stmt(si.stmts.updateEventsToNonRevertedStmt).ExecContext(ctx, msgTsKeyCidBytes)
+	if err != nil {
+		return xerrors.Errorf("failed to unrevert events for tipset: %w", err)
+	}
+	rows, err := res.RowsAffected()
+	if err != nil {
+		return xerrors.Errorf("failed to get rows affected by unreverting events for tipset: %w", err)
+	}
+	if rows > 0 {
+		log.Debugf("unreverted %d events for tipset: %s", rows, msgTs.Key())
+		return nil
+	}
+
+	if !si.cs.IsStoringEvents() {
+		return nil
+	}
+
+	ems, err := si.executedMessagesLoaderFunc(ctx, si.cs, msgTs, executionTs)
+	if err != nil {
+		return xerrors.Errorf("failed to load executed messages: %w", err)
+	}
+	eventCount := 0
+	addressLookups := make(map[abi.ActorID]address.Address)
+
+	for _, em := range ems {
+		msgCidBytes := em.msg.Cid().Bytes()
+
+		// read message id for this message cid and tipset key cid
+		var messageID int64
+		if err := tx.Stmt(si.stmts.getMsgIdForMsgCidAndTipsetStmt).QueryRowContext(ctx, msgTsKeyCidBytes, msgCidBytes).Scan(&messageID); err != nil {
+			return xerrors.Errorf("failed to get message id for message cid and tipset key cid: %w", err)
+		}
+		if messageID == 0 {
+			return xerrors.Errorf("message id not found for message cid %s and tipset key cid %s", em.msg.Cid(), msgTs.Key())
+		}
+
+		// Insert events for this message
+		for _, event := range em.evs {
+			addr, found := addressLookups[event.Emitter]
+			if !found {
+				var ok bool
+				addr, ok = si.actorToDelegatedAddresFunc(ctx, event.Emitter, executionTs)
+				if !ok {
+					// not an address we will be able to match against
+					continue
+				}
+				addressLookups[event.Emitter] = addr
+			}
+
+			var robustAddrbytes []byte
+			if addr.Protocol() == address.Delegated {
+				robustAddrbytes = addr.Bytes()
+			}
+
+			// Insert event into events table
+			eventResult, err := tx.Stmt(si.stmts.insertEventStmt).ExecContext(ctx, messageID, eventCount, uint64(event.Emitter), robustAddrbytes, 0)
+			if err != nil {
+				return xerrors.Errorf("failed to insert event: %w", err)
+			}
+
+			// Get the event_id of the inserted event
+			eventID, err := eventResult.LastInsertId()
+			if err != nil {
+				return xerrors.Errorf("failed to get last insert id for event: %w", err)
+			}
+
+			// Insert event entries
+			for _, entry := range event.Entries {
+				_, err := tx.Stmt(si.stmts.insertEventEntryStmt).ExecContext(ctx,
+					eventID,
+					isIndexedFlag(entry.Flags),
+					[]byte{entry.Flags},
+					entry.Key,
+					entry.Codec,
+					entry.Value,
+				)
+				if err != nil {
+					return xerrors.Errorf("failed to insert event entry: %w", err)
+				}
+			}
+			eventCount++
+		}
+	}
+
+	return nil
+}
+
+func loadExecutedMessages(ctx context.Context, cs ChainStore, recomputeTipSetStateFunc RecomputeTipSetStateFunc, msgTs, rctTs *types.TipSet) ([]executedMessage, error) {
+	msgs, err := cs.MessagesForTipset(ctx, msgTs)
+	if err != nil {
+
return nil, xerrors.Errorf("failed to get messages for tipset: %w", err) + } + + st := cs.ActorStore(ctx) + + var recomputed bool + recompute := func() error { + tskCid, err2 := rctTs.Key().Cid() + if err2 != nil { + return xerrors.Errorf("failed to compute tipset key cid: %w", err2) + } + + log.Warnf("failed to load receipts for tipset %s (height %d): %s; recomputing tipset state", tskCid.String(), rctTs.Height(), err.Error()) + if err := recomputeTipSetStateFunc(ctx, msgTs); err != nil { + return xerrors.Errorf("failed to recompute tipset state: %w", err) + } + log.Warnf("successfully recomputed tipset state and loaded events for %s (height %d)", tskCid.String(), rctTs.Height()) + return nil + } + + receiptsArr, err := blockadt.AsArray(st, rctTs.Blocks()[0].ParentMessageReceipts) + if err != nil { + if !ipld.IsNotFound(err) || recomputeTipSetStateFunc == nil { + return nil, xerrors.Errorf("failed to load message receipts: %w", err) + } + + if err := recompute(); err != nil { + return nil, err + } + recomputed = true + receiptsArr, err = blockadt.AsArray(st, rctTs.Blocks()[0].ParentMessageReceipts) + if err != nil { + return nil, xerrors.Errorf("failed to load receipts after tipset state recompute: %w", err) + } + } + + if uint64(len(msgs)) != receiptsArr.Length() { + return nil, xerrors.Errorf("mismatching message and receipt counts (%d msgs, %d rcts)", len(msgs), receiptsArr.Length()) + } + + ems := make([]executedMessage, len(msgs)) + + for i := 0; i < len(msgs); i++ { + ems[i].msg = msgs[i] + + var rct types.MessageReceipt + if found, err := receiptsArr.Get(uint64(i), &rct); err != nil { + return nil, xerrors.Errorf("failed to load receipt %d: %w", i, err) + } else if !found { + return nil, xerrors.Errorf("receipt %d not found", i) + } + ems[i].rct = rct + + // no events in the receipt + if rct.EventsRoot == nil { + continue + } + + eventsArr, err := amt4.LoadAMT(ctx, st, *rct.EventsRoot, amt4.UseTreeBitWidth(types.EventAMTBitwidth)) + if err != nil { + if !ipld.IsNotFound(err) || recomputeTipSetStateFunc == nil || recomputed { + return nil, xerrors.Errorf("failed to load events root for message %s: err: %w", ems[i].msg.Cid(), err) + } + // we may have the receipts but not the events, IsStoringEvents may have been false + if err := recompute(); err != nil { + return nil, err + } + eventsArr, err = amt4.LoadAMT(ctx, st, *rct.EventsRoot, amt4.UseTreeBitWidth(types.EventAMTBitwidth)) + if err != nil { + return nil, xerrors.Errorf("failed to load events amt for re-executed tipset for message %s: %w", ems[i].msg.Cid(), err) + } + } + + ems[i].evs = make([]types.Event, eventsArr.Len()) + var evt types.Event + err = eventsArr.ForEach(ctx, func(u uint64, deferred *cbg.Deferred) error { + if u > math.MaxInt { + return xerrors.Errorf("too many events") + } + if err := evt.UnmarshalCBOR(bytes.NewReader(deferred.Raw)); err != nil { + return err + } + + cpy := evt + ems[i].evs[int(u)] = cpy + return nil + }) + + if err != nil { + return nil, xerrors.Errorf("failed to iterate over events for message %d: %w", i, err) + } + } + + return ems, nil +} + +// checkTipsetIndexedStatus verifies if a specific tipset is indexed based on the EventFilter. 
+// It returns nil if the tipset is indexed, ErrNotFound if it's not indexed or cannot be determined
+// from the filter, and a different error for any other failure.
+func (si *SqliteIndexer) checkTipsetIndexedStatus(ctx context.Context, f *EventFilter) error {
+	var tipsetKeyCid []byte
+	var err error
+
+	// Determine the tipset to check based on the filter
+	switch {
+	case f.TipsetCid != cid.Undef:
+		tipsetKeyCid = f.TipsetCid.Bytes()
+	case f.MinHeight >= 0 && f.MinHeight == f.MaxHeight:
+		tipsetKeyCid, err = si.getTipsetKeyCidByHeight(ctx, f.MinHeight)
+		if err != nil {
+			if err == ErrNotFound {
+				// this means that this is a null round and no events exist for this epoch
+				return nil
+			}
+
+			return xerrors.Errorf("failed to get tipset key cid by height: %w", err)
+		}
+	default:
+		// This function distinguishes between two scenarios:
+		// 1. Missing events: The requested tipset is not present in the Index (an error condition).
+		// 2. Valid case: The tipset exists but contains no events (a normal situation).
+		// Currently, this distinction is only made for the common use case where a user requests events for a single tipset.
+		// TODO: Implement this functionality for a range of tipsets. This is expensive and not a common use case so it's deferred for now.
+		return nil
+	}
+
+	// If we couldn't determine a specific tipset, return ErrNotFound
+	if tipsetKeyCid == nil {
+		return ErrNotFound
+	}
+
+	// Check if the determined tipset is indexed
+	if exists, err := si.isTipsetIndexed(ctx, tipsetKeyCid); err != nil {
+		return xerrors.Errorf("failed to check if tipset is indexed: %w", err)
+	} else if exists {
+		return nil // Tipset is indexed
+	}
+
+	return ErrNotFound // Tipset is not indexed
+}
+
+// getTipsetKeyCidByHeight retrieves the tipset key CID for a given height.
+func (si *SqliteIndexer) getTipsetKeyCidByHeight(ctx context.Context, height abi.ChainEpoch) ([]byte, error) {
+	ts, err := si.cs.GetTipsetByHeight(ctx, height, nil, false)
+	if err != nil {
+		return nil, xerrors.Errorf("failed to get tipset by height: %w", err)
+	}
+	if ts == nil {
+		return nil, xerrors.Errorf("tipset is nil for height: %d", height)
+	}
+
+	if ts.Height() != height {
+		// this means that this is a null round
+		return nil, ErrNotFound
+	}
+
+	return toTipsetKeyCidBytes(ts)
+}
+
+// GetEventsForFilter returns matching events for the given filter.
+// Returns nil, nil if the filter has no matching events.
+// Returns nil, ErrNotFound if the filter has no matching events and the tipset is not indexed.
+// Returns nil, err for all other errors.
+func (si *SqliteIndexer) GetEventsForFilter(ctx context.Context, f *EventFilter) ([]*CollectedEvent, error) {
+	getEventsFnc := func(stmt *sql.Stmt, values []any) ([]*CollectedEvent, error) {
+		q, err := stmt.QueryContext(ctx, values...)
+ if err != nil { + return nil, xerrors.Errorf("failed to query events: %w", err) + } + defer func() { _ = q.Close() }() + + var ces []*CollectedEvent + var currentID int64 = -1 + var ce *CollectedEvent + + for q.Next() { + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + } + + var row struct { + id int64 + height uint64 + tipsetKeyCid []byte + emitterID uint64 + emitterAddr []byte + eventIndex int + messageCid []byte + messageIndex int + reverted bool + flags []byte + key string + codec uint64 + value []byte + } + + if err := q.Scan( + &row.id, + &row.height, + &row.tipsetKeyCid, + &row.emitterID, + &row.emitterAddr, + &row.eventIndex, + &row.messageCid, + &row.messageIndex, + &row.reverted, + &row.flags, + &row.key, + &row.codec, + &row.value, + ); err != nil { + return nil, xerrors.Errorf("read prefill row: %w", err) + } + + // The query will return all entries for all matching events, so we need to keep track + // of which event we are dealing with and create a new one each time we see a new id + if row.id != currentID { + // Unfortunately we can't easily incorporate the max results limit into the query due to the + // unpredictable number of rows caused by joins + // Break here to stop collecting rows + if f.MaxResults > 0 && len(ces) >= f.MaxResults { + break + } + + currentID = row.id + ce = &CollectedEvent{ + EventIdx: row.eventIndex, + Reverted: row.reverted, + Height: abi.ChainEpoch(row.height), + MsgIdx: row.messageIndex, + } + ces = append(ces, ce) + + if row.emitterAddr == nil { + ce.EmitterAddr, err = address.NewIDAddress(row.emitterID) + if err != nil { + return nil, xerrors.Errorf("failed to parse emitter id: %w", err) + } + } else { + ce.EmitterAddr, err = address.NewFromBytes(row.emitterAddr) + if err != nil { + return nil, xerrors.Errorf("parse emitter addr: %w", err) + } + } + + tsKeyCid, err := cid.Cast(row.tipsetKeyCid) + if err != nil { + return nil, xerrors.Errorf("parse tipsetkey cid: %w", err) + } + + ts, err := si.cs.GetTipSetByCid(ctx, tsKeyCid) + if err != nil { + return nil, xerrors.Errorf("get tipset by cid: %w", err) + } + if ts == nil { + return nil, xerrors.Errorf("failed to get tipset from cid: tipset is nil for cid: %s", tsKeyCid) + } + + ce.TipSetKey = ts.Key() + + ce.MsgCid, err = cid.Cast(row.messageCid) + if err != nil { + return nil, xerrors.Errorf("parse message cid: %w", err) + } + } + + ce.Entries = append(ce.Entries, types.EventEntry{ + Flags: row.flags[0], + Key: row.key, + Codec: row.codec, + Value: row.value, + }) + } + + if len(ces) == 0 { + return nil, nil + } + + // collected event list is in inverted order since we selected only the most recent events + // sort it into height order + sort.Slice(ces, func(i, j int) bool { return ces[i].Height < ces[j].Height }) + + return ces, nil + } + + values, query, err := makePrefillFilterQuery(f) + if err != nil { + return nil, xerrors.Errorf("failed to make prefill filter query: %w", err) + } + + stmt, err := si.db.Prepare(query) + if err != nil { + return nil, xerrors.Errorf("prepare prefill query: %w", err) + } + defer func() { _ = stmt.Close() }() + + ces, err := getEventsFnc(stmt, values) + if err != nil { + return nil, xerrors.Errorf("failed to get events: %w", err) + } + if len(ces) == 0 { + height := f.MaxHeight + if f.TipsetCid != cid.Undef { + ts, err := si.cs.GetTipSetByCid(ctx, f.TipsetCid) + if err != nil { + return nil, xerrors.Errorf("failed to get tipset by cid: %w", err) + } + if ts == nil { + return nil, xerrors.Errorf("failed to get tipset from cid: tipset is nil 
for cid: %s", f.TipsetCid) + } + height = ts.Height() + } + if height > 0 { + head := si.cs.GetHeaviestTipSet() + if head == nil { + return nil, errors.New("failed to get head: head is nil") + } + headHeight := head.Height() + maxLookBackHeight := headHeight - maxLookBackForWait + + // if the height is old enough, we'll assume the index is caught up to it and not bother + // waiting for it to be indexed + if height <= maxLookBackHeight { + return nil, si.checkTipsetIndexedStatus(ctx, f) + } + } + + // there's no matching events for the filter, wait till index has caught up to the head and then retry + if err := si.waitTillHeadIndexed(ctx); err != nil { + return nil, xerrors.Errorf("failed to wait for head to be indexed: %w", err) + } + ces, err = getEventsFnc(stmt, values) + if err != nil { + return nil, xerrors.Errorf("failed to get events: %w", err) + } + + if len(ces) == 0 { + return nil, si.checkTipsetIndexedStatus(ctx, f) + } + } + + return ces, nil +} + +func makePrefillFilterQuery(f *EventFilter) ([]any, string, error) { + clauses := []string{} + values := []any{} + joins := []string{} + + if f.TipsetCid != cid.Undef { + clauses = append(clauses, "tm.tipset_key_cid=?") + values = append(values, f.TipsetCid.Bytes()) + } else { + if f.MinHeight >= 0 && f.MinHeight == f.MaxHeight { + clauses = append(clauses, "tm.height=?") + values = append(values, f.MinHeight) + } else { + if f.MaxHeight >= 0 && f.MinHeight >= 0 { + clauses = append(clauses, "tm.height BETWEEN ? AND ?") + values = append(values, f.MinHeight, f.MaxHeight) + } else if f.MinHeight >= 0 { + clauses = append(clauses, "tm.height >= ?") + values = append(values, f.MinHeight) + } else if f.MaxHeight >= 0 { + clauses = append(clauses, "tm.height <= ?") + values = append(values, f.MaxHeight) + } else { + return nil, "", xerrors.Errorf("filter must specify either a tipset or a height range") + } + } + // unless asking for a specific tipset, we never want to see reverted historical events + clauses = append(clauses, "e.reverted=?") + values = append(values, false) + } + + if len(f.Addresses) > 0 { + idAddresses := make([]uint64, 0) + delegatedAddresses := make([][]byte, 0) + + for _, addr := range f.Addresses { + switch addr.Protocol() { + case address.ID: + id, err := address.IDFromAddress(addr) + if err != nil { + return nil, "", xerrors.Errorf("failed to get ID from address: %w", err) + } + idAddresses = append(idAddresses, id) + case address.Delegated: + delegatedAddresses = append(delegatedAddresses, addr.Bytes()) + default: + return nil, "", xerrors.Errorf("can only query events by ID or Delegated addresses; but request has address: %s", addr) + } + } + + if len(idAddresses) > 0 { + placeholders := strings.Repeat("?,", len(idAddresses)-1) + "?" + clauses = append(clauses, "e.emitter_id IN ("+placeholders+")") + for _, id := range idAddresses { + values = append(values, id) + } + } + + if len(delegatedAddresses) > 0 { + placeholders := strings.Repeat("?,", len(delegatedAddresses)-1) + "?" 
+ clauses = append(clauses, "e.emitter_addr IN ("+placeholders+")") + for _, addr := range delegatedAddresses { + values = append(values, addr) + } + } + } + + if len(f.KeysWithCodec) > 0 { + join := 0 + for key, vals := range f.KeysWithCodec { + if len(vals) > 0 { + join++ + joinAlias := fmt.Sprintf("ee%d", join) + joins = append(joins, fmt.Sprintf("event_entry %s ON e.id=%[1]s.event_id", joinAlias)) + clauses = append(clauses, fmt.Sprintf("%s.indexed=1 AND %[1]s.key=?", joinAlias)) + values = append(values, key) + subclauses := make([]string, 0, len(vals)) + for _, val := range vals { + subclauses = append(subclauses, fmt.Sprintf("(%s.value=? AND %[1]s.codec=?)", joinAlias)) + values = append(values, val.Value, val.Codec) + } + clauses = append(clauses, "("+strings.Join(subclauses, " OR ")+")") + } + } + } + + s := `SELECT + e.id, + tm.height, + tm.tipset_key_cid, + e.emitter_id, + e.emitter_addr, + e.event_index, + tm.message_cid, + tm.message_index, + e.reverted, + ee.flags, + ee.key, + ee.codec, + ee.value + FROM event e + JOIN tipset_message tm ON e.message_id = tm.id + JOIN event_entry ee ON e.id = ee.event_id` + + if len(joins) > 0 { + s = s + ", " + strings.Join(joins, ", ") + } + + if len(clauses) > 0 { + s = s + " WHERE " + strings.Join(clauses, " AND ") + } + + // retain insertion order of event_entry rows + s += " ORDER BY tm.height DESC, ee._rowid_ ASC" + return values, s, nil +} diff --git a/chain/index/events_test.go b/chain/index/events_test.go new file mode 100644 index 00000000000..5cf00e89ff0 --- /dev/null +++ b/chain/index/events_test.go @@ -0,0 +1,441 @@ +package index + +import ( + "context" + "database/sql" + "errors" + pseudo "math/rand" + "sort" + "testing" + "time" + + "github.com/ipfs/go-cid" + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/lib/must" +) + +func TestGetEventsForFilterNoEvents(t *testing.T) { + ctx := context.Background() + seed := time.Now().UnixNano() + t.Logf("seed: %d", seed) + rng := pseudo.New(pseudo.NewSource(seed)) + + headHeight := abi.ChainEpoch(60) + si, _, cs := setupWithHeadIndexed(t, headHeight, rng) + t.Cleanup(func() { _ = si.Close() }) + + // Create a fake tipset at height 1 + fakeTipSet1 := fakeTipSet(t, rng, 1, nil) + + // Set the dummy chainstore to return this tipset for height 1 + cs.SetTipsetByHeightAndKey(1, fakeTipSet1.Key(), fakeTipSet1) // empty DB + cs.SetTipSetByCid(t, fakeTipSet1) + + // tipset is not indexed + f := &EventFilter{ + MinHeight: 1, + MaxHeight: 1, + } + ces, err := si.GetEventsForFilter(ctx, f) + require.True(t, errors.Is(err, ErrNotFound)) + require.Equal(t, 0, len(ces)) + + tsCid, err := fakeTipSet1.Key().Cid() + require.NoError(t, err) + f = &EventFilter{ + TipsetCid: tsCid, + } + + ces, err = si.GetEventsForFilter(ctx, f) + require.True(t, errors.Is(err, ErrNotFound)) + require.Equal(t, 0, len(ces)) + + // tipset is indexed but has no events + err = withTx(ctx, si.db, func(tx *sql.Tx) error { + return si.indexTipset(ctx, tx, fakeTipSet1) + }) + require.NoError(t, err) + + ces, err = si.GetEventsForFilter(ctx, f) + require.NoError(t, err) + require.Equal(t, 0, len(ces)) + + f = &EventFilter{ + TipsetCid: tsCid, + } + ces, err = si.GetEventsForFilter(ctx, f) + require.NoError(t, err) + require.Equal(t, 0, len(ces)) + + // search for a range that is absent + f = &EventFilter{ + MinHeight: 100, + MaxHeight: 200, + } + ces, err = 
si.GetEventsForFilter(ctx, f) + require.NoError(t, err) + require.Equal(t, 0, len(ces)) +} + +func TestGetEventsForFilterWithEvents(t *testing.T) { + ctx := context.Background() + seed := time.Now().UnixNano() + t.Logf("seed: %d", seed) + rng := pseudo.New(pseudo.NewSource(seed)) + headHeight := abi.ChainEpoch(60) + si, _, cs := setupWithHeadIndexed(t, headHeight, rng) + t.Cleanup(func() { _ = si.Close() }) + + ev1 := fakeEvent( + abi.ActorID(1), + []kv{ + {k: "type", v: []byte("approval")}, + {k: "signer", v: []byte("addr1")}, + }, + []kv{ + {k: "amount", v: []byte("2988181")}, + }, + ) + + ev2 := fakeEvent( + abi.ActorID(2), + []kv{ + {k: "type", v: []byte("approval")}, + {k: "signer", v: []byte("addr2")}, + }, + []kv{ + {k: "amount", v: []byte("2988181")}, + }, + ) + + events := []types.Event{*ev1, *ev2} + + fm := fakeMessage(address.TestAddress, address.TestAddress) + em1 := executedMessage{ + msg: fm, + evs: events, + } + + si.SetActorToDelegatedAddresFunc(func(ctx context.Context, emitter abi.ActorID, ts *types.TipSet) (address.Address, bool) { + idAddr, err := address.NewIDAddress(uint64(emitter)) + if err != nil { + return address.Undef, false + } + + return idAddr, true + }) + + si.setExecutedMessagesLoaderFunc(func(ctx context.Context, cs ChainStore, msgTs, rctTs *types.TipSet) ([]executedMessage, error) { + return []executedMessage{em1}, nil + }) + + // Create a fake tipset at height 1 + fakeTipSet1 := fakeTipSet(t, rng, 1, nil) + fakeTipSet2 := fakeTipSet(t, rng, 2, nil) + + // Set the dummy chainstore to return this tipset for height 1 + cs.SetTipsetByHeightAndKey(1, fakeTipSet1.Key(), fakeTipSet1) // empty DB + cs.SetTipsetByHeightAndKey(2, fakeTipSet2.Key(), fakeTipSet2) // empty DB + cs.SetTipSetByCid(t, fakeTipSet1) + cs.SetTipSetByCid(t, fakeTipSet2) + + cs.SetMessagesForTipset(fakeTipSet1, []types.ChainMsg{fm}) + + // index tipset and events + require.NoError(t, si.Apply(ctx, fakeTipSet1, fakeTipSet2)) + + // fetch it based on height -> works + f := &EventFilter{ + MinHeight: 1, + MaxHeight: 1, + } + ces, err := si.GetEventsForFilter(ctx, f) + require.NoError(t, err) + require.Equal(t, 2, len(ces)) + + // fetch it based on cid -> works + tsCid1, err := fakeTipSet1.Key().Cid() + require.NoError(t, err) + + tsCid2, err := fakeTipSet2.Key().Cid() + require.NoError(t, err) + + f = &EventFilter{ + TipsetCid: tsCid1, + } + ces, err = si.GetEventsForFilter(ctx, f) + require.NoError(t, err) + + require.Equal(t, []*CollectedEvent{ + { + Entries: ev1.Entries, + EmitterAddr: must.One(address.NewIDAddress(uint64(ev1.Emitter))), + EventIdx: 0, + Reverted: false, + Height: 1, + TipSetKey: fakeTipSet1.Key(), + MsgIdx: 0, + MsgCid: fm.Cid(), + }, + { + Entries: ev2.Entries, + EmitterAddr: must.One(address.NewIDAddress(uint64(ev2.Emitter))), + EventIdx: 1, + Reverted: false, + Height: 1, + TipSetKey: fakeTipSet1.Key(), + MsgIdx: 0, + MsgCid: fm.Cid(), + }, + }, ces) + + // mark fakeTipSet2 as reverted so events for fakeTipSet1 are reverted + require.NoError(t, si.Revert(ctx, fakeTipSet2, fakeTipSet1)) + + var reverted bool + err = si.db.QueryRow("SELECT reverted FROM tipset_message WHERE tipset_key_cid = ?", tsCid2.Bytes()).Scan(&reverted) + require.NoError(t, err) + require.True(t, reverted) + + var reverted2 bool + err = si.db.QueryRow("SELECT reverted FROM tipset_message WHERE tipset_key_cid = ?", tsCid1.Bytes()).Scan(&reverted2) + require.NoError(t, err) + require.False(t, reverted2) + + // fetching events fails if excludeReverted is true i.e. 
we request events by height + f = &EventFilter{ + MinHeight: 1, + MaxHeight: 1, + } + ces, err = si.GetEventsForFilter(ctx, f) + require.NoError(t, err) + require.Equal(t, 0, len(ces)) + + // works if excludeReverted is false i.e. we request events by hash + f = &EventFilter{ + TipsetCid: tsCid1, + } + ces, err = si.GetEventsForFilter(ctx, f) + require.NoError(t, err) + require.Equal(t, 2, len(ces)) +} + +func TestGetEventsFilterByAddress(t *testing.T) { + ctx := context.Background() + seed := time.Now().UnixNano() + t.Logf("seed: %d", seed) + rng := pseudo.New(pseudo.NewSource(seed)) + headHeight := abi.ChainEpoch(60) + si, _, cs := setupWithHeadIndexed(t, headHeight, rng) + t.Cleanup(func() { _ = si.Close() }) + + addr1, err := address.NewIDAddress(1) + require.NoError(t, err) + addr2, err := address.NewIDAddress(2) + require.NoError(t, err) + addr3, err := address.NewIDAddress(3) + require.NoError(t, err) + + delegatedAddr1, err := address.NewFromString("f410fagkp3qx2f76maqot74jaiw3tzbxe76k76zrkl3xifk67isrnbn2sll3yua") + require.NoError(t, err) + + ev1 := fakeEvent( + abi.ActorID(1), + []kv{ + {k: "type", v: []byte("approval")}, + {k: "signer", v: []byte("addr1")}, + }, + []kv{ + {k: "amount", v: []byte("2988181")}, + }, + ) + + ev2 := fakeEvent( + abi.ActorID(2), + []kv{ + {k: "type", v: []byte("approval")}, + {k: "signer", v: []byte("addr2")}, + }, + []kv{ + {k: "amount", v: []byte("2988181")}, + }, + ) + + events := []types.Event{*ev1, *ev2} + + fm := fakeMessage(address.TestAddress, address.TestAddress) + em1 := executedMessage{ + msg: fm, + evs: events, + } + + si.SetActorToDelegatedAddresFunc(func(ctx context.Context, emitter abi.ActorID, ts *types.TipSet) (address.Address, bool) { + if emitter == abi.ActorID(1) { + return delegatedAddr1, true + } + idAddr, err := address.NewIDAddress(uint64(emitter)) + if err != nil { + return address.Undef, false + } + return idAddr, true + }) + + si.setExecutedMessagesLoaderFunc(func(ctx context.Context, cs ChainStore, msgTs, rctTs *types.TipSet) ([]executedMessage, error) { + return []executedMessage{em1}, nil + }) + + // Create a fake tipset at height 1 + fakeTipSet1 := fakeTipSet(t, rng, 1, nil) + fakeTipSet2 := fakeTipSet(t, rng, 2, nil) + + // Set the dummy chainstore to return this tipset for height 1 + cs.SetTipsetByHeightAndKey(1, fakeTipSet1.Key(), fakeTipSet1) // empty DB + cs.SetTipsetByHeightAndKey(2, fakeTipSet2.Key(), fakeTipSet2) // empty DB + cs.SetTipSetByCid(t, fakeTipSet1) + cs.SetTipSetByCid(t, fakeTipSet2) + + cs.SetMessagesForTipset(fakeTipSet1, []types.ChainMsg{fm}) + + require.NoError(t, si.Apply(ctx, fakeTipSet1, fakeTipSet2)) + + testCases := []struct { + name string + f *EventFilter + expectedCount int + expectedAddresses []address.Address + }{ + { + name: "matching single ID address (non-delegated)", + f: &EventFilter{ + Addresses: []address.Address{addr2}, + MinHeight: 1, + MaxHeight: 1, + }, + expectedCount: 1, + expectedAddresses: []address.Address{addr2}, + }, + { + name: "matching single ID address", + f: &EventFilter{ + Addresses: []address.Address{addr1}, + MinHeight: 1, + MaxHeight: 1, + }, + expectedCount: 1, + expectedAddresses: []address.Address{delegatedAddr1}, + }, + { + name: "matching single delegated address", + f: &EventFilter{ + Addresses: []address.Address{delegatedAddr1}, + MinHeight: 1, + MaxHeight: 1, + }, + expectedCount: 1, + expectedAddresses: []address.Address{delegatedAddr1}, + }, + { + name: "matching multiple addresses", + f: &EventFilter{ + Addresses: []address.Address{addr1, addr2}, + 
MinHeight: 1,
+				MaxHeight: 1,
+			},
+			expectedCount:     2,
+			expectedAddresses: []address.Address{delegatedAddr1, addr2},
+		},
+		{
+			name: "no matching address",
+			f: &EventFilter{
+				Addresses: []address.Address{addr3},
+				MinHeight: 1,
+				MaxHeight: 1,
+			},
+			expectedCount:     0,
+			expectedAddresses: []address.Address{},
+		},
+		{
+			name: "empty address list",
+			f: &EventFilter{
+				Addresses: []address.Address{},
+				MinHeight: 1,
+				MaxHeight: 1,
+			},
+			expectedCount:     2,
+			expectedAddresses: []address.Address{delegatedAddr1, addr2},
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			ces, err := si.GetEventsForFilter(ctx, tc.f)
+			require.NoError(t, err)
+			require.Equal(t, tc.expectedCount, len(ces))
+
+			actualAddresses := make([]address.Address, len(ces))
+			for i, ce := range ces {
+				actualAddresses[i] = ce.EmitterAddr
+			}
+
+			sortAddresses(tc.expectedAddresses)
+			sortAddresses(actualAddresses)
+
+			require.Equal(t, tc.expectedAddresses, actualAddresses)
+		})
+	}
+}
+
+func sortAddresses(addrs []address.Address) {
+	sort.Slice(addrs, func(i, j int) bool {
+		return addrs[i].String() < addrs[j].String()
+	})
+}
+
+func fakeMessage(to, from address.Address) *types.Message {
+	return &types.Message{
+		To:         to,
+		From:       from,
+		Nonce:      197,
+		Method:     1,
+		Params:     []byte("some random bytes"),
+		GasLimit:   126723,
+		GasPremium: types.NewInt(4),
+		GasFeeCap:  types.NewInt(120),
+	}
+}
+
+func fakeEvent(emitter abi.ActorID, indexed []kv, unindexed []kv) *types.Event {
+	ev := &types.Event{
+		Emitter: emitter,
+	}
+
+	for _, in := range indexed {
+		ev.Entries = append(ev.Entries, types.EventEntry{
+			Flags: 0x01,
+			Key:   in.k,
+			Codec: cid.Raw,
+			Value: in.v,
+		})
+	}
+
+	for _, in := range unindexed {
+		ev.Entries = append(ev.Entries, types.EventEntry{
+			Flags: 0x00,
+			Key:   in.k,
+			Codec: cid.Raw,
+			Value: in.v,
+		})
+	}
+
+	return ev
+}
+
+type kv struct {
+	k string
+	v []byte
+}
diff --git a/chain/index/gc.go b/chain/index/gc.go
new file mode 100644
index 00000000000..5a7377e7263
--- /dev/null
+++ b/chain/index/gc.go
@@ -0,0 +1,96 @@
+package index
+
+import (
+	"context"
+	"strconv"
+	"time"
+
+	logging "github.com/ipfs/go-log/v2"
+
+	"github.com/filecoin-project/lotus/chain/actors/builtin"
+)
+
+var (
+	log             = logging.Logger("chainindex")
+	cleanupInterval = time.Duration(4) * time.Hour
+)
+
+func (si *SqliteIndexer) gcLoop() {
+	defer si.wg.Done()
+
+	// Initial cleanup before entering the loop
+	si.gc(si.ctx)
+
+	cleanupTicker := time.NewTicker(cleanupInterval)
+	defer cleanupTicker.Stop()
+
+	for si.ctx.Err() == nil {
+		if si.isClosed() {
+			return
+		}
+
+		select {
+		case <-cleanupTicker.C:
+			si.gc(si.ctx)
+		case <-si.ctx.Done():
+			return
+		}
+	}
+}
+
+func (si *SqliteIndexer) gc(ctx context.Context) {
+	if si.gcRetentionEpochs <= 0 {
+		log.Info("gc retention epochs is not set, skipping gc")
+		return
+	}
+	log.Info("starting index gc")
+
+	head := si.cs.GetHeaviestTipSet()
+
+	removalEpoch := int64(head.Height()) - si.gcRetentionEpochs - 10 // 10 is for some grace period
+	if removalEpoch <= 0 {
+		log.Info("no tipsets to gc")
+		return
+	}
+
+	log.Infof("gc'ing all (reverted and non-reverted) tipsets before epoch %d", removalEpoch)
+
+	res, err := si.stmts.removeTipsetsBeforeHeightStmt.ExecContext(ctx, removalEpoch)
+	if err != nil {
+		log.Errorw("failed to remove tipsets before height", "height", removalEpoch, "error", err)
+		return
+	}
+
+	rows, err := res.RowsAffected()
+	if err != nil {
+		log.Errorw("failed to get rows affected", "error", err)
+		return
+	}
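+	// To make the cutoff above concrete, here is a worked example with purely
+	// illustrative numbers (not taken from any network): with the head at epoch
+	// 150000 and gcRetentionEpochs set to 100000, we get
+	//
+	//	removalEpoch = 150000 - 100000 - 10 = 49990
+	//
+	// so every tipset (reverted or not) below epoch 49990 is deleted, with the
+	// extra 10 epochs acting as the grace period noted above.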
log.Infof("gc'd %d entries before epoch %d", rows, removalEpoch) + + // ------------------------------------------------------------------------------------------------- + // Also GC eth hashes + + // Convert gcRetentionEpochs to number of days + gcRetentionDays := si.gcRetentionEpochs / (builtin.EpochsInDay) + if gcRetentionDays < 1 { + log.Infof("skipping gc of eth hashes as retention days is less than 1") + return + } + + log.Infof("gc'ing eth hashes older than %d days", gcRetentionDays) + res, err = si.stmts.removeEthHashesOlderThanStmt.ExecContext(ctx, "-"+strconv.Itoa(int(gcRetentionDays))+" day") + if err != nil { + log.Errorf("failed to gc eth hashes older than %d days: %w", gcRetentionDays, err) + return + } + + rows, err = res.RowsAffected() + if err != nil { + log.Errorf("failed to get rows affected: %w", err) + return + } + + log.Infof("gc'd %d eth hashes older than %d days", rows, gcRetentionDays) +} diff --git a/chain/index/gc_test.go b/chain/index/gc_test.go new file mode 100644 index 00000000000..c08c29f636a --- /dev/null +++ b/chain/index/gc_test.go @@ -0,0 +1,123 @@ +package index + +import ( + "context" + pseudo "math/rand" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/lotus/chain/types" +) + +func TestGC(t *testing.T) { + ctx := context.Background() + seed := time.Now().UnixNano() + t.Logf("seed: %d", seed) + rng := pseudo.New(pseudo.NewSource(seed)) + headHeight := abi.ChainEpoch(60) + si, _, cs := setupWithHeadIndexed(t, headHeight, rng) + t.Cleanup(func() { _ = si.Close() }) + + si.gcRetentionEpochs = 20 + + ev1 := fakeEvent( + abi.ActorID(1), + []kv{ + {k: "type", v: []byte("approval")}, + {k: "signer", v: []byte("addr1")}, + }, + []kv{ + {k: "amount", v: []byte("2988181")}, + }, + ) + + ev2 := fakeEvent( + abi.ActorID(2), + []kv{ + {k: "type", v: []byte("approval")}, + {k: "signer", v: []byte("addr2")}, + }, + []kv{ + {k: "amount", v: []byte("2988181")}, + }, + ) + + events := []types.Event{*ev1, *ev2} + + fm := fakeMessage(address.TestAddress, address.TestAddress) + em1 := executedMessage{ + msg: fm, + evs: events, + } + + si.SetActorToDelegatedAddresFunc(func(ctx context.Context, emitter abi.ActorID, ts *types.TipSet) (address.Address, bool) { + idAddr, err := address.NewIDAddress(uint64(emitter)) + if err != nil { + return address.Undef, false + } + + return idAddr, true + }) + + si.setExecutedMessagesLoaderFunc(func(ctx context.Context, cs ChainStore, msgTs, rctTs *types.TipSet) ([]executedMessage, error) { + if msgTs.Height() == 1 { + return []executedMessage{em1}, nil + } + return nil, nil + }) + + // Create a fake tipset at height 1 + fakeTipSet1 := fakeTipSet(t, rng, 1, nil) + fakeTipSet2 := fakeTipSet(t, rng, 10, nil) + fakeTipSet3 := fakeTipSet(t, rng, 50, nil) + + // Set the dummy chainstore to return this tipset for height 1 + cs.SetTipsetByHeightAndKey(1, fakeTipSet1.Key(), fakeTipSet1) // empty DB + cs.SetTipsetByHeightAndKey(10, fakeTipSet2.Key(), fakeTipSet2) // empty DB + cs.SetTipsetByHeightAndKey(50, fakeTipSet3.Key(), fakeTipSet3) // empty DB + cs.SetTipSetByCid(t, fakeTipSet1) + cs.SetTipSetByCid(t, fakeTipSet2) + cs.SetTipSetByCid(t, fakeTipSet3) + + cs.SetMessagesForTipset(fakeTipSet1, []types.ChainMsg{fm}) + + // index tipset and events + require.NoError(t, si.Apply(ctx, fakeTipSet1, fakeTipSet2)) + require.NoError(t, si.Apply(ctx, fakeTipSet2, fakeTipSet3)) + + // getLogs works for height 1 + 
+	filter := &EventFilter{
+		MinHeight: 1,
+		MaxHeight: 1,
+	}
+	ces, err := si.GetEventsForFilter(ctx, filter)
+	require.NoError(t, err)
+	require.Len(t, ces, 2)
+
+	si.gc(ctx)
+
+	// getLogs no longer works for height 1
+	_, err = si.GetEventsForFilter(ctx, filter)
+	require.Error(t, err)
+
+	// Verify that the tipset at height 1 is removed
+	var count int
+	err = si.db.QueryRow("SELECT COUNT(*) FROM tipset_message WHERE height = 1").Scan(&count)
+	require.NoError(t, err)
+	require.Equal(t, 0, count)
+
+	// Verify that the tipset at height 10 is also removed, as it is below the GC cutoff
+	err = si.db.QueryRow("SELECT COUNT(*) FROM tipset_message WHERE height = 10").Scan(&count)
+	require.NoError(t, err)
+	require.Equal(t, 0, count)
+
+	// Verify that the tipset at height 50 is not removed
+	err = si.db.QueryRow("SELECT COUNT(*) FROM tipset_message WHERE height = 50").Scan(&count)
+	require.NoError(t, err)
+	require.Equal(t, 1, count)
+}
diff --git a/chain/index/helpers.go b/chain/index/helpers.go
new file mode 100644
index 00000000000..a4db495c99e
--- /dev/null
+++ b/chain/index/helpers.go
@@ -0,0 +1,155 @@
+package index
+
+import (
+	"context"
+	"database/sql"
+	"errors"
+	"os"
+	"strings"
+	"time"
+
+	ipld "github.com/ipfs/go-ipld-format"
+	"golang.org/x/xerrors"
+
+	"github.com/filecoin-project/lotus/chain/types"
+)
+
+const maxRetries = 3
+const retryDelay = 150 * time.Millisecond
+
+// PopulateFromSnapshot initializes and populates the chain index from a snapshot.
+//
+// This function creates a new Index at the specified path and populates
+// it by using the chain state from the provided ChainStore. It starts from the heaviest
+// tipset and works backwards, indexing each tipset until it reaches the genesis
+// block or encounters a tipset for which it is unable to find messages in the chain store.
+//
+// Important Notes:
+// 1. This function assumes that the snapshot has already been imported into the ChainStore.
+// 2. Events are not populated in the index because snapshots do not contain event data,
+// and messages are not re-executed during this process. The resulting index will
+// only contain tipsets and messages.
+// 3. This function will delete any existing database at the specified path before
+// creating a new one.
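+//
+// A minimal usage sketch (the repo path and variable names here are
+// hypothetical, chosen only for illustration):
+//
+//	dbPath := filepath.Join(repoDir, "chainindex", "chainindex.db")
+//	if err := PopulateFromSnapshot(ctx, dbPath, chainStore); err != nil {
+//		return xerrors.Errorf("failed to populate index from snapshot: %w", err)
+//	}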
+func PopulateFromSnapshot(ctx context.Context, path string, cs ChainStore) error {
+	log.Infof("populating chainindex at path %s from snapshot", path)
+	// Check if a database already exists and attempt to delete it
+	if _, err := os.Stat(path); err == nil {
+		log.Infof("deleting existing chainindex at %s", path)
+		if err = os.Remove(path); err != nil {
+			return xerrors.Errorf("failed to delete existing chainindex at %s: %w", path, err)
+		}
+	}
+
+	si, err := NewSqliteIndexer(path, cs, 0, false, 0)
+	if err != nil {
+		return xerrors.Errorf("failed to create sqlite indexer: %w", err)
+	}
+	defer func() {
+		if closeErr := si.Close(); closeErr != nil {
+			log.Errorf("failed to close sqlite indexer: %s", closeErr)
+		}
+	}()
+
+	totalIndexed := 0
+
+	err = withTx(ctx, si.db, func(tx *sql.Tx) error {
+		head := cs.GetHeaviestTipSet()
+		curTs := head
+		log.Infof("starting to populate chainindex from snapshot at head height %d", head.Height())
+
+		for curTs != nil {
+			if err := si.indexTipset(ctx, tx, curTs); err != nil {
+				if ipld.IsNotFound(err) {
+					log.Infof("stopping chainindex population at height %d as snapshot only contains data up to this height; error is: %s", curTs.Height(), err)
+					break
+				}
+
+				return xerrors.Errorf("failed to populate chainindex from snapshot at height %d: %w", curTs.Height(), err)
+			}
+			totalIndexed++
+
+			curTs, err = cs.GetTipSetFromKey(ctx, curTs.Parents())
+			if err != nil {
+				return xerrors.Errorf("failed to get parent tipset: %w", err)
+			}
+		}
+
+		return nil
+	})
+	if err != nil {
+		return xerrors.Errorf("failed to populate chainindex from snapshot: %w", err)
+	}
+
+	log.Infof("Successfully populated chainindex from snapshot with %d tipsets", totalIndexed)
+	return nil
+}
+
+func toTipsetKeyCidBytes(ts *types.TipSet) ([]byte, error) {
+	if ts == nil {
+		return nil, errors.New("failed to get tipset key cid: tipset is nil")
+	}
+	tsKeyCid, err := ts.Key().Cid()
+	if err != nil {
+		return nil, err
+	}
+	return tsKeyCid.Bytes(), nil
+}
+
+func withTx(ctx context.Context, db *sql.DB, fn func(*sql.Tx) error) error {
+	var err error
+	for i := 0; i < maxRetries; i++ {
+		if ctx.Err() != nil {
+			return ctx.Err()
+		}
+		var tx *sql.Tx
+		tx, err = db.BeginTx(ctx, nil)
+		if err != nil {
+			return xerrors.Errorf("failed to begin transaction: %w", err)
+		}
+
+		defer func() {
+			if p := recover(); p != nil {
+				// A panic occurred, rollback and repanic
+				if tx != nil {
+					_ = tx.Rollback()
+				}
+				panic(p)
+			}
+		}()
+
+		err = fn(tx)
+		if err == nil {
+			if commitErr := tx.Commit(); commitErr != nil {
+				return xerrors.Errorf("failed to commit transaction: %w", commitErr)
+			}
+			return nil
+		}
+
+		_ = tx.Rollback()
+
+		if !isRetryableError(err) {
+			return xerrors.Errorf("transaction failed: %w", err)
+		}
+
+		select {
+		case <-ctx.Done():
+			return ctx.Err()
+		case <-time.After(retryDelay):
+			// Retry after delay
+		}
+	}
+
+	return xerrors.Errorf("transaction failed after %d retries; last error: %w", maxRetries, err)
}
+
+func isRetryableError(err error) bool {
+	return err != nil && strings.Contains(err.Error(), "database is locked")
+}
+
+func isIndexedFlag(b uint8) bool {
+	// currently we mark the full entry as indexed if either the key
+	// or the value are indexed; in the future we will need finer-grained
+	// management of indices
+	return b&(types.EventFlagIndexedKey|types.EventFlagIndexedValue) > 0
+}
diff --git a/chain/index/indexer.go b/chain/index/indexer.go
new file mode 100644
index 00000000000..7cee575a6df
--- /dev/null
+++ b/chain/index/indexer.go
@@ -0,0 +1,419 @@
+package
index + +import ( + "context" + "database/sql" + "sync" + + "github.com/ipfs/go-cid" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/crypto" + + "github.com/filecoin-project/lotus/chain/actors/builtin" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/chain/types/ethtypes" + "github.com/filecoin-project/lotus/lib/sqlite" +) + +var _ Indexer = (*SqliteIndexer)(nil) + +// ActorToDelegatedAddressFunc is a function type that resolves an actor ID to a DelegatedAddress if one exists for that actor, otherwise returns nil +type ActorToDelegatedAddressFunc func(ctx context.Context, emitter abi.ActorID, ts *types.TipSet) (address.Address, bool) +type emsLoaderFunc func(ctx context.Context, cs ChainStore, msgTs, rctTs *types.TipSet) ([]executedMessage, error) +type RecomputeTipSetStateFunc func(ctx context.Context, ts *types.TipSet) error + +type preparedStatements struct { + insertEthTxHashStmt *sql.Stmt + getNonRevertedMsgInfoStmt *sql.Stmt + getMsgCidFromEthHashStmt *sql.Stmt + insertTipsetMessageStmt *sql.Stmt + updateTipsetToRevertedStmt *sql.Stmt + hasTipsetStmt *sql.Stmt + updateTipsetToNonRevertedStmt *sql.Stmt + removeTipsetsBeforeHeightStmt *sql.Stmt + removeEthHashesOlderThanStmt *sql.Stmt + updateTipsetsToRevertedFromHeightStmt *sql.Stmt + updateEventsToRevertedFromHeightStmt *sql.Stmt + isIndexEmptyStmt *sql.Stmt + getMinNonRevertedHeightStmt *sql.Stmt + hasNonRevertedTipsetStmt *sql.Stmt + updateEventsToRevertedStmt *sql.Stmt + updateEventsToNonRevertedStmt *sql.Stmt + getMsgIdForMsgCidAndTipsetStmt *sql.Stmt + insertEventStmt *sql.Stmt + insertEventEntryStmt *sql.Stmt + getEventIdAndEmitterIdStmt *sql.Stmt + getEventEntriesStmt *sql.Stmt + + hasNullRoundAtHeightStmt *sql.Stmt + getNonRevertedTipsetAtHeightStmt *sql.Stmt + countTipsetsAtHeightStmt *sql.Stmt + + getNonRevertedTipsetMessageCountStmt *sql.Stmt + getNonRevertedTipsetEventCountStmt *sql.Stmt + getNonRevertedTipsetEventEntriesCountStmt *sql.Stmt + hasRevertedEventsInTipsetStmt *sql.Stmt + removeRevertedTipsetsBeforeHeightStmt *sql.Stmt +} + +type SqliteIndexer struct { + ctx context.Context + cancel context.CancelFunc + wg sync.WaitGroup + + db *sql.DB + cs ChainStore + + actorToDelegatedAddresFunc ActorToDelegatedAddressFunc + executedMessagesLoaderFunc emsLoaderFunc + + stmts *preparedStatements + + gcRetentionEpochs int64 + reconcileEmptyIndex bool + maxReconcileTipsets uint64 + + mu sync.Mutex + updateSubs map[uint64]*updateSub + subIdCounter uint64 + + started bool + + closeLk sync.RWMutex + closed bool + + // ensures writes are serialized so backfilling does not race with index updates + writerLk sync.Mutex +} + +func NewSqliteIndexer(path string, cs ChainStore, gcRetentionEpochs int64, reconcileEmptyIndex bool, + maxReconcileTipsets uint64) (si *SqliteIndexer, err error) { + + if gcRetentionEpochs != 0 && gcRetentionEpochs < builtin.EpochsInDay { + return nil, xerrors.Errorf("gc retention epochs must be 0 or greater than %d", builtin.EpochsInDay) + } + + db, err := sqlite.Open(path) + if err != nil { + return nil, xerrors.Errorf("failed to setup message index db: %w", err) + } + + ctx, cancel := context.WithCancel(context.Background()) + + defer func() { + if err != nil { + _ = db.Close() + cancel() + } + }() + + err = sqlite.InitDb(ctx, "chain index", db, ddls, []sqlite.MigrationFunc{}) + if err != nil { + return nil, xerrors.Errorf("failed to init chain index db: 
%w", err) + } + + si = &SqliteIndexer{ + ctx: ctx, + cancel: cancel, + db: db, + cs: cs, + updateSubs: make(map[uint64]*updateSub), + subIdCounter: 0, + gcRetentionEpochs: gcRetentionEpochs, + reconcileEmptyIndex: reconcileEmptyIndex, + maxReconcileTipsets: maxReconcileTipsets, + stmts: &preparedStatements{}, + } + + if err = si.initStatements(); err != nil { + return nil, xerrors.Errorf("failed to prepare statements: %w", err) + } + + return si, nil +} + +func (si *SqliteIndexer) Start() { + si.wg.Add(1) + go si.gcLoop() + + si.started = true +} + +func (si *SqliteIndexer) SetActorToDelegatedAddresFunc(actorToDelegatedAddresFunc ActorToDelegatedAddressFunc) { + si.actorToDelegatedAddresFunc = actorToDelegatedAddresFunc +} + +func (si *SqliteIndexer) SetRecomputeTipSetStateFunc(f RecomputeTipSetStateFunc) { + si.buildExecutedMessagesLoader(f) +} + +func (si *SqliteIndexer) buildExecutedMessagesLoader(rf RecomputeTipSetStateFunc) { + si.executedMessagesLoaderFunc = func(ctx context.Context, cs ChainStore, msgTs, rctTs *types.TipSet) ([]executedMessage, error) { + return loadExecutedMessages(ctx, cs, rf, msgTs, rctTs) + } +} + +func (si *SqliteIndexer) Close() error { + si.closeLk.Lock() + defer si.closeLk.Unlock() + if si.closed { + return nil + } + si.closed = true + + if si.db == nil { + return nil + } + si.cancel() + si.wg.Wait() + + if err := si.db.Close(); err != nil { + return xerrors.Errorf("failed to close db: %w", err) + } + return nil +} + +func (si *SqliteIndexer) initStatements() error { + stmtMapping := preparedStatementMapping(si.stmts) + for stmtPointer, query := range stmtMapping { + var err error + *stmtPointer, err = si.db.Prepare(query) + if err != nil { + return xerrors.Errorf("prepare statement [%s]: %w", query, err) + } + } + + return nil +} + +func (si *SqliteIndexer) IndexEthTxHash(ctx context.Context, txHash ethtypes.EthHash, msgCid cid.Cid) error { + if si.isClosed() { + return ErrClosed + } + + return withTx(ctx, si.db, func(tx *sql.Tx) error { + return si.indexEthTxHash(ctx, tx, txHash, msgCid) + }) +} + +func (si *SqliteIndexer) indexEthTxHash(ctx context.Context, tx *sql.Tx, txHash ethtypes.EthHash, msgCid cid.Cid) error { + insertEthTxHashStmt := tx.Stmt(si.stmts.insertEthTxHashStmt) + _, err := insertEthTxHashStmt.ExecContext(ctx, txHash.String(), msgCid.Bytes()) + if err != nil { + return xerrors.Errorf("failed to index eth tx hash: %w", err) + } + + return nil +} + +func (si *SqliteIndexer) IndexSignedMessage(ctx context.Context, msg *types.SignedMessage) error { + if msg.Signature.Type != crypto.SigTypeDelegated { + return nil + } + + if si.isClosed() { + return ErrClosed + } + + return withTx(ctx, si.db, func(tx *sql.Tx) error { + return si.indexSignedMessage(ctx, tx, msg) + }) +} + +func (si *SqliteIndexer) indexSignedMessage(ctx context.Context, tx *sql.Tx, msg *types.SignedMessage) error { + ethTx, err := ethtypes.EthTransactionFromSignedFilecoinMessage(msg) + if err != nil { + return xerrors.Errorf("failed to convert filecoin message to eth tx: %w", err) + } + + txHash, err := ethTx.TxHash() + if err != nil { + return xerrors.Errorf("failed to hash transaction: %w", err) + } + + return si.indexEthTxHash(ctx, tx, txHash, msg.Cid()) +} + +func (si *SqliteIndexer) Apply(ctx context.Context, from, to *types.TipSet) error { + if si.isClosed() { + return ErrClosed + } + + si.writerLk.Lock() + + // We're moving the chain ahead from the `from` tipset to the `to` tipset + // Height(to) > Height(from) + err := withTx(ctx, si.db, func(tx *sql.Tx) error { + if 
err := si.indexTipsetWithParentEvents(ctx, tx, from, to); err != nil { + return xerrors.Errorf("failed to index tipset: %w", err) + } + + return nil + }) + + if err != nil { + si.writerLk.Unlock() + return xerrors.Errorf("failed to apply tipset: %w", err) + } + si.writerLk.Unlock() + + si.notifyUpdateSubs() + + return nil +} + +func (si *SqliteIndexer) indexTipset(ctx context.Context, tx *sql.Tx, ts *types.TipSet) error { + tsKeyCidBytes, err := toTipsetKeyCidBytes(ts) + if err != nil { + return xerrors.Errorf("failed to compute tipset cid: %w", err) + } + + if restored, err := si.restoreTipsetIfExists(ctx, tx, tsKeyCidBytes); err != nil { + return xerrors.Errorf("failed to restore tipset: %w", err) + } else if restored { + return nil + } + + height := ts.Height() + + msgs, err := si.cs.MessagesForTipset(ctx, ts) + if err != nil { + return xerrors.Errorf("failed to get messages for tipset: %w", err) + } + + if len(msgs) == 0 { + insertTipsetMsgStmt := tx.Stmt(si.stmts.insertTipsetMessageStmt) + // If there are no messages, just insert the tipset and return + if _, err := insertTipsetMsgStmt.ExecContext(ctx, tsKeyCidBytes, height, 0, nil, -1); err != nil { + return xerrors.Errorf("failed to insert empty tipset: %w", err) + } + return nil + } + + for i, msg := range msgs { + insertTipsetMsgStmt := tx.Stmt(si.stmts.insertTipsetMessageStmt) + msg := msg + if _, err := insertTipsetMsgStmt.ExecContext(ctx, tsKeyCidBytes, height, 0, msg.Cid().Bytes(), i); err != nil { + return xerrors.Errorf("failed to insert tipset message: %w", err) + } + } + + for _, blk := range ts.Blocks() { + blk := blk + _, smsgs, err := si.cs.MessagesForBlock(ctx, blk) + if err != nil { + return xerrors.Errorf("failed to get messages for block: %w", err) + } + + for _, smsg := range smsgs { + smsg := smsg + if smsg.Signature.Type != crypto.SigTypeDelegated { + continue + } + if err := si.indexSignedMessage(ctx, tx, smsg); err != nil { + return xerrors.Errorf("failed to index eth tx hash: %w", err) + } + } + } + + return nil +} + +func (si *SqliteIndexer) indexTipsetWithParentEvents(ctx context.Context, tx *sql.Tx, parentTs *types.TipSet, currentTs *types.TipSet) error { + // Index the parent tipset if it doesn't exist yet. + // This is necessary to properly index events produced by executing + // messages included in the parent tipset by the current tipset (deferred execution). 
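+	// As a concrete walkthrough (illustrative heights): messages included in
+	// tipset T are executed while building tipset T+1, so their receipts and
+	// events only become available once T+1 arrives. When Apply(T, T+1) runs,
+	// the events recovered from T+1's receipts are therefore stored against T,
+	// their inclusion tipset.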
+	if err := si.indexTipset(ctx, tx, parentTs); err != nil {
+		return xerrors.Errorf("failed to index parent tipset: %w", err)
+	}
+	if err := si.indexTipset(ctx, tx, currentTs); err != nil {
+		return xerrors.Errorf("failed to index tipset: %w", err)
+	}
+
+	// Now index events
+	if err := si.indexEvents(ctx, tx, parentTs, currentTs); err != nil {
+		return xerrors.Errorf("failed to index events: %w", err)
+	}
+
+	return nil
+}
+
+func (si *SqliteIndexer) restoreTipsetIfExists(ctx context.Context, tx *sql.Tx, tsKeyCidBytes []byte) (bool, error) {
+	// Check if the tipset already exists
+	var exists bool
+	if err := tx.Stmt(si.stmts.hasTipsetStmt).QueryRowContext(ctx, tsKeyCidBytes).Scan(&exists); err != nil {
+		return false, xerrors.Errorf("failed to check if tipset exists: %w", err)
+	}
+	if exists {
+		if _, err := tx.Stmt(si.stmts.updateTipsetToNonRevertedStmt).ExecContext(ctx, tsKeyCidBytes); err != nil {
+			return false, xerrors.Errorf("failed to restore tipset: %w", err)
+		}
+		return true, nil
+	}
+	return false, nil
+}
+
+func (si *SqliteIndexer) Revert(ctx context.Context, from, to *types.TipSet) error {
+	if si.isClosed() {
+		return ErrClosed
+	}
+
+	// We're reverting the chain from the tipset at `from` to the tipset at `to`.
+	// Height(to) < Height(from)
+
+	revertTsKeyCid, err := toTipsetKeyCidBytes(from)
+	if err != nil {
+		return xerrors.Errorf("failed to get tipset key cid: %w", err)
+	}
+
+	// Because of deferred execution in Filecoin, events at tipset T are reverted when tipset T+1 is reverted.
+	// However, the tipset `T` itself is not reverted.
+	eventTsKeyCid, err := toTipsetKeyCidBytes(to)
+	if err != nil {
+		return xerrors.Errorf("failed to get tipset key cid: %w", err)
+	}
+
+	si.writerLk.Lock()
+
+	err = withTx(ctx, si.db, func(tx *sql.Tx) error {
+		// revert the `from` tipset
+		if _, err := tx.Stmt(si.stmts.updateTipsetToRevertedStmt).ExecContext(ctx, revertTsKeyCid); err != nil {
+			return xerrors.Errorf("failed to mark tipset %s as reverted: %w", revertTsKeyCid, err)
+		}
+
+		// index the `to` tipset -> it is idempotent
+		if err := si.indexTipset(ctx, tx, to); err != nil {
+			return xerrors.Errorf("failed to index tipset: %w", err)
+		}
+
+		// events are indexed against the message inclusion tipset, not the message execution tipset.
+		// So we need to revert the events for the message inclusion tipset i.e. `to` tipset.
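+		// For example (illustrative): when the chain reorgs away from tipset T+1,
+		// Revert(T+1, T) marks T+1's rows as reverted and also flags the events
+		// recorded against T, because those events were derived from receipts
+		// carried by the now-reverted T+1.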
+ if _, err := tx.Stmt(si.stmts.updateEventsToRevertedStmt).ExecContext(ctx, eventTsKeyCid); err != nil { + return xerrors.Errorf("failed to revert events for tipset %s: %w", eventTsKeyCid, err) + } + + return nil + }) + if err != nil { + si.writerLk.Unlock() + return xerrors.Errorf("failed during revert transaction: %w", err) + } + + si.writerLk.Unlock() + si.notifyUpdateSubs() + + return nil +} + +func (si *SqliteIndexer) isClosed() bool { + si.closeLk.RLock() + defer si.closeLk.RUnlock() + return si.closed +} + +func (si *SqliteIndexer) setExecutedMessagesLoaderFunc(f emsLoaderFunc) { + si.executedMessagesLoaderFunc = f +} diff --git a/chain/index/indexer_test.go b/chain/index/indexer_test.go new file mode 100644 index 00000000000..bc4a7a70c4f --- /dev/null +++ b/chain/index/indexer_test.go @@ -0,0 +1,56 @@ +package index + +import ( + "context" + "database/sql" + pseudo "math/rand" + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +func TestRestoreTipsetIfExists(t *testing.T) { + ctx := context.Background() + seed := time.Now().UnixNano() + t.Logf("seed: %d", seed) + rng := pseudo.New(pseudo.NewSource(seed)) + si, _, _ := setupWithHeadIndexed(t, 10, rng) + + tsKeyCid := randomCid(t, rng) + tsKeyCidBytes := tsKeyCid.Bytes() + + err := withTx(ctx, si.db, func(tx *sql.Tx) error { + // tipset does not exist + exists, err := si.restoreTipsetIfExists(ctx, tx, tsKeyCidBytes) + require.NoError(t, err) + require.False(t, exists) + + // insert reverted tipset + _, err = tx.Stmt(si.stmts.insertTipsetMessageStmt).Exec(tsKeyCidBytes, 1, 1, randomCid(t, rng).Bytes(), 0) + require.NoError(t, err) + + // tipset exists and is NOT reverted + exists, err = si.restoreTipsetIfExists(ctx, tx, tsKeyCidBytes) + require.NoError(t, err) + require.True(t, exists) + + // Verify that the tipset is not reverted + var reverted bool + err = tx.QueryRow("SELECT reverted FROM tipset_message WHERE tipset_key_cid = ?", tsKeyCidBytes).Scan(&reverted) + require.NoError(t, err) + require.False(t, reverted, "Tipset should not be reverted") + + return nil + }) + require.NoError(t, err) + + exists, err := si.isTipsetIndexed(ctx, tsKeyCidBytes) + require.NoError(t, err) + require.True(t, exists) + + fc := randomCid(t, rng) + exists, err = si.isTipsetIndexed(ctx, fc.Bytes()) + require.NoError(t, err) + require.False(t, exists) +} diff --git a/chain/index/interface.go b/chain/index/interface.go index f875a94bf79..e312648e6cf 100644 --- a/chain/index/interface.go +++ b/chain/index/interface.go @@ -6,10 +6,16 @@ import ( "github.com/ipfs/go-cid" + "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/store" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/chain/types/ethtypes" ) -var ErrNotFound = errors.New("message not found") +var ErrNotFound = errors.New("not found in index") var ErrClosed = errors.New("index closed") // MsgInfo is the Message metadata the index tracks. @@ -22,24 +28,60 @@ type MsgInfo struct { Epoch abi.ChainEpoch } -// MsgIndex is the interface to the message index -type MsgIndex interface { - // GetMsgInfo retrieves the message metadata through the index. - // The lookup is done using the onchain message Cid; that is the signed message Cid - // for SECP messages and unsigned message Cid for BLS messages. 
-	GetMsgInfo(ctx context.Context, m cid.Cid) (MsgInfo, error)
-	// Close closes the index
-	Close() error
+type CollectedEvent struct {
+	Entries     []types.EventEntry
+	EmitterAddr address.Address // address of emitter
+	EventIdx    int             // index of the event within the list of emitted events in a given tipset
+	Reverted    bool
+	Height      abi.ChainEpoch
+	TipSetKey   types.TipSetKey // tipset that contained the message
+	MsgIdx      int             // index of the message in the tipset
+	MsgCid      cid.Cid         // cid of message that produced event
+}
+
+type EventFilter struct {
+	MinHeight abi.ChainEpoch // minimum epoch to apply filter or -1 if no minimum
+	MaxHeight abi.ChainEpoch // maximum epoch to apply filter or -1 if no maximum
+	TipsetCid cid.Cid
+	Addresses []address.Address // list of actor addresses that are expected to emit the event
+
+	KeysWithCodec map[string][]types.ActorEventBlock // map of key names to a list of alternate values that may match
+	MaxResults    int                                // maximum number of results to collect, 0 is unlimited
 }
 
-type dummyMsgIndex struct{}
+type Indexer interface {
+	Start()
+	ReconcileWithChain(ctx context.Context, currHead *types.TipSet) error
+	IndexSignedMessage(ctx context.Context, msg *types.SignedMessage) error
+	IndexEthTxHash(ctx context.Context, txHash ethtypes.EthHash, c cid.Cid) error
+
+	SetActorToDelegatedAddresFunc(idToRobustAddrFunc ActorToDelegatedAddressFunc)
+	SetRecomputeTipSetStateFunc(f RecomputeTipSetStateFunc)
 
-func (dummyMsgIndex) GetMsgInfo(ctx context.Context, m cid.Cid) (MsgInfo, error) {
-	return MsgInfo{}, ErrNotFound
+	Apply(ctx context.Context, from, to *types.TipSet) error
+	Revert(ctx context.Context, from, to *types.TipSet) error
+
+	// Returns (cid.Undef, nil) if the message was not found
+	GetCidFromHash(ctx context.Context, hash ethtypes.EthHash) (cid.Cid, error)
+	// Returns (nil, ErrNotFound) if the message was not found
+	GetMsgInfo(ctx context.Context, m cid.Cid) (*MsgInfo, error)
+
+	GetEventsForFilter(ctx context.Context, f *EventFilter) ([]*CollectedEvent, error)
+
+	ChainValidateIndex(ctx context.Context, epoch abi.ChainEpoch, backfill bool) (*types.IndexValidation, error)
+
+	Close() error
 }
 
-func (dummyMsgIndex) Close() error {
-	return nil
+type ChainStore interface {
+	MessagesForTipset(ctx context.Context, ts *types.TipSet) ([]types.ChainMsg, error)
+	GetHeaviestTipSet() *types.TipSet
+	GetTipSetByCid(ctx context.Context, tsKeyCid cid.Cid) (*types.TipSet, error)
+	GetTipSetFromKey(ctx context.Context, tsk types.TipSetKey) (*types.TipSet, error)
+	MessagesForBlock(ctx context.Context, b *types.BlockHeader) ([]*types.Message, []*types.SignedMessage, error)
+	ActorStore(ctx context.Context) adt.Store
+	GetTipsetByHeight(ctx context.Context, h abi.ChainEpoch, ts *types.TipSet, prev bool) (*types.TipSet, error)
+	IsStoringEvents() bool
 }
 
-var DummyMsgIndex MsgIndex = dummyMsgIndex{}
+var _ ChainStore = (*store.ChainStore)(nil)
diff --git a/chain/index/msgindex.go b/chain/index/msgindex.go
deleted file mode 100644
index f5248f2782e..00000000000
--- a/chain/index/msgindex.go
+++ /dev/null
@@ -1,499 +0,0 @@
-package index
-
-import (
-	"context"
-	"database/sql"
-	"os"
-	"sync"
-	"time"
-
-	"github.com/ipfs/go-cid"
-	logging "github.com/ipfs/go-log/v2"
-	_ "github.com/mattn/go-sqlite3"
-	"golang.org/x/xerrors"
-
-	"github.com/filecoin-project/go-state-types/abi"
-
-	"github.com/filecoin-project/lotus/chain/store"
-	"github.com/filecoin-project/lotus/chain/types"
-	"github.com/filecoin-project/lotus/lib/sqlite"
-)
-
-const DefaultDbFilename = "msgindex.db"
-
-var log = logging.Logger("msgindex") - -var ddls = []string{ - `CREATE TABLE IF NOT EXISTS messages ( - cid VARCHAR(80) PRIMARY KEY ON CONFLICT REPLACE, - tipset_cid VARCHAR(80) NOT NULL, - epoch INTEGER NOT NULL - )`, - `CREATE INDEX IF NOT EXISTS tipset_cids ON messages (tipset_cid)`, -} - -const ( - // prepared stmts - dbqGetMessageInfo = "SELECT tipset_cid, epoch FROM messages WHERE cid = ?" - dbqInsertMessage = "INSERT INTO messages VALUES (?, ?, ?)" - dbqDeleteTipsetMessages = "DELETE FROM messages WHERE tipset_cid = ?" - // reconciliation - dbqCountMessages = "SELECT COUNT(*) FROM messages" - dbqMinEpoch = "SELECT MIN(epoch) FROM messages" - dbqCountTipsetMessages = "SELECT COUNT(*) FROM messages WHERE tipset_cid = ?" - dbqDeleteMessagesByEpoch = "DELETE FROM messages WHERE epoch >= ?" -) - -// coalescer configuration (TODO: use observer instead) -// these are exposed to make tests snappy -var ( - CoalesceMinDelay = time.Second - CoalesceMaxDelay = 15 * time.Second - CoalesceMergeInterval = time.Second -) - -// ChainStore interface; we could use store.ChainStore directly, -// but this simplifies unit testing. -type ChainStore interface { - SubscribeHeadChanges(f store.ReorgNotifee) - MessagesForTipset(ctx context.Context, ts *types.TipSet) ([]types.ChainMsg, error) - GetHeaviestTipSet() *types.TipSet - GetTipSetFromKey(ctx context.Context, tsk types.TipSetKey) (*types.TipSet, error) -} - -var _ ChainStore = (*store.ChainStore)(nil) - -type msgIndex struct { - cs ChainStore - - db *sql.DB - selectMsgStmt *sql.Stmt - insertMsgStmt *sql.Stmt - deleteTipSetStmt *sql.Stmt - - sema chan struct{} - mx sync.Mutex - pend []headChange - - cancel func() - workers sync.WaitGroup - closeLk sync.RWMutex - closed bool -} - -var _ MsgIndex = (*msgIndex)(nil) - -type headChange struct { - rev []*types.TipSet - app []*types.TipSet -} - -func NewMsgIndex(lctx context.Context, path string, cs ChainStore) (MsgIndex, error) { - db, exists, err := sqlite.Open(path) - if err != nil { - return nil, xerrors.Errorf("failed to setup message index db: %w", err) - } - - if err = sqlite.InitDb(lctx, "message index", db, ddls, []sqlite.MigrationFunc{}); err != nil { - _ = db.Close() - return nil, xerrors.Errorf("failed to init message index db: %w", err) - } - - // TODO we may consider populating the index when first creating the db - if exists { - if err := reconcileIndex(db, cs); err != nil { - return nil, xerrors.Errorf("error reconciling msgindex database: %w", err) - } - } - - ctx, cancel := context.WithCancel(lctx) - - msgIndex := &msgIndex{ - db: db, - cs: cs, - sema: make(chan struct{}, 1), - cancel: cancel, - } - - err = msgIndex.prepareStatements() - if err != nil { - if err := db.Close(); err != nil { - log.Errorf("error closing msgindex database: %s", err) - } - - return nil, xerrors.Errorf("error preparing msgindex database statements: %w", err) - } - - rnf := store.WrapHeadChangeCoalescer( - msgIndex.onHeadChange, - CoalesceMinDelay, - CoalesceMaxDelay, - CoalesceMergeInterval, - ) - cs.SubscribeHeadChanges(rnf) - - msgIndex.workers.Add(1) - go msgIndex.background(ctx) - - return msgIndex, nil -} - -func PopulateAfterSnapshot(lctx context.Context, path string, cs ChainStore) error { - // if a database already exists, we try to delete it and create a new one - if _, err := os.Stat(path); err == nil { - if err = os.Remove(path); err != nil { - return xerrors.Errorf("msgindex already exists at %s and can't be deleted", path) - } - } - - db, _, err := sqlite.Open(path) - if err != nil { - return 
xerrors.Errorf("failed to setup message index db: %w", err) - } - defer func() { - if err := db.Close(); err != nil { - log.Errorf("error closing msgindex database: %s", err) - } - }() - - if err := sqlite.InitDb(lctx, "message index", db, ddls, []sqlite.MigrationFunc{}); err != nil { - _ = db.Close() - return xerrors.Errorf("error creating msgindex database: %w", err) - } - - tx, err := db.Begin() - if err != nil { - return xerrors.Errorf("error when starting transaction: %w", err) - } - - rollback := func() { - if err := tx.Rollback(); err != nil { - log.Errorf("error in rollback: %s", err) - } - } - - insertStmt, err := tx.Prepare(dbqInsertMessage) - if err != nil { - rollback() - return xerrors.Errorf("error preparing insertStmt: %w", err) - } - - curTs := cs.GetHeaviestTipSet() - startHeight := curTs.Height() - for curTs != nil { - tscid, err := curTs.Key().Cid() - if err != nil { - rollback() - return xerrors.Errorf("error computing tipset cid: %w", err) - } - - tskey := tscid.String() - epoch := int64(curTs.Height()) - - msgs, err := cs.MessagesForTipset(lctx, curTs) - if err != nil { - log.Infof("stopping import after %d tipsets", startHeight-curTs.Height()) - break - } - - for _, msg := range msgs { - key := msg.Cid().String() - if _, err := insertStmt.Exec(key, tskey, epoch); err != nil { - rollback() - return xerrors.Errorf("error inserting message: %w", err) - } - } - - curTs, err = cs.GetTipSetFromKey(lctx, curTs.Parents()) - if err != nil { - rollback() - return xerrors.Errorf("error walking chain: %w", err) - } - } - - err = tx.Commit() - if err != nil { - return xerrors.Errorf("error committing transaction: %w", err) - } - - return nil -} - -func reconcileIndex(db *sql.DB, cs ChainStore) error { - // Invariant: after reconciliation, every tipset in the index is in the current chain; ie either - // the chain head or reachable by walking the chain. - // Algorithm: - // 1. Count messages in index; if none, trivially reconciled. - // TODO we may consider populating the index in that case - // 2. Find the minimum tipset in the index; this will mark the end of the reconciliation walk - // 3. Walk from current tipset until we find a tipset in the index. - // 4. Delete (revert!) all tipsets above the found tipset. - // 5. If the walk ends in the boundary epoch, then delete everything. - // - - row := db.QueryRow(dbqCountMessages) - - var result int64 - if err := row.Scan(&result); err != nil { - return xerrors.Errorf("error counting messages: %w", err) - } - - if result == 0 { - return nil - } - - row = db.QueryRow(dbqMinEpoch) - if err := row.Scan(&result); err != nil { - return xerrors.Errorf("error finding boundary epoch: %w", err) - } - - boundaryEpoch := abi.ChainEpoch(result) - - countMsgsStmt, err := db.Prepare(dbqCountTipsetMessages) - if err != nil { - return xerrors.Errorf("error preparing statement: %w", err) - } - - curTs := cs.GetHeaviestTipSet() - for curTs != nil && curTs.Height() >= boundaryEpoch { - tsCid, err := curTs.Key().Cid() - if err != nil { - return xerrors.Errorf("error computing tipset cid: %w", err) - } - - key := tsCid.String() - row = countMsgsStmt.QueryRow(key) - if err := row.Scan(&result); err != nil { - return xerrors.Errorf("error counting messages: %w", err) - } - - if result > 0 { - // found it! 
- boundaryEpoch = curTs.Height() + 1 - break - } - - // walk up - parents := curTs.Parents() - curTs, err = cs.GetTipSetFromKey(context.TODO(), parents) - if err != nil { - return xerrors.Errorf("error walking chain: %w", err) - } - } - - // delete everything above the minEpoch - if _, err = db.Exec(dbqDeleteMessagesByEpoch, int64(boundaryEpoch)); err != nil { - return xerrors.Errorf("error deleting stale reorged out message: %w", err) - } - - return nil -} - -func (x *msgIndex) prepareStatements() error { - stmt, err := x.db.Prepare(dbqGetMessageInfo) - if err != nil { - return xerrors.Errorf("prepare selectMsgStmt: %w", err) - } - x.selectMsgStmt = stmt - - stmt, err = x.db.Prepare(dbqInsertMessage) - if err != nil { - return xerrors.Errorf("prepare insertMsgStmt: %w", err) - } - x.insertMsgStmt = stmt - - stmt, err = x.db.Prepare(dbqDeleteTipsetMessages) - if err != nil { - return xerrors.Errorf("prepare deleteTipSetStmt: %w", err) - } - x.deleteTipSetStmt = stmt - - return nil -} - -// head change notifee -func (x *msgIndex) onHeadChange(rev, app []*types.TipSet) error { - x.closeLk.RLock() - defer x.closeLk.RUnlock() - - if x.closed { - return nil - } - - // do it in the background to avoid blocking head change processing - x.mx.Lock() - x.pend = append(x.pend, headChange{rev: rev, app: app}) - pendLen := len(x.pend) - x.mx.Unlock() - - // complain loudly if this is building backlog - if pendLen > 10 { - log.Warnf("message index head change processing is building backlog: %d pending head changes", pendLen) - } - - select { - case x.sema <- struct{}{}: - default: - } - - return nil -} - -func (x *msgIndex) background(ctx context.Context) { - defer x.workers.Done() - - for { - select { - case <-x.sema: - err := x.processHeadChanges(ctx) - if err != nil { - // we can't rely on an inconsistent index, so shut it down. 
- log.Errorf("error processing head change notifications: %s; shutting down message index", err) - if err2 := x.Close(); err2 != nil { - log.Errorf("error shutting down index: %s", err2) - } - } - - case <-ctx.Done(): - return - } - } -} - -func (x *msgIndex) processHeadChanges(ctx context.Context) error { - x.mx.Lock() - pend := x.pend - x.pend = nil - x.mx.Unlock() - - tx, err := x.db.Begin() - if err != nil { - return xerrors.Errorf("error creating transaction: %w", err) - } - - for _, hc := range pend { - for _, ts := range hc.rev { - if err := x.doRevert(ctx, tx, ts); err != nil { - if err2 := tx.Rollback(); err2 != nil { - log.Errorf("error rolling back transaction: %s", err2) - } - return xerrors.Errorf("error reverting %s: %w", ts, err) - } - } - - for _, ts := range hc.app { - if err := x.doApply(ctx, tx, ts); err != nil { - if err2 := tx.Rollback(); err2 != nil { - log.Errorf("error rolling back transaction: %s", err2) - } - return xerrors.Errorf("error applying %s: %w", ts, err) - } - } - } - - return tx.Commit() -} - -func (x *msgIndex) doRevert(ctx context.Context, tx *sql.Tx, ts *types.TipSet) error { - tskey, err := ts.Key().Cid() - if err != nil { - return xerrors.Errorf("error computing tipset cid: %w", err) - } - - key := tskey.String() - _, err = tx.Stmt(x.deleteTipSetStmt).Exec(key) - return err -} - -func (x *msgIndex) doApply(ctx context.Context, tx *sql.Tx, ts *types.TipSet) error { - tscid, err := ts.Key().Cid() - if err != nil { - return xerrors.Errorf("error computing tipset cid: %w", err) - } - - tskey := tscid.String() - epoch := int64(ts.Height()) - - msgs, err := x.cs.MessagesForTipset(ctx, ts) - if err != nil { - return xerrors.Errorf("error retrieving messages for tipset %s: %w", ts, err) - } - - insertStmt := tx.Stmt(x.insertMsgStmt) - for _, msg := range msgs { - key := msg.Cid().String() - if _, err := insertStmt.Exec(key, tskey, epoch); err != nil { - return xerrors.Errorf("error inserting message: %w", err) - } - } - - return nil -} - -// interface -func (x *msgIndex) GetMsgInfo(ctx context.Context, m cid.Cid) (MsgInfo, error) { - x.closeLk.RLock() - defer x.closeLk.RUnlock() - - if x.closed { - return MsgInfo{}, ErrClosed - } - - var ( - tipset string - epoch int64 - ) - - key := m.String() - row := x.selectMsgStmt.QueryRow(key) - err := row.Scan(&tipset, &epoch) - switch { - case err == sql.ErrNoRows: - return MsgInfo{}, ErrNotFound - - case err != nil: - return MsgInfo{}, xerrors.Errorf("error querying msgindex database: %w", err) - } - - tipsetCid, err := cid.Decode(tipset) - if err != nil { - return MsgInfo{}, xerrors.Errorf("error decoding tipset cid: %w", err) - } - - return MsgInfo{ - Message: m, - TipSet: tipsetCid, - Epoch: abi.ChainEpoch(epoch), - }, nil -} - -func (x *msgIndex) Close() error { - x.closeLk.Lock() - defer x.closeLk.Unlock() - - if x.closed { - return nil - } - - x.closed = true - - x.cancel() - x.workers.Wait() - - return x.db.Close() -} - -// informal apis for itests; not exposed in the main interface -func (x *msgIndex) CountMessages() (int64, error) { - x.closeLk.RLock() - defer x.closeLk.RUnlock() - - if x.closed { - return 0, ErrClosed - } - - var result int64 - row := x.db.QueryRow(dbqCountMessages) - err := row.Scan(&result) - return result, err -} diff --git a/chain/index/msgindex_test.go b/chain/index/msgindex_test.go deleted file mode 100644 index 2cf707b0fed..00000000000 --- a/chain/index/msgindex_test.go +++ /dev/null @@ -1,307 +0,0 @@ -package index - -import ( - "context" - "errors" - "math/rand" - "os" - 
"testing" - "time" - - blocks "github.com/ipfs/go-block-format" - "github.com/ipfs/go-cid" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-address" - - "github.com/filecoin-project/lotus/chain/store" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/chain/types/mock" -) - -func TestBasicMsgIndex(t *testing.T) { - // the most basic of tests: - // 1. Create an index with mock chain store - // 2. Advance the chain for a few tipsets - // 3. Verify that the index contains all messages with the correct tipset/epoch - cs := newMockChainStore() - cs.genesis() - - tmp := t.TempDir() - t.Cleanup(func() { _ = os.RemoveAll(tmp) }) - - msgIndex, err := NewMsgIndex(context.Background(), tmp+"/msgindex.db", cs) - require.NoError(t, err) - - defer msgIndex.Close() //nolint - - for i := 0; i < 10; i++ { - t.Logf("advance to epoch %d", i+1) - err := cs.advance() - require.NoError(t, err) - } - - waitForCoalescerAfterLastEvent() - - t.Log("verifying index") - verifyIndex(t, cs, msgIndex) -} - -func TestReorgMsgIndex(t *testing.T) { - // slightly more nuanced test that includes reorgs - // 1. Create an index with mock chain store - // 2. Advance/Reorg the chain for a few tipsets - // 3. Verify that the index contains all messages with the correct tipset/epoch - cs := newMockChainStore() - cs.genesis() - - tmp := t.TempDir() - t.Cleanup(func() { _ = os.RemoveAll(tmp) }) - - msgIndex, err := NewMsgIndex(context.Background(), tmp+"/msgindex.db", cs) - require.NoError(t, err) - - defer msgIndex.Close() //nolint - - for i := 0; i < 10; i++ { - t.Logf("advance to epoch %d", i+1) - err := cs.advance() - require.NoError(t, err) - } - - waitForCoalescerAfterLastEvent() - - // a simple reorg - t.Log("doing reorg") - reorgme := cs.curTs - reorgmeParent, err := cs.GetTipSetFromKey(context.Background(), reorgme.Parents()) - require.NoError(t, err) - cs.setHead(reorgmeParent) - reorgmeChild := cs.makeBlk() - err = cs.reorg([]*types.TipSet{reorgme}, []*types.TipSet{reorgmeChild}) - require.NoError(t, err) - - waitForCoalescerAfterLastEvent() - - t.Log("verifying index") - verifyIndex(t, cs, msgIndex) - - t.Log("verifying that reorged messages are not present") - verifyMissing(t, cs, msgIndex, reorgme) -} - -func TestReconcileMsgIndex(t *testing.T) { - // test that exercises the reconciliation code paths - // 1. Create and populate a basic msgindex, similar to TestBasicMsgIndex. - // 2. Close it - // 3. Reorg the mock chain store - // 4. Reopen the index to trigger reconciliation - // 5. Enxure that only the stable messages remain. 
- cs := newMockChainStore() - cs.genesis() - - tmp := t.TempDir() - t.Cleanup(func() { _ = os.RemoveAll(tmp) }) - - msgIndex, err := NewMsgIndex(context.Background(), tmp+"/msgindex.db", cs) - require.NoError(t, err) - - for i := 0; i < 10; i++ { - t.Logf("advance to epoch %d", i+1) - err := cs.advance() - require.NoError(t, err) - } - - waitForCoalescerAfterLastEvent() - - // Close it and reorg - err = msgIndex.Close() - require.NoError(t, err) - cs.notify = nil - - // a simple reorg - t.Log("doing reorg") - reorgme := cs.curTs - reorgmeParent, err := cs.GetTipSetFromKey(context.Background(), reorgme.Parents()) - require.NoError(t, err) - cs.setHead(reorgmeParent) - reorgmeChild := cs.makeBlk() - err = cs.reorg([]*types.TipSet{reorgme}, []*types.TipSet{reorgmeChild}) - require.NoError(t, err) - - // reopen to reconcile - msgIndex, err = NewMsgIndex(context.Background(), tmp+"/msgindex.db", cs) - require.NoError(t, err) - - defer msgIndex.Close() //nolint - - t.Log("verifying index") - // need to step one up because the last tipset is not known by the index - cs.setHead(reorgmeParent) - verifyIndex(t, cs, msgIndex) - - t.Log("verifying that reorged and unknown messages are not present") - verifyMissing(t, cs, msgIndex, reorgme, reorgmeChild) -} - -func verifyIndex(t *testing.T, cs *mockChainStore, msgIndex MsgIndex) { - for ts := cs.curTs; ts.Height() > 0; { - t.Logf("verify at height %d", ts.Height()) - blks := ts.Blocks() - if len(blks) == 0 { - break - } - - tsCid, err := ts.Key().Cid() - require.NoError(t, err) - - msgs, err := cs.MessagesForTipset(context.Background(), ts) - require.NoError(t, err) - for _, m := range msgs { - minfo, err := msgIndex.GetMsgInfo(context.Background(), m.Cid()) - require.NoError(t, err) - require.Equal(t, tsCid, minfo.TipSet) - require.Equal(t, ts.Height(), minfo.Epoch) - } - - parents := ts.Parents() - ts, err = cs.GetTipSetFromKey(context.Background(), parents) - require.NoError(t, err) - } -} - -func verifyMissing(t *testing.T, cs *mockChainStore, msgIndex MsgIndex, missing ...*types.TipSet) { - for _, ts := range missing { - msgs, err := cs.MessagesForTipset(context.Background(), ts) - require.NoError(t, err) - for _, m := range msgs { - _, err := msgIndex.GetMsgInfo(context.Background(), m.Cid()) - require.Equal(t, ErrNotFound, err) - } - } -} - -type mockChainStore struct { - notify store.ReorgNotifee - - curTs *types.TipSet - tipsets map[types.TipSetKey]*types.TipSet - msgs map[types.TipSetKey][]types.ChainMsg - - nonce uint64 -} - -var _ ChainStore = (*mockChainStore)(nil) - -var systemAddr address.Address -var rng *rand.Rand - -func init() { - systemAddr, _ = address.NewIDAddress(0) - rng = rand.New(rand.NewSource(314159)) - - // adjust those to make tests snappy - CoalesceMinDelay = 100 * time.Millisecond - CoalesceMaxDelay = time.Second - CoalesceMergeInterval = 100 * time.Millisecond -} - -func newMockChainStore() *mockChainStore { - return &mockChainStore{ - tipsets: make(map[types.TipSetKey]*types.TipSet), - msgs: make(map[types.TipSetKey][]types.ChainMsg), - } -} - -func (cs *mockChainStore) genesis() { - genBlock := mock.MkBlock(nil, 0, 0) - genTs := mock.TipSet(genBlock) - cs.msgs[genTs.Key()] = nil - cs.setHead(genTs) -} - -func (cs *mockChainStore) setHead(ts *types.TipSet) { - cs.curTs = ts - cs.tipsets[ts.Key()] = ts -} - -func (cs *mockChainStore) advance() error { - ts := cs.makeBlk() - return cs.reorg(nil, []*types.TipSet{ts}) -} - -func (cs *mockChainStore) reorg(rev, app []*types.TipSet) error { - for _, ts := range rev { - 
parents := ts.Parents() - cs.curTs = cs.tipsets[parents] - } - - for _, ts := range app { - cs.tipsets[ts.Key()] = ts - cs.curTs = ts - } - - if cs.notify != nil { - return cs.notify(rev, app) - } - - return nil -} - -func (cs *mockChainStore) makeBlk() *types.TipSet { - height := cs.curTs.Height() + 1 - - blk := mock.MkBlock(cs.curTs, uint64(height), uint64(height)) - blk.Messages = cs.makeGarbageCid() - - ts := mock.TipSet(blk) - msg1 := cs.makeMsg() - msg2 := cs.makeMsg() - cs.msgs[ts.Key()] = []types.ChainMsg{msg1, msg2} - - return ts -} - -func (cs *mockChainStore) makeMsg() *types.Message { - nonce := cs.nonce - cs.nonce++ - return &types.Message{To: systemAddr, From: systemAddr, Nonce: nonce} -} - -func (cs *mockChainStore) makeGarbageCid() cid.Cid { - garbage := blocks.NewBlock([]byte{byte(rng.Intn(256)), byte(rng.Intn(256)), byte(rng.Intn(256))}) - return garbage.Cid() -} - -func (cs *mockChainStore) SubscribeHeadChanges(f store.ReorgNotifee) { - cs.notify = f -} - -func (cs *mockChainStore) MessagesForTipset(ctx context.Context, ts *types.TipSet) ([]types.ChainMsg, error) { - msgs, ok := cs.msgs[ts.Key()] - if !ok { - return nil, errors.New("unknown tipset") - } - - return msgs, nil -} - -func (cs *mockChainStore) GetHeaviestTipSet() *types.TipSet { - return cs.curTs -} - -func (cs *mockChainStore) GetTipSetFromKey(ctx context.Context, tsk types.TipSetKey) (*types.TipSet, error) { - ts, ok := cs.tipsets[tsk] - if !ok { - return nil, errors.New("unknown tipset") - } - return ts, nil -} - -func waitForCoalescerAfterLastEvent() { - // It can take up to CoalesceMinDelay for the coalescer timer to fire after the last event. - // When the timer fires, it can wait up to CoalesceMinDelay again for more events. - // Therefore the total wait is 2 * CoalesceMinDelay. - // Then we wait another second for the listener (the index) to actually process events. 
- time.Sleep(2*CoalesceMinDelay + time.Second) -} diff --git a/chain/index/pub_sub.go b/chain/index/pub_sub.go new file mode 100644 index 00000000000..a8dd8d05b7b --- /dev/null +++ b/chain/index/pub_sub.go @@ -0,0 +1,59 @@ +package index + +import "context" + +type updateSub struct { + ctx context.Context + cancel context.CancelFunc + + ch chan chainIndexUpdated +} + +type chainIndexUpdated struct{} + +func (si *SqliteIndexer) subscribeUpdates() (chan chainIndexUpdated, func()) { + subCtx, subCancel := context.WithCancel(si.ctx) + ch := make(chan chainIndexUpdated) + + si.mu.Lock() + subId := si.subIdCounter + si.subIdCounter++ + si.updateSubs[subId] = &updateSub{ + ctx: subCtx, + cancel: subCancel, + ch: ch, + } + si.mu.Unlock() + + unSubscribeF := func() { + si.mu.Lock() + if sub, ok := si.updateSubs[subId]; ok { + sub.cancel() + delete(si.updateSubs, subId) + } + si.mu.Unlock() + } + + return ch, unSubscribeF +} + +func (si *SqliteIndexer) notifyUpdateSubs() { + si.mu.Lock() + tSubs := make([]*updateSub, 0, len(si.updateSubs)) + for _, tSub := range si.updateSubs { + tSub := tSub + tSubs = append(tSubs, tSub) + } + si.mu.Unlock() + + for _, tSub := range tSubs { + tSub := tSub + select { + case tSub.ch <- chainIndexUpdated{}: + case <-tSub.ctx.Done(): + // subscription was cancelled, ignore + case <-si.ctx.Done(): + return + } + } +} diff --git a/chain/index/read.go b/chain/index/read.go new file mode 100644 index 00000000000..d7c00bd35cf --- /dev/null +++ b/chain/index/read.go @@ -0,0 +1,133 @@ +package index + +import ( + "context" + "database/sql" + "time" + + "github.com/ipfs/go-cid" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/lotus/chain/types/ethtypes" +) + +const headIndexedWaitTimeout = 5 * time.Second + +func (si *SqliteIndexer) GetCidFromHash(ctx context.Context, txHash ethtypes.EthHash) (cid.Cid, error) { + if si.isClosed() { + return cid.Undef, ErrClosed + } + + var msgCidBytes []byte + + if err := si.readWithHeadIndexWait(ctx, func() error { + return si.queryMsgCidFromEthHash(ctx, txHash, &msgCidBytes) + }); err != nil { + return cid.Undef, err + } + + msgCid, err := cid.Cast(msgCidBytes) + if err != nil { + return cid.Undef, xerrors.Errorf("failed to cast message CID: %w", err) + } + + return msgCid, nil +} + +func (si *SqliteIndexer) queryMsgCidFromEthHash(ctx context.Context, txHash ethtypes.EthHash, msgCidBytes *[]byte) error { + return si.stmts.getMsgCidFromEthHashStmt.QueryRowContext(ctx, txHash.String()).Scan(msgCidBytes) +} + +func (si *SqliteIndexer) GetMsgInfo(ctx context.Context, messageCid cid.Cid) (*MsgInfo, error) { + if si.isClosed() { + return nil, ErrClosed + } + + var tipsetKeyCidBytes []byte + var height int64 + + if err := si.queryMsgInfo(ctx, messageCid, &tipsetKeyCidBytes, &height); err != nil { + return nil, err + } + + tipsetKey, err := cid.Cast(tipsetKeyCidBytes) + if err != nil { + return nil, xerrors.Errorf("failed to cast tipset key cid: %w", err) + } + + return &MsgInfo{ + Message: messageCid, + TipSet: tipsetKey, + Epoch: abi.ChainEpoch(height), + }, nil +} + +// This function attempts to read data using the provided readFunc. +// If the initial read returns no rows, it waits for the head to be indexed +// and tries again. This ensures that the most up-to-date data is checked. +// If no data is found after the second attempt, it returns ErrNotFound. 
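+//
+// A caller wraps its single-row query in this helper; GetCidFromHash above, for
+// example, routes its lookup through it so that a transaction that has only just
+// landed in the not-yet-indexed head is still found on the retry, roughly:
+//
+//	err := si.readWithHeadIndexWait(ctx, func() error {
+//		return si.queryMsgCidFromEthHash(ctx, txHash, &msgCidBytes)
+//	})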
+func (si *SqliteIndexer) readWithHeadIndexWait(ctx context.Context, readFunc func() error) error { + err := readFunc() + if err == sql.ErrNoRows { + // not found, but may be in latest head, so wait for it and check again + if err := si.waitTillHeadIndexed(ctx); err != nil { + return xerrors.Errorf("failed while waiting for head to be indexed: %w", err) + } + err = readFunc() + } + + if err != nil { + if err == sql.ErrNoRows { + return ErrNotFound + } + return xerrors.Errorf("failed to read data from index: %w", err) + } + + return nil +} + +func (si *SqliteIndexer) queryMsgInfo(ctx context.Context, messageCid cid.Cid, tipsetKeyCidBytes *[]byte, height *int64) error { + return si.stmts.getNonRevertedMsgInfoStmt.QueryRowContext(ctx, messageCid.Bytes()).Scan(tipsetKeyCidBytes, height) +} + +func (si *SqliteIndexer) waitTillHeadIndexed(ctx context.Context) error { + ctx, cancel := context.WithTimeout(ctx, headIndexedWaitTimeout) + defer cancel() + + head := si.cs.GetHeaviestTipSet() + headTsKeyCidBytes, err := toTipsetKeyCidBytes(head) + if err != nil { + return xerrors.Errorf("failed to get tipset key cid: %w", err) + } + + // wait till it is indexed + subCh, unsubFn := si.subscribeUpdates() + defer unsubFn() + + for ctx.Err() == nil { + exists, err := si.isTipsetIndexed(ctx, headTsKeyCidBytes) + if err != nil { + return xerrors.Errorf("failed to check if tipset exists: %w", err) + } else if exists { + return nil + } + + select { + case <-subCh: + // Continue to next iteration to check again + case <-ctx.Done(): + return ctx.Err() + } + } + return ctx.Err() +} + +func (si *SqliteIndexer) isTipsetIndexed(ctx context.Context, tsKeyCidBytes []byte) (bool, error) { + var exists bool + if err := si.stmts.hasTipsetStmt.QueryRowContext(ctx, tsKeyCidBytes).Scan(&exists); err != nil { + return false, xerrors.Errorf("failed to check if tipset is indexed: %w", err) + } + return exists, nil +} diff --git a/chain/index/read_test.go b/chain/index/read_test.go new file mode 100644 index 00000000000..ebb6d1acf41 --- /dev/null +++ b/chain/index/read_test.go @@ -0,0 +1,292 @@ +package index + +import ( + "context" + "errors" + pseudo "math/rand" + "sync" + "testing" + "time" + + "github.com/ipfs/go-cid" + mh "github.com/multiformats/go-multihash" + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/crypto" + + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/chain/types/ethtypes" +) + +func TestGetCidFromHash(t *testing.T) { + seed := time.Now().UnixNano() + t.Logf("seed: %d", seed) + rng := pseudo.New(pseudo.NewSource(seed)) + ctx := context.Background() + + s, _, _ := setupWithHeadIndexed(t, 10, rng) + + ethTxHash := ethtypes.EthHash([32]byte{1}) + msgCid := randomCid(t, rng) + + // read from empty db -> ErrNotFound + c, err := s.GetCidFromHash(ctx, ethTxHash) + require.Error(t, err) + require.True(t, errors.Is(err, ErrNotFound)) + require.EqualValues(t, cid.Undef, c) + + // insert and read + insertEthTxHash(t, s, ethTxHash, msgCid) + c, err = s.GetCidFromHash(ctx, ethTxHash) + require.NoError(t, err) + require.EqualValues(t, msgCid, c) + + // look up some other hash -> fails + c, err = s.GetCidFromHash(ctx, ethtypes.EthHash([32]byte{2})) + require.Error(t, err) + require.True(t, errors.Is(err, ErrNotFound)) + require.EqualValues(t, cid.Undef, c) +} + +func TestGetMsgInfo(t *testing.T) { + 
ctx := context.Background() + seed := time.Now().UnixNano() + t.Logf("seed: %d", seed) + rng := pseudo.New(pseudo.NewSource(seed)) + s, _, _ := setupWithHeadIndexed(t, 10, rng) + msgCid := randomCid(t, rng) + msgCidBytes := msgCid.Bytes() + tsKeyCid := randomCid(t, rng) + + insertTipsetMessage(t, s, tipsetMessage{ + tipsetKeyCid: tsKeyCid.Bytes(), + height: uint64(1), + reverted: false, + messageCid: msgCidBytes, + messageIndex: 1, + }) + + mi, err := s.GetMsgInfo(ctx, msgCid) + require.NoError(t, err) + require.Equal(t, msgCid, mi.Message) + require.Equal(t, tsKeyCid, mi.TipSet) + require.Equal(t, abi.ChainEpoch(1), mi.Epoch) +} + +func setupWithHeadIndexed(t *testing.T, headHeight abi.ChainEpoch, rng *pseudo.Rand) (*SqliteIndexer, *types.TipSet, *dummyChainStore) { + head := fakeTipSet(t, rng, headHeight, []cid.Cid{}) + d := newDummyChainStore() + d.SetHeaviestTipSet(head) + + s, err := NewSqliteIndexer(":memory:", d, 0, false, 0) + require.NoError(t, err) + insertHead(t, s, head, headHeight) + + return s, head, d +} + +func insertHead(t *testing.T, s *SqliteIndexer, head *types.TipSet, height abi.ChainEpoch) { + headKeyBytes, err := toTipsetKeyCidBytes(head) + require.NoError(t, err) + + insertTipsetMessage(t, s, tipsetMessage{ + tipsetKeyCid: headKeyBytes, + height: uint64(height), + reverted: false, + messageCid: nil, + messageIndex: -1, + }) +} + +func insertEthTxHash(t *testing.T, s *SqliteIndexer, ethTxHash ethtypes.EthHash, messageCid cid.Cid) { + msgCidBytes := messageCid.Bytes() + + res, err := s.stmts.insertEthTxHashStmt.Exec(ethTxHash.String(), msgCidBytes) + require.NoError(t, err) + rowsAffected, err := res.RowsAffected() + require.NoError(t, err) + require.Equal(t, int64(1), rowsAffected) +} + +type dummyChainStore struct { + mu sync.RWMutex + + heightToTipSet map[abi.ChainEpoch]*types.TipSet + messagesForTipset map[*types.TipSet][]types.ChainMsg + keyToTipSet map[types.TipSetKey]*types.TipSet + tipsetCidToTipset map[cid.Cid]*types.TipSet + + heaviestTipSet *types.TipSet + messagesForBlock func(ctx context.Context, b *types.BlockHeader) ([]*types.Message, []*types.SignedMessage, error) + actorStore func(ctx context.Context) adt.Store +} + +func newDummyChainStore() *dummyChainStore { + return &dummyChainStore{ + heightToTipSet: make(map[abi.ChainEpoch]*types.TipSet), + messagesForTipset: make(map[*types.TipSet][]types.ChainMsg), + keyToTipSet: make(map[types.TipSetKey]*types.TipSet), + tipsetCidToTipset: make(map[cid.Cid]*types.TipSet), + } +} + +func (d *dummyChainStore) MessagesForTipset(ctx context.Context, ts *types.TipSet) ([]types.ChainMsg, error) { + d.mu.RLock() + defer d.mu.RUnlock() + + msgs, ok := d.messagesForTipset[ts] + if !ok { + return nil, nil + } + return msgs, nil +} + +func (d *dummyChainStore) GetHeaviestTipSet() *types.TipSet { + d.mu.RLock() + defer d.mu.RUnlock() + return d.heaviestTipSet +} + +func (d *dummyChainStore) GetTipSetByCid(_ context.Context, tsKeyCid cid.Cid) (*types.TipSet, error) { + d.mu.RLock() + defer d.mu.RUnlock() + if _, ok := d.tipsetCidToTipset[tsKeyCid]; !ok { + return nil, errors.New("not found") + } + return d.tipsetCidToTipset[tsKeyCid], nil +} + +func (d *dummyChainStore) SetTipSetByCid(t *testing.T, ts *types.TipSet) { + d.mu.Lock() + defer d.mu.Unlock() + + tsKeyCid, err := ts.Key().Cid() + require.NoError(t, err) + d.tipsetCidToTipset[tsKeyCid] = ts +} + +func (d *dummyChainStore) GetTipSetFromKey(ctx context.Context, tsk types.TipSetKey) (*types.TipSet, error) { + d.mu.RLock() + defer d.mu.RUnlock() + + return 
d.keyToTipSet[tsk], nil +} + +func (d *dummyChainStore) MessagesForBlock(ctx context.Context, b *types.BlockHeader) ([]*types.Message, []*types.SignedMessage, error) { + d.mu.RLock() + defer d.mu.RUnlock() + if d.messagesForBlock != nil { + return d.messagesForBlock(ctx, b) + } + return nil, nil, nil +} + +func (d *dummyChainStore) ActorStore(ctx context.Context) adt.Store { + d.mu.RLock() + defer d.mu.RUnlock() + if d.actorStore != nil { + return d.actorStore(ctx) + } + return nil +} + +func (d *dummyChainStore) GetTipsetByHeight(ctx context.Context, h abi.ChainEpoch, _ *types.TipSet, prev bool) (*types.TipSet, error) { + d.mu.RLock() + defer d.mu.RUnlock() + + ts, ok := d.heightToTipSet[h] + if !ok { + return nil, errors.New("tipset not found") + } + return ts, nil +} + +func (d *dummyChainStore) IsStoringEvents() bool { + return true +} + +// Setter methods to configure the mock + +func (d *dummyChainStore) SetMessagesForTipset(ts *types.TipSet, msgs []types.ChainMsg) { + d.mu.Lock() + defer d.mu.Unlock() + d.messagesForTipset[ts] = msgs +} + +func (d *dummyChainStore) SetHeaviestTipSet(ts *types.TipSet) { + d.mu.Lock() + defer d.mu.Unlock() + d.heaviestTipSet = ts +} + +func (d *dummyChainStore) SetTipsetByHeightAndKey(h abi.ChainEpoch, tsk types.TipSetKey, ts *types.TipSet) { + d.mu.Lock() + defer d.mu.Unlock() + + d.heightToTipSet[h] = ts + d.keyToTipSet[tsk] = ts +} + +func randomIDAddr(tb testing.TB, rng *pseudo.Rand) address.Address { + tb.Helper() + addr, err := address.NewIDAddress(uint64(rng.Int63())) + require.NoError(tb, err) + return addr +} + +func randomCid(tb testing.TB, rng *pseudo.Rand) cid.Cid { + tb.Helper() + cb := cid.V1Builder{Codec: cid.Raw, MhType: mh.IDENTITY} + c, err := cb.Sum(randomBytes(10, rng)) + require.NoError(tb, err) + return c +} + +func randomBytes(n int, rng *pseudo.Rand) []byte { + buf := make([]byte, n) + rng.Read(buf) + return buf +} + +func fakeTipSet(tb testing.TB, rng *pseudo.Rand, h abi.ChainEpoch, parents []cid.Cid) *types.TipSet { + tb.Helper() + ts, err := types.NewTipSet([]*types.BlockHeader{ + { + Height: h, + Miner: randomIDAddr(tb, rng), + + Parents: parents, + + Ticket: &types.Ticket{VRFProof: []byte{byte(h % 2)}}, + + ParentStateRoot: randomCid(tb, rng), + Messages: randomCid(tb, rng), + ParentMessageReceipts: randomCid(tb, rng), + + BlockSig: &crypto.Signature{Type: crypto.SigTypeBLS}, + BLSAggregate: &crypto.Signature{Type: crypto.SigTypeBLS}, + }, + { + Height: h, + Miner: randomIDAddr(tb, rng), + + Parents: parents, + + Ticket: &types.Ticket{VRFProof: []byte{byte((h + 1) % 2)}}, + + ParentStateRoot: randomCid(tb, rng), + Messages: randomCid(tb, rng), + ParentMessageReceipts: randomCid(tb, rng), + + BlockSig: &crypto.Signature{Type: crypto.SigTypeBLS}, + BLSAggregate: &crypto.Signature{Type: crypto.SigTypeBLS}, + }, + }) + + require.NoError(tb, err) + + return ts +} diff --git a/chain/index/reconcile.go b/chain/index/reconcile.go new file mode 100644 index 00000000000..72a1e6ecaa6 --- /dev/null +++ b/chain/index/reconcile.go @@ -0,0 +1,276 @@ +package index + +import ( + "context" + "database/sql" + + ipld "github.com/ipfs/go-ipld-format" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/lotus/chain/actors/builtin" + "github.com/filecoin-project/lotus/chain/types" +) + +// ReconcileWithChain ensures that the index is consistent with the current chain state. +// It performs the following steps: +// 1. Checks if the index is empty. 
If so, it returns immediately as there's nothing to reconcile.
+// 2. Finds the lowest non-reverted height in the index.
+// 3. Walks backwards from the current chain head until it finds a tipset that exists
+// in the index and is not marked as reverted.
+// 4. Sets a boundary epoch just above this found tipset.
+// 5. Marks all tipsets above this boundary as reverted, ensuring consistency with the current chain state.
+// 6. Applies all missing un-indexed tipsets, starting from the last tipset the index and the canonical
+// chain have in common, up to the current chain head.
+//
+// This function is crucial for maintaining index integrity, especially after chain reorgs.
+// It ensures that the index accurately reflects the current state of the blockchain.
+func (si *SqliteIndexer) ReconcileWithChain(ctx context.Context, head *types.TipSet) error {
+	if !si.cs.IsStoringEvents() {
+		log.Warn("chain indexer is not storing events during reconciliation; please ensure this is intentional")
+	}
+
+	if si.isClosed() {
+		return ErrClosed
+	}
+
+	if head == nil {
+		return nil
+	}
+
+	return withTx(ctx, si.db, func(tx *sql.Tx) error {
+		var isIndexEmpty bool
+		err := tx.StmtContext(ctx, si.stmts.isIndexEmptyStmt).QueryRowContext(ctx).Scan(&isIndexEmpty)
+		if err != nil {
+			return xerrors.Errorf("failed to check if index is empty: %w", err)
+		}
+
+		if isIndexEmpty && !si.reconcileEmptyIndex {
+			log.Info("chain index is empty and reconcileEmptyIndex is disabled; skipping reconciliation")
+			return nil
+		}
+
+		if isIndexEmpty {
+			log.Info("chain index is empty; backfilling from head")
+			return si.backfillIndex(ctx, tx, head, 0)
+		}
+
+		reconciliationEpoch, err := si.getReconciliationEpoch(ctx, tx)
+		if err != nil {
+			return xerrors.Errorf("failed to get reconciliation epoch: %w", err)
+		}
+
+		currTs := head
+
+		log.Infof("starting chain reconciliation from head height %d; reconciliation epoch is %d", head.Height(), reconciliationEpoch)
+
+		// The goal here is to walk the canonical chain backwards from head until we find a matching non-reverted tipset
+		// in the db so we know where to start reconciliation from
+		// All tipsets that exist in the DB but not in the canonical chain are then marked as reverted
+		// All tipsets that exist in the canonical chain but not in the db are then applied
+
+		// we only need to walk back as far as the reconciliation epoch as all the tipsets in the index
+		// below the reconciliation epoch are already marked as reverted because the reconciliation epoch
+		// is the minimum non-reverted height in the index
+		for currTs != nil && currTs.Height() >= reconciliationEpoch {
+			tsKeyCidBytes, err := toTipsetKeyCidBytes(currTs)
+			if err != nil {
+				return xerrors.Errorf("failed to compute tipset cid: %w", err)
+			}
+
+			var exists bool
+			err = tx.StmtContext(ctx, si.stmts.hasNonRevertedTipsetStmt).QueryRowContext(ctx, tsKeyCidBytes).Scan(&exists)
+			if err != nil {
+				return xerrors.Errorf("failed to check if tipset exists and is not reverted: %w", err)
+			}
+
+			if exists {
+				// found it!
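+				// everything in the index at or above currTs.Height()+1 is no longer on the
+				// canonical chain, which is exactly the range the revert marking below acts on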
+ reconciliationEpoch = currTs.Height() + 1 + log.Infof("found matching tipset at height %d, setting reconciliation epoch to %d", currTs.Height(), reconciliationEpoch) + break + } + + if currTs.Height() == 0 { + log.Infof("ReconcileWithChain reached genesis but no matching tipset found in index") + break + } + + parents := currTs.Parents() + currTs, err = si.cs.GetTipSetFromKey(ctx, parents) + if err != nil { + return xerrors.Errorf("failed to walk chain: %w", err) + } + } + + if currTs.Height() == 0 { + log.Warn("ReconcileWithChain reached genesis without finding matching tipset") + } + + // mark all tipsets from the reconciliation epoch onwards in the Index as reverted as they are not in the current canonical chain + log.Infof("Marking tipsets as reverted from height %d", reconciliationEpoch) + result, err := tx.StmtContext(ctx, si.stmts.updateTipsetsToRevertedFromHeightStmt).ExecContext(ctx, int64(reconciliationEpoch)) + if err != nil { + return xerrors.Errorf("failed to mark tipsets as reverted: %w", err) + } + rowsAffected, err := result.RowsAffected() + if err != nil { + return xerrors.Errorf("failed to get number of rows affected: %w", err) + } + + // also need to mark events as reverted for the corresponding inclusion tipsets + if _, err = tx.StmtContext(ctx, si.stmts.updateEventsToRevertedFromHeightStmt).ExecContext(ctx, int64(reconciliationEpoch-1)); err != nil { + return xerrors.Errorf("failed to mark events as reverted: %w", err) + } + + log.Infof("marked %d tipsets as reverted from height %d", rowsAffected, reconciliationEpoch) + + // if the head is less than the reconciliation epoch, we don't need to index any tipsets as we're already caught up + if head.Height() < reconciliationEpoch { + log.Info("no missing tipsets to index; index is already caught up with chain") + return nil + } + + // apply all missing tipsets by walking the chain backwards starting from head upto the reconciliation epoch + log.Infof("indexing missing tipsets backwards from head height %d to reconciliation epoch %d", head.Height(), reconciliationEpoch) + + // if head.Height == reconciliationEpoch, this will only index head and return + if err := si.backfillIndex(ctx, tx, head, reconciliationEpoch); err != nil { + return xerrors.Errorf("failed to backfill index: %w", err) + } + + return nil + }) +} + +func (si *SqliteIndexer) getReconciliationEpoch(ctx context.Context, tx *sql.Tx) (abi.ChainEpoch, error) { + var reconciliationEpochInIndex sql.NullInt64 + + err := tx.StmtContext(ctx, si.stmts.getMinNonRevertedHeightStmt). + QueryRowContext(ctx). + Scan(&reconciliationEpochInIndex) + + if err != nil { + if err == sql.ErrNoRows { + log.Warn("index only contains reverted tipsets; setting reconciliation epoch to 0") + return 0, nil + } + return 0, xerrors.Errorf("failed to scan minimum non-reverted height: %w", err) + } + + if !reconciliationEpochInIndex.Valid { + log.Warn("index only contains reverted tipsets; setting reconciliation epoch to 0") + return 0, nil + } + + return abi.ChainEpoch(reconciliationEpochInIndex.Int64), nil +} + +// backfillIndex backfills the chain index with missing tipsets starting from the given head tipset +// and stopping after the specified stopAfter epoch (inclusive). +// +// The behavior of this function depends on the relationship between head.Height and stopAfter: +// +// 1. If head.Height > stopAfter: +// - The function will apply missing tipsets from head.Height down to stopAfter (inclusive). 
+// - It will stop applying tipsets if the maximum number of tipsets to apply (si.maxReconcileTipsets) is reached. +// - If the chain store only contains data up to a certain height, the function will stop backfilling at that height. +// +// 2. If head.Height == stopAfter: +// - The function will only apply the head tipset and then return. +// +// 3. If head.Height < stopAfter: +// - The function will immediately return without applying any tipsets. +// +// The si.maxReconcileTipsets parameter is used to limit the maximum number of tipsets that can be applied during the backfill process. +// If the number of applied tipsets reaches si.maxReconcileTipsets, the function will stop backfilling and return. +// +// The function also logs progress information at regular intervals (every builtin.EpochsInDay) to provide visibility into the backfill process. +func (si *SqliteIndexer) backfillIndex(ctx context.Context, tx *sql.Tx, head *types.TipSet, stopAfter abi.ChainEpoch) error { + if head.Height() < stopAfter { + return nil + } + + currTs := head + totalApplied := uint64(0) + lastLoggedEpoch := head.Height() + + log.Infof("backfilling chain index backwards starting from head height %d", head.Height()) + + // Calculate the actual number of tipsets to apply + totalTipsetsToApply := min(uint64(head.Height()-stopAfter+1), si.maxReconcileTipsets) + + for currTs != nil { + if totalApplied >= si.maxReconcileTipsets { + log.Infof("reached maximum number of tipsets to apply (%d), finishing backfill; backfill applied %d tipsets", + si.maxReconcileTipsets, totalApplied) + return nil + } + + err := si.applyMissingTipset(ctx, tx, currTs) + if err != nil { + if ipld.IsNotFound(err) { + log.Infof("stopping backfill at height %d as chain store only contains data up to this height as per error %s; backfill applied %d tipsets", + currTs.Height(), err, totalApplied) + return nil + } + + return xerrors.Errorf("failed to apply tipset at height %d: %w", currTs.Height(), err) + } + + totalApplied++ + + if lastLoggedEpoch-currTs.Height() >= builtin.EpochsInDay { + progress := float64(totalApplied) / float64(totalTipsetsToApply) * 100 + log.Infof("backfill progress: %.2f%% complete (%d out of %d tipsets applied), ongoing", progress, totalApplied, totalTipsetsToApply) + lastLoggedEpoch = currTs.Height() + } + + if currTs.Height() == 0 { + log.Infof("reached genesis tipset and have backfilled everything up to genesis; backfilled %d tipsets", totalApplied) + return nil + } + + if currTs.Height() <= stopAfter { + log.Infof("reached stop height %d; backfilled %d tipsets", stopAfter, totalApplied) + return nil + } + + currTs, err = si.cs.GetTipSetFromKey(ctx, currTs.Parents()) + if err != nil { + return xerrors.Errorf("failed to walk chain at height %d: %w", currTs.Height(), err) + } + } + + log.Infof("applied %d tipsets during backfill", totalApplied) + return nil +} + +// applyMissingTipset indexes a single missing tipset and its parent events +// It's a simplified version of applyMissingTipsets, handling one tipset at a time +func (si *SqliteIndexer) applyMissingTipset(ctx context.Context, tx *sql.Tx, currTs *types.TipSet) error { + if currTs == nil { + return xerrors.Errorf("failed to apply missing tipset: tipset is nil") + } + + // Special handling for genesis tipset + if currTs.Height() == 0 { + if err := si.indexTipset(ctx, tx, currTs); err != nil { + return xerrors.Errorf("failed to index genesis tipset: %w", err) + } + return nil + } + + parentTs, err := si.cs.GetTipSetFromKey(ctx, currTs.Parents()) + if err != nil { 
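+		// wrapped with %w so backfillIndex above can still unwrap and detect ipld
+		// not-found errors, which it treats as the end of locally available chain data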
+ return xerrors.Errorf("failed to get parent tipset: %w", err) + } + + // Index the tipset along with its parent events + if err := si.indexTipsetWithParentEvents(ctx, tx, parentTs, currTs); err != nil { + return xerrors.Errorf("failed to index tipset with parent events: %w", err) + } + + return nil +} diff --git a/chain/stmgr/forks_test.go b/chain/stmgr/forks_test.go index 8c022755371..1c9183731f3 100644 --- a/chain/stmgr/forks_test.go +++ b/chain/stmgr/forks_test.go @@ -36,7 +36,6 @@ import ( "github.com/filecoin-project/lotus/chain/consensus" "github.com/filecoin-project/lotus/chain/consensus/filcns" "github.com/filecoin-project/lotus/chain/gen" - "github.com/filecoin-project/lotus/chain/index" "github.com/filecoin-project/lotus/chain/stmgr" . "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/types" @@ -169,7 +168,7 @@ func TestForkHeightTriggers(t *testing.T) { } return st.Flush(ctx) - }}}, cg.BeaconSchedule(), datastore.NewMapDatastore(), index.DummyMsgIndex) + }}}, cg.BeaconSchedule(), datastore.NewMapDatastore(), nil) if err != nil { t.Fatal(err) } @@ -287,7 +286,7 @@ func testForkRefuseCall(t *testing.T, nullsBefore, nullsAfter int) { root cid.Cid, height abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { migrationCount++ return root, nil - }}}, cg.BeaconSchedule(), datastore.NewMapDatastore(), index.DummyMsgIndex) + }}}, cg.BeaconSchedule(), datastore.NewMapDatastore(), nil) if err != nil { t.Fatal(err) } @@ -519,7 +518,7 @@ func TestForkPreMigration(t *testing.T) { return nil }, }}}, - }, cg.BeaconSchedule(), datastore.NewMapDatastore(), index.DummyMsgIndex) + }, cg.BeaconSchedule(), datastore.NewMapDatastore(), nil) if err != nil { t.Fatal(err) } @@ -594,7 +593,7 @@ func TestDisablePreMigration(t *testing.T) { }, cg.BeaconSchedule(), datastore.NewMapDatastore(), - index.DummyMsgIndex, + nil, ) require.NoError(t, err) require.NoError(t, sm.Start(context.Background())) @@ -649,7 +648,7 @@ func TestMigrtionCache(t *testing.T) { }, cg.BeaconSchedule(), metadataDs, - index.DummyMsgIndex, + nil, ) require.NoError(t, err) require.NoError(t, sm.Start(context.Background())) @@ -702,7 +701,7 @@ func TestMigrtionCache(t *testing.T) { }, cg.BeaconSchedule(), metadataDs, - index.DummyMsgIndex, + nil, ) require.NoError(t, err) sm.SetVMConstructor(func(ctx context.Context, vmopt *vm.VMOpts) (vm.Interface, error) { diff --git a/chain/stmgr/searchwait.go b/chain/stmgr/searchwait.go index 3377389b9b6..288a5bfd082 100644 --- a/chain/stmgr/searchwait.go +++ b/chain/stmgr/searchwait.go @@ -190,7 +190,10 @@ func (sm *StateManager) SearchForMessage(ctx context.Context, head *types.TipSet } func (sm *StateManager) searchForIndexedMsg(ctx context.Context, mcid cid.Cid, m types.ChainMsg) (*types.TipSet, *types.MessageReceipt, cid.Cid, error) { - minfo, err := sm.msgIndex.GetMsgInfo(ctx, mcid) + if sm.chainIndexer == nil { + return nil, nil, cid.Undef, index.ErrNotFound + } + minfo, err := sm.chainIndexer.GetMsgInfo(ctx, mcid) if err != nil { return nil, nil, cid.Undef, xerrors.Errorf("error looking up message in index: %w", err) } diff --git a/chain/stmgr/stmgr.go b/chain/stmgr/stmgr.go index 49be6fdaec4..5b227fe922e 100644 --- a/chain/stmgr/stmgr.go +++ b/chain/stmgr/stmgr.go @@ -156,7 +156,7 @@ type StateManager struct { tsExecMonitor ExecMonitor beacon beacon.Schedule - msgIndex index.MsgIndex + chainIndexer index.Indexer // We keep a small cache for calls to ExecutionTrace which helps improve // performance for node operators like exchanges and block 
explorers @@ -177,7 +177,8 @@ type tipSetCacheEntry struct { invocTrace []*api.InvocResult } -func NewStateManager(cs *store.ChainStore, exec Executor, sys vm.SyscallBuilder, us UpgradeSchedule, beacon beacon.Schedule, metadataDs dstore.Batching, msgIndex index.MsgIndex) (*StateManager, error) { +func NewStateManager(cs *store.ChainStore, exec Executor, sys vm.SyscallBuilder, us UpgradeSchedule, beacon beacon.Schedule, + metadataDs dstore.Batching, chainIndexer index.Indexer) (*StateManager, error) { // If we have upgrades, make sure they're in-order and make sense. if err := us.Validate(); err != nil { return nil, err @@ -242,13 +243,13 @@ func NewStateManager(cs *store.ChainStore, exec Executor, sys vm.SyscallBuilder, tree: nil, }, compWait: make(map[string]chan struct{}), - msgIndex: msgIndex, + chainIndexer: chainIndexer, execTraceCache: execTraceCache, }, nil } -func NewStateManagerWithUpgradeScheduleAndMonitor(cs *store.ChainStore, exec Executor, sys vm.SyscallBuilder, us UpgradeSchedule, b beacon.Schedule, em ExecMonitor, metadataDs dstore.Batching, msgIndex index.MsgIndex) (*StateManager, error) { - sm, err := NewStateManager(cs, exec, sys, us, b, metadataDs, msgIndex) +func NewStateManagerWithUpgradeScheduleAndMonitor(cs *store.ChainStore, exec Executor, sys vm.SyscallBuilder, us UpgradeSchedule, b beacon.Schedule, em ExecMonitor, metadataDs dstore.Batching, chainIndexer index.Indexer) (*StateManager, error) { + sm, err := NewStateManager(cs, exec, sys, us, b, metadataDs, chainIndexer) if err != nil { return nil, err } diff --git a/chain/store/store_test.go b/chain/store/store_test.go index 1ecfc474a02..81569149c01 100644 --- a/chain/store/store_test.go +++ b/chain/store/store_test.go @@ -19,7 +19,6 @@ import ( "github.com/filecoin-project/lotus/chain/consensus" "github.com/filecoin-project/lotus/chain/consensus/filcns" "github.com/filecoin-project/lotus/chain/gen" - "github.com/filecoin-project/lotus/chain/index" "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" @@ -216,7 +215,7 @@ func TestChainExportImportFull(t *testing.T) { t.Fatal("imported chain differed from exported chain") } - sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), nil, filcns.DefaultUpgradeSchedule(), cg.BeaconSchedule(), ds, index.DummyMsgIndex) + sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), nil, filcns.DefaultUpgradeSchedule(), cg.BeaconSchedule(), ds, nil) if err != nil { t.Fatal(err) } diff --git a/chain/types/index.go b/chain/types/index.go new file mode 100644 index 00000000000..93c44ad2e43 --- /dev/null +++ b/chain/types/index.go @@ -0,0 +1,23 @@ +package types + +import ( + "github.com/filecoin-project/go-state-types/abi" +) + +// IndexValidation contains detailed information about the validation status of a specific chain epoch. +type IndexValidation struct { + // TipSetKey is the key of the canonical tipset for this epoch. + TipSetKey TipSetKey + // Height is the epoch height at which the validation is performed. + Height abi.ChainEpoch + // IndexedMessagesCount is the number of indexed messages for the canonical tipset at this epoch. + IndexedMessagesCount uint64 + // IndexedEventsCount is the number of indexed events for the canonical tipset at this epoch. + IndexedEventsCount uint64 + // IndexedEventEntriesCount is the number of indexed event entries for the canonical tipset at this epoch. 
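+	// (each event entry is a single key/value pair within an event's list of entries)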
+ IndexedEventEntriesCount uint64 + // Backfilled denotes whether missing data was successfully backfilled into the index during validation. + Backfilled bool + // IsNullRound indicates if the epoch corresponds to a null round and therefore does not have any indexed messages or events. + IsNullRound bool +} diff --git a/cmd/lotus-bench/import.go b/cmd/lotus-bench/import.go index 500ef4af3ed..46f2411bf86 100644 --- a/cmd/lotus-bench/import.go +++ b/cmd/lotus-bench/import.go @@ -35,7 +35,6 @@ import ( badgerbs "github.com/filecoin-project/lotus/blockstore/badger" "github.com/filecoin-project/lotus/chain/consensus" "github.com/filecoin-project/lotus/chain/consensus/filcns" - "github.com/filecoin-project/lotus/chain/index" "github.com/filecoin-project/lotus/chain/proofs" proofsffi "github.com/filecoin-project/lotus/chain/proofs/ffi" "github.com/filecoin-project/lotus/chain/stmgr" @@ -229,7 +228,8 @@ var importBenchCmd = &cli.Command{ defer cs.Close() //nolint:errcheck // TODO: We need to supply the actual beacon after v14 - stm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(verifier), filcns.DefaultUpgradeSchedule(), nil, metadataDs, index.DummyMsgIndex) + stm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(verifier), filcns.DefaultUpgradeSchedule(), nil, + metadataDs, nil) if err != nil { return err } diff --git a/cmd/lotus-shed/balances.go b/cmd/lotus-shed/balances.go index 8d003950fe1..b5b55a8ce18 100644 --- a/cmd/lotus-shed/balances.go +++ b/cmd/lotus-shed/balances.go @@ -35,7 +35,6 @@ import ( "github.com/filecoin-project/lotus/chain/consensus" "github.com/filecoin-project/lotus/chain/consensus/filcns" "github.com/filecoin-project/lotus/chain/gen/genesis" - "github.com/filecoin-project/lotus/chain/index" proofsffi "github.com/filecoin-project/lotus/chain/proofs/ffi" "github.com/filecoin-project/lotus/chain/state" "github.com/filecoin-project/lotus/chain/stmgr" @@ -514,7 +513,7 @@ var chainBalanceStateCmd = &cli.Command{ cst := cbor.NewCborStore(bs) store := adt.WrapStore(ctx, cst) - sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(proofsffi.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil, mds, index.DummyMsgIndex) + sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(proofsffi.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil, mds, nil) if err != nil { return err } @@ -738,7 +737,8 @@ var chainPledgeCmd = &cli.Command{ cst := cbor.NewCborStore(bs) store := adt.WrapStore(ctx, cst) - sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(proofsffi.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil, mds, index.DummyMsgIndex) + sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), + vm.Syscalls(proofsffi.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil, mds, nil) if err != nil { return err } diff --git a/cmd/lotus-shed/chain_index.go b/cmd/lotus-shed/chain_index.go new file mode 100644 index 00000000000..53cc06d2b61 --- /dev/null +++ b/cmd/lotus-shed/chain_index.go @@ -0,0 +1,213 @@ +package main + +import ( + "encoding/json" + "fmt" + "strings" + "time" + + "github.com/urfave/cli/v2" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-state-types/abi" + + lcli "github.com/filecoin-project/lotus/cli" +) + +var chainIndexCmds = &cli.Command{ + Name: "chainindex", + Usage: "Commands related to managing the 
chainindex", + Subcommands: []*cli.Command{ + validateBackfillChainIndexCmd, + }, +} + +var validateBackfillChainIndexCmd = &cli.Command{ + Name: "validate-backfill", + Usage: "Validates and optionally backfills the chainindex for a range of epochs", + Description: ` +lotus-shed chainindex validate-backfill --from --to [--backfill] [--log-good] [--quiet] + +The command validates the chain index entries for each epoch in the specified range, checking for missing or +inconsistent entries (i.e. the indexed data does not match the actual chain state). If '--backfill' is enabled +(which it is by default), it will attempt to backfill any missing entries using the 'ChainValidateIndex' API. + +Error conditions: + - If 'from' or 'to' are invalid (<=0 or 'to' > 'from'), an error is returned. + - If the 'ChainValidateIndex' API returns an error for an epoch, indicating an inconsistency between the index + and chain state, an error message is logged for that epoch. + +Logging: + - Progress is logged every 2880 epochs (1 day worth of epochs) processed during the validation process. + - If '--log-good' is enabled, details are also logged for each epoch that has no detected problems. This includes: + - Null rounds with no messages/events. + - Epochs with a valid indexed entry. + - If --quiet is enabled, only errors are logged, unless --log-good is also enabled, in which case good tipsets + are also logged. + +Example usage: + +To validate and backfill the chain index for the last 5760 epochs (2 days) and log details for all epochs: + +lotus-shed chainindex validate-backfill --from 1000000 --to 994240 --log-good + +This command is useful for backfilling the chain index over a range of historical epochs during the migration to +the new ChainIndexer. It can also be run periodically to validate the index's integrity using system schedulers +like cron. + +If there are any errors during the validation process, the command will exit with a non-zero status and log the +number of failed RPC calls. Otherwise, it will exit with a zero status. 
+`,
+	Flags: []cli.Flag{
+		&cli.IntFlag{
+			Name:     "from",
+			Usage:    "from specifies the starting tipset epoch for validation (inclusive)",
+			Required: true,
+		},
+		&cli.IntFlag{
+			Name:     "to",
+			Usage:    "to specifies the ending tipset epoch for validation (inclusive)",
+			Required: true,
+		},
+		&cli.BoolFlag{
+			Name:  "backfill",
+			Usage: "backfill determines whether to backfill missing index entries during validation (default: true)",
+			Value: true,
+		},
+		&cli.BoolFlag{
+			Name:  "log-good",
+			Usage: "log tipsets that have no detected problems",
+			Value: false,
+		},
+		&cli.BoolFlag{
+			Name:  "quiet",
+			Usage: "suppress output except for errors (or good tipsets if log-good is enabled)",
+		},
+	},
+	Action: func(cctx *cli.Context) error {
+		srv, err := lcli.GetFullNodeServices(cctx)
+		if err != nil {
+			return xerrors.Errorf("failed to get full node services: %w", err)
+		}
+		defer func() {
+			if closeErr := srv.Close(); closeErr != nil {
+				log.Errorf("error closing services: %s", closeErr)
+			}
+		}()
+
+		api := srv.FullNodeAPI()
+		ctx := lcli.ReqContext(cctx)
+
+		fromEpoch := cctx.Int("from")
+		if fromEpoch <= 0 {
+			return xerrors.Errorf("invalid from epoch: %d, must be greater than 0", fromEpoch)
+		}
+
+		toEpoch := cctx.Int("to")
+		if toEpoch <= 0 {
+			return xerrors.Errorf("invalid to epoch: %d, must be greater than 0", toEpoch)
+		}
+		if toEpoch > fromEpoch {
+			return xerrors.Errorf("to epoch (%d) must be less than or equal to from epoch (%d)", toEpoch, fromEpoch)
+		}
+
+		head, err := api.ChainHead(ctx)
+		if err != nil {
+			return xerrors.Errorf("failed to get chain head: %w", err)
+		}
+		if head.Height() <= abi.ChainEpoch(fromEpoch) {
+			return xerrors.Errorf("from epoch (%d) must be less than chain head (%d)", fromEpoch, head.Height())
+		}
+
+		backfill := cctx.Bool("backfill")
+
+		// Results Tracking
+		logGood := cctx.Bool("log-good")
+
+		quiet := cctx.Bool("quiet")
+
+		failedRPCs := 0
+		successfulBackfills := 0
+		successfulValidations := 0
+		successfulNullRounds := 0
+
+		startTime := time.Now()
+		if !quiet {
+			_, _ = fmt.Fprintf(cctx.App.Writer, "%s starting chainindex validation; from epoch: %d; to epoch: %d; backfill: %t; log-good: %t\n", currentTimeString(),
+				fromEpoch, toEpoch, backfill, logGood)
+		}
+		totalEpochs := fromEpoch - toEpoch + 1
+		haltHeight := -1
+
+		for epoch := fromEpoch; epoch >= toEpoch; epoch-- {
+			if ctx.Err() != nil {
+				return ctx.Err()
+			}
+
+			if (fromEpoch-epoch+1)%2880 == 0 || epoch == toEpoch {
+				progress := float64(fromEpoch-epoch+1) / float64(totalEpochs) * 100
+				elapsed := time.Since(startTime)
+				if !quiet {
+					_, _ = fmt.Fprintf(cctx.App.ErrWriter, "%s -------- Chain index validation progress: %.2f%%; Time elapsed: %s\n",
+						currentTimeString(), progress, elapsed)
+				}
+			}
+
+			indexValidateResp, err := api.ChainValidateIndex(ctx, abi.ChainEpoch(epoch), backfill)
+			if err != nil {
+				if strings.Contains(err.Error(), "chain store does not contain data") {
+					haltHeight = epoch
+					break
+				}
+
+				_, _ = fmt.Fprintf(cctx.App.Writer, "%s ✗ Epoch %d; failure: %s\n", currentTimeString(), epoch, err)
+				failedRPCs++
+				continue
+			}
+
+			if indexValidateResp.Backfilled {
+				successfulBackfills++
+			} else if indexValidateResp.IsNullRound {
+				successfulNullRounds++
+			} else {
+				successfulValidations++
+			}
+
+			if !logGood {
+				continue
+			}
+
+			if indexValidateResp.IsNullRound {
+				_, _ = fmt.Fprintf(cctx.App.Writer, "%s ✓ Epoch %d; null round\n", currentTimeString(), epoch)
+			} else {
+				jsonData, err := json.Marshal(indexValidateResp)
+				if err != nil {
+					return fmt.Errorf("failed to marshal results to JSON: %w", err)
+				}
+
+				_, _ = fmt.Fprintf(cctx.App.Writer, "%s ✓ Epoch %d (%s)\n", currentTimeString(), epoch, string(jsonData))
+			}
+		}
+
+		if !quiet {
+			_, _ = fmt.Fprintf(cctx.App.Writer, "\n%s Chain index validation summary:\n", currentTimeString())
+			_, _ = fmt.Fprintf(cctx.App.Writer, "Total failed RPC calls: %d\n", failedRPCs)
+			_, _ = fmt.Fprintf(cctx.App.Writer, "Total successful backfills: %d\n", successfulBackfills)
+			_, _ = fmt.Fprintf(cctx.App.Writer, "Total successful validations without backfilling: %d\n", successfulValidations)
+			_, _ = fmt.Fprintf(cctx.App.Writer, "Total successful null round validations: %d\n", successfulNullRounds)
+		}
+
+		if haltHeight >= 0 {
+			return fmt.Errorf("chain index validation and backfill halted at height %d as the chain store does not contain data for that height", haltHeight)
+		} else if failedRPCs > 0 {
+			return fmt.Errorf("chain index validation failed with %d RPC errors", failedRPCs)
+		}
+
+		return nil
+	},
+}
+
+func currentTimeString() string {
+	currentTime := time.Now().Format("2006-01-02 15:04:05.000")
+	return currentTime
+}
diff --git a/cmd/lotus-shed/gas-estimation.go b/cmd/lotus-shed/gas-estimation.go
index 85573ddb9a0..b1c61b62f2c 100644
--- a/cmd/lotus-shed/gas-estimation.go
+++ b/cmd/lotus-shed/gas-estimation.go
@@ -19,7 +19,6 @@ import (
 	"github.com/filecoin-project/lotus/chain/beacon/drand"
 	"github.com/filecoin-project/lotus/chain/consensus"
 	"github.com/filecoin-project/lotus/chain/consensus/filcns"
-	"github.com/filecoin-project/lotus/chain/index"
 	proofsffi "github.com/filecoin-project/lotus/chain/proofs/ffi"
 	"github.com/filecoin-project/lotus/chain/stmgr"
 	"github.com/filecoin-project/lotus/chain/store"
@@ -107,7 +106,8 @@ var gasTraceCmd = &cli.Command{
 	cs := store.NewChainStore(bs, bs, mds, filcns.Weight, nil)
 	defer cs.Close() //nolint:errcheck

-	sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(proofsffi.ProofVerifier), filcns.DefaultUpgradeSchedule(), shd, mds, index.DummyMsgIndex)
+	sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(proofsffi.ProofVerifier), filcns.DefaultUpgradeSchedule(),
+		shd, mds, nil)
 	if err != nil {
 		return err
 	}
@@ -203,7 +203,8 @@ var replayOfflineCmd = &cli.Command{
 	cs := store.NewChainStore(bs, bs, mds, filcns.Weight, nil)
 	defer cs.Close() //nolint:errcheck

-	sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(proofsffi.ProofVerifier), filcns.DefaultUpgradeSchedule(), shd, mds, index.DummyMsgIndex)
+	sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(proofsffi.ProofVerifier), filcns.DefaultUpgradeSchedule(),
+		shd, mds, nil)
 	if err != nil {
 		return err
 	}
diff --git a/cmd/lotus-shed/indexes.go b/cmd/lotus-shed/indexes.go
deleted file mode 100644
index e1c9be182fe..00000000000
--- a/cmd/lotus-shed/indexes.go
+++ /dev/null
@@ -1,1081 +0,0 @@
-package main
-
-import (
-	"context"
-	"database/sql"
-	"fmt"
-	"math"
-	"path"
-	"path/filepath"
-	"strings"
-	"time"
-
-	"github.com/ipfs/go-cid"
-	cbor "github.com/ipfs/go-ipld-cbor"
-	"github.com/mitchellh/go-homedir"
-	"github.com/urfave/cli/v2"
-	cbg "github.com/whyrusleeping/cbor-gen"
-	"golang.org/x/xerrors"
-
-	"github.com/filecoin-project/go-address"
-	amt4 "github.com/filecoin-project/go-amt-ipld/v4"
-	"github.com/filecoin-project/go-state-types/abi"
-	"github.com/filecoin-project/go-state-types/crypto"
-	"github.com/filecoin-project/go-state-types/exitcode"
-
- lapi "github.com/filecoin-project/lotus/api" - bstore "github.com/filecoin-project/lotus/blockstore" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/chain/types/ethtypes" - lcli "github.com/filecoin-project/lotus/cli" -) - -const ( - // same as in chain/events/index.go - eventExists = `SELECT MAX(id) FROM event WHERE height=? AND tipset_key=? AND tipset_key_cid=? AND emitter_addr=? AND event_index=? AND message_cid=? AND message_index=?` - eventCount = `SELECT COUNT(*) FROM event WHERE tipset_key_cid=?` - entryCount = `SELECT COUNT(*) FROM event_entry JOIN event ON event_entry.event_id=event.id WHERE event.tipset_key_cid=?` - insertEvent = `INSERT OR IGNORE INTO event(height, tipset_key, tipset_key_cid, emitter_addr, event_index, message_cid, message_index, reverted) VALUES(?, ?, ?, ?, ?, ?, ?, ?)` - insertEntry = `INSERT OR IGNORE INTO event_entry(event_id, indexed, flags, key, codec, value) VALUES(?, ?, ?, ?, ?, ?)` - upsertEventsSeen = `INSERT INTO events_seen(height, tipset_key_cid, reverted) VALUES(?, ?, false) ON CONFLICT(height, tipset_key_cid) DO UPDATE SET reverted=false` - tipsetSeen = `SELECT height,reverted FROM events_seen WHERE tipset_key_cid=?` - - // these queries are used to extract just the information used to reconstruct the event AMT from the database - selectEventIdAndEmitter = `SELECT id, emitter_addr FROM event WHERE tipset_key_cid=? and message_cid=? ORDER BY event_index ASC` - selectEventEntries = `SELECT flags, key, codec, value FROM event_entry WHERE event_id=? ORDER BY _rowid_ ASC` -) - -func withCategory(cat string, cmd *cli.Command) *cli.Command { - cmd.Category = strings.ToUpper(cat) - return cmd -} - -var indexesCmd = &cli.Command{ - Name: "indexes", - Usage: "Commands related to managing sqlite indexes", - HideHelpCommand: true, - Subcommands: []*cli.Command{ - withCategory("msgindex", backfillMsgIndexCmd), - withCategory("msgindex", pruneMsgIndexCmd), - withCategory("txhash", backfillTxHashCmd), - withCategory("events", backfillEventsCmd), - withCategory("events", inspectEventsCmd), - }, -} - -var backfillEventsCmd = &cli.Command{ - Name: "backfill-events", - Usage: "Backfill the events.db for a number of epochs starting from a specified height and working backward", - Flags: []cli.Flag{ - &cli.UintFlag{ - Name: "from", - Value: 0, - Usage: "the tipset height to start backfilling from (0 is head of chain)", - }, - &cli.IntFlag{ - Name: "epochs", - Value: 2000, - Usage: "the number of epochs to backfill (working backwards)", - }, - &cli.BoolFlag{ - Name: "temporary-index", - Value: false, - Usage: "use a temporary index to speed up the backfill process", - }, - &cli.BoolFlag{ - Name: "vacuum", - Value: false, - Usage: "run VACUUM on the database after backfilling is complete; this will reclaim space from deleted rows, but may take a long time", - }, - }, - Action: func(cctx *cli.Context) error { - srv, err := lcli.GetFullNodeServices(cctx) - if err != nil { - return err - } - defer srv.Close() //nolint:errcheck - - api := srv.FullNodeAPI() - ctx := lcli.ReqContext(cctx) - - // currTs will be the tipset where we start backfilling from - currTs, err := api.ChainHead(ctx) - if err != nil { - return err - } - if cctx.IsSet("from") { - // we need to fetch the tipset after the epoch being specified since we will need to advance currTs - currTs, err = api.ChainGetTipSetAfterHeight(ctx, abi.ChainEpoch(cctx.Int("from")+1), currTs.Key()) - if err != nil { - return err - } - } - - // advance currTs by one epoch and maintain 
prevTs as the previous tipset (this allows us to easily use the ChainGetParentMessages/Receipt API) - prevTs := currTs - currTs, err = api.ChainGetTipSet(ctx, currTs.Parents()) - if err != nil { - return fmt.Errorf("failed to load tipset %s: %w", prevTs.Parents(), err) - } - - epochs := cctx.Int("epochs") - - basePath, err := homedir.Expand(cctx.String("repo")) - if err != nil { - return err - } - - log.Infof( - "WARNING: If this command is run against a node that is currently collecting events with DisableHistoricFilterAPI=false, " + - "it may cause the node to fail to record recent events due to the need to obtain an exclusive lock on the database for writes.") - - dbPath := path.Join(basePath, "sqlite", "events.db") - db, err := sql.Open("sqlite3", dbPath+"?_txlock=immediate") - if err != nil { - return err - } - - defer func() { - err := db.Close() - if err != nil { - fmt.Printf("ERROR: closing db: %s", err) - } - }() - - if cctx.Bool("temporary-index") { - log.Info("creating temporary index (tmp_event_backfill_index) on event table to speed up backfill") - _, err := db.Exec("CREATE INDEX IF NOT EXISTS tmp_event_backfill_index ON event (height, tipset_key, tipset_key_cid, emitter_addr, event_index, message_cid, message_index, reverted);") - if err != nil { - return err - } - } - - addressLookups := make(map[abi.ActorID]address.Address) - - // TODO: We don't need this address resolution anymore once https://github.com/filecoin-project/lotus/issues/11594 lands - resolveFn := func(ctx context.Context, emitter abi.ActorID, ts *types.TipSet) (address.Address, bool) { - // we only want to match using f4 addresses - idAddr, err := address.NewIDAddress(uint64(emitter)) - if err != nil { - return address.Undef, false - } - - actor, err := api.StateGetActor(ctx, idAddr, ts.Key()) - if err != nil || actor.DelegatedAddress == nil { - return idAddr, true - } - - return *actor.DelegatedAddress, true - } - - isIndexedValue := func(b uint8) bool { - // currently we mark the full entry as indexed if either the key - // or the value are indexed; in the future we will need finer-grained - // management of indices - return b&(types.EventFlagIndexedKey|types.EventFlagIndexedValue) > 0 - } - - var totalEventsAffected int64 - var totalEntriesAffected int64 - - stmtEventExists, err := db.Prepare(eventExists) - if err != nil { - return err - } - stmtInsertEvent, err := db.Prepare(insertEvent) - if err != nil { - return err - } - stmtInsertEntry, err := db.Prepare(insertEntry) - if err != nil { - return err - } - - stmtUpsertEventSeen, err := db.Prepare(upsertEventsSeen) - if err != nil { - return err - } - - processHeight := func(ctx context.Context, cnt int, msgs []lapi.Message, receipts []*types.MessageReceipt) error { - var tx *sql.Tx - for { - var err error - tx, err = db.BeginTx(ctx, nil) - if err != nil { - if err.Error() == "database is locked" { - log.Warnf("database is locked, retrying in 200ms") - time.Sleep(200 * time.Millisecond) - continue - } - return err - } - break - } - defer tx.Rollback() //nolint:errcheck - - var eventsAffected int64 - var entriesAffected int64 - - tsKeyCid, err := currTs.Key().Cid() - if err != nil { - return fmt.Errorf("failed to get tipset key cid: %w", err) - } - - eventCount := 0 - // loop over each message receipt and backfill the events - for idx, receipt := range receipts { - msg := msgs[idx] - - if receipt.ExitCode != exitcode.Ok { - continue - } - - if receipt.EventsRoot == nil { - continue - } - - events, err := api.ChainGetEvents(ctx, *receipt.EventsRoot) - if 
err != nil { - return fmt.Errorf("failed to load events for tipset %s: %w", currTs, err) - } - - for _, event := range events { - addr, found := addressLookups[event.Emitter] - if !found { - var ok bool - addr, ok = resolveFn(ctx, event.Emitter, currTs) - if !ok { - // not an address we will be able to match against - continue - } - addressLookups[event.Emitter] = addr - } - - // select the highest event id that exists in database, or null if none exists - var entryID sql.NullInt64 - err = tx.Stmt(stmtEventExists).QueryRow( - currTs.Height(), - currTs.Key().Bytes(), - tsKeyCid.Bytes(), - addr.Bytes(), - eventCount, - msg.Cid.Bytes(), - idx, - ).Scan(&entryID) - if err != nil { - return fmt.Errorf("error checking if event exists: %w", err) - } - - // we already have this event - if entryID.Valid { - continue - } - - // event does not exist, lets backfill it - res, err := tx.Stmt(stmtInsertEvent).Exec( - currTs.Height(), // height - currTs.Key().Bytes(), // tipset_key - tsKeyCid.Bytes(), // tipset_key_cid - addr.Bytes(), // emitter_addr - eventCount, // event_index - msg.Cid.Bytes(), // message_cid - idx, // message_index - false, // reverted - ) - if err != nil { - return fmt.Errorf("error inserting event: %w", err) - } - - entryID.Int64, err = res.LastInsertId() - if err != nil { - return fmt.Errorf("could not get last insert id: %w", err) - } - - rowsAffected, err := res.RowsAffected() - if err != nil { - return fmt.Errorf("could not get rows affected: %w", err) - } - eventsAffected += rowsAffected - - // backfill the event entries - for _, entry := range event.Entries { - _, err := tx.Stmt(stmtInsertEntry).Exec( - entryID.Int64, // event_id - isIndexedValue(entry.Flags), // indexed - []byte{entry.Flags}, // flags - entry.Key, // key - entry.Codec, // codec - entry.Value, // value - ) - if err != nil { - return fmt.Errorf("error inserting entry: %w", err) - } - - rowsAffected, err := res.RowsAffected() - if err != nil { - return fmt.Errorf("could not get rows affected: %w", err) - } - entriesAffected += rowsAffected - } - eventCount++ - } - } - - // mark the tipset as processed - _, err = tx.Stmt(stmtUpsertEventSeen).Exec( - currTs.Height(), - tsKeyCid.Bytes(), - ) - if err != nil { - return xerrors.Errorf("exec upsert events seen: %w", err) - } - - err = tx.Commit() - if err != nil { - return fmt.Errorf("failed to commit transaction: %w", err) - } - - log.Infof("[%d] backfilling actor events epoch:%d, eventsAffected:%d, entriesAffected:%d", cnt, currTs.Height(), eventsAffected, entriesAffected) - - totalEventsAffected += eventsAffected - totalEntriesAffected += entriesAffected - - return nil - } - - for i := 0; i < epochs; i++ { - select { - case <-ctx.Done(): - return nil - default: - } - - blockCid := prevTs.Blocks()[0].Cid() - - // get messages for the parent of the previous tipset (which will be currTs) - msgs, err := api.ChainGetParentMessages(ctx, blockCid) - if err != nil { - return fmt.Errorf("failed to get parent messages for block %s: %w", blockCid, err) - } - - // get receipts for the parent of the previous tipset (which will be currTs) - receipts, err := api.ChainGetParentReceipts(ctx, blockCid) - if err != nil { - return fmt.Errorf("failed to get parent receipts for block %s: %w", blockCid, err) - } - - if len(msgs) != len(receipts) { - return fmt.Errorf("mismatched in message and receipt count: %d != %d", len(msgs), len(receipts)) - } - - err = processHeight(ctx, i, msgs, receipts) - if err != nil { - return err - } - - // advance prevTs and currTs up the chain - prevTs = 
currTs - currTs, err = api.ChainGetTipSet(ctx, currTs.Parents()) - if err != nil { - return fmt.Errorf("failed to load tipset %s: %w", currTs, err) - } - } - - log.Infof("backfilling events complete, totalEventsAffected:%d, totalEntriesAffected:%d", totalEventsAffected, totalEntriesAffected) - - if cctx.Bool("temporary-index") { - log.Info("dropping temporary index (tmp_event_backfill_index) on event table") - _, err := db.Exec("DROP INDEX IF EXISTS tmp_event_backfill_index;") - if err != nil { - fmt.Printf("ERROR: dropping index: %s", err) - } - } - - if cctx.Bool("vacuum") { - log.Info("running VACUUM on the database") - _, err := db.Exec("VACUUM;") - if err != nil { - return fmt.Errorf("failed to run VACUUM on the database: %w", err) - } - } - - return nil - }, -} - -var inspectEventsCmd = &cli.Command{ - Name: "inspect-events", - Usage: "Perform a health-check on the events.db for a number of epochs starting from a specified height and working backward. " + - "Logs tipsets with problems and optionally logs tipsets without problems. Without specifying a fixed number of epochs, " + - "the command will continue until it reaches a tipset that is not in the blockstore.", - Flags: []cli.Flag{ - &cli.UintFlag{ - Name: "from", - Value: 0, - Usage: "the tipset height to start inspecting from (0 is head of chain)", - }, - &cli.IntFlag{ - Name: "epochs", - Value: 0, - Usage: "the number of epochs to inspect (working backwards) [0 = until we reach a block we don't have]", - }, - &cli.BoolFlag{ - Name: "log-good", - Usage: "log tipsets that have no detected problems", - Value: false, - }, - }, - Action: func(cctx *cli.Context) error { - srv, err := lcli.GetFullNodeServices(cctx) - if err != nil { - return err - } - defer srv.Close() //nolint:errcheck - - api := srv.FullNodeAPI() - ctx := lcli.ReqContext(cctx) - - // currTs will be the tipset where we start backfilling from - currTs, err := api.ChainHead(ctx) - if err != nil { - return err - } - if cctx.IsSet("from") { - // we need to fetch the tipset after the epoch being specified since we will need to advance currTs - currTs, err = api.ChainGetTipSetAfterHeight(ctx, abi.ChainEpoch(cctx.Int("from")+1), currTs.Key()) - if err != nil { - return err - } - } - - logGood := cctx.Bool("log-good") - - // advance currTs by one epoch and maintain prevTs as the previous tipset (this allows us to easily use the ChainGetParentMessages/Receipt API) - prevTs := currTs - currTs, err = api.ChainGetTipSet(ctx, currTs.Parents()) - if err != nil { - return fmt.Errorf("failed to load tipset %s: %w", prevTs.Parents(), err) - } - - epochs := cctx.Int("epochs") - if epochs <= 0 { - epochs = math.MaxInt32 - } - - basePath, err := homedir.Expand(cctx.String("repo")) - if err != nil { - return err - } - - dbPath := path.Join(basePath, "sqlite", "events.db") - db, err := sql.Open("sqlite3", dbPath+"?mode=ro") - if err != nil { - return err - } - - defer func() { - err := db.Close() - if err != nil { - fmt.Printf("ERROR: closing db: %s", err) - } - }() - - stmtEventCount, err := db.Prepare(eventCount) - if err != nil { - return err - } - stmtEntryCount, err := db.Prepare(entryCount) - if err != nil { - return err - } - stmtTipsetSeen, err := db.Prepare(tipsetSeen) - if err != nil { - return err - } - stmtSelectEventIdAndEmitter, err := db.Prepare(selectEventIdAndEmitter) - if err != nil { - return err - } - stmtSelectEventEntries, err := db.Prepare(selectEventEntries) - if err != nil { - return err - } - - processHeight := func(ctx context.Context, messages []lapi.Message, 
receipts []*types.MessageReceipt) error { - tsKeyCid, err := currTs.Key().Cid() - if err != nil { - return xerrors.Errorf("failed to get tipset key cid: %w", err) - } - - var problems []string - - checkEventAndEntryCounts := func() error { - // compare by counting events, using ChainGetEvents to load the events from the chain - expectEvents, expectEntries, err := chainEventAndEntryCountsAt(ctx, currTs, receipts, api) - if err != nil { - return err - } - - actualEvents, actualEntries, err := dbEventAndEntryCountsAt(currTs, stmtEventCount, stmtEntryCount) - if err != nil { - return err - } - - if actualEvents != expectEvents { - problems = append(problems, fmt.Sprintf("expected %d events, got %d", expectEvents, actualEvents)) - } - if actualEntries != expectEntries { - problems = append(problems, fmt.Sprintf("expected %d entries, got %d", expectEntries, actualEntries)) - } - - return nil - } - - // Compare the AMT roots: we reconstruct the event AMT from the database data we have and - // compare it with the on-chain AMT root from the receipt. If it's the same CID then we have - // exactly the same event data. Any variation, in number of events, and even a single byte - // in event data, will be considered a mismatch. - - // cache for address -> actorID because it's typical for tipsets to generate many events for - // the same actors so we can try and avoid too many StateLookupID calls - addrIdCache := make(map[address.Address]abi.ActorID) - - eventIndex := 0 - var hasEvents bool - for msgIndex, receipt := range receipts { - if receipt.EventsRoot == nil { - continue - } - - amtRoot, has, problem, err := amtRootForEvents( - ctx, - api, - tsKeyCid, - prevTs.Key(), - stmtSelectEventIdAndEmitter, - stmtSelectEventEntries, - messages[msgIndex], - addrIdCache, - ) - if err != nil { - return err - } - if has && !hasEvents { - hasEvents = true - } - - if problem != "" { - problems = append(problems, problem) - } else if amtRoot != *receipt.EventsRoot { - problems = append(problems, fmt.Sprintf("events root mismatch for message %s", messages[msgIndex].Cid)) - // also provide more information about the mismatch - if err := checkEventAndEntryCounts(); err != nil { - return err - } - } - - eventIndex++ - } - - var seenHeight int - var seenReverted int - if err := stmtTipsetSeen.QueryRow(tsKeyCid.Bytes()).Scan(&seenHeight, &seenReverted); err != nil { - if err == sql.ErrNoRows { - if hasEvents { - problems = append(problems, "not in events_seen table") - } else { - problems = append(problems, "zero-event epoch not in events_seen table") - } - } else { - return xerrors.Errorf("failed to check if tipset is seen: %w", err) - } - } else { - if seenHeight != int(currTs.Height()) { - problems = append(problems, fmt.Sprintf("events_seen height mismatch (%d)", seenHeight)) - } - if seenReverted != 0 { - problems = append(problems, "events_seen marked as reverted") - } - } - - if len(problems) > 0 { - _, _ = fmt.Fprintf(cctx.App.Writer, "✗ Epoch %d (%s): %s\n", currTs.Height(), tsKeyCid, strings.Join(problems, ", ")) - } else if logGood { - _, _ = fmt.Fprintf(cctx.App.Writer, "✓ Epoch %d (%s)\n", currTs.Height(), tsKeyCid) - } - - return nil - } - - for i := 0; ctx.Err() == nil && i < epochs; i++ { - // get receipts and messages for the parent of the previous tipset (which will be currTs) - - blockCid := prevTs.Blocks()[0].Cid() - - messages, err := api.ChainGetParentMessages(ctx, blockCid) - if err != nil { - _, _ = fmt.Fprintf(cctx.App.ErrWriter, "Missing parent messages for epoch %d (checked %d epochs)", 
prevTs.Height(), i) - break - } - receipts, err := api.ChainGetParentReceipts(ctx, blockCid) - if err != nil { - _, _ = fmt.Fprintf(cctx.App.ErrWriter, "Missing parent receipts for epoch %d (checked %d epochs)", prevTs.Height(), i) - break - } - - if len(messages) != len(receipts) { - return fmt.Errorf("mismatched in message and receipt count: %d != %d", len(messages), len(receipts)) - } - - err = processHeight(ctx, messages, receipts) - if err != nil { - return err - } - - // advance prevTs and currTs up the chain - prevTs = currTs - currTs, err = api.ChainGetTipSet(ctx, currTs.Parents()) - if err != nil { - return xerrors.Errorf("failed to load tipset %s: %w", currTs, err) - } - } - - return nil - }, -} - -// amtRootForEvents generates the events AMT root CID for a given message's events, and returns -// whether the message has events, a string describing any non-fatal problem encountered, -// and a fatal error if one occurred. -func amtRootForEvents( - ctx context.Context, - api lapi.FullNode, - tsKeyCid cid.Cid, - prevTsKey types.TipSetKey, - stmtSelectEventIdAndEmitter, stmtSelectEventEntries *sql.Stmt, - message lapi.Message, - addrIdCache map[address.Address]abi.ActorID, -) (cid.Cid, bool, string, error) { - - events := make([]cbg.CBORMarshaler, 0) - - rows, err := stmtSelectEventIdAndEmitter.Query(tsKeyCid.Bytes(), message.Cid.Bytes()) - if err != nil { - return cid.Undef, false, "", xerrors.Errorf("failed to query events: %w", err) - } - defer func() { - _ = rows.Close() - }() - - for rows.Next() { - var eventId int - var emitterAddr []byte - if err := rows.Scan(&eventId, &emitterAddr); err != nil { - return cid.Undef, false, "", xerrors.Errorf("failed to scan row: %w", err) - } - - addr, err := address.NewFromBytes(emitterAddr) - if err != nil { - return cid.Undef, false, "", xerrors.Errorf("failed to parse address: %w", err) - } - var actorId abi.ActorID - if id, ok := addrIdCache[addr]; ok { - actorId = id - } else { - if addr.Protocol() != address.ID { - // use the previous tipset (height+1) to do an address lookup because the actor - // may have been created in the current tipset (i.e. deferred execution means the - // changed state isn't available until the next epoch) - idAddr, err := api.StateLookupID(ctx, addr, prevTsKey) - if err != nil { - // TODO: fix this? 
we should be able to resolve all addresses - return cid.Undef, false, fmt.Sprintf("failed to resolve address (%s), could not compare amt", addr.String()), nil - } - addr = idAddr - } - id, err := address.IDFromAddress(addr) - if err != nil { - return cid.Undef, false, "", xerrors.Errorf("failed to get ID from address: %w", err) - } - actorId = abi.ActorID(id) - addrIdCache[addr] = actorId - } - - event := types.Event{ - Emitter: actorId, - Entries: make([]types.EventEntry, 0), - } - - rows2, err := stmtSelectEventEntries.Query(eventId) - if err != nil { - return cid.Undef, false, "", xerrors.Errorf("failed to query event entries: %w", err) - } - defer func() { - _ = rows2.Close() - }() - - for rows2.Next() { - var flags []byte - var key string - var codec uint64 - var value []byte - if err := rows2.Scan(&flags, &key, &codec, &value); err != nil { - return cid.Undef, false, "", xerrors.Errorf("failed to scan row: %w", err) - } - entry := types.EventEntry{ - Flags: flags[0], - Key: key, - Codec: codec, - Value: value, - } - event.Entries = append(event.Entries, entry) - } - - events = append(events, &event) - } - - // construct the AMT from our slice to an in-memory IPLD store just so we can get the root, - // we don't need the blocks themselves - root, err := amt4.FromArray(ctx, cbor.NewCborStore(bstore.NewMemory()), events, amt4.UseTreeBitWidth(types.EventAMTBitwidth)) - if err != nil { - return cid.Undef, false, "", xerrors.Errorf("failed to create AMT: %w", err) - } - return root, len(events) > 0, "", nil -} - -func chainEventAndEntryCountsAt(ctx context.Context, ts *types.TipSet, receipts []*types.MessageReceipt, api lapi.FullNode) (int, int, error) { - var expectEvents int - var expectEntries int - for _, receipt := range receipts { - if receipt.ExitCode != exitcode.Ok || receipt.EventsRoot == nil { - continue - } - events, err := api.ChainGetEvents(ctx, *receipt.EventsRoot) - if err != nil { - return 0, 0, xerrors.Errorf("failed to load events for tipset %s: %w", ts, err) - } - expectEvents += len(events) - for _, event := range events { - expectEntries += len(event.Entries) - } - } - return expectEvents, expectEntries, nil -} - -func dbEventAndEntryCountsAt(ts *types.TipSet, stmtEventCount, stmtEntryCount *sql.Stmt) (int, int, error) { - tsKeyCid, err := ts.Key().Cid() - if err != nil { - return 0, 0, xerrors.Errorf("failed to get tipset key cid: %w", err) - } - var actualEvents int - if err := stmtEventCount.QueryRow(tsKeyCid.Bytes()).Scan(&actualEvents); err != nil { - return 0, 0, xerrors.Errorf("failed to count events for epoch %d (tsk CID %s): %w", ts.Height(), tsKeyCid, err) - } - var actualEntries int - if err := stmtEntryCount.QueryRow(tsKeyCid.Bytes()).Scan(&actualEntries); err != nil { - return 0, 0, xerrors.Errorf("failed to count entries for epoch %d (tsk CID %s): %w", ts.Height(), tsKeyCid, err) - } - return actualEvents, actualEntries, nil -} - -var backfillMsgIndexCmd = &cli.Command{ - Name: "backfill-msgindex", - Usage: "Backfill the msgindex.db for a number of epochs starting from a specified height", - Flags: []cli.Flag{ - &cli.IntFlag{ - Name: "from", - Value: 0, - Usage: "height to start the backfill; uses the current head if omitted", - }, - &cli.IntFlag{ - Name: "epochs", - Value: 1800, - Usage: "number of epochs to backfill; defaults to 1800 (2 finalities)", - }, - }, - Action: func(cctx *cli.Context) error { - api, closer, err := lcli.GetFullNodeAPI(cctx) - if err != nil { - return err - } - - defer closer() - ctx := lcli.ReqContext(cctx) - - curTs, err := 
api.ChainHead(ctx) - if err != nil { - return err - } - - startHeight := int64(cctx.Int("from")) - if startHeight == 0 { - startHeight = int64(curTs.Height()) - 1 - } - epochs := cctx.Int("epochs") - - basePath, err := homedir.Expand(cctx.String("repo")) - if err != nil { - return err - } - - dbPath := path.Join(basePath, "sqlite", "msgindex.db") - db, err := sql.Open("sqlite3", dbPath) - if err != nil { - return err - } - - defer func() { - err := db.Close() - if err != nil { - fmt.Printf("ERROR: closing db: %s", err) - } - }() - - insertStmt, err := db.Prepare("INSERT OR IGNORE INTO messages (cid, tipset_cid, epoch) VALUES (?, ?, ?)") - if err != nil { - return err - } - - var nrRowsAffected int64 - for i := 0; i < epochs; i++ { - epoch := abi.ChainEpoch(startHeight - int64(i)) - - if i%100 == 0 { - log.Infof("%d/%d processing epoch:%d, nrRowsAffected:%d", i, epochs, epoch, nrRowsAffected) - } - - ts, err := api.ChainGetTipSetByHeight(ctx, epoch, curTs.Key()) - if err != nil { - return fmt.Errorf("failed to get tipset at epoch %d: %w", epoch, err) - } - - tsCid, err := ts.Key().Cid() - if err != nil { - return fmt.Errorf("failed to get tipset cid at epoch %d: %w", epoch, err) - } - - msgs, err := api.ChainGetMessagesInTipset(ctx, ts.Key()) - if err != nil { - return fmt.Errorf("failed to get messages in tipset at epoch %d: %w", epoch, err) - } - - for _, msg := range msgs { - key := msg.Cid.String() - tskey := tsCid.String() - res, err := insertStmt.Exec(key, tskey, int64(epoch)) - if err != nil { - return fmt.Errorf("failed to insert message cid %s in tipset %s at epoch %d: %w", key, tskey, epoch, err) - } - rowsAffected, err := res.RowsAffected() - if err != nil { - return fmt.Errorf("failed to get rows affected for message cid %s in tipset %s at epoch %d: %w", key, tskey, epoch, err) - } - nrRowsAffected += rowsAffected - } - } - - log.Infof("Done backfilling, nrRowsAffected:%d", nrRowsAffected) - - return nil - }, -} - -var pruneMsgIndexCmd = &cli.Command{ - Name: "prune-msgindex", - Usage: "Prune the msgindex.db for messages included before a given epoch", - Flags: []cli.Flag{ - &cli.IntFlag{ - Name: "from", - Usage: "height to start the prune; if negative it indicates epochs from current head", - }, - }, - Action: func(cctx *cli.Context) error { - api, closer, err := lcli.GetFullNodeAPI(cctx) - if err != nil { - return err - } - - defer closer() - ctx := lcli.ReqContext(cctx) - - startHeight := int64(cctx.Int("from")) - if startHeight < 0 { - curTs, err := api.ChainHead(ctx) - if err != nil { - return err - } - - startHeight += int64(curTs.Height()) - - if startHeight < 0 { - return xerrors.Errorf("bogus start height %d", startHeight) - } - } - - basePath, err := homedir.Expand(cctx.String("repo")) - if err != nil { - return err - } - - dbPath := path.Join(basePath, "sqlite", "msgindex.db") - db, err := sql.Open("sqlite3", dbPath) - if err != nil { - return err - } - - defer func() { - err := db.Close() - if err != nil { - fmt.Printf("ERROR: closing db: %s", err) - } - }() - - tx, err := db.Begin() - if err != nil { - return err - } - - if _, err := tx.Exec("DELETE FROM messages WHERE epoch < ?", startHeight); err != nil { - if err := tx.Rollback(); err != nil { - fmt.Printf("ERROR: rollback: %s", err) - } - return err - } - - if err := tx.Commit(); err != nil { - return err - } - - return nil - }, -} - -var backfillTxHashCmd = &cli.Command{ - Name: "backfill-txhash", - Usage: "Backfills the txhash.db for a number of epochs starting from a specified height", - Flags: []cli.Flag{ - 
&cli.UintFlag{ - Name: "from", - Value: 0, - Usage: "the tipset height to start backfilling from (0 is head of chain)", - }, - &cli.IntFlag{ - Name: "epochs", - Value: 2000, - Usage: "the number of epochs to backfill", - }, - }, - Action: func(cctx *cli.Context) error { - api, closer, err := lcli.GetFullNodeAPI(cctx) - if err != nil { - return err - } - defer closer() - - ctx := lcli.ReqContext(cctx) - - curTs, err := api.ChainHead(ctx) - if err != nil { - return err - } - - startHeight := int64(cctx.Int("from")) - if startHeight == 0 { - startHeight = int64(curTs.Height()) - 1 - } - - epochs := cctx.Int("epochs") - - basePath, err := homedir.Expand(cctx.String("repo")) - if err != nil { - return err - } - - dbPath := filepath.Join(basePath, "sqlite", "txhash.db") - db, err := sql.Open("sqlite3", dbPath) - if err != nil { - return err - } - - defer func() { - err := db.Close() - if err != nil { - fmt.Printf("ERROR: closing db: %s", err) - } - }() - - insertStmt, err := db.Prepare("INSERT OR IGNORE INTO eth_tx_hashes(hash, cid) VALUES(?, ?)") - if err != nil { - return err - } - - var totalRowsAffected int64 = 0 - for i := 0; i < epochs; i++ { - epoch := abi.ChainEpoch(startHeight - int64(i)) - - select { - case <-cctx.Done(): - fmt.Println("request cancelled") - return nil - default: - } - - curTsk := curTs.Parents() - execTs, err := api.ChainGetTipSet(ctx, curTsk) - if err != nil { - return fmt.Errorf("failed to call ChainGetTipSet for %s: %w", curTsk, err) - } - - if i%100 == 0 { - log.Infof("%d/%d processing epoch:%d", i, epochs, epoch) - } - - for _, blockheader := range execTs.Blocks() { - blkMsgs, err := api.ChainGetBlockMessages(ctx, blockheader.Cid()) - if err != nil { - log.Infof("Could not get block messages at epoch: %d, stopping walking up the chain", epoch) - epochs = i - break - } - - for _, smsg := range blkMsgs.SecpkMessages { - if smsg.Signature.Type != crypto.SigTypeDelegated { - continue - } - - tx, err := ethtypes.EthTransactionFromSignedFilecoinMessage(smsg) - if err != nil { - return fmt.Errorf("failed to convert from signed message: %w at epoch: %d", err, epoch) - } - - hash, err := tx.TxHash() - if err != nil { - return fmt.Errorf("failed to calculate hash for ethTx: %w at epoch: %d", err, epoch) - } - - res, err := insertStmt.Exec(hash.String(), smsg.Cid().String()) - if err != nil { - return fmt.Errorf("error inserting tx mapping to db: %s at epoch: %d", err, epoch) - } - - rowsAffected, err := res.RowsAffected() - if err != nil { - return fmt.Errorf("error getting rows affected: %s at epoch: %d", err, epoch) - } - - if rowsAffected > 0 { - log.Debugf("Inserted txhash %s, cid: %s at epoch: %d", hash.String(), smsg.Cid().String(), epoch) - } - - totalRowsAffected += rowsAffected - } - } - - curTs = execTs - } - - log.Infof("Done, inserted %d missing txhashes", totalRowsAffected) - - return nil - }, -} diff --git a/cmd/lotus-shed/invariants.go b/cmd/lotus-shed/invariants.go index 91db29e613b..e8a6e734af9 100644 --- a/cmd/lotus-shed/invariants.go +++ b/cmd/lotus-shed/invariants.go @@ -30,7 +30,6 @@ import ( "github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/consensus" "github.com/filecoin-project/lotus/chain/consensus/filcns" - "github.com/filecoin-project/lotus/chain/index" proofsffi "github.com/filecoin-project/lotus/chain/proofs/ffi" "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/store" @@ -128,7 +127,7 @@ var invariantsCmd = &cli.Command{ cs := store.NewChainStore(bs, bs, mds, 
filcns.Weight, nil) defer cs.Close() //nolint:errcheck - sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(proofsffi.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil, mds, index.DummyMsgIndex) + sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(proofsffi.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil, mds, nil) if err != nil { return err } diff --git a/cmd/lotus-shed/main.go b/cmd/lotus-shed/main.go index 9bec737413c..1e7700acdea 100644 --- a/cmd/lotus-shed/main.go +++ b/cmd/lotus-shed/main.go @@ -86,7 +86,7 @@ func main() { invariantsCmd, gasTraceCmd, replayOfflineCmd, - indexesCmd, + chainIndexCmds, FevmAnalyticsCmd, mismatchesCmd, blockCmd, diff --git a/cmd/lotus-shed/migrations.go b/cmd/lotus-shed/migrations.go index 11f09c4d62c..10b7d073d5f 100644 --- a/cmd/lotus-shed/migrations.go +++ b/cmd/lotus-shed/migrations.go @@ -62,7 +62,6 @@ import ( "github.com/filecoin-project/lotus/chain/actors/builtin/verifreg" "github.com/filecoin-project/lotus/chain/consensus" "github.com/filecoin-project/lotus/chain/consensus/filcns" - "github.com/filecoin-project/lotus/chain/index" proofsffi "github.com/filecoin-project/lotus/chain/proofs/ffi" "github.com/filecoin-project/lotus/chain/state" "github.com/filecoin-project/lotus/chain/stmgr" @@ -178,7 +177,8 @@ var migrationsCmd = &cli.Command{ defer cs.Close() //nolint:errcheck // Note: we use a map datastore for the metadata to avoid writing / using cached migration results in the metadata store - sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(proofsffi.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil, datastore.NewMapDatastore(), index.DummyMsgIndex) + sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(proofsffi.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil, + datastore.NewMapDatastore(), nil) if err != nil { return err } diff --git a/cmd/lotus-shed/state-stats.go b/cmd/lotus-shed/state-stats.go index cf865f20194..e5145e5c178 100644 --- a/cmd/lotus-shed/state-stats.go +++ b/cmd/lotus-shed/state-stats.go @@ -33,7 +33,6 @@ import ( "github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/filecoin-project/lotus/chain/consensus" "github.com/filecoin-project/lotus/chain/consensus/filcns" - "github.com/filecoin-project/lotus/chain/index" proofsffi "github.com/filecoin-project/lotus/chain/proofs/ffi" "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/store" @@ -259,7 +258,7 @@ func loadChainStore(ctx context.Context, repoPath string) (*StoreHandle, error) } tsExec := consensus.NewTipSetExecutor(filcns.RewardFunc) - sm, err := stmgr.NewStateManager(cs, tsExec, vm.Syscalls(proofsffi.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil, mds, index.DummyMsgIndex) + sm, err := stmgr.NewStateManager(cs, tsExec, vm.Syscalls(proofsffi.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil, mds, nil) if err != nil { return nil, fmt.Errorf("failed to open state manager: %w", err) } diff --git a/cmd/lotus-sim/simulation/node.go b/cmd/lotus-sim/simulation/node.go index fd9c0284614..cda3e69d839 100644 --- a/cmd/lotus-sim/simulation/node.go +++ b/cmd/lotus-sim/simulation/node.go @@ -12,7 +12,6 @@ import ( "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/chain/consensus" "github.com/filecoin-project/lotus/chain/consensus/filcns" - "github.com/filecoin-project/lotus/chain/index" 
"github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" @@ -107,7 +106,8 @@ func (nd *Node) LoadSim(ctx context.Context, name string) (*Simulation, error) { if err != nil { return nil, xerrors.Errorf("failed to create upgrade schedule for simulation %s: %w", name, err) } - sim.StateManager, err = stmgr.NewStateManager(nd.Chainstore, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(mock.Verifier), us, nil, nd.MetadataDS, index.DummyMsgIndex) + sim.StateManager, err = stmgr.NewStateManager(nd.Chainstore, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(mock.Verifier), us, + nil, nd.MetadataDS, nil) if err != nil { return nil, xerrors.Errorf("failed to create state manager for simulation %s: %w", name, err) } @@ -126,7 +126,8 @@ func (nd *Node) CreateSim(ctx context.Context, name string, head *types.TipSet) if err != nil { return nil, err } - sm, err := stmgr.NewStateManager(nd.Chainstore, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(mock.Verifier), filcns.DefaultUpgradeSchedule(), nil, nd.MetadataDS, index.DummyMsgIndex) + sm, err := stmgr.NewStateManager(nd.Chainstore, consensus.NewTipSetExecutor(filcns.RewardFunc), + vm.Syscalls(mock.Verifier), filcns.DefaultUpgradeSchedule(), nil, nd.MetadataDS, nil) if err != nil { return nil, xerrors.Errorf("creating state manager: %w", err) } diff --git a/cmd/lotus-sim/simulation/simulation.go b/cmd/lotus-sim/simulation/simulation.go index d73a033cf96..9e85c7d6260 100644 --- a/cmd/lotus-sim/simulation/simulation.go +++ b/cmd/lotus-sim/simulation/simulation.go @@ -17,7 +17,6 @@ import ( "github.com/filecoin-project/lotus/chain/consensus" "github.com/filecoin-project/lotus/chain/consensus/filcns" - "github.com/filecoin-project/lotus/chain/index" "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/vm" @@ -202,7 +201,8 @@ func (sim *Simulation) SetUpgradeHeight(nv network.Version, epoch abi.ChainEpoch if err != nil { return err } - sm, err := stmgr.NewStateManager(sim.Node.Chainstore, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(mock.Verifier), newUpgradeSchedule, nil, sim.Node.MetadataDS, index.DummyMsgIndex) + sm, err := stmgr.NewStateManager(sim.Node.Chainstore, consensus.NewTipSetExecutor(filcns.RewardFunc), + vm.Syscalls(mock.Verifier), newUpgradeSchedule, nil, sim.Node.MetadataDS, nil) if err != nil { return err } diff --git a/cmd/lotus/daemon.go b/cmd/lotus/daemon.go index fa9be241c2e..b7fbd63e695 100644 --- a/cmd/lotus/daemon.go +++ b/cmd/lotus/daemon.go @@ -612,7 +612,8 @@ func ImportChain(ctx context.Context, r repo.Repo, fname string, snapshot bool) return xerrors.Errorf("failed to construct beacon schedule: %w", err) } - stm, err := stmgr.NewStateManager(cst, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(proofsffi.ProofVerifier), filcns.DefaultUpgradeSchedule(), shd, mds, index.DummyMsgIndex) + stm, err := stmgr.NewStateManager(cst, consensus.NewTipSetExecutor(filcns.RewardFunc), + vm.Syscalls(proofsffi.ProofVerifier), filcns.DefaultUpgradeSchedule(), shd, mds, nil) if err != nil { return err } @@ -628,8 +629,6 @@ func ImportChain(ctx context.Context, r repo.Repo, fname string, snapshot bool) return err } - // populate the message index if user has EnableMsgIndex enabled - // c, err := lr.Config() if err != nil { return err @@ -638,17 +637,23 @@ func ImportChain(ctx context.Context, r repo.Repo, fname string, 
snapshot bool) if !ok { return xerrors.Errorf("invalid config for repo, got: %T", c) } - if cfg.Index.EnableMsgIndex { - log.Info("populating message index...") - basePath, err := lr.SqlitePath() - if err != nil { - return err - } - if err := index.PopulateAfterSnapshot(ctx, filepath.Join(basePath, index.DefaultDbFilename), cst); err != nil { - return err - } - log.Info("populating message index done") + + if !cfg.ChainIndexer.EnableIndexer { + log.Info("chain indexer is disabled, not populating index from snapshot") + return nil + } + + // populate the chain Index from the snapshot + basePath, err := lr.ChainIndexPath() + if err != nil { + return err + } + + log.Info("populating chain index from snapshot...") + if err := index.PopulateFromSnapshot(ctx, filepath.Join(basePath, index.DefaultDbFilename), cst); err != nil { + return err } + log.Info("populating chain index from snapshot done") return nil } diff --git a/conformance/driver.go b/conformance/driver.go index f0dd5cc2a4c..15ae567063a 100644 --- a/conformance/driver.go +++ b/conformance/driver.go @@ -22,7 +22,6 @@ import ( "github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/filecoin-project/lotus/chain/consensus" "github.com/filecoin-project/lotus/chain/consensus/filcns" - "github.com/filecoin-project/lotus/chain/index" proofsffi "github.com/filecoin-project/lotus/chain/proofs/ffi" "github.com/filecoin-project/lotus/chain/rand" "github.com/filecoin-project/lotus/chain/state" @@ -110,7 +109,7 @@ func (d *Driver) ExecuteTipset(bs blockstore.Blockstore, ds ds.Batching, params cs = store.NewChainStore(bs, bs, ds, filcns.Weight, nil) tse = consensus.NewTipSetExecutor(filcns.RewardFunc) - sm, err = stmgr.NewStateManager(cs, tse, syscalls, filcns.DefaultUpgradeSchedule(), nil, ds, index.DummyMsgIndex) + sm, err = stmgr.NewStateManager(cs, tse, syscalls, filcns.DefaultUpgradeSchedule(), nil, ds, nil) ) if err != nil { return nil, err diff --git a/documentation/en/api-v1-unstable-methods.md b/documentation/en/api-v1-unstable-methods.md index 131b4344257..6becf849f81 100644 --- a/documentation/en/api-v1-unstable-methods.md +++ b/documentation/en/api-v1-unstable-methods.md @@ -37,6 +37,7 @@ * [ChainSetHead](#ChainSetHead) * [ChainStatObj](#ChainStatObj) * [ChainTipSetWeight](#ChainTipSetWeight) + * [ChainValidateIndex](#ChainValidateIndex) * [Create](#Create) * [CreateBackup](#CreateBackup) * [Eth](#Eth) @@ -1241,6 +1242,65 @@ Inputs: Response: `"0"` +### ChainValidateIndex +ChainValidateIndex validates the integrity of and optionally backfills +the chain index at a specific epoch. + +It can be used to: + +1. Validate the chain index at a specific epoch: + - Ensures consistency between indexed data and actual chain state + - Reports any errors found during validation (i.e. the indexed data does not match the actual chain state, missing data, etc.) + +2. Optionally backfill missing data: + - Backfills data if the index is missing information for the specified epoch + - Backfilling only occurs when the `backfill` parameter is set to `true` + +3. Detect "holes" in the index: + - If `backfill` is `false` and the index lacks data for the specified epoch, the API returns an error indicating missing data + +Parameters: + - epoch: The specific chain epoch for which to validate/backfill the index. + - backfill: A boolean flag indicating whether to attempt backfilling of missing data if the index does not have data for the + specified epoch. 
+
+Returns:
+  - *types.IndexValidation: A pointer to an IndexValidation struct containing the results of the validation/backfill.
+  - error: An error object if the validation/backfill fails. The error message will contain details about the index
+    corruption if the call fails because of an inconsistency between indexed data and the actual chain state.
+    Note: The API returns an error if the index does not have data for the specified epoch and backfill is set to false.
+
+
+Perms: write
+
+Inputs:
+```json
+[
+  10101,
+  true
+]
+```
+
+Response:
+```json
+{
+  "TipSetKey": [
+    {
+      "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+    },
+    {
+      "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+    }
+  ],
+  "Height": 10101,
+  "IndexedMessagesCount": 42,
+  "IndexedEventsCount": 42,
+  "IndexedEventEntriesCount": 42,
+  "Backfilled": true,
+  "IsNullRound": true
+}
+```
+
 ## Create
diff --git a/documentation/en/chain-indexer-overview-for-operators.md b/documentation/en/chain-indexer-overview-for-operators.md
new file mode 100644
index 00000000000..0e5869c510b
--- /dev/null
+++ b/documentation/en/chain-indexer-overview-for-operators.md
@@ -0,0 +1,418 @@
+# ChainIndexer Documentation for Operators
+
+- [Introduction](#introduction)
+- [ChainIndexer Config](#chainindexer-config)
+  - [Enablement](#enablement)
+  - [Garbage Collection](#garbage-collection)
+    - [Recommendations](#recommendations)
+  - [Removed Options](#removed-options)
+- [Upgrade](#upgrade)
+  - [Preparation](#preparation)
+  - [Upgrade when using existing `LOTUS_PATH` chain state](#upgrade-when-using-existing-lotus_path-chain-state)
+    - [Part 1: Create a backfilled ChainIndexer `chainindex.db`](#part-1-create-a-backfilled-chainindexer-chainindexdb)
+    - [Part 2: Create a copyable `chainindex.db`](#part-2-create-a-copyable-chainindexdb)
+    - [Part 3: Update other nodes](#part-3-update-other-nodes)
+    - [Part 4: Cleanup](#part-4-cleanup)
+  - [Upgrade when importing chain state from a snapshot](#upgrade-when-importing-chain-state-from-a-snapshot)
+- [Backfill](#backfill)
+  - [Backfill Timing](#backfill-timing)
+  - [Backfill Disk Space Requirements](#backfill-disk-space-requirements)
+  - [`lotus-shed chainindex validate-backfill` CLI tool](#lotus-shed-chainindex-validate-backfill-cli-tool)
+    - [Usage](#usage)
+- [Regular Checks](#regular-checks)
+- [Downgrade Steps](#downgrade-steps)
+- [Terminology](#terminology)
+  - [Previous Indexing System](#previous-indexing-system)
+  - [ChainIndexer Indexing System](#chainindexer-indexing-system)
+- [Appendix](#appendix)
+  - [Why isn't there an automated migration from the previous indexing system to the ChainIndexer indexing system?](#why-isnt-there-an-automated-migration-from-the-previous-indexing-system-to-the-chainindexer-indexing-system)
+  - [`ChainValidateIndex` RPC API](#chainvalidateindex-rpc-api)
+
+## Introduction
+
+This document is for externally-available, high-performance RPC providers and for node operators who use or expose the Ethereum and/or events APIs. It walks through the configuration changes, migration flow and operations/maintenance work needed to enable, backfill and maintain the [`ChainIndexer`](#chainindexer-indexing-system). The justification for and benefits of the `ChainIndexer` are documented [here](https://github.com/filecoin-project/lotus/issues/12453).
+
+The ChainIndexer is now also required if you enable:
+1. Ethereum (`eth_*`) APIs using the `EnableEthRPC` Lotus configuration option OR
+2. ActorEvent APIs using the `EnableActorEventsAPI` Lotus configuration option
+
+**Note: If you are a Storage Provider or node operator who does not serve public RPC requests or does not need Ethereum or Event APIs (i.e., if `Fevm.EnableEthRPC = false` and `Events.EnableActorEventsAPI = false`, their default values), you can skip this document as the `ChainIndexer` is already disabled by default**.
+
+## ChainIndexer Config
+### Enablement
+
+The following must be enabled on a Lotus node before starting, as they are disabled by default:
+
+```toml
+[Fevm]
+# Enable the ETH RPC APIs.
+# This is not required for ChainIndexer support, but ChainIndexer is required if you enable this.
+  EnableEthRPC = true
+
+[Events]
+# Enable the Actor Events APIs.
+# This is not required for ChainIndexer support, but ChainIndexer is required if you enable this.
+  EnableActorEventsAPI = true
+
+[ChainIndexer]
+# Enable the ChainIndexer, which is required for the ETH RPC APIs and Actor Events APIs.
+# If they are enabled, but the ChainIndexer is not, Lotus will exit during startup.
+# (ChainIndexer needs to be explicitly enabled to signal to the node operator the extra
+# supporting functionality that will now be running.)
+  EnableIndexer = true
+```
+
+You can learn more about these configuration options and other configuration options available for the `ChainIndexer` [here](https://github.com/filecoin-project/lotus/blob/master/documentation/en/default-lotus-config.toml).
+
+
+### Garbage Collection
+
+The `ChainIndexer` includes a garbage collection (GC) mechanism to manage the amount of historical data retained. See the [ChainIndexer size requirements](#backfill-disk-space-requirements).
+
+By default, GC is disabled to preserve all indexed data.
+
+To configure GC, use the `GCRetentionEpochs` parameter in the `ChainIndexer` section of your config.
+
+The ChainIndexer [periodically runs](https://github.com/filecoin-project/lotus/blob/master/chain/index/gc.go#L15) GC if `GCRetentionEpochs` is > 0 and removes indexed data for epochs older than `(current_head_height - GCRetentionEpochs)`.
+
+```toml
+[ChainIndexer]
+  GCRetentionEpochs = X # Replace X with your desired value
+```
+
+- Setting `GCRetentionEpochs` to 0 (**default**) disables GC.
+- Any positive value enables GC and determines the number of epochs of historical data to retain.
+
+#### Recommendations
+
+1. **Archival Nodes**: **Keep GC disabled** (`GCRetentionEpochs` = 0) to retain all indexed data.
+
+2. **Non-Archival Nodes**: Set `GCRetentionEpochs` to match the amount of chain state your node retains.
+
+**Example:** if your node is configured to retain Filecoin chain state with a Splitstore Hotstore that approximates 2 days of epochs, set `GCRetentionEpochs` to at least `retentionDays * epochsPerDay = 2 * 2880 = 5760`.
+
+**Warning:** Setting this value below the chain state retention period may degrade RPC performance and reliability because the ChainIndexer will lack data for epochs still present in the chain state.
+
+**Note:** `Chainstore.Splitstore` is configured in terms of bytes (not epochs) and `ChainIndexer.GCRetentionEpochs` is in terms of epochs (not bytes). For the purposes of this discussion, we're assuming operators have determined `Chainstore.Splitstore.HotStoreMaxSpaceTarget` and `Chainstore.Splitstore.HotStoreMaxSpaceThreshold` values that approximate a certain number of days of storage in the Splitstore Hotstore. The guidance here is to make sure this approximation exceeds `ChainIndexer.GCRetentionEpochs`.
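To make the day-to-epoch arithmetic above concrete, here is a minimal Go sketch of the conversion. It assumes mainnet's 30-second epochs (2880 epochs per day); the `gcRetentionEpochs` helper is purely illustrative and not part of Lotus:

```go
package main

import "fmt"

// epochsPerDay assumes mainnet's 30-second epochs: 2 * 60 * 24 = 2880.
const epochsPerDay = 2880

// gcRetentionEpochs converts the number of days of chain state a node retains
// (e.g. approximated by its Splitstore Hotstore sizing) into a matching
// ChainIndexer.GCRetentionEpochs value. Illustrative helper only.
func gcRetentionEpochs(retentionDays int) int {
	return retentionDays * epochsPerDay
}

func main() {
	// Example from the text: a node retaining roughly 2 days of chain state.
	fmt.Println(gcRetentionEpochs(2)) // 5760
}
```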
+
+### Removed Options
+
+**Note: The following config options no longer exist in Lotus and have been removed in favor of the ChainIndexer config options explained above. They can be removed from your config file when upgrading to Lotus v1.31.0.**
+
+```toml
+[Fevm]
+EthTxHashMappingLifetimeDays
+
+[Events]
+DisableRealTimeFilterAPI
+DisableHistoricFilterAPI
+DatabasePath
+
+[Index]
+EnableMsgIndex
+```
+
+The `Fevm.Events` options were marked as deprecated in Lotus 1.26, having been moved to the new top-level `Events` section, and have now been removed with Lotus 1.31.
+
+* `Fevm.Events.DatabasePath` (no replacement available)
+* `Fevm.Events.DisableRealTimeFilterAPI` (no replacement available)
+* `Fevm.Events.DisableHistoricFilterAPI` (no replacement available)
+* `Fevm.Events.FilterTTL` (use `Events.FilterTTL` instead)
+* `Fevm.Events.MaxFilters` (use `Events.MaxFilters` instead)
+* `Fevm.Events.MaxFilterResults` (use `Events.MaxFilterResults` instead)
+* `Fevm.Events.MaxFilterHeightRange` (use `Events.MaxFilterHeightRange` instead)
+
+## Upgrade
+
+### Preparation
+One can upgrade/downgrade between [pre-ChainIndexer](#previous-indexing-system) and [with-ChainIndexer](#chainindexer-indexing-system) Lotus versions without conflict because they persist state to different directories and don't rely on each other. No backup is necessary (but extra backups don't hurt). There is still a [backfilling step when downgrading](#downgrade-steps), though.
+
+These upgrade steps assume one has multiple nodes in their fleet and can afford to have a node not handling traffic, potentially for days per [backfill timing below](#backfill-timing).
+
+One should also check to ensure they have [sufficient disk space](#backfill-disk-space-requirements).
+
+### Upgrade when using existing `LOTUS_PATH` chain state
+* This upgrade path assumes one has an existing node with existing `LOTUS_PATH` chain state they want to keep using and they don't want to import chain state from a snapshot. A prime example is an existing archival node.
+* Perform the [preparation steps](#preparation) before proceeding.
+* See [here for the snapshot upgrade path](#upgrade-when-importing-chain-state-from-a-snapshot).
+
+#### Part 1: Create a backfilled ChainIndexer `chainindex.db`
+1. **Route traffic away from an initial node**
+   - Example: prevent a load balancer from routing traffic to a designated node.
+2. **Stop the designated Lotus Node**
+   - Stop the designated Lotus node before starting the upgrade and backfill process.
+3. **Update Configuration**
+   - Modify the Lotus configuration to enable the `ChainIndexer` as described in the [`ChainIndexer Config` section above](#chainindexer-config).
+4. **Restart Lotus Node**
+   - Restart the Lotus node with the new configuration.
+   - The `ChainIndexer` will begin indexing **real-time chain state changes** immediately in the `${LOTUS_PATH}/chainindex` directory.
+   - *However, it will not automatically index any historical chain state (i.e., any previously existing chain state prior to the upgrade).*
+5. **Backfill**
+   - See the ["Backfill" section below](#backfill).
+   - This could potentially take days per [Backfill Timing](#backfill-timing).
+6. **Ensure node health**
+   - Perform whatever steps are usually done to validate a node's health before handling traffic (e.g., log scans, smoke tests).
+7. **Route traffic to the backfilled node that is now using ChainIndexer**
**Ensure equal or better correctness and performance**
+   - ChainIndexer-using nodes should have full correctness and better performance when compared to [pre-ChainIndexer](#previous-indexing-system) nodes.
+
+#### Part 2: Create a copyable `chainindex.db`
+[Part 3 below](#part-3-update-other-nodes) is going to use the backfilled `chainindex.db` from above with other nodes so they don't have to undergo as long a backfill process. That said, the backfilled `chainindex.db` shouldn't be copied while the updated-and-backfilled node is running. Options include:
+1. Stop the updated-and-backfilled node before copying it.
+   * `cp ${LOTUS_PATH}/chainindex/chainindex.db /copy/destination/path/chainindex.db`
+2. While the node is running, use the `sqlite3` CLI utility (which should be at least version 3.37) to clone it.
+   * `sqlite3 ${LOTUS_PATH}/chainindex/chainindex.db '.clone /copy/destination/path/chainindex.db'`
+Both of these will result in a file `/copy/destination/path/chainindex.db` that can be copied around in part 3 below.
+
+#### Part 3: Update other nodes
+Now that one has a `${LOTUS_PATH}/chainindex/chainindex.db` from a trusted node, it can be copied to additional nodes to expedite bootstrapping.
+1. **Route traffic away from the next node to upgrade**
+2. **Stop the Lotus Node**
+3. **Update Configuration**
+   - Modify the Lotus configuration to enable the `ChainIndexer` as described in the [`ChainIndexer Config` section above](#chainindexer-config).
+4. **Copy `/copy/destination/path/chainindex.db` from the trusted node in [part 2 above](#part-2-create-a-copyable-chainindexdb)**
+5. **Restart Lotus Node**
+   - Restart your Lotus node with the new configuration.
+   - The `ChainIndexer` will begin indexing **real-time chain state changes** immediately in the `${LOTUS_PATH}/chainindex` directory.
+   - *However, it will not automatically index the chain state from where the copied-in `chainindex.db` ends. This will need to be done manually.*
+6. **Backfill the small data gap from after the copied-in `chainindex.db`**
+   - See the [`Backfill` section below](#backfill).
+   - This should be quick since this gap presumably spans minutes, hours, or days of epochs rather than months.
+7. **Ensure node health**
+   - Perform whatever steps are usually done to validate a node's health before handling traffic (e.g., log scans, smoke tests)
+8. **Route traffic to this newly upgraded ChainIndexer-enabled node**
+9. **Repeat for other nodes that need to upgrade**
+
+#### Part 4: Cleanup
+It's recommended to keep the [pre-ChainIndexer](#previous-indexing-system) indexing database directory (`${LOTUS_PATH}/sqlite`) around until you've confirmed you don't need to [downgrade](#downgrade-steps). After sustained successful operations following the upgrade, the [pre-ChainIndexer](#previous-indexing-system) database directory can be removed to reclaim disk space.
+
+### Upgrade when importing chain state from a snapshot
+Note: this upgrade path assumes one is starting a fresh node and importing chain state with a snapshot (i.e., `lotus daemon --import-snapshot`). A prime example is an operator adding another node with limited history to their fleet. If not using a snapshot, see the ["upgrade with existing chain state" path](#upgrade-when-using-existing-lotus_path-chain-state).
+
+1.
**Review the [preparation steps](#preparation)**
+   - The disk space and upgrade times will be much smaller than the ["upgrade with existing chain state" path](#upgrade-when-using-existing-lotus_path-chain-state), assuming this is a non-archival node that is only indexing a limited number of days of epochs.
+2. **Ensure the node is stopped and won't take any traffic initially upon starting**
+   - Example: prevent a load balancer from routing traffic to the node.
+3. **Update Configuration**
+   - Modify the Lotus configuration to enable the `ChainIndexer` as described in the [`ChainIndexer Config` section above](#chainindexer-config).
+4. **Start lotus with the snapshot import**
+   - `lotus daemon --import-snapshot`
+5. **Wait for the Lotus daemon to sync**
+   - As the Lotus daemon syncs the chain, the ChainIndexer will automatically index the synced messages, but it will not automatically index ETH RPC events and transactions.
+6. **Backfill so ETH RPC events and transactions are indexed as well**
+   - See the ["Backfill" section below](#backfill).
+   - This will look something like `lotus-shed chainindex validate-backfill --from <from_epoch> --to <to_epoch> --backfill`
+   - Example: if the current head is epoch 4360000 and one wants to index a day's worth of epochs (2880), then they'd use `--from 4360000 --to 4357120`
+7. **Ensure node health**
+   - Perform whatever steps are usually done to validate a node's health before handling traffic (e.g., log scans, smoke tests)
+8. **Route traffic to the backfilled node that is now using ChainIndexer**
+
+## Backfill
+There is no automated migration from the [pre-ChainIndexer indices](#previous-indexing-system) to the [ChainIndexer](#chainindexer-indexing-system). Instead, one needs to index historical chain state (i.e., backfill) if RPC access to that historical state is required. (If curious, [read why](#why-isnt-there-an-automated-migration-from-the-previous-indexing-system-to-the-chainindexer-indexing-system).)
+
+### Backfill Timing
+
+Backfilling the new `ChainIndexer` was [benchmarked to take approximately 12 hours per month of epochs on a sample archival node doing no other work](https://github.com/filecoin-project/lotus/issues/12453#issuecomment-2405306468). Your results will vary depending on hardware, network, and competing processes. This means that if one is upgrading a FEVM archival node, they should plan on the node being out of production service for ~10 days. Additional nodes don't need to go through the same time-intensive process, though: they can get a `${LOTUS_PATH}/chainindex/chainindex.db` copied from a trusted node per the [upgrade steps](#upgrade).
+
+### Backfill Disk Space Requirements
+
+As of October 2024, the ChainIndexer accumulates approximately 340 MiB of data per day, or ~10 GiB per month (see [here](https://github.com/filecoin-project/lotus/issues/12453)).
+
+### `lotus-shed chainindex validate-backfill` CLI tool
+The `lotus-shed chainindex validate-backfill` command validates, and optionally backfills, the chain index over a range of epochs, since calling the [`ChainValidateIndex` API](#chainvalidateindex-rpc-api) for one epoch at a time can be cumbersome when covering a range of historical epochs. The tool wraps the `ChainValidateIndex` API to efficiently process multiple epochs; a scripted sketch follows.
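+For example, to validate and backfill the most recent day's worth of epochs, one could derive the range from the current head. This is a minimal sketch: it assumes `lotus` and `lotus-shed` are on `$PATH`, the daemon is fully synced with `ChainIndexer` enabled, and `lotus chain head --height` prints the current head epoch, as in the validation script in the "Regular Checks" section below.
+
+```bash
+#!/bin/bash
+# Sketch: backfill the most recent day's worth of epochs (mainnet: 2880/day).
+EPOCHS_PER_DAY=2880
+
+# Current head epoch, minus 1 to account for deferred execution.
+from_epoch=$(($(lotus chain head --height) - 1))
+to_epoch=$((from_epoch - EPOCHS_PER_DAY))
+
+lotus-shed chainindex validate-backfill \
+  --from="$from_epoch" \
+  --to="$to_epoch" \
+  --backfill
+```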
+
+**Note: This command can only be run when the Lotus daemon is already running with the [`ChainIndexer` enabled](#enablement), as it depends on the `ChainValidateIndex` RPC API.**
+
+#### Usage
+
+```
+lotus-shed chainindex validate-backfill --from <from_epoch> --to <to_epoch> [--backfill] [--log-good]
+```
+
+The command validates the chain index entries for each epoch in the specified range, checking for missing or inconsistent entries (i.e., where the indexed data does not match the actual chain state). If `--backfill` is enabled (which it is by default), it will attempt to backfill any missing entries using the `ChainValidateIndex` API.
+
+You can learn more about how to use the tool with `lotus-shed chainindex validate-backfill -h`.
+
+Note: If you are using a non-standard Lotus repo directory, you can run the command with `lotus-shed -repo /path/to/lotus/repo chainindex validate-backfill ...`, or by setting the `LOTUS_REPO` environment variable.
+
+## Regular Checks
+
+During normal operation, it is possible, but not strictly necessary, to run periodic checks on the index to ensure it remains consistent with the chain state. The ChainIndexer is designed to be resilient and consistent, but unconsidered edge cases or bugs could cause the index to become inconsistent.
+
+The `lotus-shed chainindex validate-backfill` command can be used to validate the index over a range of epochs and can be run periodically via cron, systemd timers, or some other means, to ensure the index remains consistent. An example bash script one could use to validate the index over the last 24 hours every 24 hours is provided below:
+
+
+```bash
+#!/bin/bash
+
+LOGFILE="/var/log/lotus_chainindex_validate.log"
+current_date=$(date '+%Y-%m-%d %H:%M:%S')
+
+# Configurable setting for the backfill option; set to 'false' to simply report errors, as we should
+# not expect regular errors in the index.
+BACKFILL_OPTION=false
+
+# Path to the lotus-shed binary
+LOTUS_SHED_PATH="/path/to/lotus-shed"
+
+# Get the current chain head epoch number
+start_epoch=$(lotus chain head --height)
+# Subtract 1 to account for deferred execution
+start_epoch=$((start_epoch - 1))
+
+# Define the number of epochs for validation; set to 3000 to validate the last 24 hours plus some buffer
+epochs_to_validate=3000
+
+# Calculate the end epoch
+end_epoch=$((start_epoch - epochs_to_validate + 1))
+
+# Run the lotus-shed chainindex validate-backfill command
+validation_output=$("$LOTUS_SHED_PATH" chainindex validate-backfill --from="$start_epoch" --to="$end_epoch" --backfill="$BACKFILL_OPTION" --quiet 2>&1)
+
+# Check the exit status of the command to determine if errors occurred
+if [ $? -ne 0 ]; then
+    # Log the error with a timestamp
+    {
+        echo "[$current_date] Validation error:"
+        echo "$validation_output"
+    } >> "$LOGFILE"
+else
+    echo "[$current_date] Validation completed successfully." >> "$LOGFILE"
+fi
+```
+
+Note that this script simply logs any errors that occur during the validation process. It is up to the operator to determine the appropriate response, including reporting potential bugs to the Lotus maintainers. A further enhancement could be to send an alert to an operator if an error occurs.
+
+## Downgrade Steps
+
+In case you need to downgrade to the [previous indexing system](#previous-indexing-system), follow these steps:
+
+1. Prevent the node from receiving traffic.
+2. Stop your Lotus node.
+3.
Download or build a Lotus binary for the rollback version that has the implementation of the old `EthTxHashLookup`, `MsgIndex`, and `EventIndex` indices.
+4. Ensure that you've set the correct config for the existing `EthTxHashLookup`, `MsgIndex`, and `EventIndex` indices in the `config.toml` file.
+5. Restart your Lotus node.
+6. Backfill the `EthTxHashLookup`, `MsgIndex`, and `EventIndex` indices using the `lotus-shed index backfill-*` CLI tooling available in the [previous indexing system](#previous-indexing-system) for the range of epochs between the upgrade to `ChainIndexer` and the rollback of `ChainIndexer`.
+7. Route traffic back to the node.
+
+## Terminology
+### Previous Indexing System
+* This corresponds to the indexing system used in Lotus versions before v1.31.0.
+* It has been replaced by the [ChainIndexer](#chainindexer-indexing-system).
+* It was composed of three indexers using three separate databases: [`EthTxHashLookup`](https://github.com/filecoin-project/lotus/blob/v1.31.0/chain/ethhashlookup/eth_transaction_hash_lookup.go), [`MsgIndex`](https://github.com/filecoin-project/lotus/blob/v1.31.0/chain/index/msgindex.go), and [`EventIndex`](https://github.com/filecoin-project/lotus/blob/v1.31.0/chain/events/filter/index.go).
+* It persisted state to the [removed option](#removed-options) for `Events.DatabasePath`, which defaulted to `${LOTUS_PATH}/sqlite`.
+* It had CLI backfill tooling: `lotus-shed index backfill-*`
+
+### ChainIndexer Indexing System
+* This corresponds to the indexing system used in Lotus versions v1.31.0 onwards.
+* It replaced the [previous indexing system](#previous-indexing-system).
+* It is composed of a single indexer, [`ChainIndexer`](https://github.com/filecoin-project/lotus/blob/master/chain/index/indexer.go), using a [single database for transactions, messages, and events](https://github.com/filecoin-project/lotus/blob/master/chain/index/ddls.go).
+* It persists state to `${LOTUS_PATH}/chainindex`.
+* It has this CLI backfill tooling: [`lotus-shed chainindex validate-backfill`](#lotus-shed-chainindex-validate-backfill-cli-tool)
+* **Storage requirements:** See the [backfill disk space requirements](#backfill-disk-space-requirements).
+* **Backfill times:** See the [backfill timing](#backfill-timing).
+
+## Appendix
+
+### Why isn't there an automated migration from the [previous indexing system](#previous-indexing-system) to the [ChainIndexer indexing system](#chainindexer-indexing-system)?
+
+The decision not to invest here ultimately comes down to the development-time cost vs. benefit ratio.
+
+For archival nodes, we don't have the confidence that the [previous indexing system](#previous-indexing-system) has the correct data to bootstrap from. In 2024, Lotus maintainers have fixed multiple bugs in the [previous indexing system](#previous-indexing-system), but they still see reports of missing data, mismatched event index counts, etc. Investing here in a migration isn't guaranteed to yield a correct index. As a result, one would still need to perform the [backfill steps](#backfill) to validate and correct the data anyway. While this should be faster with partially correct data than with no data, it would still require an archival node to take an outage on the order of days, which isn't good enough.
+
+The schemas of [the old fragmented indices](#previous-indexing-system) don't naturally map to the schema of the [ChainIndexer](#chainindexer-indexing-system). There would be additional data wrangling work to ultimately get this right.
+
+[Backfilling](#backfill) is a one-time cost. If an operator is running multiple nodes, they only need to do it on one node and can then simply copy the index over to the other nodes per [the upgrade steps](#upgrade). The new `chainindex.db` copy can also be shared among operators if there is a trust relationship.
+
+Note that this lack of an automated migration is primarily a concern for the relatively small-in-number archival nodes. It isn't as much of a concern for snapshot-synced nodes: nodes that hold only a portion of the chain state because they only serve queries going back a few days can expect the backfill to take closer to an hour per [backfill timing](#backfill-timing).
+
+### `ChainValidateIndex` RPC API
+
+Please refer to the [Lotus API documentation](https://github.com/filecoin-project/lotus/blob/master/documentation/en/api-v1-unstable-methods.md) for detailed documentation of the `ChainValidateIndex` JSON RPC API.
+
+The `ChainValidateIndex` JSON RPC API serves a dual purpose: it validates/diagnoses the integrity of the index at a specific epoch (i.e., it ensures consistency between indexed data and actual chain state), while also providing the option to backfill the `ChainIndexer` if it does not have data for the specified epoch.
+
+The `ChainValidateIndex` RPC API is available for use once the Lotus daemon has started with the `ChainIndexer` [enabled](#enablement).
+
+Here are some examples of how to use the `ChainValidateIndex` JSON RPC API for validating/backfilling the index:
+
+1) Validating the index for an epoch that is a NULL round:
+
+```bash
+curl -X POST -H "Content-Type: application/json" --data '{
+  "jsonrpc": "2.0",
+  "method": "Filecoin.ChainValidateIndex",
+  "params": [1954383, false],
+  "id": 1
+}' http://localhost:1234/rpc/v1 | jq .
+```
+```json
+{
+  "id": 1,
+  "jsonrpc": "2.0",
+  "result": {
+    "TipSetKey": [],
+    "Height": 1954383,
+    "IndexedMessagesCount": 0,
+    "IndexedEventsCount": 0,
+    "Backfilled": false,
+    "IsNullRound": true
+  }
+}
+```
+
+2) Validating the index for an epoch for which the indexer has missing data, with backfilling disabled:
+
+```bash
+curl -X POST -H "Content-Type: application/json" --data '{
+  "jsonrpc": "2.0",
+  "method": "Filecoin.ChainValidateIndex",
+  "params": [1995103, false],
+  "id": 1
+}' http://localhost:1234/rpc/v1 | jq .
+```
+```json
+{
+  "error": {
+    "code": 1,
+    "message": "missing tipset at height 1995103 in the chain index, set backfill flag to true to fix"
+  },
+  "id": 1,
+  "jsonrpc": "2.0"
+}
+```
+
+3) Validating the index for an epoch for which the indexer has missing data, with backfilling enabled:
+
+```bash
+curl -X POST -H "Content-Type: application/json" --data '{
+  "jsonrpc": "2.0",
+  "method": "Filecoin.ChainValidateIndex",
+  "params": [1995103, true],
+  "id": 1
+}' http://localhost:1234/rpc/v1 | jq .
+``` +```json +{ + "id": 1, + "jsonrpc": "2.0", + "result": { + "TipSetKey": [ + { + "/": "bafy2bzacebvzbpbdwxsclwyorlzclv6cbsvcbtq34sajow2sn7mnksy3wehew" + }, + { + "/": "bafy2bzacedgei4ve3spkfp3oou5oajwd5cogn7lljsuvoj644fgj3gv7luamu" + }, + { + "/": "bafy2bzacebbpcnjoi46obpaheylyxfy5y2lrtdsyglqw3hx2qg64quip5u76s" + } + ], + "Height": 1995103, + "IndexedMessagesCount": 0, + "IndexedEventsCount": 0, + "Backfilled": true, + "IsNullRound": false + } +} +``` diff --git a/documentation/en/default-lotus-config.toml b/documentation/en/default-lotus-config.toml index 7f39f23a5b8..8d3eda9d496 100644 --- a/documentation/en/default-lotus-config.toml +++ b/documentation/en/default-lotus-config.toml @@ -221,20 +221,13 @@ [Fevm] - # EnableEthRPC enables eth_ rpc, and enables storing a mapping of eth transaction hashes to filecoin message Cids. - # This will also enable the RealTimeFilterAPI and HistoricFilterAPI by default, but they can be disabled by config options above. + # EnableEthRPC enables eth_ RPC methods. + # Note: Setting this to true will also require that ChainIndexer is enabled, otherwise it will cause an error at startup. # # type: bool # env var: LOTUS_FEVM_ENABLEETHRPC #EnableEthRPC = false - # EthTxHashMappingLifetimeDays the transaction hash lookup database will delete mappings that have been stored for more than x days - # Set to 0 to keep all mappings - # - # type: int - # env var: LOTUS_FEVM_ETHTXHASHMAPPINGLIFETIMEDAYS - #EthTxHashMappingLifetimeDays = 0 - # EthTraceFilterMaxResults sets the maximum results returned per request by trace_filter # # type: uint64 @@ -253,25 +246,9 @@ [Events] - # DisableRealTimeFilterAPI will disable the RealTimeFilterAPI that can create and query filters for actor events as they are emitted. - # The API is enabled when Fevm.EnableEthRPC or EnableActorEventsAPI is true, but can be disabled selectively with this flag. - # - # type: bool - # env var: LOTUS_EVENTS_DISABLEREALTIMEFILTERAPI - #DisableRealTimeFilterAPI = false - - # DisableHistoricFilterAPI will disable the HistoricFilterAPI that can create and query filters for actor events - # that occurred in the past. HistoricFilterAPI maintains a queryable index of events. - # The API is enabled when Fevm.EnableEthRPC or EnableActorEventsAPI is true, but can be disabled selectively with this flag. - # - # type: bool - # env var: LOTUS_EVENTS_DISABLEHISTORICFILTERAPI - #DisableHistoricFilterAPI = false - # EnableActorEventsAPI enables the Actor events API that enables clients to consume events # emitted by (smart contracts + built-in Actors). - # This will also enable the RealTimeFilterAPI and HistoricFilterAPI by default, but they can be - # disabled by setting their respective Disable* options. + # Note: Setting this to true will also require that ChainIndexer is enabled, otherwise it will cause an error at startup. # # type: bool # env var: LOTUS_EVENTS_ENABLEACTOREVENTSAPI @@ -307,23 +284,64 @@ # env var: LOTUS_EVENTS_MAXFILTERHEIGHTRANGE #MaxFilterHeightRange = 2880 - # DatabasePath is the full path to a sqlite database that will be used to index actor events to - # support the historic filter APIs. If the database does not exist it will be created. The directory containing - # the database must already exist and be writeable. If a relative path is provided here, sqlite treats it as - # relative to the CWD (current working directory). + +[ChainIndexer] + # EnableIndexer controls whether the chain indexer is active. 
+ # The chain indexer is responsible for indexing tipsets, messages, and events from the chain state. + # It is a crucial component for optimizing Lotus RPC response times. + # + # Default: false (indexer is disabled) + # + # Setting this to true will enable the indexer, which will significantly improve RPC performance. + # It is strongly recommended to keep this set to true if you are an RPC provider. # - # type: string - # env var: LOTUS_EVENTS_DATABASEPATH - #DatabasePath = "" + # type: bool + # env var: LOTUS_CHAININDEXER_ENABLEINDEXER + #EnableIndexer = false + # GCRetentionEpochs specifies the number of epochs for which data is retained in the Indexer. + # The garbage collection (GC) process removes data older than this retention period. + # Setting this to 0 disables GC, preserving all historical data indefinitely. + # + # If set, the minimum value must be greater than builtin.EpochsInDay (i.e. "2880" epochs for mainnet). + # This ensures a reasonable retention period for the indexed data. + # + # Default: 0 (GC disabled) + # + # type: int64 + # env var: LOTUS_CHAININDEXER_GCRETENTIONEPOCHS + #GCRetentionEpochs = 0 -[Index] - # EXPERIMENTAL FEATURE. USE WITH CAUTION - # EnableMsgIndex enables indexing of messages on chain. + # ReconcileEmptyIndex determines whether to reconcile the index with the chain state + # during startup when the index is empty. + # + # When set to true: + # - On startup, if the index is empty, the indexer will index the available + # chain state on the node albeit within the MaxReconcileTipsets limit. + # + # When set to false: + # - The indexer will not automatically re-index the chain state on startup if the index is empty. + # + # Default: false + # + # Note: The number of tipsets reconciled (i.e. indexed) during this process can be + # controlled using the MaxReconcileTipsets option. # # type: bool - # env var: LOTUS_INDEX_ENABLEMSGINDEX - #EnableMsgIndex = false + # env var: LOTUS_CHAININDEXER_RECONCILEEMPTYINDEX + #ReconcileEmptyIndex = false + + # MaxReconcileTipsets limits the number of tipsets to reconcile with the chain during startup. + # It represents the maximum number of tipsets to index from the chain state that are absent in the index. + # + # Default: 3 * epochsPerDay (approximately 3 days of chain history) + # + # Note: Setting this value too low may result in incomplete indexing, while setting it too high + # may increase startup time. 
+ # + # type: uint64 + # env var: LOTUS_CHAININDEXER_MAXRECONCILETIPSETS + #MaxReconcileTipsets = 8640 [FaultReporter] diff --git a/itests/eth_config_test.go b/itests/eth_config_test.go index 8b74d011aa2..ecd0379e2b2 100644 --- a/itests/eth_config_test.go +++ b/itests/eth_config_test.go @@ -17,7 +17,7 @@ func TestEthFilterAPIDisabledViaConfig(t *testing.T) { kit.QuietMiningLogs() - // pass kit.DisableEthRPC() so RealTimeFilterAPI will not be enabled + // pass kit.DisableEthRPC() to disable ETH RPC client, _, _ := kit.EnsembleMinimal(t, kit.MockProofs(), kit.ThroughRPC(), kit.DisableEthRPC()) _, err := client.EthNewPendingTransactionFilter(ctx) diff --git a/itests/eth_filter_test.go b/itests/eth_filter_test.go index 2872717a975..a1ba56bcd1f 100644 --- a/itests/eth_filter_test.go +++ b/itests/eth_filter_test.go @@ -523,6 +523,56 @@ func TestEthGetLogsBasic(t *testing.T) { } AssertEthLogs(t, rctLogs, expected, received) + + head, err := client.ChainHead(ctx) + require.NoError(err) + + for height := 0; height < int(head.Height()); height++ { + // for each tipset + ts, err := client.ChainGetTipSetByHeight(ctx, abi.ChainEpoch(height), types.EmptyTSK) + require.NoError(err) + + if ts.Height() != abi.ChainEpoch(height) { + iv, err := client.ChainValidateIndex(ctx, abi.ChainEpoch(height), false) + require.NoError(err) + require.True(iv.IsNullRound) + t.Logf("tipset %d is a null round", height) + continue + } + + expectedValidation := types.IndexValidation{ + TipSetKey: ts.Key(), + Height: ts.Height(), + IndexedMessagesCount: 0, + IndexedEventsCount: 0, + IndexedEventEntriesCount: 0, + Backfilled: false, + IsNullRound: false, + } + messages, err := client.ChainGetMessagesInTipset(ctx, ts.Key()) + require.NoError(err) + expectedValidation.IndexedMessagesCount = uint64(len(messages)) + for _, m := range messages { + receipt, err := client.StateSearchMsg(ctx, types.EmptyTSK, m.Cid, -1, false) + require.NoError(err) + require.NotNil(receipt) + // receipt + if receipt.Receipt.EventsRoot != nil { + events, err := client.ChainGetEvents(ctx, *receipt.Receipt.EventsRoot) + require.NoError(err) + expectedValidation.IndexedEventsCount += uint64(len(events)) + for _, event := range events { + expectedValidation.IndexedEventEntriesCount += uint64(len(event.Entries)) + } + } + } + + t.Logf("tipset %d: %+v", height, expectedValidation) + + iv, err := client.ChainValidateIndex(ctx, abi.ChainEpoch(height), false) + require.NoError(err) + require.Equal(iv, &expectedValidation) + } } func TestEthSubscribeLogsNoTopicSpec(t *testing.T) { diff --git a/itests/eth_transactions_test.go b/itests/eth_transactions_test.go index 412bfb2bf12..478e61df8f3 100644 --- a/itests/eth_transactions_test.go +++ b/itests/eth_transactions_test.go @@ -744,12 +744,6 @@ func TestTraceTransaction(t *testing.T) { require.Contains(t, err.Error(), "transaction not found") require.Nil(t, traces) - // EthTraceTransaction errors when a trace for pending transactions is requested - traces, err = client.EthTraceTransaction(ctx, hash.String()) - require.Error(t, err) - require.Contains(t, err.Error(), "no trace for pending transactions") - require.Nil(t, traces) - receipt, err := client.EVM().WaitTransaction(ctx, hash) require.NoError(t, err) require.NotNil(t, receipt) diff --git a/itests/kit/node_opts.go b/itests/kit/node_opts.go index ad1f7e3edb4..1bfd4550977 100644 --- a/itests/kit/node_opts.go +++ b/itests/kit/node_opts.go @@ -90,6 +90,7 @@ var DefaultNodeOpts = nodeOpts{ // test defaults cfg.Fevm.EnableEthRPC = true + 
cfg.ChainIndexer.EnableIndexer = true cfg.Events.MaxFilterHeightRange = math.MaxInt64 cfg.Events.EnableActorEventsAPI = true @@ -103,6 +104,8 @@ var DefaultNodeOpts = nodeOpts{ cfg.Libp2p.ConnMgrLow = 1024 cfg.Libp2p.ConnMgrHigh = 2048 cfg.Libp2p.ConnMgrGrace = config.Duration(time.Hour) + cfg.ChainIndexer.ReconcileEmptyIndex = true + cfg.ChainIndexer.MaxReconcileTipsets = 10000 return nil }, }, diff --git a/itests/msgindex_test.go b/itests/msgindex_test.go deleted file mode 100644 index d9ed752797e..00000000000 --- a/itests/msgindex_test.go +++ /dev/null @@ -1,124 +0,0 @@ -package itests - -import ( - "context" - "os" - "sync" - "testing" - "time" - - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-state-types/big" - "github.com/filecoin-project/go-state-types/exitcode" - - lapi "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/chain/index" - "github.com/filecoin-project/lotus/chain/store" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/itests/kit" - "github.com/filecoin-project/lotus/node" -) - -func init() { - // adjust those to make tests snappy - index.CoalesceMinDelay = time.Millisecond - index.CoalesceMaxDelay = 10 * time.Millisecond - index.CoalesceMergeInterval = time.Millisecond -} - -func testMsgIndex( - t *testing.T, - name string, - run func(t *testing.T, makeMsgIndex func(cs *store.ChainStore) (index.MsgIndex, error)), - check func(t *testing.T, i int, msgIndex index.MsgIndex), -) { - - // create the message indices in the test context - var mx sync.Mutex - var tmpDirs []string - var msgIndices []index.MsgIndex - - t.Cleanup(func() { - for _, msgIndex := range msgIndices { - _ = msgIndex.Close() - } - - for _, tmp := range tmpDirs { - _ = os.RemoveAll(tmp) - } - }) - - makeMsgIndex := func(cs *store.ChainStore) (index.MsgIndex, error) { - var err error - tmp := t.TempDir() - msgIndex, err := index.NewMsgIndex(context.Background(), tmp+"/msgindex.db", cs) - if err == nil { - mx.Lock() - tmpDirs = append(tmpDirs, tmp) - msgIndices = append(msgIndices, msgIndex) - mx.Unlock() - } - return msgIndex, err - } - - t.Run(name, func(t *testing.T) { - run(t, makeMsgIndex) - }) - - if len(msgIndices) == 0 { - t.Fatal("no message indices") - } - - for i, msgIndex := range msgIndices { - check(t, i, msgIndex) - } -} - -func checkNonEmptyMsgIndex(t *testing.T, _ int, msgIndex index.MsgIndex) { - mi, ok := msgIndex.(interface{ CountMessages() (int64, error) }) - if !ok { - t.Fatal("index does not allow counting") - } - count, err := mi.CountMessages() - require.NoError(t, err) - require.NotEqual(t, count, 0) -} - -func TestMsgIndex(t *testing.T) { - testMsgIndex(t, "testSearchMsg", testSearchMsgWithIndex, checkNonEmptyMsgIndex) -} - -func testSearchMsgWithIndex(t *testing.T, makeMsgIndex func(cs *store.ChainStore) (index.MsgIndex, error)) { - // copy of apiSuite.testSearchMsgWith; needs to be copied or else CI is angry, tests are built individually there - ctx := context.Background() - - full, _, ens := kit.EnsembleMinimal(t, kit.MockProofs(), kit.ConstructorOpts(node.Override(new(index.MsgIndex), makeMsgIndex))) - - senderAddr, err := full.WalletDefaultAddress(ctx) - require.NoError(t, err) - - msg := &types.Message{ - From: senderAddr, - To: senderAddr, - Value: big.Zero(), - } - - ens.BeginMining(100 * time.Millisecond) - - sm, err := full.MpoolPushMessage(ctx, msg, nil) - require.NoError(t, err) - - //stm: @CHAIN_STATE_WAIT_MSG_001 - res, err := full.StateWaitMsg(ctx, sm.Cid(), 1, 
lapi.LookbackNoLimit, true) - require.NoError(t, err) - - require.Equal(t, exitcode.Ok, res.Receipt.ExitCode, "message not successful") - - //stm: @CHAIN_STATE_SEARCH_MSG_001 - searchRes, err := full.StateSearchMsg(ctx, types.EmptyTSK, sm.Cid(), lapi.LookbackNoLimit, true) - require.NoError(t, err) - require.NotNil(t, searchRes) - - require.Equalf(t, res.TipSet, searchRes.TipSet, "search ts: %s, different from wait ts: %s", searchRes.TipSet, res.TipSet) -} diff --git a/lib/sqlite/sqlite.go b/lib/sqlite/sqlite.go index cb489284c9a..96d3b5fa9ba 100644 --- a/lib/sqlite/sqlite.go +++ b/lib/sqlite/sqlite.go @@ -11,6 +11,7 @@ import ( "time" logging "github.com/ipfs/go-log/v2" + _ "github.com/mattn/go-sqlite3" "golang.org/x/xerrors" ) @@ -22,12 +23,11 @@ var pragmas = []string{ "PRAGMA synchronous = normal", "PRAGMA temp_store = memory", "PRAGMA mmap_size = 30000000000", - "PRAGMA page_size = 32768", "PRAGMA auto_vacuum = NONE", "PRAGMA automatic_index = OFF", "PRAGMA journal_mode = WAL", - "PRAGMA wal_autocheckpoint = 256", // checkpoint @ 256 pages - "PRAGMA journal_size_limit = 0", // always reset journal and wal files + "PRAGMA journal_size_limit = 0", // always reset journal and wal files + "PRAGMA foreign_keys = ON", } const metaTableDdl = `CREATE TABLE IF NOT EXISTS _meta ( @@ -45,30 +45,37 @@ func metaDdl(version uint64) []string { } // Open opens a database at the given path. If the database does not exist, it will be created. -func Open(path string) (*sql.DB, bool, error) { +func Open(path string) (*sql.DB, error) { if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { - return nil, false, xerrors.Errorf("error creating database base directory [@ %s]: %w", path, err) + return nil, xerrors.Errorf("error creating database base directory [@ %s]: %w", path, err) } _, err := os.Stat(path) if err != nil && !errors.Is(err, fs.ErrNotExist) { - return nil, false, xerrors.Errorf("error checking file status for database [@ %s]: %w", path, err) + return nil, xerrors.Errorf("error checking file status for database [@ %s]: %w", path, err) } - exists := err == nil db, err := sql.Open("sqlite3", path+"?mode=rwc") if err != nil { - return nil, false, xerrors.Errorf("error opening database [@ %s]: %w", path, err) + return nil, xerrors.Errorf("error opening database [@ %s]: %w", path, err) } for _, pragma := range pragmas { if _, err := db.Exec(pragma); err != nil { _ = db.Close() - return nil, false, xerrors.Errorf("error setting database pragma %q: %w", pragma, err) + return nil, xerrors.Errorf("error setting database pragma %q: %w", pragma, err) } } - return db, exists, nil + var foreignKeysEnabled int + if err := db.QueryRow("PRAGMA foreign_keys;").Scan(&foreignKeysEnabled); err != nil { + return nil, xerrors.Errorf("failed to check foreign keys setting: %w", err) + } + if foreignKeysEnabled == 0 { + return nil, xerrors.Errorf("foreign keys are not enabled for database [@ %s]", path) + } + + return db, nil } // InitDb initializes the database by checking whether it needs to be created or upgraded. 
diff --git a/lib/sqlite/sqlite_test.go b/lib/sqlite/sqlite_test.go index bda6432f5e6..f492b092a5e 100644 --- a/lib/sqlite/sqlite_test.go +++ b/lib/sqlite/sqlite_test.go @@ -32,9 +32,8 @@ func TestSqlite(t *testing.T) { tmpDir := t.TempDir() dbPath := filepath.Join(tmpDir, "/test.db") - db, exists, err := sqlite.Open(dbPath) + db, err := sqlite.Open(dbPath) req.NoError(err) - req.False(exists) req.NotNil(db) err = sqlite.InitDb(context.Background(), "testdb", db, ddl, nil) @@ -95,9 +94,8 @@ func TestSqlite(t *testing.T) { // open again, check contents is the same - db, exists, err = sqlite.Open(dbPath) + db, err = sqlite.Open(dbPath) req.NoError(err) - req.True(exists) req.NotNil(db) err = sqlite.InitDb(context.Background(), "testdb", db, ddl, nil) @@ -113,9 +111,9 @@ func TestSqlite(t *testing.T) { // open again, with a migration - db, exists, err = sqlite.Open(dbPath) + db, err = sqlite.Open(dbPath) req.NoError(err) - req.True(exists) + req.NotNil(db) req.NotNil(db) migration1 := func(ctx context.Context, tx *sql.Tx) error { @@ -156,9 +154,8 @@ func TestSqlite(t *testing.T) { // open again, with another migration - db, exists, err = sqlite.Open(dbPath) + db, err = sqlite.Open(dbPath) req.NoError(err) - req.True(exists) req.NotNil(db) migration2 := func(ctx context.Context, tx *sql.Tx) error { diff --git a/node/builder.go b/node/builder.go index 94fe170cc21..7d03e9593a4 100644 --- a/node/builder.go +++ b/node/builder.go @@ -25,7 +25,6 @@ import ( "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/build/buildconstants" "github.com/filecoin-project/lotus/chain/beacon" - "github.com/filecoin-project/lotus/chain/index" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/journal" "github.com/filecoin-project/lotus/journal/alerting" @@ -129,6 +128,8 @@ const ( StoreEventsKey + InitChainIndexerKey + _nInvokes // keep this last ) @@ -368,7 +369,6 @@ func Test() Option { Unset(RunPeerMgrKey), Unset(new(*peermgr.PeerMgr)), Override(new(beacon.Schedule), testing.RandomBeacon), - Override(new(index.MsgIndex), modules.DummyMsgIndex), ) } diff --git a/node/builder_chain.go b/node/builder_chain.go index d367ab4e3eb..8672fa56803 100644 --- a/node/builder_chain.go +++ b/node/builder_chain.go @@ -141,9 +141,12 @@ var ChainNode = Options( Override(new(full.StateModuleAPI), From(new(api.Gateway))), Override(new(stmgr.StateManagerAPI), rpcstmgr.NewRPCStateManager), Override(new(full.EthModuleAPI), From(new(api.Gateway))), - Override(new(full.EthTxHashManager), &full.EthTxHashManagerDummy{}), Override(new(full.EthEventAPI), From(new(api.Gateway))), Override(new(full.ActorEventAPI), From(new(api.Gateway))), + + Override(new(index.Indexer), modules.ChainIndexer(config.ChainIndexerConfig{ + EnableIndexer: false, + })), ), // Full node API / service startup @@ -176,6 +179,13 @@ func ConfigFullNode(c interface{}) Option { return Error(xerrors.Errorf("invalid config from repo, got: %T", c)) } + if cfg.Fevm.EnableEthRPC && !cfg.ChainIndexer.EnableIndexer { + return Error(xerrors.New("chain indexer must be enabled if ETH RPC is enabled")) + } + if cfg.Events.EnableActorEventsAPI && !cfg.ChainIndexer.EnableIndexer { + return Error(xerrors.New("chain indexer must be enabled if actor events API is enabled")) + } + return Options( ConfigCommon(&cfg.Common, build.NodeUserVersion()), @@ -240,7 +250,7 @@ func ConfigFullNode(c interface{}) Option { // If the Eth JSON-RPC is enabled, enable storing events at the ChainStore. 
// This is the case even if real-time and historic filtering are disabled, // as it enables us to serve logs in eth_getTransactionReceipt. - If(cfg.Fevm.EnableEthRPC || cfg.Events.EnableActorEventsAPI, Override(StoreEventsKey, modules.EnableStoringEvents)), + If(cfg.Fevm.EnableEthRPC || cfg.Events.EnableActorEventsAPI || cfg.ChainIndexer.EnableIndexer, Override(StoreEventsKey, modules.EnableStoringEvents)), If(cfg.Wallet.RemoteBackend != "", Override(new(*remotewallet.RemoteWallet), remotewallet.SetupRemoteWallet(cfg.Wallet.RemoteBackend)), @@ -263,14 +273,12 @@ func ConfigFullNode(c interface{}) Option { If(cfg.Fevm.EnableEthRPC, Override(new(*full.EthEventHandler), modules.EthEventHandler(cfg.Events, cfg.Fevm.EnableEthRPC)), - Override(new(full.EthTxHashManager), modules.EthTxHashManager(cfg.Fevm)), Override(new(full.EthModuleAPI), modules.EthModuleAPI(cfg.Fevm)), Override(new(full.EthEventAPI), From(new(*full.EthEventHandler))), ), If(!cfg.Fevm.EnableEthRPC, Override(new(full.EthModuleAPI), &full.EthModuleDummy{}), Override(new(full.EthEventAPI), &full.EthModuleDummy{}), - Override(new(full.EthTxHashManager), &full.EthTxHashManagerDummy{}), ), If(cfg.Events.EnableActorEventsAPI, @@ -281,14 +289,22 @@ func ConfigFullNode(c interface{}) Option { ), ), - // enable message index for full node when configured by the user, otherwise use dummy. - If(cfg.Index.EnableMsgIndex, Override(new(index.MsgIndex), modules.MsgIndex)), - If(!cfg.Index.EnableMsgIndex, Override(new(index.MsgIndex), modules.DummyMsgIndex)), - // enable fault reporter when configured by the user If(cfg.FaultReporter.EnableConsensusFaultReporter, Override(ConsensusReporterKey, modules.RunConsensusFaultReporter(cfg.FaultReporter)), ), + + ApplyIf(isLiteNode, + Override(new(full.ChainIndexerAPI), func() full.ChainIndexerAPI { return nil }), + ), + + ApplyIf(isFullNode, + Override(new(index.Indexer), modules.ChainIndexer(cfg.ChainIndexer)), + Override(new(full.ChainIndexerAPI), modules.ChainIndexHandler(cfg.ChainIndexer)), + If(cfg.ChainIndexer.EnableIndexer, + Override(InitChainIndexerKey, modules.InitChainIndexer), + ), + ), ) } diff --git a/node/config/def.go b/node/config/def.go index cc390371302..e6bdc04bdb8 100644 --- a/node/config/def.go +++ b/node/config/def.go @@ -83,19 +83,22 @@ func DefaultFullNode() *FullNode { }, }, Fevm: FevmConfig{ - EnableEthRPC: false, - EthTxHashMappingLifetimeDays: 0, - EthTraceFilterMaxResults: 500, - EthBlkCacheSize: 500, + EnableEthRPC: false, + EthTraceFilterMaxResults: 500, + EthBlkCacheSize: 500, }, Events: EventsConfig{ - DisableRealTimeFilterAPI: false, - DisableHistoricFilterAPI: false, - EnableActorEventsAPI: false, - FilterTTL: Duration(time.Hour * 1), - MaxFilters: 100, - MaxFilterResults: 10000, - MaxFilterHeightRange: 2880, // conservative limit of one day + EnableActorEventsAPI: false, + FilterTTL: Duration(time.Hour * 1), + MaxFilters: 100, + MaxFilterResults: 10000, + MaxFilterHeightRange: 2880, // conservative limit of one day + }, + ChainIndexer: ChainIndexerConfig{ + EnableIndexer: false, + GCRetentionEpochs: 0, + ReconcileEmptyIndex: false, + MaxReconcileTipsets: 3 * builtin.EpochsInDay, }, } } diff --git a/node/config/doc_gen.go b/node/config/doc_gen.go index 6420c0f5f14..d448f5b9ab3 100644 --- a/node/config/doc_gen.go +++ b/node/config/doc_gen.go @@ -71,6 +71,65 @@ your node if metadata log is disabled`, Comment: ``, }, }, + "ChainIndexerConfig": { + { + Name: "EnableIndexer", + Type: "bool", + + Comment: `EnableIndexer controls whether the chain indexer is active. 
+The chain indexer is responsible for indexing tipsets, messages, and events from the chain state. +It is a crucial component for optimizing Lotus RPC response times. + +Default: false (indexer is disabled) + +Setting this to true will enable the indexer, which will significantly improve RPC performance. +It is strongly recommended to keep this set to true if you are an RPC provider.`, + }, + { + Name: "GCRetentionEpochs", + Type: "int64", + + Comment: `GCRetentionEpochs specifies the number of epochs for which data is retained in the Indexer. +The garbage collection (GC) process removes data older than this retention period. +Setting this to 0 disables GC, preserving all historical data indefinitely. + +If set, the minimum value must be greater than builtin.EpochsInDay (i.e. "2880" epochs for mainnet). +This ensures a reasonable retention period for the indexed data. + +Default: 0 (GC disabled)`, + }, + { + Name: "ReconcileEmptyIndex", + Type: "bool", + + Comment: `ReconcileEmptyIndex determines whether to reconcile the index with the chain state +during startup when the index is empty. + +When set to true: +- On startup, if the index is empty, the indexer will index the available +chain state on the node albeit within the MaxReconcileTipsets limit. + +When set to false: +- The indexer will not automatically re-index the chain state on startup if the index is empty. + +Default: false + +Note: The number of tipsets reconciled (i.e. indexed) during this process can be +controlled using the MaxReconcileTipsets option.`, + }, + { + Name: "MaxReconcileTipsets", + Type: "uint64", + + Comment: `MaxReconcileTipsets limits the number of tipsets to reconcile with the chain during startup. +It represents the maximum number of tipsets to index from the chain state that are absent in the index. + +Default: 3 * epochsPerDay (approximately 3 days of chain history) + +Note: Setting this value too low may result in incomplete indexing, while setting it too high +may increase startup time.`, + }, + }, "Chainstore": { { Name: "EnableSplitstore", @@ -114,29 +173,13 @@ your node if metadata log is disabled`, }, }, "EventsConfig": { - { - Name: "DisableRealTimeFilterAPI", - Type: "bool", - - Comment: `DisableRealTimeFilterAPI will disable the RealTimeFilterAPI that can create and query filters for actor events as they are emitted. -The API is enabled when Fevm.EnableEthRPC or EnableActorEventsAPI is true, but can be disabled selectively with this flag.`, - }, - { - Name: "DisableHistoricFilterAPI", - Type: "bool", - - Comment: `DisableHistoricFilterAPI will disable the HistoricFilterAPI that can create and query filters for actor events -that occurred in the past. HistoricFilterAPI maintains a queryable index of events. -The API is enabled when Fevm.EnableEthRPC or EnableActorEventsAPI is true, but can be disabled selectively with this flag.`, - }, { Name: "EnableActorEventsAPI", Type: "bool", Comment: `EnableActorEventsAPI enables the Actor events API that enables clients to consume events emitted by (smart contracts + built-in Actors). 
-This will also enable the RealTimeFilterAPI and HistoricFilterAPI by default, but they can be -disabled by setting their respective Disable* options.`, +Note: Setting this to true will also require that ChainIndexer is enabled, otherwise it will cause an error at startup.`, }, { Name: "FilterTTL", @@ -168,15 +211,6 @@ of filters per connection.`, Comment: `MaxFilterHeightRange specifies the maximum range of heights that can be used in a filter (to avoid querying the entire chain)`, }, - { - Name: "DatabasePath", - Type: "string", - - Comment: `DatabasePath is the full path to a sqlite database that will be used to index actor events to -support the historic filter APIs. If the database does not exist it will be created. The directory containing -the database must already exist and be writeable. If a relative path is provided here, sqlite treats it as -relative to the CWD (current working directory).`, - }, }, "FaultReporterConfig": { { @@ -220,15 +254,8 @@ rewards. This address should have adequate funds to cover gas fees.`, Name: "EnableEthRPC", Type: "bool", - Comment: `EnableEthRPC enables eth_ rpc, and enables storing a mapping of eth transaction hashes to filecoin message Cids. -This will also enable the RealTimeFilterAPI and HistoricFilterAPI by default, but they can be disabled by config options above.`, - }, - { - Name: "EthTxHashMappingLifetimeDays", - Type: "int", - - Comment: `EthTxHashMappingLifetimeDays the transaction hash lookup database will delete mappings that have been stored for more than x days -Set to 0 to keep all mappings`, + Comment: `EnableEthRPC enables eth_ RPC methods. +Note: Setting this to true will also require that ChainIndexer is enabled, otherwise it will cause an error at startup.`, }, { Name: "EthTraceFilterMaxResults", @@ -236,12 +263,6 @@ Set to 0 to keep all mappings`, Comment: `EthTraceFilterMaxResults sets the maximum results returned per request by trace_filter`, }, - { - Name: "Events", - Type: "DeprecatedEvents", - - Comment: ``, - }, { Name: "EthBlkCacheSize", Type: "int", @@ -297,8 +318,8 @@ Note: Setting this value to 0 disables the cache.`, Comment: ``, }, { - Name: "Index", - Type: "IndexConfig", + Name: "ChainIndexer", + Type: "ChainIndexerConfig", Comment: ``, }, @@ -342,15 +363,6 @@ in a cluster. Only 1 is required`, Comment: `The port to find Yugabyte. Blank for default.`, }, }, - "IndexConfig": { - { - Name: "EnableMsgIndex", - Type: "bool", - - Comment: `EXPERIMENTAL FEATURE. USE WITH CAUTION -EnableMsgIndex enables indexing of messages on chain.`, - }, - }, "JournalConfig": { { Name: "DisabledEvents", diff --git a/node/config/types.go b/node/config/types.go index d7753d4e19e..46be4089d63 100644 --- a/node/config/types.go +++ b/node/config/types.go @@ -25,7 +25,7 @@ type FullNode struct { Chainstore Chainstore Fevm FevmConfig Events EventsConfig - Index IndexConfig + ChainIndexer ChainIndexerConfig FaultReporter FaultReporterConfig } @@ -538,19 +538,13 @@ type FeeConfig struct { } type FevmConfig struct { - // EnableEthRPC enables eth_ rpc, and enables storing a mapping of eth transaction hashes to filecoin message Cids. - // This will also enable the RealTimeFilterAPI and HistoricFilterAPI by default, but they can be disabled by config options above. + // EnableEthRPC enables eth_ RPC methods. + // Note: Setting this to true will also require that ChainIndexer is enabled, otherwise it will cause an error at startup. 
EnableEthRPC bool - // EthTxHashMappingLifetimeDays the transaction hash lookup database will delete mappings that have been stored for more than x days - // Set to 0 to keep all mappings - EthTxHashMappingLifetimeDays int - // EthTraceFilterMaxResults sets the maximum results returned per request by trace_filter EthTraceFilterMaxResults uint64 - Events DeprecatedEvents `toml:"Events,omitempty"` - // EthBlkCacheSize specifies the size of the cache used for caching Ethereum blocks. // This cache enhances the performance of the eth_getBlockByHash RPC call by minimizing the need to access chain state for // recently requested blocks that are already cached. @@ -559,43 +553,10 @@ type FevmConfig struct { EthBlkCacheSize int } -type DeprecatedEvents struct { - // DisableRealTimeFilterAPI is DEPRECATED and will be removed in a future release. Use Events.DisableRealTimeFilterAPI instead. - DisableRealTimeFilterAPI bool `moved:"Events.DisableRealTimeFilterAPI" toml:"DisableRealTimeFilterAPI,omitempty"` - - // DisableHistoricFilterAPI is DEPRECATED and will be removed in a future release. Use Events.DisableHistoricFilterAPI instead. - DisableHistoricFilterAPI bool `moved:"Events.DisableHistoricFilterAPI" toml:"DisableHistoricFilterAPI,omitempty"` - - // FilterTTL is DEPRECATED and will be removed in a future release. Use Events.FilterTTL instead. - FilterTTL Duration `moved:"Events.FilterTTL" toml:"FilterTTL,omitzero"` - - // MaxFilters is DEPRECATED and will be removed in a future release. Use Events.MaxFilters instead. - MaxFilters int `moved:"Events.MaxFilters" toml:"MaxFilters,omitzero"` - - // MaxFilterResults is DEPRECATED and will be removed in a future release. Use Events.MaxFilterResults instead. - MaxFilterResults int `moved:"Events.MaxFilterResults" toml:"MaxFilterResults,omitzero"` - - // MaxFilterHeightRange is DEPRECATED and will be removed in a future release. Use Events.MaxFilterHeightRange instead. - MaxFilterHeightRange uint64 `moved:"Events.MaxFilterHeightRange" toml:"MaxFilterHeightRange,omitzero"` - - // DatabasePath is DEPRECATED and will be removed in a future release. Use Events.DatabasePath instead. - DatabasePath string `moved:"Events.DatabasePath" toml:"DatabasePath,omitempty"` -} - type EventsConfig struct { - // DisableRealTimeFilterAPI will disable the RealTimeFilterAPI that can create and query filters for actor events as they are emitted. - // The API is enabled when Fevm.EnableEthRPC or EnableActorEventsAPI is true, but can be disabled selectively with this flag. - DisableRealTimeFilterAPI bool - - // DisableHistoricFilterAPI will disable the HistoricFilterAPI that can create and query filters for actor events - // that occurred in the past. HistoricFilterAPI maintains a queryable index of events. - // The API is enabled when Fevm.EnableEthRPC or EnableActorEventsAPI is true, but can be disabled selectively with this flag. - DisableHistoricFilterAPI bool - // EnableActorEventsAPI enables the Actor events API that enables clients to consume events // emitted by (smart contracts + built-in Actors). - // This will also enable the RealTimeFilterAPI and HistoricFilterAPI by default, but they can be - // disabled by setting their respective Disable* options. + // Note: Setting this to true will also require that ChainIndexer is enabled, otherwise it will cause an error at startup. EnableActorEventsAPI bool // FilterTTL specifies the time to live for actor event filters. 
Filters that haven't been accessed longer than @@ -615,23 +576,53 @@ type EventsConfig struct { // MaxFilterHeightRange specifies the maximum range of heights that can be used in a filter (to avoid querying // the entire chain) MaxFilterHeightRange uint64 +} - // DatabasePath is the full path to a sqlite database that will be used to index actor events to - // support the historic filter APIs. If the database does not exist it will be created. The directory containing - // the database must already exist and be writeable. If a relative path is provided here, sqlite treats it as - // relative to the CWD (current working directory). - DatabasePath string +type ChainIndexerConfig struct { + // EnableIndexer controls whether the chain indexer is active. + // The chain indexer is responsible for indexing tipsets, messages, and events from the chain state. + // It is a crucial component for optimizing Lotus RPC response times. + // + // Default: false (indexer is disabled) + // + // Setting this to true will enable the indexer, which will significantly improve RPC performance. + // It is strongly recommended to keep this set to true if you are an RPC provider. + EnableIndexer bool - // Others, not implemented yet: - // Set a limit on the number of active websocket subscriptions (may be zero) - // Set a timeout for subscription clients - // Set upper bound on index size -} + // GCRetentionEpochs specifies the number of epochs for which data is retained in the Indexer. + // The garbage collection (GC) process removes data older than this retention period. + // Setting this to 0 disables GC, preserving all historical data indefinitely. + // + // If set, the minimum value must be greater than builtin.EpochsInDay (i.e. "2880" epochs for mainnet). + // This ensures a reasonable retention period for the indexed data. + // + // Default: 0 (GC disabled) + GCRetentionEpochs int64 + + // ReconcileEmptyIndex determines whether to reconcile the index with the chain state + // during startup when the index is empty. + // + // When set to true: + // - On startup, if the index is empty, the indexer will index the available + // chain state on the node albeit within the MaxReconcileTipsets limit. + // + // When set to false: + // - The indexer will not automatically re-index the chain state on startup if the index is empty. + // + // Default: false + // + // Note: The number of tipsets reconciled (i.e. indexed) during this process can be + // controlled using the MaxReconcileTipsets option. + ReconcileEmptyIndex bool -type IndexConfig struct { - // EXPERIMENTAL FEATURE. USE WITH CAUTION - // EnableMsgIndex enables indexing of messages on chain. - EnableMsgIndex bool + // MaxReconcileTipsets limits the number of tipsets to reconcile with the chain during startup. + // It represents the maximum number of tipsets to index from the chain state that are absent in the index. + // + // Default: 3 * epochsPerDay (approximately 3 days of chain history) + // + // Note: Setting this value too low may result in incomplete indexing, while setting it too high + // may increase startup time. 
+ MaxReconcileTipsets uint64 } type HarmonyDB struct { diff --git a/node/impl/full.go b/node/impl/full.go index 6ed0bf3eb11..24240e3df2c 100644 --- a/node/impl/full.go +++ b/node/impl/full.go @@ -36,6 +36,7 @@ type FullNodeAPI struct { full.EthAPI full.ActorEventsAPI full.F3API + full.ChainIndexAPI DS dtypes.MetadataDS NetworkName dtypes.NetworkName diff --git a/node/impl/full/actor_events.go b/node/impl/full/actor_events.go index 4216ef3c6bf..29faae2eb4f 100644 --- a/node/impl/full/actor_events.go +++ b/node/impl/full/actor_events.go @@ -14,6 +14,7 @@ import ( "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/chain/events/filter" + "github.com/filecoin-project/lotus/chain/index" "github.com/filecoin-project/lotus/chain/types" ) @@ -283,7 +284,7 @@ func (a *ActorEventHandler) SubscribeActorEventsRaw(ctx context.Context, evtFilt nextBacklogHeightUpdate := a.clock.Now().Add(a.blockDelay) collectEvent := func(ev interface{}) bool { - ce, ok := ev.(*filter.CollectedEvent) + ce, ok := ev.(*index.CollectedEvent) if !ok { log.Errorf("got unexpected value from event filter: %T", ev) return false diff --git a/node/impl/full/actor_events_test.go b/node/impl/full/actor_events_test.go index 16bcfe06ab9..4367f715f81 100644 --- a/node/impl/full/actor_events_test.go +++ b/node/impl/full/actor_events_test.go @@ -19,6 +19,7 @@ import ( "github.com/filecoin-project/go-state-types/crypto" "github.com/filecoin-project/lotus/chain/events/filter" + "github.com/filecoin-project/lotus/chain/index" "github.com/filecoin-project/lotus/chain/types" ) @@ -343,7 +344,7 @@ func TestSubscribeActorEventsRaw(t *testing.T) { req.NoError(err) mockChain.setHeaviestTipSet(ts) - var eventsThisEpoch []*filter.CollectedEvent + var eventsThisEpoch []*index.CollectedEvent if thisHeight <= finishHeight { eventsThisEpoch = allEvents[(thisHeight-filterStartHeight)*eventsPerEpoch : (thisHeight-filterStartHeight+2)*eventsPerEpoch] } @@ -541,13 +542,13 @@ type mockFilter struct { id types.FilterID lastTaken time.Time ch chan<- interface{} - historicalEvents []*filter.CollectedEvent + historicalEvents []*index.CollectedEvent subChannelCalls int clearSubChannelCalls int lk sync.Mutex } -func newMockFilter(ctx context.Context, t *testing.T, rng *pseudo.Rand, historicalEvents []*filter.CollectedEvent) *mockFilter { +func newMockFilter(ctx context.Context, t *testing.T, rng *pseudo.Rand, historicalEvents []*index.CollectedEvent) *mockFilter { t.Helper() var id [32]byte _, err := rng.Read(id[:]) @@ -560,7 +561,7 @@ func newMockFilter(ctx context.Context, t *testing.T, rng *pseudo.Rand, historic } } -func (m *mockFilter) sendEventToChannel(e *filter.CollectedEvent) { +func (m *mockFilter) sendEventToChannel(e *index.CollectedEvent) { m.lk.Lock() defer m.lk.Unlock() if m.ch != nil { @@ -614,7 +615,7 @@ func (m *mockFilter) ClearSubChannel() { m.ch = nil } -func (m *mockFilter) TakeCollectedEvents(context.Context) []*filter.CollectedEvent { +func (m *mockFilter) TakeCollectedEvents(context.Context) []*index.CollectedEvent { e := m.historicalEvents m.historicalEvents = nil m.lastTaken = time.Now() @@ -768,7 +769,7 @@ func epochPtr(i int) *abi.ChainEpoch { return &e } -func collectedToActorEvents(collected []*filter.CollectedEvent) []*types.ActorEvent { +func collectedToActorEvents(collected []*index.CollectedEvent) []*types.ActorEvent { var out []*types.ActorEvent for _, c := range collected { out = append(out, &types.ActorEvent{ @@ -783,8 +784,8 @@ func collectedToActorEvents(collected []*filter.CollectedEvent) 
[]*types.ActorEv return out } -func makeCollectedEvents(t *testing.T, rng *pseudo.Rand, eventStartHeight, eventsPerHeight, eventEndHeight int64) []*filter.CollectedEvent { - var out []*filter.CollectedEvent +func makeCollectedEvents(t *testing.T, rng *pseudo.Rand, eventStartHeight, eventsPerHeight, eventEndHeight int64) []*index.CollectedEvent { + var out []*index.CollectedEvent for h := eventStartHeight; h <= eventEndHeight; h++ { for i := int64(0); i < eventsPerHeight; i++ { out = append(out, makeCollectedEvent(t, rng, types.NewTipSetKey(mkCid(t, fmt.Sprintf("h=%d", h))), abi.ChainEpoch(h))) @@ -793,11 +794,11 @@ func makeCollectedEvents(t *testing.T, rng *pseudo.Rand, eventStartHeight, event return out } -func makeCollectedEvent(t *testing.T, rng *pseudo.Rand, tsKey types.TipSetKey, height abi.ChainEpoch) *filter.CollectedEvent { +func makeCollectedEvent(t *testing.T, rng *pseudo.Rand, tsKey types.TipSetKey, height abi.ChainEpoch) *index.CollectedEvent { addr, err := address.NewIDAddress(uint64(rng.Int63())) require.NoError(t, err) - return &filter.CollectedEvent{ + return &index.CollectedEvent{ Entries: []types.EventEntry{ {Flags: 0x01, Key: "k1", Codec: cid.Raw, Value: []byte("v1")}, {Flags: 0x01, Key: "k2", Codec: cid.Raw, Value: []byte("v2")}, diff --git a/node/impl/full/chain_index.go b/node/impl/full/chain_index.go new file mode 100644 index 00000000000..09c7a1ce3d3 --- /dev/null +++ b/node/impl/full/chain_index.go @@ -0,0 +1,46 @@ +package full + +import ( + "context" + "errors" + + "go.uber.org/fx" + + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/index" + "github.com/filecoin-project/lotus/chain/types" +) + +type ChainIndexerAPI interface { + ChainValidateIndex(ctx context.Context, epoch abi.ChainEpoch, backfill bool) (*types.IndexValidation, error) +} + +var ( + _ ChainIndexerAPI = *new(api.FullNode) +) + +type ChainIndexAPI struct { + fx.In + ChainIndexerAPI +} + +type ChainIndexHandler struct { + indexer index.Indexer +} + +func (ch *ChainIndexHandler) ChainValidateIndex(ctx context.Context, epoch abi.ChainEpoch, backfill bool) (*types.IndexValidation, error) { + if ch.indexer == nil { + return nil, errors.New("chain indexer is disabled") + } + return ch.indexer.ChainValidateIndex(ctx, epoch, backfill) +} + +var _ ChainIndexerAPI = (*ChainIndexHandler)(nil) + +func NewChainIndexHandler(indexer index.Indexer) *ChainIndexHandler { + return &ChainIndexHandler{ + indexer: indexer, + } +} diff --git a/node/impl/full/eth.go b/node/impl/full/eth.go index 76da5d849eb..4e20e0ca534 100644 --- a/node/impl/full/eth.go +++ b/node/impl/full/eth.go @@ -32,8 +32,8 @@ import ( "github.com/filecoin-project/lotus/chain/actors" builtinactors "github.com/filecoin-project/lotus/chain/actors/builtin" builtinevm "github.com/filecoin-project/lotus/chain/actors/builtin/evm" - "github.com/filecoin-project/lotus/chain/ethhashlookup" "github.com/filecoin-project/lotus/chain/events/filter" + "github.com/filecoin-project/lotus/chain/index" "github.com/filecoin-project/lotus/chain/messagepool" "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/store" @@ -42,15 +42,13 @@ import ( "github.com/filecoin-project/lotus/node/modules/dtypes" ) -var ErrUnsupported = errors.New("unsupported method") - -const maxEthFeeHistoryRewardPercentiles = 100 - var ( - // wait for 3 epochs - eventReadTimeout = 90 * time.Second + ErrUnsupported = errors.New("unsupported method") + 
ErrChainIndexerDisabled = errors.New("chain indexer is disabled; please enable the ChainIndexer to use the ETH RPC API") ) +const maxEthFeeHistoryRewardPercentiles = 100 + type EthModuleAPI interface { EthBlockNumber(ctx context.Context) (ethtypes.EthUint64, error) EthAccounts(ctx context.Context) ([]ethtypes.EthAddress, error) @@ -103,6 +101,7 @@ type EthEventAPI interface { var ( _ EthModuleAPI = *new(api.FullNode) _ EthEventAPI = *new(api.FullNode) + _ EthModuleAPI = *new(api.Gateway) ) @@ -137,13 +136,14 @@ type EthModule struct { Chain *store.ChainStore Mpool *messagepool.MessagePool StateManager *stmgr.StateManager - EthTxHashManager EthTxHashManager EthTraceFilterMaxResults uint64 EthEventHandler *EthEventHandler EthBlkCache *arc.ARCCache[cid.Cid, *ethtypes.EthBlock] // caches blocks by their CID but blocks only have the transaction hashes EthBlkTxCache *arc.ARCCache[cid.Cid, *ethtypes.EthBlock] // caches blocks along with full transaction payload by their CID + ChainIndexer index.Indexer + ChainAPI MpoolAPI StateAPI @@ -168,10 +168,10 @@ var _ EthEventAPI = (*EthEventHandler)(nil) type EthAPI struct { fx.In - Chain *store.ChainStore - StateManager *stmgr.StateManager - EthTxHashManager EthTxHashManager - MpoolAPI MpoolAPI + Chain *store.ChainStore + StateManager *stmgr.StateManager + ChainIndexer index.Indexer + MpoolAPI MpoolAPI EthModuleAPI EthEventAPI @@ -355,10 +355,18 @@ func (a *EthModule) EthGetTransactionByHashLimited(ctx context.Context, txHash * if txHash == nil { return nil, nil } + if a.ChainIndexer == nil { + return nil, ErrChainIndexerDisabled + } - c, err := a.EthTxHashManager.GetCidFromHash(*txHash) - if err != nil { - log.Debug("could not find transaction hash %s in lookup table", txHash.String()) + c, err := a.ChainIndexer.GetCidFromHash(ctx, *txHash) + if errors.Is(err, index.ErrNotFound) { + log.Debugf("could not find transaction hash %s in chain indexer", txHash.String()) + } else if err != nil { + log.Errorf("failed to lookup transaction hash %s in chain indexer: %s", txHash.String(), err) + return nil, xerrors.Errorf("failed to lookup transaction hash %s in chain indexer: %w", txHash.String(), err) } // This isn't an eth transaction we have the mapping for, so let's look it up as a filecoin message @@ -414,14 +422,23 @@ func (a *EthModule) EthGetMessageCidByTransactionHash(ctx context.Context, txHas if txHash == nil { return nil, nil } + if a.ChainIndexer == nil { + return nil, ErrChainIndexerDisabled + } - c, err := a.EthTxHashManager.GetCidFromHash(*txHash) - // We fall out of the first condition and continue - if errors.Is(err, ethhashlookup.ErrNotFound) { - log.Debug("could not find transaction hash %s in lookup table", txHash.String()) + c, err := a.ChainIndexer.GetCidFromHash(ctx, *txHash) + if errors.Is(err, index.ErrNotFound) { + log.Debugf("could not find transaction hash %s in chain indexer", txHash.String()) } else if err != nil { - return nil, xerrors.Errorf("database error: %w", err) - } else { + log.Errorf("failed to lookup transaction hash %s in chain indexer: %s", txHash.String(), err) + return nil, xerrors.Errorf("failed to lookup transaction hash %s in chain indexer: %w", txHash.String(), err) + } else { return &c, nil } @@ -510,9 +527,18 @@ func (a *EthModule) EthGetTransactionReceipt(ctx
context.Context, txHash ethtype } func (a *EthModule) EthGetTransactionReceiptLimited(ctx context.Context, txHash ethtypes.EthHash, limit abi.ChainEpoch) (*api.EthTxReceipt, error) { - c, err := a.EthTxHashManager.GetCidFromHash(txHash) - if err != nil { - log.Debug("could not find transaction hash %s in lookup table", txHash.String()) + if a.ChainIndexer == nil { + return nil, ErrChainIndexerDisabled + } + + c, err := a.ChainIndexer.GetCidFromHash(ctx, txHash) + if errors.Is(err, index.ErrNotFound) { + log.Debugf("could not find transaction hash %s in chain indexer", txHash.String()) + } else if err != nil { + log.Errorf("failed to lookup transaction hash %s in chain indexer: %s", txHash.String(), err) + return nil, xerrors.Errorf("failed to lookup transaction hash %s in chain indexer: %w", txHash.String(), err) } // This isn't an eth transaction we have the mapping for, so let's look it up as a filecoin message @@ -1049,14 +1075,14 @@ func (a *EthModule) EthGasPrice(ctx context.Context) (ethtypes.EthBigInt, error) } func (a *EthModule) EthSendRawTransaction(ctx context.Context, rawTx ethtypes.EthBytes) (ethtypes.EthHash, error) { - return ethSendRawTransaction(ctx, a.MpoolAPI, a.EthTxHashManager, rawTx, false) + return ethSendRawTransaction(ctx, a.MpoolAPI, a.ChainIndexer, rawTx, false) } func (a *EthAPI) EthSendRawTransactionUntrusted(ctx context.Context, rawTx ethtypes.EthBytes) (ethtypes.EthHash, error) { - return ethSendRawTransaction(ctx, a.MpoolAPI, a.EthTxHashManager, rawTx, true) + return ethSendRawTransaction(ctx, a.MpoolAPI, a.ChainIndexer, rawTx, true) } -func ethSendRawTransaction(ctx context.Context, mpool MpoolAPI, ethTxHashManager EthTxHashManager, rawTx ethtypes.EthBytes, untrusted bool) (ethtypes.EthHash, error) { +func ethSendRawTransaction(ctx context.Context, mpool MpoolAPI, indexer index.Indexer, rawTx ethtypes.EthBytes, untrusted bool) (ethtypes.EthHash, error) { txArgs, err := ethtypes.ParseEthTransaction(rawTx) if err != nil { return ethtypes.EmptyEthHash, err @@ -1084,8 +1110,10 @@ func ethSendRawTransaction(ctx context.Context, mpool MpoolAPI, ethTxHashManager // make it immediately available in the transaction hash lookup db, even though it will also // eventually get there via the mpool - if err := ethTxHashManager.UpsertHash(txHash, smsg.Cid()); err != nil { - log.Errorf("error inserting tx mapping to db: %s", err) + if indexer != nil { + if err := indexer.IndexEthTxHash(ctx, txHash, smsg.Cid()); err != nil { + log.Errorf("error indexing tx: %s", err) + } } return ethtypes.EthHashFromTxBytes(rawTx), nil @@ -1705,18 +1733,18 @@ func (e *EthEventHandler) getEthLogsForBlockAndTransaction(ctx context.Context, func (e *EthEventHandler) EthGetLogs(ctx context.Context, filterSpec *ethtypes.EthFilterSpec) (*ethtypes.EthFilterResult, error) { ces, err := e.ethGetEventsForFilter(ctx, filterSpec) if err != nil { - return nil, err + return nil, xerrors.Errorf("failed to get events for filter: %w", err) } return ethFilterResultFromEvents(ctx, ces, e.SubManager.StateAPI) } -func (e *EthEventHandler) ethGetEventsForFilter(ctx context.Context, filterSpec *ethtypes.EthFilterSpec) ([]*filter.CollectedEvent, error) { +func (e *EthEventHandler) ethGetEventsForFilter(ctx context.Context, filterSpec *ethtypes.EthFilterSpec) ([]*index.CollectedEvent, error) { if e.EventFilterManager == nil { return nil, api.ErrNotSupported } - if e.EventFilterManager.EventIndex == nil { - return nil, xerrors.Errorf("cannot use eth_get_logs if historical
event index is disabled") + if e.EventFilterManager.ChainIndexer == nil { + return nil, ErrChainIndexerDisabled } pf, err := e.parseEthFilterSpec(filterSpec) @@ -1724,104 +1752,37 @@ func (e *EthEventHandler) ethGetEventsForFilter(ctx context.Context, filterSpec return nil, xerrors.Errorf("failed to parse eth filter spec: %w", err) } - if pf.tipsetCid == cid.Undef { - maxHeight := pf.maxHeight - if maxHeight == -1 { - // heaviest tipset doesn't have events because its messages haven't been executed yet - maxHeight = e.Chain.GetHeaviestTipSet().Height() - 1 - } - - if maxHeight < 0 { - return nil, xerrors.Errorf("maxHeight requested is less than 0") - } - - // we can't return events for the heaviest tipset as the transactions in that tipset will be executed - // in the next non null tipset (because of Filecoin's "deferred execution" model) - if maxHeight > e.Chain.GetHeaviestTipSet().Height()-1 { - return nil, xerrors.Errorf("maxHeight requested is greater than the heaviest tipset") - } - - err := e.waitForHeightProcessed(ctx, maxHeight) - if err != nil { - return nil, err - } - // TODO: Ideally we should also check that events for the epoch at `pf.minheight` have been indexed - // However, it is currently tricky to check/guarantee this for two reasons: - // a) Event Index is not aware of null-blocks. This means that the Event Index wont be able to say whether the block at - // `pf.minheight` is a null block or whether it has no events - // b) There can be holes in the index where events at certain epoch simply haven't been indexed because of edge cases around - // node restarts while indexing. This needs a long term "auto-repair"/"automated-backfilling" implementation in the index - // So, for now, the best we can do is ensure that the event index has evenets for events at height >= `pf.maxHeight` - } else { + head := e.Chain.GetHeaviestTipSet() + // should not ask for events for a tipset >= head because of deferred execution + if pf.tipsetCid != cid.Undef { ts, err := e.Chain.GetTipSetByCid(ctx, pf.tipsetCid) if err != nil { return nil, xerrors.Errorf("failed to get tipset by cid: %w", err) } - err = e.waitForHeightProcessed(ctx, ts.Height()) - if err != nil { - return nil, err - } - - b, err := e.EventFilterManager.EventIndex.IsTipsetProcessed(ctx, pf.tipsetCid.Bytes()) - if err != nil { - return nil, xerrors.Errorf("failed to check if tipset events have been indexed: %w", err) + if ts.Height() >= head.Height() { + return nil, xerrors.New("cannot ask for events for a tipset at or greater than head") } - if !b { - return nil, xerrors.Errorf("event index failed to index tipset %s", pf.tipsetCid.String()) - } - } - - // Fill a filter and collect events - f, err := e.EventFilterManager.Fill(ctx, pf.minHeight, pf.maxHeight, pf.tipsetCid, pf.addresses, pf.keys) - if err != nil { - return nil, xerrors.Errorf("failed to install event filter: %w", err) } - ces := f.TakeCollectedEvents(ctx) - - return ces, nil -} -// note that we can have null blocks at the given height and the event Index is not null block aware -// so, what we do here is wait till we see the event index contain a block at a height greater than the given height -func (e *EthEventHandler) waitForHeightProcessed(ctx context.Context, height abi.ChainEpoch) error { - ei := e.EventFilterManager.EventIndex - if height > e.Chain.GetHeaviestTipSet().Height() { - return xerrors.New("height is in the future") + if pf.minHeight >= head.Height() || pf.maxHeight >= head.Height() { + return nil, xerrors.New("cannot ask for events for a tipset 
at or greater than head") } - ctx, cancel := context.WithTimeout(ctx, eventReadTimeout) - defer cancel() - - // if the height we're interested in has already been indexed -> there's nothing to do here - if b, err := ei.IsHeightPast(ctx, uint64(height)); err != nil { - return xerrors.Errorf("failed to check if event index has events for given height: %w", err) - } else if b { - return nil + ef := &index.EventFilter{ + MinHeight: pf.minHeight, + MaxHeight: pf.maxHeight, + TipsetCid: pf.tipsetCid, + Addresses: pf.addresses, + KeysWithCodec: pf.keys, + MaxResults: e.EventFilterManager.MaxFilterResults, } - // subscribe for updates to the event index - subCh, unSubscribeF := ei.SubscribeUpdates() - defer unSubscribeF() - - // it could be that the event index was update while the subscription was being processed -> check if index has what we need now - if b, err := ei.IsHeightPast(ctx, uint64(height)); err != nil { - return xerrors.Errorf("failed to check if event index has events for given height: %w", err) - } else if b { - return nil + ces, err := e.EventFilterManager.ChainIndexer.GetEventsForFilter(ctx, ef) + if err != nil { + return nil, xerrors.Errorf("failed to get events for filter from chain indexer: %w", err) } - for { - select { - case <-subCh: - if b, err := ei.IsHeightPast(ctx, uint64(height)); err != nil { - return xerrors.Errorf("failed to check if event index has events for given height: %w", err) - } else if b { - return nil - } - case <-ctx.Done(): - return ctx.Err() - } - } + return ces, nil } func (e *EthEventHandler) EthGetFilterChanges(ctx context.Context, id ethtypes.EthFilterID) (*ethtypes.EthFilterResult, error) { @@ -1947,7 +1908,8 @@ func (e *EthEventHandler) parseEthFilterSpec(filterSpec *ethtypes.EthFilterSpec) tipsetCid = filterSpec.BlockHash.ToCid() } else { var err error - minHeight, maxHeight, err = parseBlockRange(e.Chain.GetHeaviestTipSet().Height(), filterSpec.FromBlock, filterSpec.ToBlock, e.MaxFilterHeightRange) + // Because of deferred execution, we need to subtract 1 from the heaviest tipset height for the "heaviest" parameter + minHeight, maxHeight, err = parseBlockRange(e.Chain.GetHeaviestTipSet().Height()-1, filterSpec.FromBlock, filterSpec.ToBlock, e.MaxFilterHeightRange) if err != nil { return nil, err } diff --git a/node/impl/full/eth_events.go b/node/impl/full/eth_events.go index 0c474b92fe2..850826ecf9c 100644 --- a/node/impl/full/eth_events.go +++ b/node/impl/full/eth_events.go @@ -13,13 +13,14 @@ import ( "github.com/filecoin-project/go-jsonrpc" "github.com/filecoin-project/lotus/chain/events/filter" + "github.com/filecoin-project/lotus/chain/index" "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types/ethtypes" ) type filterEventCollector interface { - TakeCollectedEvents(context.Context) []*filter.CollectedEvent + TakeCollectedEvents(context.Context) []*index.CollectedEvent } type filterMessageCollector interface { @@ -93,7 +94,7 @@ func ethLogFromEvent(entries []types.EventEntry) (data []byte, topics []ethtypes return data, topics, true } -func ethFilterLogsFromEvents(ctx context.Context, evs []*filter.CollectedEvent, sa StateAPI) ([]ethtypes.EthLog, error) { +func ethFilterLogsFromEvents(ctx context.Context, evs []*index.CollectedEvent, sa StateAPI) ([]ethtypes.EthLog, error) { var logs []ethtypes.EthLog for _, ev := range evs { log := ethtypes.EthLog{ @@ -140,7 +141,7 @@ func ethFilterLogsFromEvents(ctx context.Context, evs []*filter.CollectedEvent, 
return logs, nil } -func ethFilterResultFromEvents(ctx context.Context, evs []*filter.CollectedEvent, sa StateAPI) (*ethtypes.EthFilterResult, error) { +func ethFilterResultFromEvents(ctx context.Context, evs []*index.CollectedEvent, sa StateAPI) (*ethtypes.EthFilterResult, error) { logs, err := ethFilterLogsFromEvents(ctx, evs, sa) if err != nil { return nil, err @@ -347,8 +348,8 @@ func (e *ethSubscription) start(ctx context.Context) { return case v := <-e.in: switch vt := v.(type) { - case *filter.CollectedEvent: - evs, err := ethFilterResultFromEvents(ctx, []*filter.CollectedEvent{vt}, e.StateAPI) + case *index.CollectedEvent: + evs, err := ethFilterResultFromEvents(ctx, []*index.CollectedEvent{vt}, e.StateAPI) if err != nil { continue } diff --git a/node/impl/full/txhashmanager.go b/node/impl/full/txhashmanager.go deleted file mode 100644 index 00a5980a3fe..00000000000 --- a/node/impl/full/txhashmanager.go +++ /dev/null @@ -1,201 +0,0 @@ -package full - -import ( - "context" - "time" - - "github.com/ipfs/go-cid" - - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/crypto" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/build/buildconstants" - "github.com/filecoin-project/lotus/chain/ethhashlookup" - "github.com/filecoin-project/lotus/chain/events" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/chain/types/ethtypes" -) - -type EthTxHashManager interface { - events.TipSetObserver - - PopulateExistingMappings(ctx context.Context, minHeight abi.ChainEpoch) error - ProcessSignedMessage(ctx context.Context, msg *types.SignedMessage) - UpsertHash(txHash ethtypes.EthHash, c cid.Cid) error - GetCidFromHash(txHash ethtypes.EthHash) (cid.Cid, error) - DeleteEntriesOlderThan(days int) (int64, error) -} - -var ( - _ EthTxHashManager = (*ethTxHashManager)(nil) - _ EthTxHashManager = (*EthTxHashManagerDummy)(nil) -) - -type ethTxHashManager struct { - stateAPI StateAPI - transactionHashLookup *ethhashlookup.EthTxHashLookup -} - -func NewEthTxHashManager(stateAPI StateAPI, transactionHashLookup *ethhashlookup.EthTxHashLookup) EthTxHashManager { - return ðTxHashManager{ - stateAPI: stateAPI, - transactionHashLookup: transactionHashLookup, - } -} - -func (m *ethTxHashManager) Revert(ctx context.Context, from, to *types.TipSet) error { - return nil -} - -func (m *ethTxHashManager) PopulateExistingMappings(ctx context.Context, minHeight abi.ChainEpoch) error { - if minHeight < buildconstants.UpgradeHyggeHeight { - minHeight = buildconstants.UpgradeHyggeHeight - } - - ts := m.stateAPI.Chain.GetHeaviestTipSet() - for ts.Height() > minHeight { - for _, block := range ts.Blocks() { - msgs, err := m.stateAPI.Chain.SecpkMessagesForBlock(ctx, block) - if err != nil { - // If we can't find the messages, we've either imported from snapshot or pruned the store - log.Debug("exiting message mapping population at epoch ", ts.Height()) - return nil - } - - for _, msg := range msgs { - m.ProcessSignedMessage(ctx, msg) - } - } - - var err error - ts, err = m.stateAPI.Chain.GetTipSetFromKey(ctx, ts.Parents()) - if err != nil { - return err - } - } - - return nil -} - -func (m *ethTxHashManager) Apply(ctx context.Context, from, to *types.TipSet) error { - for _, blk := range to.Blocks() { - _, smsgs, err := m.stateAPI.Chain.MessagesForBlock(ctx, blk) - if err != nil { - return err - } - - for _, smsg := range smsgs { - if smsg.Signature.Type != crypto.SigTypeDelegated { - continue - } - - hash, err := 
ethTxHashFromSignedMessage(smsg) - if err != nil { - return err - } - - err = m.transactionHashLookup.UpsertHash(hash, smsg.Cid()) - if err != nil { - return err - } - } - } - - return nil -} - -func (m *ethTxHashManager) UpsertHash(txHash ethtypes.EthHash, c cid.Cid) error { - return m.transactionHashLookup.UpsertHash(txHash, c) -} - -func (m *ethTxHashManager) GetCidFromHash(txHash ethtypes.EthHash) (cid.Cid, error) { - return m.transactionHashLookup.GetCidFromHash(txHash) -} - -func (m *ethTxHashManager) DeleteEntriesOlderThan(days int) (int64, error) { - return m.transactionHashLookup.DeleteEntriesOlderThan(days) -} - -func (m *ethTxHashManager) ProcessSignedMessage(ctx context.Context, msg *types.SignedMessage) { - if msg.Signature.Type != crypto.SigTypeDelegated { - return - } - - ethTx, err := ethtypes.EthTransactionFromSignedFilecoinMessage(msg) - if err != nil { - log.Errorf("error converting filecoin message to eth tx: %s", err) - return - } - - txHash, err := ethTx.TxHash() - if err != nil { - log.Errorf("error hashing transaction: %s", err) - return - } - - err = m.UpsertHash(txHash, msg.Cid()) - if err != nil { - log.Errorf("error inserting tx mapping to db: %s", err) - return - } -} - -func WaitForMpoolUpdates(ctx context.Context, ch <-chan api.MpoolUpdate, manager EthTxHashManager) { - for { - select { - case <-ctx.Done(): - return - case u := <-ch: - if u.Type != api.MpoolAdd { - continue - } - - manager.ProcessSignedMessage(ctx, u.Message) - } - } -} - -func EthTxHashGC(ctx context.Context, retentionDays int, manager EthTxHashManager) { - if retentionDays == 0 { - return - } - - gcPeriod := 1 * time.Hour - for { - entriesDeleted, err := manager.DeleteEntriesOlderThan(retentionDays) - if err != nil { - log.Errorf("error garbage collecting eth transaction hash database: %s", err) - } - log.Info("garbage collection run on eth transaction hash lookup database. 
%d entries deleted", entriesDeleted) - time.Sleep(gcPeriod) - } -} - -type EthTxHashManagerDummy struct{} - -func (d *EthTxHashManagerDummy) PopulateExistingMappings(ctx context.Context, minHeight abi.ChainEpoch) error { - return nil -} - -func (d *EthTxHashManagerDummy) Revert(ctx context.Context, from, to *types.TipSet) error { - return nil -} - -func (d *EthTxHashManagerDummy) Apply(ctx context.Context, from, to *types.TipSet) error { - return nil -} - -func (d *EthTxHashManagerDummy) ProcessSignedMessage(ctx context.Context, msg *types.SignedMessage) {} - -func (d *EthTxHashManagerDummy) UpsertHash(txHash ethtypes.EthHash, c cid.Cid) error { - return nil -} - -func (d *EthTxHashManagerDummy) GetCidFromHash(txHash ethtypes.EthHash) (cid.Cid, error) { - return cid.Undef, nil -} - -func (d *EthTxHashManagerDummy) DeleteEntriesOlderThan(days int) (int64, error) { - return 0, nil -} diff --git a/node/modules/actorevent.go b/node/modules/actorevent.go index 3b02be1c4e0..a77ae271a63 100644 --- a/node/modules/actorevent.go +++ b/node/modules/actorevent.go @@ -2,11 +2,9 @@ package modules import ( "context" - "path/filepath" "time" "go.uber.org/fx" - "golang.org/x/xerrors" "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" @@ -14,6 +12,7 @@ import ( "github.com/filecoin-project/lotus/build/buildconstants" "github.com/filecoin-project/lotus/chain/events" "github.com/filecoin-project/lotus/chain/events/filter" + "github.com/filecoin-project/lotus/chain/index" "github.com/filecoin-project/lotus/chain/messagepool" "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/store" @@ -43,7 +42,7 @@ func EthEventHandler(cfg config.EventsConfig, enableEthRPC bool) func(helpers.Me SubscribtionCtx: ctx, } - if !enableEthRPC || cfg.DisableRealTimeFilterAPI { + if !enableEthRPC { // all event functionality is disabled // the historic filter API relies on the real time one return ee, nil @@ -95,40 +94,17 @@ func EthEventHandler(cfg config.EventsConfig, enableEthRPC bool) func(helpers.Me } } -func EventFilterManager(cfg config.EventsConfig) func(helpers.MetricsCtx, repo.LockedRepo, fx.Lifecycle, *store.ChainStore, *stmgr.StateManager, EventHelperAPI, full.ChainAPI) (*filter.EventFilterManager, error) { - return func(mctx helpers.MetricsCtx, r repo.LockedRepo, lc fx.Lifecycle, cs *store.ChainStore, sm *stmgr.StateManager, evapi EventHelperAPI, chainapi full.ChainAPI) (*filter.EventFilterManager, error) { +func EventFilterManager(cfg config.EventsConfig) func(helpers.MetricsCtx, repo.LockedRepo, fx.Lifecycle, *store.ChainStore, + *stmgr.StateManager, EventHelperAPI, full.ChainAPI, index.Indexer) (*filter.EventFilterManager, error) { + return func(mctx helpers.MetricsCtx, r repo.LockedRepo, lc fx.Lifecycle, cs *store.ChainStore, sm *stmgr.StateManager, + evapi EventHelperAPI, chainapi full.ChainAPI, ci index.Indexer) (*filter.EventFilterManager, error) { ctx := helpers.LifecycleCtx(mctx, lc) // Enable indexing of actor events - var eventIndex *filter.EventIndex - if !cfg.DisableHistoricFilterAPI { - var dbPath string - if cfg.DatabasePath == "" { - sqlitePath, err := r.SqlitePath() - if err != nil { - return nil, xerrors.Errorf("failed to resolve event index database path: %w", err) - } - dbPath = filepath.Join(sqlitePath, filter.DefaultDbFilename) - } else { - dbPath = cfg.DatabasePath - } - - var err error - eventIndex, err = filter.NewEventIndex(ctx, dbPath, chainapi.Chain) - if err != nil { - return nil, xerrors.Errorf("failed to 
initialize event index database: %w", err) - } - - lc.Append(fx.Hook{ - OnStop: func(context.Context) error { - return eventIndex.Close() - }, - }) - } fm := &filter.EventFilterManager{ - ChainStore: cs, - EventIndex: eventIndex, // will be nil unless EnableHistoricFilterAPI is true + ChainStore: cs, + ChainIndexer: ci, // TODO: // We don't need this address resolution anymore once https://github.com/filecoin-project/lotus/issues/11594 lands AddressResolver: func(ctx context.Context, emitter abi.ActorID, ts *types.TipSet) (address.Address, bool) { @@ -165,7 +141,7 @@ func EventFilterManager(cfg config.EventsConfig) func(helpers.MetricsCtx, repo.L func ActorEventHandler(cfg config.EventsConfig) func(helpers.MetricsCtx, repo.LockedRepo, fx.Lifecycle, *filter.EventFilterManager, *store.ChainStore, *stmgr.StateManager, EventHelperAPI, *messagepool.MessagePool, full.StateAPI, full.ChainAPI) (*full.ActorEventHandler, error) { return func(mctx helpers.MetricsCtx, r repo.LockedRepo, lc fx.Lifecycle, fm *filter.EventFilterManager, cs *store.ChainStore, sm *stmgr.StateManager, evapi EventHelperAPI, mp *messagepool.MessagePool, stateapi full.StateAPI, chainapi full.ChainAPI) (*full.ActorEventHandler, error) { - if !cfg.EnableActorEventsAPI || cfg.DisableRealTimeFilterAPI { + if !cfg.EnableActorEventsAPI { return full.NewActorEventHandler( cs, nil, // no EventFilterManager disables API calls diff --git a/node/modules/chain.go b/node/modules/chain.go index d6779a6305a..cf088283ea5 100644 --- a/node/modules/chain.go +++ b/node/modules/chain.go @@ -21,7 +21,6 @@ import ( "github.com/filecoin-project/lotus/chain/consensus/filcns" "github.com/filecoin-project/lotus/chain/exchange" "github.com/filecoin-project/lotus/chain/gen/slashfilter" - "github.com/filecoin-project/lotus/chain/index" "github.com/filecoin-project/lotus/chain/messagepool" "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/store" @@ -124,7 +123,7 @@ func NetworkName(mctx helpers.MetricsCtx, ctx := helpers.LifecycleCtx(mctx, lc) - sm, err := stmgr.NewStateManager(cs, tsexec, syscalls, us, nil, nil, index.DummyMsgIndex) + sm, err := stmgr.NewStateManager(cs, tsexec, syscalls, us, nil, nil, nil) if err != nil { return "", err } diff --git a/node/modules/chainindex.go b/node/modules/chainindex.go new file mode 100644 index 00000000000..d2307a77600 --- /dev/null +++ b/node/modules/chainindex.go @@ -0,0 +1,134 @@ +package modules + +import ( + "context" + "path/filepath" + + "go.uber.org/fx" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/events" + "github.com/filecoin-project/lotus/chain/index" + "github.com/filecoin-project/lotus/chain/messagepool" + "github.com/filecoin-project/lotus/chain/stmgr" + "github.com/filecoin-project/lotus/chain/store" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/node/config" + "github.com/filecoin-project/lotus/node/impl/full" + "github.com/filecoin-project/lotus/node/modules/helpers" + "github.com/filecoin-project/lotus/node/repo" +) + +func ChainIndexer(cfg config.ChainIndexerConfig) func(lc fx.Lifecycle, mctx helpers.MetricsCtx, cs *store.ChainStore, r repo.LockedRepo) (index.Indexer, error) { + return func(lc fx.Lifecycle, mctx helpers.MetricsCtx, cs *store.ChainStore, r repo.LockedRepo) (index.Indexer, error) { + if !cfg.EnableIndexer { + log.Infof("ChainIndexer is 
disabled") + return nil, nil + } + + chainIndexPath, err := r.ChainIndexPath() + if err != nil { + return nil, err + } + + dbPath := filepath.Join(chainIndexPath, index.DefaultDbFilename) + chainIndexer, err := index.NewSqliteIndexer(dbPath, cs, cfg.GCRetentionEpochs, cfg.ReconcileEmptyIndex, cfg.MaxReconcileTipsets) + if err != nil { + return nil, err + } + + lc.Append(fx.Hook{ + OnStop: func(_ context.Context) error { + return chainIndexer.Close() + }, + }) + + return chainIndexer, nil + } +} + +func InitChainIndexer(lc fx.Lifecycle, mctx helpers.MetricsCtx, indexer index.Indexer, + evapi EventHelperAPI, mp *messagepool.MessagePool, sm *stmgr.StateManager) { + ctx := helpers.LifecycleCtx(mctx, lc) + + lc.Append(fx.Hook{ + OnStart: func(_ context.Context) error { + indexer.SetActorToDelegatedAddresFunc(func(ctx context.Context, emitter abi.ActorID, ts *types.TipSet) (address.Address, bool) { + idAddr, err := address.NewIDAddress(uint64(emitter)) + if err != nil { + return address.Undef, false + } + + actor, err := sm.LoadActor(ctx, idAddr, ts) + if err != nil || actor.DelegatedAddress == nil { + return idAddr, true + } + + return *actor.DelegatedAddress, true + }) + + indexer.SetRecomputeTipSetStateFunc(func(ctx context.Context, ts *types.TipSet) error { + _, _, err := sm.RecomputeTipSetState(ctx, ts) + return err + }) + + ch, err := mp.Updates(ctx) + if err != nil { + return err + } + go WaitForMpoolUpdates(ctx, ch, indexer) + + ev, err := events.NewEvents(ctx, &evapi) + if err != nil { + return err + } + + // Tipset listener + + // `ObserveAndBlock` returns the current head and guarantees that it will call the observer with all future tipsets + head, unlockObserver, err := ev.ObserveAndBlock(indexer) + if err != nil { + return xerrors.Errorf("error while observing tipsets: %w", err) + } + if err := indexer.ReconcileWithChain(ctx, head); err != nil { + unlockObserver() + return xerrors.Errorf("error while reconciling chain index with chain state: %w", err) + } + unlockObserver() + + indexer.Start() + + return nil + }, + }) +} + +func ChainIndexHandler(cfg config.ChainIndexerConfig) func(helpers.MetricsCtx, repo.LockedRepo, fx.Lifecycle, index.Indexer) (*full.ChainIndexHandler, error) { + return func(mctx helpers.MetricsCtx, r repo.LockedRepo, lc fx.Lifecycle, indexer index.Indexer) (*full.ChainIndexHandler, error) { + return full.NewChainIndexHandler(indexer), nil + } +} + +func WaitForMpoolUpdates(ctx context.Context, ch <-chan api.MpoolUpdate, indexer index.Indexer) { + for ctx.Err() == nil { + select { + case <-ctx.Done(): + return + case u := <-ch: + if u.Type != api.MpoolAdd { + continue + } + if u.Message == nil { + continue + } + err := indexer.IndexSignedMessage(ctx, u.Message) + if err != nil { + log.Errorw("failed to index signed Mpool message", "error", err) + } + } + } +} diff --git a/node/modules/ethmodule.go b/node/modules/ethmodule.go index d701cfb0c14..61d957b7fad 100644 --- a/node/modules/ethmodule.go +++ b/node/modules/ethmodule.go @@ -1,9 +1,6 @@ package modules import ( - "context" - "os" - "path/filepath" "time" "github.com/hashicorp/golang-lru/arc/v2" @@ -13,8 +10,7 @@ import ( "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/lotus/chain/ethhashlookup" - "github.com/filecoin-project/lotus/chain/events" + "github.com/filecoin-project/lotus/chain/index" "github.com/filecoin-project/lotus/chain/messagepool" "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/store" @@ -25,40 +21,11 @@ import ( 
"github.com/filecoin-project/lotus/node/repo" ) -func EthTxHashManager(cfg config.FevmConfig) func(helpers.MetricsCtx, repo.LockedRepo, fx.Lifecycle, *store.ChainStore, EventHelperAPI, *messagepool.MessagePool, full.StateAPI, full.SyncAPI) (full.EthTxHashManager, error) { - return func(mctx helpers.MetricsCtx, r repo.LockedRepo, lc fx.Lifecycle, cs *store.ChainStore, evapi EventHelperAPI, mp *messagepool.MessagePool, stateapi full.StateAPI, syncapi full.SyncAPI) (full.EthTxHashManager, error) { - ctx := helpers.LifecycleCtx(mctx, lc) - - sqlitePath, err := r.SqlitePath() - if err != nil { - return nil, err - } - - dbPath := filepath.Join(sqlitePath, ethhashlookup.DefaultDbFilename) - - // Check if the db exists, if not, we'll back-fill some entries - _, err = os.Stat(dbPath) - dbAlreadyExists := err == nil - - transactionHashLookup, err := ethhashlookup.NewTransactionHashLookup(ctx, dbPath) - if err != nil { - return nil, err - } - - lc.Append(fx.Hook{ - OnStop: func(ctx context.Context) error { - return transactionHashLookup.Close() - }, - }) - - ethTxHashManager := full.NewEthTxHashManager(stateapi, transactionHashLookup) - - if !dbAlreadyExists { - err = ethTxHashManager.PopulateExistingMappings(mctx, 0) - if err != nil { - return nil, err - } - } +func EthModuleAPI(cfg config.FevmConfig) func(helpers.MetricsCtx, repo.LockedRepo, fx.Lifecycle, *store.ChainStore, *stmgr.StateManager, + EventHelperAPI, *messagepool.MessagePool, full.StateAPI, full.ChainAPI, full.MpoolAPI, full.SyncAPI, *full.EthEventHandler, index.Indexer) (*full.EthModule, error) { + return func(mctx helpers.MetricsCtx, r repo.LockedRepo, lc fx.Lifecycle, cs *store.ChainStore, sm *stmgr.StateManager, evapi EventHelperAPI, + mp *messagepool.MessagePool, stateapi full.StateAPI, chainapi full.ChainAPI, mpoolapi full.MpoolAPI, syncapi full.SyncAPI, + ethEventHandler *full.EthEventHandler, chainIndexer index.Indexer) (*full.EthModule, error) { // prefill the whole skiplist cache maintained internally by the GetTipsetByHeight go func() { @@ -71,37 +38,9 @@ func EthTxHashManager(cfg config.FevmConfig) func(helpers.MetricsCtx, repo.Locke log.Infof("Prefilling GetTipsetByHeight done in %s", time.Since(start)) }() - lc.Append(fx.Hook{ - OnStart: func(context.Context) error { - ev, err := events.NewEvents(ctx, &evapi) - if err != nil { - return err - } - - // Tipset listener - _ = ev.Observe(ethTxHashManager) - - ch, err := mp.Updates(ctx) - if err != nil { - return err - } - go full.WaitForMpoolUpdates(ctx, ch, ethTxHashManager) - go full.EthTxHashGC(ctx, cfg.EthTxHashMappingLifetimeDays, ethTxHashManager) - - return nil - }, - }) - - return ethTxHashManager, nil - } -} - -func EthModuleAPI(cfg config.FevmConfig) func(helpers.MetricsCtx, repo.LockedRepo, fx.Lifecycle, *store.ChainStore, *stmgr.StateManager, *messagepool.MessagePool, full.StateAPI, full.ChainAPI, full.MpoolAPI, full.SyncAPI, *full.EthEventHandler, full.EthTxHashManager) (*full.EthModule, error) { - return func(mctx helpers.MetricsCtx, r repo.LockedRepo, lc fx.Lifecycle, cs *store.ChainStore, sm *stmgr.StateManager, mp *messagepool.MessagePool, stateapi full.StateAPI, chainapi full.ChainAPI, mpoolapi full.MpoolAPI, syncapi full.SyncAPI, ethEventHandler *full.EthEventHandler, ethTxHashManager full.EthTxHashManager) (*full.EthModule, error) { - + var err error var blkCache *arc.ARCCache[cid.Cid, *ethtypes.EthBlock] var blkTxCache *arc.ARCCache[cid.Cid, *ethtypes.EthBlock] - var err error if cfg.EthBlkCacheSize > 0 { blkCache, err = arc.NewARC[cid.Cid, 
*ethtypes.EthBlock](cfg.EthBlkCacheSize) if err != nil { @@ -125,11 +64,12 @@ func EthModuleAPI(cfg config.FevmConfig) func(helpers.MetricsCtx, repo.LockedRep SyncAPI: syncapi, EthEventHandler: ethEventHandler, - EthTxHashManager: ethTxHashManager, EthTraceFilterMaxResults: cfg.EthTraceFilterMaxResults, EthBlkCache: blkCache, EthBlkTxCache: blkTxCache, + + ChainIndexer: chainIndexer, }, nil } } diff --git a/node/modules/msgindex.go b/node/modules/msgindex.go deleted file mode 100644 index 423be65d1b7..00000000000 --- a/node/modules/msgindex.go +++ /dev/null @@ -1,37 +0,0 @@ -package modules - -import ( - "context" - "path/filepath" - - "go.uber.org/fx" - - "github.com/filecoin-project/lotus/chain/index" - "github.com/filecoin-project/lotus/chain/store" - "github.com/filecoin-project/lotus/node/modules/helpers" - "github.com/filecoin-project/lotus/node/repo" -) - -func MsgIndex(lc fx.Lifecycle, mctx helpers.MetricsCtx, cs *store.ChainStore, r repo.LockedRepo) (index.MsgIndex, error) { - basePath, err := r.SqlitePath() - if err != nil { - return nil, err - } - - msgIndex, err := index.NewMsgIndex(helpers.LifecycleCtx(mctx, lc), filepath.Join(basePath, index.DefaultDbFilename), cs) - if err != nil { - return nil, err - } - - lc.Append(fx.Hook{ - OnStop: func(_ context.Context) error { - return msgIndex.Close() - }, - }) - - return msgIndex, nil -} - -func DummyMsgIndex() index.MsgIndex { - return index.DummyMsgIndex -} diff --git a/node/modules/stmgr.go b/node/modules/stmgr.go index f3eaee219c5..d07edba1a2b 100644 --- a/node/modules/stmgr.go +++ b/node/modules/stmgr.go @@ -11,8 +11,8 @@ import ( "github.com/filecoin-project/lotus/node/modules/dtypes" ) -func StateManager(lc fx.Lifecycle, cs *store.ChainStore, exec stmgr.Executor, sys vm.SyscallBuilder, us stmgr.UpgradeSchedule, b beacon.Schedule, metadataDs dtypes.MetadataDS, msgIndex index.MsgIndex) (*stmgr.StateManager, error) { - sm, err := stmgr.NewStateManager(cs, exec, sys, us, b, metadataDs, msgIndex) +func StateManager(lc fx.Lifecycle, cs *store.ChainStore, exec stmgr.Executor, sys vm.SyscallBuilder, us stmgr.UpgradeSchedule, b beacon.Schedule, metadataDs dtypes.MetadataDS, chainIndexer index.Indexer) (*stmgr.StateManager, error) { + sm, err := stmgr.NewStateManager(cs, exec, sys, us, b, metadataDs, chainIndexer) if err != nil { return nil, err } diff --git a/node/repo/fsrepo.go b/node/repo/fsrepo.go index 26cbbd6b135..1c2e9f738d4 100644 --- a/node/repo/fsrepo.go +++ b/node/repo/fsrepo.go @@ -37,7 +37,7 @@ const ( fsDatastore = "datastore" fsLock = "repo.lock" fsKeystore = "keystore" - fsSqlite = "sqlite" + fsChainIndex = "chainindex" ) func NewRepoTypeFromString(t string) RepoType { @@ -376,9 +376,9 @@ type fsLockedRepo struct { ssErr error ssOnce sync.Once - sqlPath string - sqlErr error - sqlOnce sync.Once + chainIndexPath string + chainIndexErr error + chainIndexOnce sync.Once storageLk sync.Mutex configLk sync.Mutex @@ -473,19 +473,19 @@ func (fsr *fsLockedRepo) SplitstorePath() (string, error) { return fsr.ssPath, fsr.ssErr } -func (fsr *fsLockedRepo) SqlitePath() (string, error) { - fsr.sqlOnce.Do(func() { - path := fsr.join(fsSqlite) +func (fsr *fsLockedRepo) ChainIndexPath() (string, error) { + fsr.chainIndexOnce.Do(func() { + path := fsr.join(fsChainIndex) if err := os.MkdirAll(path, 0755); err != nil { - fsr.sqlErr = err + fsr.chainIndexErr = err return } - fsr.sqlPath = path + fsr.chainIndexPath = path }) - return fsr.sqlPath, fsr.sqlErr + return fsr.chainIndexPath, fsr.chainIndexErr } // join joins path elements with 
fsr.path diff --git a/node/repo/interface.go b/node/repo/interface.go index 11c965bf55c..100d0dc58d5 100644 --- a/node/repo/interface.go +++ b/node/repo/interface.go @@ -69,8 +69,8 @@ type LockedRepo interface { // SplitstorePath returns the path for the SplitStore SplitstorePath() (string, error) - // SqlitePath returns the path for the Sqlite database - SqlitePath() (string, error) + // ChainIndexPath returns the path for the chain index database + ChainIndexPath() (string, error) // Returns config in this repo Config() (interface{}, error) diff --git a/node/repo/memrepo.go b/node/repo/memrepo.go index d1e9b214b4a..cda00f985f2 100644 --- a/node/repo/memrepo.go +++ b/node/repo/memrepo.go @@ -268,12 +268,12 @@ func (lmem *lockedMemRepo) SplitstorePath() (string, error) { return splitstorePath, nil } -func (lmem *lockedMemRepo) SqlitePath() (string, error) { - sqlitePath := filepath.Join(lmem.Path(), "sqlite") - if err := os.MkdirAll(sqlitePath, 0755); err != nil { +func (lmem *lockedMemRepo) ChainIndexPath() (string, error) { + chainIndexPath := filepath.Join(lmem.Path(), "chainindex") + if err := os.MkdirAll(chainIndexPath, 0755); err != nil { return "", err } - return sqlitePath, nil + return chainIndexPath, nil } func (lmem *lockedMemRepo) ListDatastores(ns string) ([]int64, error) {
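
A note on the tx-hash lookups in node/impl/full/eth.go above: the handlers treat index.ErrNotFound from the ChainIndexer as a soft miss (the hash may still resolve as a plain Filecoin message CID further down), while any other indexer error aborts the call and a nil indexer fails fast with ErrChainIndexerDisabled. A minimal sketch of that convention follows; the helper name cidForTxHash and its boolean "found" result are hypothetical, while GetCidFromHash and index.ErrNotFound are the APIs introduced here (all sketches below assume the import set of the files they annotate):

func cidForTxHash(ctx context.Context, indexer index.Indexer, txHash ethtypes.EthHash) (cid.Cid, bool, error) {
	// a nil indexer means the ChainIndexer is disabled: a hard failure for the ETH APIs
	if indexer == nil {
		return cid.Undef, false, ErrChainIndexerDisabled
	}
	c, err := indexer.GetCidFromHash(ctx, txHash)
	if errors.Is(err, index.ErrNotFound) {
		// soft miss: the caller may still interpret the hash as a native message CID
		return cid.Undef, false, nil
	}
	if err != nil {
		// genuine index failure: surface it to the caller
		return cid.Undef, false, xerrors.Errorf("chain indexer lookup: %w", err)
	}
	return c, true, nil
}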
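On the event-range guards in ethGetEventsForFilter and parseEthFilterSpec: Filecoin defers execution, so messages included at epoch H only produce receipts and events at H+1, and the newest epoch the index can serve is therefore head-1. A sketch of a direct query against the new index.EventFilter type under that constraint; eventsInRange, from, to and the MaxResults value are illustrative, while the field names and GetEventsForFilter match this patch:

func eventsInRange(ctx context.Context, indexer index.Indexer, head *types.TipSet, from, to abi.ChainEpoch) ([]*index.CollectedEvent, error) {
	// events for epoch H are only materialized at H+1, so head itself is never servable
	if from >= head.Height() || to >= head.Height() {
		return nil, xerrors.New("cannot ask for events for a tipset at or greater than head")
	}
	ef := &index.EventFilter{
		MinHeight:  from,
		MaxHeight:  to,
		MaxResults: 10000, // illustrative cap; the module wires MaxFilterResults from config
	}
	return indexer.GetEventsForFilter(ctx, ef)
}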
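The new ChainValidateIndex surface (node/impl/full/chain_index.go) gives operators a per-epoch check-and-repair entry point on the FullNode API. A hypothetical client-side call, assuming fullNode is a connected api.FullNode and the epoch value is arbitrary:

// validate the index at one epoch; backfill=true asks the node to repair missing data
validation, err := fullNode.ChainValidateIndex(ctx, abi.ChainEpoch(1000000), true)
if err != nil {
	return xerrors.Errorf("index validation failed: %w", err)
}
fmt.Printf("index validation result: %+v\n", validation)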
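Finally, the knobs that node/modules/chainindex.go reads from config.ChainIndexerConfig. A sketch of the struct as this patch consumes it: the field set (EnableIndexer, GCRetentionEpochs, ReconcileEmptyIndex, MaxReconcileTipsets) is taken from the constructor call above, while the values and the comments' interpretations are illustrative assumptions, not recommended defaults:

cfg := config.ChainIndexerConfig{
	EnableIndexer:       true,     // when false, the indexer is wired as nil and dependent APIs return ErrChainIndexerDisabled
	GCRetentionEpochs:   8 * 2880, // assumed meaning: epochs of history retained before GC removes older index entries
	ReconcileEmptyIndex: false,    // assumed meaning: whether startup reconciliation may populate a brand-new index
	MaxReconcileTipsets: 3 * 2880, // assumed meaning: cap on tipsets walked during startup reconciliation
}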