From c98a57201d30b6ab8c690f06b813334f8511b8a3 Mon Sep 17 00:00:00 2001 From: Aarsh Shah Date: Thu, 20 Jun 2024 12:00:49 +0400 Subject: [PATCH 1/8] fix: events index: record processed epochs and tipsets for events and eth_get_log blocks till requested tipset has been indexed (#12080) * record seen event epochs * create correct index * migrate to version 6 * fix typo * test both conditions * changes as per review * record reverted tipsets * see if tipsets has events and has not been reverted * sub/unsub tipset updates from the index * eth_get_logs should wait for events * fix naming * changes as per review * solve issue with missing events * use correct var * changes as per review * add unique constraint * fix test wait * check for events at min_height as well * Apply suggestions from code review Co-authored-by: Rod Vagg * reduce duplication --------- Co-authored-by: Rod Vagg --- chain/events/filter/index.go | 253 +++++++++++++++++++++++++++++- chain/events/filter/index_test.go | 60 +++++++ node/impl/full/eth.go | 123 ++++++++++++++- 3 files changed, 424 insertions(+), 12 deletions(-) diff --git a/chain/events/filter/index.go b/chain/events/filter/index.go index 5ebb8fb580c..fed6c42eb31 100644 --- a/chain/events/filter/index.go +++ b/chain/events/filter/index.go @@ -7,6 +7,7 @@ import ( "fmt" "sort" "strings" + "sync" "time" "github.com/ipfs/go-cid" @@ -62,9 +63,13 @@ var ddls = []string{ value BLOB NOT NULL )`, + createTableEventsSeen, + createIndexEventEntryIndexedKey, createIndexEventEntryCodecValue, createIndexEventEntryEventId, + createIndexEventsSeenHeight, + createIndexEventsSeenTipsetKeyCid, // metadata containing version of schema `CREATE TABLE IF NOT EXISTS _meta ( @@ -76,6 +81,7 @@ var ddls = []string{ `INSERT OR IGNORE INTO _meta (version) VALUES (3)`, `INSERT OR IGNORE INTO _meta (version) VALUES (4)`, `INSERT OR IGNORE INTO _meta (version) VALUES (5)`, + `INSERT OR IGNORE INTO _meta (version) VALUES (6)`, } var ( @@ -83,13 +89,19 @@ var ( ) const ( - 
schemaVersion = 5 + schemaVersion = 6 eventExists = `SELECT MAX(id) FROM event WHERE height=? AND tipset_key=? AND tipset_key_cid=? AND emitter_addr=? AND event_index=? AND message_cid=? AND message_index=?` insertEvent = `INSERT OR IGNORE INTO event(height, tipset_key, tipset_key_cid, emitter_addr, event_index, message_cid, message_index, reverted) VALUES(?, ?, ?, ?, ?, ?, ?, ?)` insertEntry = `INSERT OR IGNORE INTO event_entry(event_id, indexed, flags, key, codec, value) VALUES(?, ?, ?, ?, ?, ?)` revertEventsInTipset = `UPDATE event SET reverted=true WHERE height=? AND tipset_key=?` restoreEvent = `UPDATE event SET reverted=false WHERE height=? AND tipset_key=? AND tipset_key_cid=? AND emitter_addr=? AND event_index=? AND message_cid=? AND message_index=?` + revertEventSeen = `UPDATE events_seen SET reverted=true WHERE height=? AND tipset_key_cid=?` + restoreEventSeen = `UPDATE events_seen SET reverted=false WHERE height=? AND tipset_key_cid=?` + upsertEventsSeen = `INSERT INTO events_seen(height, tipset_key_cid, reverted) VALUES(?, ?, false) ON CONFLICT(height, tipset_key_cid) DO UPDATE SET reverted=false` + isTipsetProcessed = `SELECT COUNT(*) > 0 FROM events_seen WHERE tipset_key_cid=?` + getMaxHeightInIndex = `SELECT MAX(height) FROM events_seen` + isHeightProcessed = `SELECT COUNT(*) > 0 FROM events_seen WHERE height=?` createIndexEventEmitterAddr = `CREATE INDEX IF NOT EXISTS event_emitter_addr ON event (emitter_addr)` createIndexEventTipsetKeyCid = `CREATE INDEX IF NOT EXISTS event_tipset_key_cid ON event (tipset_key_cid);` @@ -99,6 +111,17 @@ const ( createIndexEventEntryIndexedKey = `CREATE INDEX IF NOT EXISTS event_entry_indexed_key ON event_entry (indexed, key);` createIndexEventEntryCodecValue = `CREATE INDEX IF NOT EXISTS event_entry_codec_value ON event_entry (codec, value);` createIndexEventEntryEventId = `CREATE INDEX IF NOT EXISTS event_entry_event_id ON event_entry(event_id);` + + createTableEventsSeen = `CREATE TABLE IF NOT EXISTS events_seen ( 
+ id INTEGER PRIMARY KEY, + height INTEGER NOT NULL, + tipset_key_cid BLOB NOT NULL, + reverted INTEGER NOT NULL, + UNIQUE(height, tipset_key_cid) + )` + + createIndexEventsSeenHeight = `CREATE INDEX IF NOT EXISTS events_seen_height ON events_seen (height);` + createIndexEventsSeenTipsetKeyCid = `CREATE INDEX IF NOT EXISTS events_seen_tipset_key_cid ON events_seen (tipset_key_cid);` ) type EventIndex struct { @@ -109,8 +132,27 @@ type EventIndex struct { stmtInsertEntry *sql.Stmt stmtRevertEventsInTipset *sql.Stmt stmtRestoreEvent *sql.Stmt + stmtUpsertEventsSeen *sql.Stmt + stmtRevertEventSeen *sql.Stmt + stmtRestoreEventSeen *sql.Stmt + + stmtIsTipsetProcessed *sql.Stmt + stmtGetMaxHeightInIndex *sql.Stmt + stmtIsHeightProcessed *sql.Stmt + + mu sync.Mutex + subIdCounter uint64 + updateSubs map[uint64]*updateSub +} + +type updateSub struct { + ctx context.Context + ch chan EventIndexUpdated + cancel context.CancelFunc } +type EventIndexUpdated struct{} + func (ei *EventIndex) initStatements() (err error) { ei.stmtEventExists, err = ei.db.Prepare(eventExists) if err != nil { @@ -137,6 +179,36 @@ func (ei *EventIndex) initStatements() (err error) { return xerrors.Errorf("prepare stmtRestoreEvent: %w", err) } + ei.stmtUpsertEventsSeen, err = ei.db.Prepare(upsertEventsSeen) + if err != nil { + return xerrors.Errorf("prepare stmtUpsertEventsSeen: %w", err) + } + + ei.stmtRevertEventSeen, err = ei.db.Prepare(revertEventSeen) + if err != nil { + return xerrors.Errorf("prepare stmtRevertEventSeen: %w", err) + } + + ei.stmtRestoreEventSeen, err = ei.db.Prepare(restoreEventSeen) + if err != nil { + return xerrors.Errorf("prepare stmtRestoreEventSeen: %w", err) + } + + ei.stmtIsTipsetProcessed, err = ei.db.Prepare(isTipsetProcessed) + if err != nil { + return xerrors.Errorf("prepare isTipsetProcessed: %w", err) + } + + ei.stmtGetMaxHeightInIndex, err = ei.db.Prepare(getMaxHeightInIndex) + if err != nil { + return xerrors.Errorf("prepare getMaxHeightInIndex: %w", err) + } + 
+ ei.stmtIsHeightProcessed, err = ei.db.Prepare(isHeightProcessed) + if err != nil { + return xerrors.Errorf("prepare isHeightProcessed: %w", err) + } + return nil } @@ -402,9 +474,59 @@ func (ei *EventIndex) migrateToVersion5(ctx context.Context) error { return xerrors.Errorf("commit transaction: %w", err) } + log.Infof("Successfully migrated event index from version 4 to version 5 in %s", time.Since(now)) + return nil +} + +func (ei *EventIndex) migrateToVersion6(ctx context.Context) error { + now := time.Now() + + tx, err := ei.db.BeginTx(ctx, nil) + if err != nil { + return xerrors.Errorf("begin transaction: %w", err) + } + defer func() { _ = tx.Rollback() }() + + stmtCreateTableEventsSeen, err := tx.PrepareContext(ctx, createTableEventsSeen) + if err != nil { + return xerrors.Errorf("prepare stmtCreateTableEventsSeen: %w", err) + } + _, err = stmtCreateTableEventsSeen.ExecContext(ctx) + if err != nil { + return xerrors.Errorf("create table events_seen: %w", err) + } + + _, err = tx.ExecContext(ctx, createIndexEventsSeenHeight) + if err != nil { + return xerrors.Errorf("create index events_seen_height: %w", err) + } + _, err = tx.ExecContext(ctx, createIndexEventsSeenTipsetKeyCid) + if err != nil { + return xerrors.Errorf("create index events_seen_tipset_key_cid: %w", err) + } + + // INSERT an entry in the events_seen table for all epochs we do have events for in our DB + _, err = tx.ExecContext(ctx, ` + INSERT OR IGNORE INTO events_seen (height, tipset_key_cid, reverted) + SELECT DISTINCT height, tipset_key_cid, reverted FROM event +`) + if err != nil { + return xerrors.Errorf("insert events into events_seen: %w", err) + } + + _, err = tx.ExecContext(ctx, "INSERT OR IGNORE INTO _meta (version) VALUES (6)") + if err != nil { + return xerrors.Errorf("increment _meta version: %w", err) + } + + err = tx.Commit() + if err != nil { + return xerrors.Errorf("commit transaction: %w", err) + } + ei.vacuumDBAndCheckpointWAL(ctx) - log.Infof("Successfully migrated event 
index from version 4 to version 5 in %s", time.Since(now)) + log.Infof("Successfully migrated event index from version 5 to version 6 in %s", time.Since(now)) return nil } @@ -502,6 +624,16 @@ func NewEventIndex(ctx context.Context, path string, chainStore *store.ChainStor version = 5 } + if version == 5 { + log.Infof("Upgrading event index from version 5 to version 6") + err = eventIndex.migrateToVersion6(ctx) + if err != nil { + _ = db.Close() + return nil, xerrors.Errorf("could not migrate event index schema from version 5 to version 6: %w", err) + } + version = 6 + } + if version != schemaVersion { _ = db.Close() return nil, xerrors.Errorf("invalid database version: got %d, expected %d", version, schemaVersion) @@ -514,6 +646,8 @@ func NewEventIndex(ctx context.Context, path string, chainStore *store.ChainStor return nil, xerrors.Errorf("error preparing eventIndex database statements: %w", err) } + eventIndex.updateSubs = make(map[uint64]*updateSub) + return &eventIndex, nil } @@ -524,6 +658,60 @@ func (ei *EventIndex) Close() error { return ei.db.Close() } +func (ei *EventIndex) SubscribeUpdates() (chan EventIndexUpdated, func()) { + subCtx, subCancel := context.WithCancel(context.Background()) + ch := make(chan EventIndexUpdated) + + tSub := &updateSub{ + ctx: subCtx, + cancel: subCancel, + ch: ch, + } + + ei.mu.Lock() + subId := ei.subIdCounter + ei.subIdCounter++ + ei.updateSubs[subId] = tSub + ei.mu.Unlock() + + unSubscribeF := func() { + ei.mu.Lock() + tSub, ok := ei.updateSubs[subId] + if !ok { + ei.mu.Unlock() + return + } + delete(ei.updateSubs, subId) + ei.mu.Unlock() + + // cancel the subscription + tSub.cancel() + } + + return tSub.ch, unSubscribeF +} + +func (ei *EventIndex) GetMaxHeightInIndex(ctx context.Context) (uint64, error) { + row := ei.stmtGetMaxHeightInIndex.QueryRowContext(ctx) + var maxHeight uint64 + err := row.Scan(&maxHeight) + return maxHeight, err +} + +func (ei *EventIndex) IsHeightProcessed(ctx context.Context, height uint64) 
(bool, error) { + row := ei.stmtIsHeightProcessed.QueryRowContext(ctx, height) + var exists bool + err := row.Scan(&exists) + return exists, err +} + +func (ei *EventIndex) IsTipsetProcessed(ctx context.Context, tipsetKeyCid []byte) (bool, error) { + row := ei.stmtIsTipsetProcessed.QueryRowContext(ctx, tipsetKeyCid) + var exists bool + err := row.Scan(&exists) + return exists, err +} + func (ei *EventIndex) CollectEvents(ctx context.Context, te *TipSetEvents, revert bool, resolver func(ctx context.Context, emitter abi.ActorID, ts *types.TipSet) (address.Address, bool)) error { tx, err := ei.db.BeginTx(ctx, nil) if err != nil { @@ -532,6 +720,11 @@ func (ei *EventIndex) CollectEvents(ctx context.Context, te *TipSetEvents, rever // rollback the transaction (a no-op if the transaction was already committed) defer func() { _ = tx.Rollback() }() + tsKeyCid, err := te.msgTs.Key().Cid() + if err != nil { + return xerrors.Errorf("tipset key cid: %w", err) + } + // lets handle the revert case first, since its simpler and we can simply mark all events in this tipset as reverted and return if revert { _, err = tx.Stmt(ei.stmtRevertEventsInTipset).Exec(te.msgTs.Height(), te.msgTs.Key().Bytes()) @@ -539,11 +732,34 @@ func (ei *EventIndex) CollectEvents(ctx context.Context, te *TipSetEvents, rever return xerrors.Errorf("revert event: %w", err) } + _, err = tx.Stmt(ei.stmtRevertEventSeen).Exec(te.msgTs.Height(), tsKeyCid.Bytes()) + if err != nil { + return xerrors.Errorf("revert event seen: %w", err) + } + err = tx.Commit() if err != nil { return xerrors.Errorf("commit transaction: %w", err) } + ei.mu.Lock() + tSubs := make([]*updateSub, 0, len(ei.updateSubs)) + for _, tSub := range ei.updateSubs { + tSubs = append(tSubs, tSub) + } + ei.mu.Unlock() + + for _, tSub := range tSubs { + tSub := tSub + select { + case tSub.ch <- EventIndexUpdated{}: + case <-tSub.ctx.Done(): + // subscription was cancelled, ignore + case <-ctx.Done(): + return ctx.Err() + } + } + return nil } @@ 
-571,11 +787,6 @@ func (ei *EventIndex) CollectEvents(ctx context.Context, te *TipSetEvents, rever addressLookups[ev.Emitter] = addr } - tsKeyCid, err := te.msgTs.Key().Cid() - if err != nil { - return xerrors.Errorf("tipset key cid: %w", err) - } - // check if this event already exists in the database var entryID sql.NullInt64 err = tx.Stmt(ei.stmtEventExists).QueryRow( @@ -655,11 +866,39 @@ func (ei *EventIndex) CollectEvents(ctx context.Context, te *TipSetEvents, rever } } + // this statement will mark the tipset as processed and will insert a new row if it doesn't exist + // or update the reverted field to false if it does + _, err = tx.Stmt(ei.stmtUpsertEventsSeen).Exec( + te.msgTs.Height(), + tsKeyCid.Bytes(), + ) + if err != nil { + return xerrors.Errorf("exec upsert events seen: %w", err) + } + err = tx.Commit() if err != nil { return xerrors.Errorf("commit transaction: %w", err) } + ei.mu.Lock() + tSubs := make([]*updateSub, 0, len(ei.updateSubs)) + for _, tSub := range ei.updateSubs { + tSubs = append(tSubs, tSub) + } + ei.mu.Unlock() + + for _, tSub := range tSubs { + tSub := tSub + select { + case tSub.ch <- EventIndexUpdated{}: + case <-tSub.ctx.Done(): + // subscription was cancelled, ignore + case <-ctx.Done(): + return ctx.Err() + } + } + return nil } diff --git a/chain/events/filter/index_test.go b/chain/events/filter/index_test.go index ce3f7b78a03..10b3eb57779 100644 --- a/chain/events/filter/index_test.go +++ b/chain/events/filter/index_test.go @@ -76,10 +76,50 @@ func TestEventIndexPrefillFilter(t *testing.T) { ei, err := NewEventIndex(context.Background(), dbPath, nil) require.NoError(t, err, "create event index") + + subCh, unSubscribe := ei.SubscribeUpdates() + defer unSubscribe() + + out := make(chan EventIndexUpdated, 1) + go func() { + tu := <-subCh + out <- tu + }() + if err := ei.CollectEvents(context.Background(), events14000, false, addrMap.ResolveAddress); err != nil { require.NoError(t, err, "collect events") } + mh, err := 
ei.GetMaxHeightInIndex(context.Background()) + require.NoError(t, err) + require.Equal(t, uint64(14000), mh) + + b, err := ei.IsHeightProcessed(context.Background(), 14000) + require.NoError(t, err) + require.True(t, b) + + b, err = ei.IsHeightProcessed(context.Background(), 14001) + require.NoError(t, err) + require.False(t, b) + + b, err = ei.IsHeightProcessed(context.Background(), 13000) + require.NoError(t, err) + require.False(t, b) + + tsKey := events14000.msgTs.Key() + tsKeyCid, err := tsKey.Cid() + require.NoError(t, err, "tipset key cid") + + seen, err := ei.IsTipsetProcessed(context.Background(), tsKeyCid.Bytes()) + require.NoError(t, err) + require.True(t, seen, "tipset key should be seen") + + seen, err = ei.IsTipsetProcessed(context.Background(), []byte{1}) + require.NoError(t, err) + require.False(t, seen, "tipset key should not be seen") + + _ = <-out + testCases := []struct { name string filter *eventFilter @@ -397,6 +437,22 @@ func TestEventIndexPrefillFilterExcludeReverted(t *testing.T) { ei, err := NewEventIndex(context.Background(), dbPath, nil) require.NoError(t, err, "create event index") + + tCh := make(chan EventIndexUpdated, 3) + subCh, unSubscribe := ei.SubscribeUpdates() + defer unSubscribe() + go func() { + cnt := 0 + for tu := range subCh { + tCh <- tu + cnt++ + if cnt == 3 { + close(tCh) + return + } + } + }() + if err := ei.CollectEvents(context.Background(), revertedEvents14000, false, addrMap.ResolveAddress); err != nil { require.NoError(t, err, "collect reverted events") } @@ -407,6 +463,10 @@ func TestEventIndexPrefillFilterExcludeReverted(t *testing.T) { require.NoError(t, err, "collect events") } + _ = <-tCh + _ = <-tCh + _ = <-tCh + inclusiveTestCases := []struct { name string filter *eventFilter diff --git a/node/impl/full/eth.go b/node/impl/full/eth.go index 82f272c6cff..27d7002e440 100644 --- a/node/impl/full/eth.go +++ b/node/impl/full/eth.go @@ -44,6 +44,11 @@ var ErrUnsupported = errors.New("unsupported method") const 
maxEthFeeHistoryRewardPercentiles = 100 +var ( + // wait for 3 epochs + eventReadTimeout = 90 * time.Second +) + type EthModuleAPI interface { EthBlockNumber(ctx context.Context) (ethtypes.EthUint64, error) EthAccounts(ctx context.Context) ([]ethtypes.EthAddress, error) @@ -1258,10 +1263,58 @@ func (e *EthEventHandler) EthGetLogs(ctx context.Context, filterSpec *ethtypes.E return nil, api.ErrNotSupported } + if e.EventFilterManager.EventIndex == nil { + return nil, xerrors.Errorf("cannot use eth_get_logs if historical event index is disabled") + } + + pf, err := e.parseEthFilterSpec(ctx, filterSpec) + if err != nil { + return nil, xerrors.Errorf("failed to parse eth filter spec: %w", err) + } + + if pf.tipsetCid == cid.Undef { + maxHeight := pf.maxHeight + if maxHeight == -1 { + maxHeight = e.Chain.GetHeaviestTipSet().Height() + } + if maxHeight > e.Chain.GetHeaviestTipSet().Height() { + return nil, xerrors.Errorf("maxHeight requested is greater than the heaviest tipset") + } + + err := e.waitForHeightProcessed(ctx, maxHeight) + if err != nil { + return nil, err + } + + // should also have the minHeight in the filter indexed + if b, err := e.EventFilterManager.EventIndex.IsHeightProcessed(ctx, uint64(pf.minHeight)); err != nil { + return nil, xerrors.Errorf("failed to check if event index has events for the minHeight: %w", err) + } else if !b { + return nil, xerrors.Errorf("event index does not have event for epoch %d", pf.minHeight) + } + } else { + ts, err := e.Chain.GetTipSetByCid(ctx, pf.tipsetCid) + if err != nil { + return nil, xerrors.Errorf("failed to get tipset by cid: %w", err) + } + err = e.waitForHeightProcessed(ctx, ts.Height()) + if err != nil { + return nil, err + } + + b, err := e.EventFilterManager.EventIndex.IsTipsetProcessed(ctx, pf.tipsetCid.Bytes()) + if err != nil { + return nil, xerrors.Errorf("failed to check if tipset events have been indexed: %w", err) + } + if !b { + return nil, xerrors.Errorf("event index failed to index tipset %s", 
pf.tipsetCid.String()) + } + } + // Create a temporary filter - f, err := e.installEthFilterSpec(ctx, filterSpec) + f, err := e.EventFilterManager.Install(ctx, pf.minHeight, pf.maxHeight, pf.tipsetCid, pf.addresses, pf.keys, true) if err != nil { - return nil, err + return nil, xerrors.Errorf("failed to install event filter: %w", err) } ces := f.TakeCollectedEvents(ctx) @@ -1270,6 +1323,47 @@ func (e *EthEventHandler) EthGetLogs(ctx context.Context, filterSpec *ethtypes.E return ethFilterResultFromEvents(ctx, ces, e.SubManager.StateAPI) } +func (e *EthEventHandler) waitForHeightProcessed(ctx context.Context, height abi.ChainEpoch) error { + ei := e.EventFilterManager.EventIndex + if height > e.Chain.GetHeaviestTipSet().Height() { + return xerrors.New("height is in the future") + } + + ctx, cancel := context.WithTimeout(ctx, eventReadTimeout) + defer cancel() + + // if the height we're interested in has already been indexed -> there's nothing to do here + if b, err := ei.IsHeightProcessed(ctx, uint64(height)); err != nil { + return xerrors.Errorf("failed to check if event index has events for given height: %w", err) + } else if b { + return nil + } + + // subscribe for updates to the event index + subCh, unSubscribeF := ei.SubscribeUpdates() + defer unSubscribeF() + + // it could be that the event index was updated while the subscription was being processed -> check if index has what we need now + if b, err := ei.IsHeightProcessed(ctx, uint64(height)); err != nil { + return xerrors.Errorf("failed to check if event index has events for given height: %w", err) + } else if b { + return nil + } + + for { + select { + case <-subCh: + if b, err := ei.IsHeightProcessed(ctx, uint64(height)); err != nil { + return xerrors.Errorf("failed to check if event index has events for given height: %w", err) + } else if b { + return nil + } + case <-ctx.Done(): + return ctx.Err() + } + } +} + +func (e *EthEventHandler) EthGetFilterChanges(ctx context.Context, id ethtypes.EthFilterID) 
(*ethtypes.EthFilterResult, error) { if e.FilterStore == nil { return nil, api.ErrNotSupported @@ -1368,7 +1462,15 @@ func parseBlockRange(heaviest abi.ChainEpoch, fromBlock, toBlock *string, maxRan return minHeight, maxHeight, nil } -func (e *EthEventHandler) installEthFilterSpec(ctx context.Context, filterSpec *ethtypes.EthFilterSpec) (filter.EventFilter, error) { +type parsedFilter struct { + minHeight abi.ChainEpoch + maxHeight abi.ChainEpoch + tipsetCid cid.Cid + addresses []address.Address + keys map[string][]types.ActorEventBlock +} + +func (e *EthEventHandler) parseEthFilterSpec(ctx context.Context, filterSpec *ethtypes.EthFilterSpec) (*parsedFilter, error) { var ( minHeight abi.ChainEpoch maxHeight abi.ChainEpoch @@ -1405,7 +1507,13 @@ func (e *EthEventHandler) installEthFilterSpec(ctx context.Context, filterSpec * return nil, err } - return e.EventFilterManager.Install(ctx, minHeight, maxHeight, tipsetCid, addresses, keysToKeysWithCodec(keys), true) + return &parsedFilter{ + minHeight: minHeight, + maxHeight: maxHeight, + tipsetCid: tipsetCid, + addresses: addresses, + keys: keysToKeysWithCodec(keys), + }, nil } func keysToKeysWithCodec(keys map[string][][]byte) map[string][]types.ActorEventBlock { @@ -1426,11 +1534,16 @@ func (e *EthEventHandler) EthNewFilter(ctx context.Context, filterSpec *ethtypes return ethtypes.EthFilterID{}, api.ErrNotSupported } - f, err := e.installEthFilterSpec(ctx, filterSpec) + pf, err := e.parseEthFilterSpec(ctx, filterSpec) if err != nil { return ethtypes.EthFilterID{}, err } + f, err := e.EventFilterManager.Install(ctx, pf.minHeight, pf.maxHeight, pf.tipsetCid, pf.addresses, pf.keys, true) + if err != nil { + return ethtypes.EthFilterID{}, xerrors.Errorf("failed to install event filter: %w", err) + } + if err := e.FilterStore.Add(ctx, f); err != nil { // Could not record in store, attempt to delete filter to clean up err2 := e.TipSetFilterManager.Remove(ctx, f.ID()) From eb1620266e9c443eb6ee0213826790c104643d4d Mon Sep 17 
00:00:00 2001 From: Steven Allen Date: Wed, 3 Jul 2024 01:58:34 +0000 Subject: [PATCH 2/8] feat: api: sanity check the "to" address of outgoing messages (#12135) * feat: api: sanity check the "to" address of outgoing messages If the "to" address of an outgoing message is a _delegated_ address, verify that it maps to a valid Ethereum address. This isn't a consensus critical change, but it'll help prevent client-side address conversion libraries from directing messages into oblivion (e.g., by mis-translating `0xff0000....` addresses into `f410f...` addresses instead of `f0...` addresses. * tests for invalid delegated addresses * fix lint --------- Co-authored-by: aarshkshah1992 --- itests/mempool_test.go | 28 +++++++++++++++ node/impl/full/mpool.go | 42 ++++++++++++++++++++++ node/impl/full/mpool_test.go | 67 ++++++++++++++++++++++++++++++++++++ 3 files changed, 137 insertions(+) create mode 100644 node/impl/full/mpool_test.go diff --git a/itests/mempool_test.go b/itests/mempool_test.go index f07b46a737c..e366fe49bb4 100644 --- a/itests/mempool_test.go +++ b/itests/mempool_test.go @@ -8,6 +8,7 @@ import ( "github.com/stretchr/testify/require" + "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/lotus/api" @@ -18,6 +19,33 @@ import ( const mPoolThrottle = time.Millisecond * 100 const mPoolTimeout = time.Second * 10 +func TestMemPoolPushOutgoingInvalidDelegated(t *testing.T) { + //stm: @CHAIN_MEMPOOL_PENDING_001, @CHAIN_STATE_WAIT_MSG_001, @CHAIN_MEMPOOL_CAP_GAS_FEE_001 + //stm: @CHAIN_MEMPOOL_PUSH_002 + ctx := context.Background() + firstNode, _, _, ens := kit.EnsembleTwoOne(t, kit.MockProofs()) + ens.InterconnectAll() + kit.QuietMiningLogs() + + sender := firstNode.DefaultKey.Address + badTo, err := address.NewFromString("f410f74aaaaaaaaaaaaaaaaaaaaaaaaac5sh2bf3lgta") + require.NoError(t, err) + + bal, err := firstNode.WalletBalance(ctx, sender) + require.NoError(t, err) + toSend := big.Div(bal, 
big.NewInt(10)) + + msg := &types.Message{ + From: sender, + Value: toSend, + To: badTo, + } + + _, err = firstNode.MpoolPushMessage(ctx, msg, nil) + require.Error(t, err) + require.Contains(t, err.Error(), "is a delegated address but not a valid Eth Address") +} + func TestMemPoolPushSingleNode(t *testing.T) { //stm: @CHAIN_MEMPOOL_CREATE_MSG_CHAINS_001, @CHAIN_MEMPOOL_SELECT_001 //stm: @CHAIN_MEMPOOL_PENDING_001, @CHAIN_STATE_WAIT_MSG_001, @CHAIN_MEMPOOL_CAP_GAS_FEE_001 diff --git a/node/impl/full/mpool.go b/node/impl/full/mpool.go index fac48a3508f..13b7665e059 100644 --- a/node/impl/full/mpool.go +++ b/node/impl/full/mpool.go @@ -16,6 +16,7 @@ import ( "github.com/filecoin-project/lotus/chain/messagepool" "github.com/filecoin-project/lotus/chain/messagesigner" "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/chain/types/ethtypes" "github.com/filecoin-project/lotus/node/modules/dtypes" ) @@ -131,14 +132,24 @@ func (a *MpoolAPI) MpoolClear(ctx context.Context, local bool) error { } func (m *MpoolModule) MpoolPush(ctx context.Context, smsg *types.SignedMessage) (cid.Cid, error) { + if err := sanityCheckOutgoingMessage(&smsg.Message); err != nil { + return cid.Undef, xerrors.Errorf("message %s from %s with nonce %d failed sanity check: %w", smsg.Cid(), smsg.Message.From, smsg.Message.Nonce, err) + } return m.Mpool.Push(ctx, smsg, true) } func (a *MpoolAPI) MpoolPushUntrusted(ctx context.Context, smsg *types.SignedMessage) (cid.Cid, error) { + if err := sanityCheckOutgoingMessage(&smsg.Message); err != nil { + return cid.Undef, xerrors.Errorf("message %s from %s with nonce %d failed sanity check: %w", smsg.Cid(), smsg.Message.From, smsg.Message.Nonce, err) + } return a.Mpool.PushUntrusted(ctx, smsg) } func (a *MpoolAPI) MpoolPushMessage(ctx context.Context, msg *types.Message, spec *api.MessageSendSpec) (*types.SignedMessage, error) { + if err := sanityCheckOutgoingMessage(msg); err != nil { + return nil, xerrors.Errorf("message 
from %s failed sanity check: %w", msg.From, err) + } + cp := *msg msg = &cp inMsg := *msg @@ -223,6 +234,11 @@ func (a *MpoolAPI) MpoolPushMessage(ctx context.Context, msg *types.Message, spe } func (a *MpoolAPI) MpoolBatchPush(ctx context.Context, smsgs []*types.SignedMessage) ([]cid.Cid, error) { + for _, msg := range smsgs { + if err := sanityCheckOutgoingMessage(&msg.Message); err != nil { + return nil, xerrors.Errorf("message %s from %s with nonce %d failed sanity check: %w", msg.Cid(), msg.Message.From, msg.Message.Nonce, err) + } + } var messageCids []cid.Cid for _, smsg := range smsgs { smsgCid, err := a.Mpool.Push(ctx, smsg, true) @@ -235,6 +251,11 @@ func (a *MpoolAPI) MpoolBatchPush(ctx context.Context, smsgs []*types.SignedMess } func (a *MpoolAPI) MpoolBatchPushUntrusted(ctx context.Context, smsgs []*types.SignedMessage) ([]cid.Cid, error) { + for _, msg := range smsgs { + if err := sanityCheckOutgoingMessage(&msg.Message); err != nil { + return nil, xerrors.Errorf("message %s from %s with nonce %d failed sanity check: %w", msg.Cid(), msg.Message.From, msg.Message.Nonce, err) + } + } var messageCids []cid.Cid for _, smsg := range smsgs { smsgCid, err := a.Mpool.PushUntrusted(ctx, smsg) @@ -247,6 +268,11 @@ func (a *MpoolAPI) MpoolBatchPushUntrusted(ctx context.Context, smsgs []*types.S } func (a *MpoolAPI) MpoolBatchPushMessage(ctx context.Context, msgs []*types.Message, spec *api.MessageSendSpec) ([]*types.SignedMessage, error) { + for i, msg := range msgs { + if err := sanityCheckOutgoingMessage(msg); err != nil { + return nil, xerrors.Errorf("message #%d from %s with failed sanity check: %w", i, msg.From, err) + } + } var smsgs []*types.SignedMessage for _, msg := range msgs { smsg, err := a.MpoolPushMessage(ctx, msg, spec) @@ -277,3 +303,19 @@ func (a *MpoolAPI) MpoolGetNonce(ctx context.Context, addr address.Address) (uin func (a *MpoolAPI) MpoolSub(ctx context.Context) (<-chan api.MpoolUpdate, error) { return a.Mpool.Updates(ctx) } + +func 
sanityCheckOutgoingMessage(msg *types.Message) error { + // Check that the message's TO address is a _valid_ Eth address if it's a delegated address. + // + // It's legal (from a consensus perspective) to send funds to any 0xf410f address as long as + // the payload is at most 54 bytes, but the vast majority of this address space is + // essentially a black-hole. Unfortunately, the conversion from 0x addresses to Filecoin + // native addresses has a few pitfalls (especially with respect to masked ID addresses), so + // we've added this check to the API to avoid accidentally (and avoidably) sending messages + // to these black-hole addresses. + if msg.To.Protocol() == address.Delegated && !ethtypes.IsEthAddress(msg.To) { + return xerrors.Errorf("message recipient %s is a delegated address but not a valid Eth Address", msg.To) + } + + return nil +} diff --git a/node/impl/full/mpool_test.go b/node/impl/full/mpool_test.go new file mode 100644 index 00000000000..c8e44edbfd8 --- /dev/null +++ b/node/impl/full/mpool_test.go @@ -0,0 +1,67 @@ +package full + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/crypto" + + "github.com/filecoin-project/lotus/chain/types" +) + +func TestSanityCheckOutgoingMessage(t *testing.T) { + // fails for invalid delegated address + badTo, err := address.NewFromString("f410f74aaaaaaaaaaaaaaaaaaaaaaaaac5sh2bf3lgta") + require.NoError(t, err) + msg := &types.Message{ + To: badTo, + } + + err = sanityCheckOutgoingMessage(msg) + require.Error(t, err) + require.Contains(t, err.Error(), "is a delegated address but not a valid Eth Address") + + // works for valid delegated address + goodTo, err := address.NewFromString("f410faxfebiima2gp4lduo2k3vt2iuqapuk3logeftky") + require.NoError(t, err) + msg = &types.Message{ + To: goodTo, + } + err = sanityCheckOutgoingMessage(msg) + require.NoError(t, err) + + // works for valid non-delegated 
address + goodTo, err = address.NewFromString("f1z762skeib2v6zlkvhywmjxbv3dxoiv4hmb6gs4y") + require.NoError(t, err) + msg = &types.Message{ + To: goodTo, + } + err = sanityCheckOutgoingMessage(msg) + require.NoError(t, err) +} + +func TestMpoolPushInvalidDelegatedAddressFails(t *testing.T) { + badTo, err := address.NewFromString("f410f74aaaaaaaaaaaaaaaaaaaaaaaaac5sh2bf3lgta") + require.NoError(t, err) + module := &MpoolModule{} + m := &MpoolAPI{ + MpoolModuleAPI: module, + } + smsg := &types.SignedMessage{ + Message: types.Message{ + From: badTo, + To: badTo, + }, + Signature: crypto.Signature{ + Type: crypto.SigTypeSecp256k1, + Data: []byte("signature"), + }, + } + _, err = m.MpoolPush(context.Background(), smsg) + require.Error(t, err) + + require.Contains(t, err.Error(), "is a delegated address but not a valid Eth Address") +} From c5504284a3a33becdb8649a7984ad3350f617dc7 Mon Sep 17 00:00:00 2001 From: Peter Rabbitson Date: Sat, 15 Jun 2024 03:35:50 +0200 Subject: [PATCH 3/8] chore: deps: remove leftover curio `replace` (#12094) Move the rest of the replace's to the front of go.mod for visibility --- go.mod | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/go.mod b/go.mod index b4f7d335247..37f900c723e 100644 --- a/go.mod +++ b/go.mod @@ -6,6 +6,10 @@ retract v1.14.0 // Accidentally force-pushed tag, use v1.14.1+ instead. retract v1.20.2 // Wrongfully cherry picked PR, use v1.20.2+ instead. 
+replace github.com/filecoin-project/filecoin-ffi => ./extern/filecoin-ffi // provided via a git submodule + +replace github.com/filecoin-project/test-vectors => ./extern/test-vectors // provided via a git submodule + require ( contrib.go.opencensus.io/exporter/prometheus v0.4.2 github.com/BurntSushi/toml v1.3.0 @@ -328,11 +332,3 @@ require ( howett.net/plist v0.0.0-20181124034731-591f970eefbb // indirect lukechampine.com/blake3 v1.3.0 // indirect ) - -// https://github.com/magik6k/reflink/commit/cff5a40f3eeca17f44fc95a57ff3878e5ac761dc -// https://github.com/KarpelesLab/reflink/pull/2 -replace github.com/KarpelesLab/reflink => github.com/magik6k/reflink v1.0.2-patch1 - -replace github.com/filecoin-project/filecoin-ffi => ./extern/filecoin-ffi - -replace github.com/filecoin-project/test-vectors => ./extern/test-vectors From fcec55853347c4cee7017c0f2bd23a20f94e4036 Mon Sep 17 00:00:00 2001 From: Phi-rjan Date: Tue, 18 Jun 2024 07:37:33 +0200 Subject: [PATCH 4/8] Update bootstrap list to support both IPv4 and IPv6 (#12103) Update bootstrap list to support both IPv4 and IPv6 --- build/bootstrap/calibnet.pi | 8 ++++---- build/bootstrap/mainnet.pi | 12 ++++++------ 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/build/bootstrap/calibnet.pi b/build/bootstrap/calibnet.pi index 0a14d24baeb..f48e698934b 100644 --- a/build/bootstrap/calibnet.pi +++ b/build/bootstrap/calibnet.pi @@ -1,4 +1,4 @@ -/dns4/calibration.node.glif.io/tcp/1237/p2p/12D3KooWQPYouEAsUQKzvFUA9sQ8tz4rfpqtTzh2eL6USd9bwg7x -/dns4/bootstrap-calibnet-0.chainsafe-fil.io/tcp/34000/p2p/12D3KooWABQ5gTDHPWyvhJM7jPhtNwNJruzTEo32Lo4gcS5ABAMm -/dns4/bootstrap-calibnet-1.chainsafe-fil.io/tcp/34000/p2p/12D3KooWS3ZRhMYL67b4bD5XQ6fcpTyVQXnDe8H89LvwrDqaSbiT -/dns4/bootstrap-calibnet-2.chainsafe-fil.io/tcp/34000/p2p/12D3KooWEiBN8jBX8EBoM3M47pVRLRWV812gDRUJhMxgyVkUoR48 +/dns/calibration.node.glif.io/tcp/1237/p2p/12D3KooWQPYouEAsUQKzvFUA9sQ8tz4rfpqtTzh2eL6USd9bwg7x 
+/dns/bootstrap-calibnet-0.chainsafe-fil.io/tcp/34000/p2p/12D3KooWABQ5gTDHPWyvhJM7jPhtNwNJruzTEo32Lo4gcS5ABAMm +/dns/bootstrap-calibnet-1.chainsafe-fil.io/tcp/34000/p2p/12D3KooWS3ZRhMYL67b4bD5XQ6fcpTyVQXnDe8H89LvwrDqaSbiT +/dns/bootstrap-calibnet-2.chainsafe-fil.io/tcp/34000/p2p/12D3KooWEiBN8jBX8EBoM3M47pVRLRWV812gDRUJhMxgyVkUoR48 diff --git a/build/bootstrap/mainnet.pi b/build/bootstrap/mainnet.pi index 2afe6f29750..96fd9e4b3ff 100644 --- a/build/bootstrap/mainnet.pi +++ b/build/bootstrap/mainnet.pi @@ -1,6 +1,6 @@ -/dns4/node.glif.io/tcp/1235/p2p/12D3KooWBF8cpp65hp2u9LK5mh19x67ftAam84z9LsfaquTDSBpt -/dns4/bootstarp-0.1475.io/tcp/61256/p2p/12D3KooWRzCVDwHUkgdK7eRgnoXbjDAELhxPErjHzbRLguSV1aRt -/dns4/bootstrap-venus.mainnet.filincubator.com/tcp/8888/p2p/QmQu8C6deXwKvJP2D8B6QGyhngc3ZiDnFzEHBDx8yeBXST -/dns4/bootstrap-mainnet-0.chainsafe-fil.io/tcp/34000/p2p/12D3KooWKKkCZbcigsWTEu1cgNetNbZJqeNtysRtFpq7DTqw3eqH -/dns4/bootstrap-mainnet-1.chainsafe-fil.io/tcp/34000/p2p/12D3KooWGnkd9GQKo3apkShQDaq1d6cKJJmsVe6KiQkacUk1T8oZ -/dns4/bootstrap-mainnet-2.chainsafe-fil.io/tcp/34000/p2p/12D3KooWHQRSDFv4FvAjtU32shQ7znz7oRbLBryXzZ9NMK2feyyH +/dns/node.glif.io/tcp/1235/p2p/12D3KooWBF8cpp65hp2u9LK5mh19x67ftAam84z9LsfaquTDSBpt +/dns/bootstarp-0.1475.io/tcp/61256/p2p/12D3KooWRzCVDwHUkgdK7eRgnoXbjDAELhxPErjHzbRLguSV1aRt +/dns/bootstrap-venus.mainnet.filincubator.com/tcp/8888/p2p/QmQu8C6deXwKvJP2D8B6QGyhngc3ZiDnFzEHBDx8yeBXST +/dns/bootstrap-mainnet-0.chainsafe-fil.io/tcp/34000/p2p/12D3KooWKKkCZbcigsWTEu1cgNetNbZJqeNtysRtFpq7DTqw3eqH +/dns/bootstrap-mainnet-1.chainsafe-fil.io/tcp/34000/p2p/12D3KooWGnkd9GQKo3apkShQDaq1d6cKJJmsVe6KiQkacUk1T8oZ +/dns/bootstrap-mainnet-2.chainsafe-fil.io/tcp/34000/p2p/12D3KooWHQRSDFv4FvAjtU32shQ7znz7oRbLBryXzZ9NMK2feyyH From 6891f56d66787565bef7ab7f4ee0bbf91b363424 Mon Sep 17 00:00:00 2001 From: Mikers Date: Thu, 20 Jun 2024 15:38:21 -1000 Subject: [PATCH 5/8] feat: eth: support "safe" and "finalized" for eth_getBlockByNumber (#12110) * add support for 
eth_getBlockByNumber to accept the term safe which we are using as 30 blocks * fix lint catch of unnecessary cast * add finalized to get block by number * Update chain/types/ethtypes/eth_types.go Co-authored-by: Rod Vagg * add test for eth get block by number to accept latest and safe and finalized as arguments --------- Co-authored-by: Rod Vagg --- chain/types/ethtypes/eth_types.go | 6 ++++++ gateway/proxy_eth.go | 5 +++++ itests/eth_api_test.go | 36 +++++++++++++++++++++++++++++++ node/impl/full/eth_utils.go | 16 ++++++++++++++ 4 files changed, 63 insertions(+) diff --git a/chain/types/ethtypes/eth_types.go b/chain/types/ethtypes/eth_types.go index 3c2b9bec031..251d8d501e9 100644 --- a/chain/types/ethtypes/eth_types.go +++ b/chain/types/ethtypes/eth_types.go @@ -28,6 +28,12 @@ import ( var ErrInvalidAddress = errors.New("invalid Filecoin Eth address") +// Research into Filecoin chain behaviour suggests that probabilistic finality +// generally approaches the intended stability guarantee at, or near, 30 epochs. +// Although a strictly "finalized" safe recommendation remains 900 epochs. 
+// See https://github.com/filecoin-project/FIPs/blob/master/FRCs/frc-0089.md +const SafeEpochDelay = abi.ChainEpoch(30) + type EthUint64 uint64 func (e EthUint64) MarshalJSON() ([]byte, error) { diff --git a/gateway/proxy_eth.go b/gateway/proxy_eth.go index eca6ae2bf41..55780c53a20 100644 --- a/gateway/proxy_eth.go +++ b/gateway/proxy_eth.go @@ -17,6 +17,7 @@ import ( "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/events/filter" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types/ethtypes" @@ -142,6 +143,10 @@ func (gw *Node) checkBlkParam(ctx context.Context, blkParam string, lookback eth break } num = ethtypes.EthUint64(head.Height()) - lookback + case "safe": + num = ethtypes.EthUint64(head.Height()) - lookback - ethtypes.EthUint64(ethtypes.SafeEpochDelay) + case "finalized": + num = ethtypes.EthUint64(head.Height()) - lookback - ethtypes.EthUint64(build.Finality) default: if err := num.UnmarshalJSON([]byte(`"` + blkParam + `"`)); err != nil { return fmt.Errorf("cannot parse block number: %v", err) diff --git a/itests/eth_api_test.go b/itests/eth_api_test.go index 43b4b526674..7b9f61662a7 100644 --- a/itests/eth_api_test.go +++ b/itests/eth_api_test.go @@ -124,3 +124,39 @@ func TestNetVersion(t *testing.T) { require.NoError(t, err) require.Equal(t, strconv.Itoa(build.Eip155ChainId), version) } + +func TestEthBlockNumberAliases(t *testing.T) { + blockTime := 2 * time.Millisecond + kit.QuietMiningLogs() + client, _, ens := kit.EnsembleMinimal(t, kit.MockProofs(), kit.ThroughRPC()) + ens.InterconnectAll().BeginMining(blockTime) + ens.Start() + + build.Clock.Sleep(time.Second) + + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + defer cancel() + + head := client.WaitTillChain(ctx, kit.HeightAtLeast(build.Finality+100)) + + // latest should be head-1 (parents) + latestEthBlk, 
err := client.EVM().EthGetBlockByNumber(ctx, "latest", true) + require.NoError(t, err) + diff := int64(latestEthBlk.Number) - int64(head.Height()-1) + require.GreaterOrEqual(t, diff, int64(0)) + require.LessOrEqual(t, diff, int64(2)) + + // safe should be latest-30 + safeEthBlk, err := client.EVM().EthGetBlockByNumber(ctx, "safe", true) + require.NoError(t, err) + diff = int64(latestEthBlk.Number-30) - int64(safeEthBlk.Number) + require.GreaterOrEqual(t, diff, int64(0)) + require.LessOrEqual(t, diff, int64(2)) + + // finalized should be Finality blocks behind latest + finalityEthBlk, err := client.EVM().EthGetBlockByNumber(ctx, "finalized", true) + require.NoError(t, err) + diff = int64(latestEthBlk.Number) - int64(build.Finality) - int64(finalityEthBlk.Number) + require.GreaterOrEqual(t, diff, int64(0)) + require.LessOrEqual(t, diff, int64(2)) +} diff --git a/node/impl/full/eth_utils.go b/node/impl/full/eth_utils.go index 56cc1e094e2..a5799b0dda4 100644 --- a/node/impl/full/eth_utils.go +++ b/node/impl/full/eth_utils.go @@ -57,6 +57,22 @@ func getTipsetByBlockNumber(ctx context.Context, chain *store.ChainStore, blkPar return nil, fmt.Errorf("cannot get parent tipset") } return parent, nil + case "safe": + latestHeight := head.Height() - 1 + safeHeight := latestHeight - ethtypes.SafeEpochDelay + ts, err := chain.GetTipsetByHeight(ctx, safeHeight, head, true) + if err != nil { + return nil, fmt.Errorf("cannot get tipset at height: %v", safeHeight) + } + return ts, nil + case "finalized": + latestHeight := head.Height() - 1 + safeHeight := latestHeight - build.Finality + ts, err := chain.GetTipsetByHeight(ctx, safeHeight, head, true) + if err != nil { + return nil, fmt.Errorf("cannot get tipset at height: %v", safeHeight) + } + return ts, nil default: var num ethtypes.EthUint64 err := num.UnmarshalJSON([]byte(`"` + blkParam + `"`)) From d0802cc6989984c50094b32c2eb2fd56f08539d2 Mon Sep 17 00:00:00 2001 From: Hubert Date: Mon, 24 Jun 2024 18:22:39 +0200 Subject: [PATCH 
6/8] fix: bootstrap: remove unmaintained bootstrap node (#12133) --- build/bootstrap/mainnet.pi | 1 - 1 file changed, 1 deletion(-) diff --git a/build/bootstrap/mainnet.pi b/build/bootstrap/mainnet.pi index 96fd9e4b3ff..a248288cc33 100644 --- a/build/bootstrap/mainnet.pi +++ b/build/bootstrap/mainnet.pi @@ -1,5 +1,4 @@ /dns/node.glif.io/tcp/1235/p2p/12D3KooWBF8cpp65hp2u9LK5mh19x67ftAam84z9LsfaquTDSBpt -/dns/bootstarp-0.1475.io/tcp/61256/p2p/12D3KooWRzCVDwHUkgdK7eRgnoXbjDAELhxPErjHzbRLguSV1aRt /dns/bootstrap-venus.mainnet.filincubator.com/tcp/8888/p2p/QmQu8C6deXwKvJP2D8B6QGyhngc3ZiDnFzEHBDx8yeBXST /dns/bootstrap-mainnet-0.chainsafe-fil.io/tcp/34000/p2p/12D3KooWKKkCZbcigsWTEu1cgNetNbZJqeNtysRtFpq7DTqw3eqH /dns/bootstrap-mainnet-1.chainsafe-fil.io/tcp/34000/p2p/12D3KooWGnkd9GQKo3apkShQDaq1d6cKJJmsVe6KiQkacUk1T8oZ From f4120fcf0d42dacc26fa12b26b30cb40cb56b85b Mon Sep 17 00:00:00 2001 From: Aarsh Shah Date: Wed, 19 Jun 2024 10:00:20 +0400 Subject: [PATCH 7/8] chore: ci: remove non-existent market tests from CI workflow (#12099) --- .github/workflows/test.yml | 16 ---------------- 1 file changed, 16 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 2a5648a54a5..54b65e11da8 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -61,7 +61,6 @@ jobs: # to support resource intensive jobs. 
runners: | { - "itest-deals_concurrent": ["self-hosted", "linux", "x64", "4xlarge"], "itest-sector_pledge": ["self-hosted", "linux", "x64", "4xlarge"], "itest-worker": ["self-hosted", "linux", "x64", "4xlarge"], @@ -71,15 +70,8 @@ jobs: "itest-wdpost": ["self-hosted", "linux", "x64", "2xlarge"], "unit-storage": ["self-hosted", "linux", "x64", "2xlarge"], - "itest-batch_deal": ["self-hosted", "linux", "x64", "xlarge"], "itest-cli": ["self-hosted", "linux", "x64", "xlarge"], - "itest-deals_512mb": ["self-hosted", "linux", "x64", "xlarge"], - "itest-deals_anycid": ["self-hosted", "linux", "x64", "xlarge"], "itest-deals_invalid_utf8_label": ["self-hosted", "linux", "x64", "xlarge"], - "itest-deals_max_staging_deals": ["self-hosted", "linux", "x64", "xlarge"], - "itest-deals_partial_retrieval": ["self-hosted", "linux", "x64", "xlarge"], - "itest-deals_publish": ["self-hosted", "linux", "x64", "xlarge"], - "itest-deals_remote_retrieval": ["self-hosted", "linux", "x64", "xlarge"], "itest-decode_params": ["self-hosted", "linux", "x64", "xlarge"], "itest-dup_mpool_messages": ["self-hosted", "linux", "x64", "xlarge"], "itest-eth_account_abstraction": ["self-hosted", "linux", "x64", "xlarge"], @@ -123,19 +115,12 @@ jobs: [ "conformance", "itest-api", - "itest-deals_offline", - "itest-deals_padding", - "itest-deals_partial_retrieval_dm-level", - "itest-deals_pricing", - "itest-deals", "itest-direct_data_onboard_verified", "itest-direct_data_onboard", "itest-manual_onboarding", "itest-net", "itest-path_detach_redeclare", - "itest-path_type_filters", "itest-sealing_resources", - "itest-sector_finalize_early", "itest-sector_import_full", "itest-sector_import_simple", "itest-sector_pledge", @@ -143,7 +128,6 @@ jobs: "itest-wdpost_no_miner_storage", "itest-wdpost_worker_config", "itest-wdpost", - "itest-worker_upgrade", "itest-worker", "multicore-sdr", "unit-cli", From 025e325dcbd9de346d914a4718a80e72b31a6822 Mon Sep 17 00:00:00 2001 From: Rod Vagg Date: Wed, 3 Jul 2024 12:50:13 
+1000 Subject: [PATCH 8/8] docs: lotus v1.27.2-rc1 prep --- CHANGELOG.md | 28 ++++++++++++++++++++++++++++ build/openrpc/full.json | 2 +- build/openrpc/gateway.json | 2 +- build/openrpc/miner.json | 2 +- build/openrpc/worker.json | 2 +- build/version.go | 2 +- documentation/en/cli-lotus.md | 2 +- 7 files changed, 34 insertions(+), 6 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b8e06a3e485..5d826390d4d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,34 @@ ## Improvements +# v1.27.2-rc1 / 2024-07-10 + +This is a release candidate of Lotus v1.27.2. This will be an OPTIONAL Lotus release. It contains some improvements that are relevant for node operators that are using or serving `eth_*` RPC methods. + +## ☢️ Upgrade Warnings ☢️ + +- This upgrade includes an additional migration to the events database. Node operators running Lotus with events turned on (off by default) may experience some delay in initial start-up as a minor database migration takes place. See [filecoin-project/lotus#12080](https://github.com/filecoin-project/lotus/pull/12080) for full details. 
+ +## Improvements + +- fix: events index: record processed epochs and tipsets for events and eth_get_log blocks till requested tipset has been indexed (#12080) ([filecoin-project/lotus#12080](https://github.com/filecoin-project/lotus/pull/12080)) +- feat: eth: support "safe" and "finalized" for eth_getBlockByNumber (#12110) ([filecoin-project/lotus#12110](https://github.com/filecoin-project/lotus/pull/12110)) +- feat: api: sanity check the "to" address of outgoing messages (#12135) ([filecoin-project/lotus#12135](https://github.com/filecoin-project/lotus/pull/12135)) +- chore: ci: remove non-existent market tests from CI workflow (#12099) ([filecoin-project/lotus#12099](https://github.com/filecoin-project/lotus/pull/12099)) +- fix: bootstrap: remove unmaintained bootstrap node (#12133) ([filecoin-project/lotus#12133](https://github.com/filecoin-project/lotus/pull/12133)) +- Update bootstrap list to support both IPv4 and IPv6 (#12103) ([filecoin-project/lotus#12103](https://github.com/filecoin-project/lotus/pull/12103)) + +Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| Aarsh Shah | 2 | +424/-28 | 4 | +| Steven Allen | 1 | +137/-0 | 3 | +| Mikers | 1 | +63/-0 | 4 | +| Phi-rjan | 1 | +10/-10 | 2 | +| Peter Rabbitson | 1 | +4/-8 | 1 | +| Hubert | 1 | +0/-1 | 1 | + # v1.27.1 / 2024-06-24 This release, v1.27.1, is an OPTIONAL lotus release. It is HIGHLY RECOMMENDED for node operators that are building Filecoin index off lotus! 
diff --git a/build/openrpc/full.json b/build/openrpc/full.json index c56ded102fd..ef2be8103b8 100644 --- a/build/openrpc/full.json +++ b/build/openrpc/full.json @@ -2,7 +2,7 @@ "openrpc": "1.2.6", "info": { "title": "Lotus RPC API", - "version": "1.27.1" + "version": "1.27.2-rc1" }, "methods": [ { diff --git a/build/openrpc/gateway.json b/build/openrpc/gateway.json index cc702401b35..56563b8a566 100644 --- a/build/openrpc/gateway.json +++ b/build/openrpc/gateway.json @@ -2,7 +2,7 @@ "openrpc": "1.2.6", "info": { "title": "Lotus RPC API", - "version": "1.27.1" + "version": "1.27.2-rc1" }, "methods": [ { diff --git a/build/openrpc/miner.json b/build/openrpc/miner.json index c5390cbe035..473489ef726 100644 --- a/build/openrpc/miner.json +++ b/build/openrpc/miner.json @@ -2,7 +2,7 @@ "openrpc": "1.2.6", "info": { "title": "Lotus RPC API", - "version": "1.27.1" + "version": "1.27.2-rc1" }, "methods": [ { diff --git a/build/openrpc/worker.json b/build/openrpc/worker.json index d962d914484..36e45fbaed6 100644 --- a/build/openrpc/worker.json +++ b/build/openrpc/worker.json @@ -2,7 +2,7 @@ "openrpc": "1.2.6", "info": { "title": "Lotus RPC API", - "version": "1.27.1" + "version": "1.27.2-rc1" }, "methods": [ { diff --git a/build/version.go b/build/version.go index ed0e0c84051..7e108d3f043 100644 --- a/build/version.go +++ b/build/version.go @@ -39,7 +39,7 @@ func BuildTypeString() string { } // NodeBuildVersion is the local build version of the Lotus daemon -const NodeBuildVersion string = "1.27.1" +const NodeBuildVersion string = "1.27.2-rc1" func NodeUserVersion() BuildVersion { if os.Getenv("LOTUS_VERSION_IGNORE_COMMIT") == "1" { diff --git a/documentation/en/cli-lotus.md b/documentation/en/cli-lotus.md index f7e504f00c0..ef77763fa66 100644 --- a/documentation/en/cli-lotus.md +++ b/documentation/en/cli-lotus.md @@ -7,7 +7,7 @@ USAGE: lotus [global options] command [command options] [arguments...] VERSION: - 1.27.1 + 1.27.2-rc1 COMMANDS: daemon Start a lotus daemon process