Squashed commit of the following:
commit 89c766a
Author: Willian Mitsuda <wmitsuda@gmail.com>
Date:   Fri Sep 8 03:03:47 2023 -0300

    First ots2 prototype

commit 674b77f
Author: ledgerwatch <akhounov@gmail.com>
Date:   Fri Jul 14 09:15:22 2023 +0100

    Bump patch version in stable to v2.48.1 (#7895)

    Co-authored-by: Alex Sharp <alexsharp@Alexs-MacBook-Pro-2.local>

commit 4c461ac
Author: Andrew Ashikhmin <34320705+yperbasis@users.noreply.github.com>
Date:   Fri Jul 14 09:15:15 2023 +0200

    Fix Gnosis sync from scratch (#7890)

    Fix an issue with historical block execution introduced by PR #7727

commit 084acc1
Author: Alex Sharp <akhounov@gmail.com>
Date:   Fri Jun 30 16:19:29 2023 +0100

    Stable modifications
wmitsuda committed Sep 15, 2023
1 parent 205eeda commit 390dacd
Showing 58 changed files with 7,428 additions and 21 deletions.
40 changes: 40 additions & 0 deletions README.md
@@ -1,3 +1,43 @@
# Otterscan v2.0.0-alpha1-enabled Erigon

This branch contains the experimental version of Erigon with Otterscan API **v2.0.0-alpha1**.

Compatibility:

- Erigon v2.48.1
- Otterscan v2.0.0-alpha1

## Warnings/disclaimers (READ THIS FIRST!!!)

This is an experimental build. Make sure you are aware of the following first:

- Don't use this build on production servers.
- Make sure you back up your Erigon node before trying this build (a minimal sketch follows this list).
- This version is made available in source-code form only.
- The DB model will change in future alphas. There will **NOT** be migration scripts; you'll need to restore your original Erigon node from a backup.
- For now it is compatible with Erigon 2 only, but the end goal is to support Erigon 3 only; we may make that switch at some point in the future.
- Enabling Otterscan v2 support produces extra data inside the Erigon DB, so you'll need more disk space than a regular Erigon node.
- On the first run of this patched build, the extra stages will build new indexes from genesis, so expect extra sync time. Once the node reaches the tip, the extra stages add negligible time per block iteration.
- **Alphas were not optimized for space/time (yet). The goal here was to implement the spec for all token/contract indexing support and prove it was doable.**
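
One simple way to do that backup: stop the node and copy the whole datadir before the first run of the patched build. A minimal sketch, assuming a hypothetical `/data/erigon` datadir (substitute whatever you pass to `--datadir`):

```sh
# Stop the node first, then snapshot the whole datadir.
# /data/erigon is an assumption; substitute your own --datadir path.
cp -a /data/erigon /data/erigon-backup-$(date +%Y%m%d)
```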

## How to use it

### Erigon

Build this branch as usual with the `make` command.

Add the `--experimental.ots2` CLI argument to the `erigon` command; this enables the Otterscan v2 extra stages.

Also enable the `ots2` API namespace, e.g. `--http.api "eth,erigon,ots,ots2"`.
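
Putting it together, a typical invocation might look like the following sketch (the `--datadir` value is a placeholder, and the binary path assumes the standard `make` output location):

```sh
make erigon
./build/bin/erigon \
  --datadir=/data/erigon \
  --experimental.ots2 \
  --http.api "eth,erigon,ots,ots2"
```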

### Otterscan

See: https://github.com/otterscan/otterscan/blob/develop/docs/ots2.md

> The rest of this document contains the original Erigon README.

---

# Erigon

Erigon is an implementation of Ethereum (execution layer with embeddable consensus layer), on the efficiency frontier.
96 changes: 96 additions & 0 deletions cmd/erigon-el/stages/stages.go
@@ -0,0 +1,96 @@
package stagedsync

import (
"context"

proto_downloader "github.com/ledgerwatch/erigon-lib/gointerfaces/downloader"
"github.com/ledgerwatch/erigon-lib/kv"
"github.com/ledgerwatch/erigon-lib/state"
"github.com/ledgerwatch/log/v3"

"github.com/ledgerwatch/erigon/cmd/sentry/sentry"
"github.com/ledgerwatch/erigon/core/rawdb/blockio"
"github.com/ledgerwatch/erigon/core/vm"
"github.com/ledgerwatch/erigon/eth/ethconfig"
"github.com/ledgerwatch/erigon/eth/stagedsync"
"github.com/ledgerwatch/erigon/ethdb/prune"
"github.com/ledgerwatch/erigon/p2p"
"github.com/ledgerwatch/erigon/turbo/engineapi"
"github.com/ledgerwatch/erigon/turbo/services"
"github.com/ledgerwatch/erigon/turbo/shards"
)

// nullStage is a no-op stage Forward function, used below to disable
// stages that are not needed in this sync configuration.
func nullStage(firstCycle bool, badBlockUnwind bool, s *stagedsync.StageState, u stagedsync.Unwinder, tx kv.RwTx, logger log.Logger) error {
    return nil
}

func ExecutionStages(ctx context.Context, sm prune.Mode, snapshots stagedsync.SnapshotsCfg, headers stagedsync.HeadersCfg, cumulativeIndex stagedsync.CumulativeIndexCfg, blockHashCfg stagedsync.BlockHashesCfg, bodies stagedsync.BodiesCfg, senders stagedsync.SendersCfg, exec stagedsync.ExecuteBlockCfg, hashState stagedsync.HashStateCfg, trieCfg stagedsync.TrieCfg, history stagedsync.HistoryCfg, logIndex stagedsync.LogIndexCfg, callTraces stagedsync.CallTracesCfg, txLookup stagedsync.TxLookupCfg, finish stagedsync.FinishCfg, caCfg stagedsync.ContractAnalyzerCfg, ots2Enabled bool, test bool) []*stagedsync.Stage {
    defaultStages := stagedsync.DefaultStages(ctx, snapshots, headers, cumulativeIndex, blockHashCfg, bodies, senders, exec, hashState, trieCfg, history, logIndex, callTraces, txLookup, finish, caCfg, ots2Enabled, test)
    // Disable the headers and bodies stages by replacing their Forward
    // functions with a no-op (indices follow the DefaultStages order).
    defaultStages[1].Forward = nullStage
    defaultStages[4].Forward = nullStage
    return defaultStages
}

func NewStagedSync(
    ctx context.Context,
    db kv.RwDB,
    p2pCfg p2p.Config,
    cfg *ethconfig.Config,
    controlServer *sentry.MultiClient,
    notifications *shards.Notifications,
    snapDownloader proto_downloader.DownloaderClient,
    agg *state.AggregatorV3,
    forkValidator *engineapi.ForkValidator,
    logger log.Logger,
    blockReader services.FullBlockReader,
    blockWriter *blockio.BlockWriter,
    blockRetire services.BlockRetire,
) (*stagedsync.Sync, error) {
    dirs := cfg.Dirs

    // During import, we don't want other services (header requests, body
    // requests, etc.) to be running; hence we run in test mode.
    runInTestMode := cfg.ImportMode

    return stagedsync.New(
        ExecutionStages(ctx, cfg.Prune,
            stagedsync.StageSnapshotsCfg(db, *controlServer.ChainConfig, dirs, blockRetire, snapDownloader, blockReader, notifications.Events, cfg.HistoryV3, agg),
            stagedsync.StageHeadersCfg(db, controlServer.Hd, controlServer.Bd, *controlServer.ChainConfig, controlServer.SendHeaderRequest, controlServer.PropagateNewBlockHashes, controlServer.Penalize, cfg.BatchSize, p2pCfg.NoDiscovery, blockReader, blockWriter, dirs.Tmp, notifications, forkValidator),
            stagedsync.StageCumulativeIndexCfg(db, blockReader),
            stagedsync.StageBlockHashesCfg(db, dirs.Tmp, controlServer.ChainConfig, blockWriter),
            stagedsync.StageBodiesCfg(db, controlServer.Bd, controlServer.SendBodyRequest, controlServer.Penalize, controlServer.BroadcastNewBlock, cfg.Sync.BodyDownloadTimeoutSeconds, *controlServer.ChainConfig, blockReader, cfg.HistoryV3, blockWriter),
            stagedsync.StageSendersCfg(db, controlServer.ChainConfig, false, dirs.Tmp, cfg.Prune, blockReader, controlServer.Hd),
            stagedsync.StageExecuteBlocksCfg(
                db,
                cfg.Prune,
                cfg.BatchSize,
                nil,
                controlServer.ChainConfig,
                controlServer.Engine,
                &vm.Config{},
                notifications.Accumulator,
                cfg.StateStream,
                /*stateStream=*/ false,
                cfg.HistoryV3,
                dirs,
                blockReader,
                controlServer.Hd,
                cfg.Genesis,
                cfg.Sync,
                agg,
            ),
            stagedsync.StageHashStateCfg(db, dirs, cfg.HistoryV3),
            stagedsync.StageTrieCfg(db, true, true, false, dirs.Tmp, blockReader, controlServer.Hd, cfg.HistoryV3, agg),
            stagedsync.StageHistoryCfg(db, cfg.Prune, dirs.Tmp),
            stagedsync.StageLogIndexCfg(db, cfg.Prune, dirs.Tmp),
            stagedsync.StageCallTracesCfg(db, cfg.Prune, 0, dirs.Tmp),
            stagedsync.StageTxLookupCfg(db, cfg.Prune, dirs.Tmp, controlServer.ChainConfig.Bor, blockReader),
            stagedsync.StageFinishCfg(db, dirs.Tmp, forkValidator),
            stagedsync.StageDbAwareCfg(db, dirs.Tmp, controlServer.ChainConfig, blockReader, controlServer.Engine),
            cfg.Ots2,
            runInTestMode),
        stagedsync.DefaultUnwindOrder,
        stagedsync.DefaultPruneOrder,
        logger,
    ), nil
}