diff --git a/.gitmodules b/.gitmodules index 241c169c4772..9e4fa0d0624d 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,8 +1,11 @@ -[submodule "tests"] +[submodule "tests/testdata"] path = tests/testdata url = https://github.com/ethereum/tests shallow = true -[submodule "evm-benchmarks"] +[submodule "tests/evm-benchmarks"] path = tests/evm-benchmarks url = https://github.com/ipsilon/evm-benchmarks shallow = true +[submodule "tests/testdata-etc"] + path = tests/testdata-etc + url = https://github.com/etclabscore/tests-etc.git diff --git a/cmd/geth/main.go b/cmd/geth/main.go index a94c0c17db3b..a306a669ba8a 100644 --- a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -275,6 +275,9 @@ func main() { func prepare(ctx *cli.Context) { // If we're running a known preset, log it for convenience. switch { + case ctx.IsSet(utils.ClassicFlag.Name): + log.Info("Starting Geth on Ethereum Classic (ETC) mainnet...") + case ctx.IsSet(utils.RopstenFlag.Name): log.Info("Starting Geth on Ropsten testnet...") diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 974c03579507..ae620f223765 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -1015,7 +1015,7 @@ var ( KilnFlag, } // NetworkFlags is the flag group of all built-in supported networks. - NetworkFlags = append([]cli.Flag{MainnetFlag}, TestnetFlags...) + NetworkFlags = append([]cli.Flag{MainnetFlag, ClassicFlag}, TestnetFlags...) // DatabasePathFlags is the flag group of all database path flags. DatabasePathFlags = []cli.Flag{ @@ -1031,6 +1031,9 @@ var ( // then a subdirectory of the specified datadir will be used. func MakeDataDir(ctx *cli.Context) string { if path := ctx.String(DataDirFlag.Name); path != "" { + if ctx.Bool(ClassicFlag.Name) { + return filepath.Join(path, "classic") + } if ctx.Bool(RopstenFlag.Name) { // Maintain compatibility with older Geth configurations storing the // Ropsten database in `testnet` instead of `ropsten`. 
@@ -1094,6 +1097,8 @@ func setBootstrapNodes(ctx *cli.Context, cfg *p2p.Config) { switch { case ctx.IsSet(BootnodesFlag.Name): urls = SplitAndTrim(ctx.String(BootnodesFlag.Name)) + case ctx.Bool(ClassicFlag.Name): + urls = params.ClassicBootnodes case ctx.Bool(RopstenFlag.Name): urls = params.RopstenBootnodes case ctx.Bool(SepoliaFlag.Name): @@ -1554,6 +1559,8 @@ func SetDataDir(ctx *cli.Context, cfg *node.Config) { } cfg.DataDir = filepath.Join(node.DefaultDataDir(), "ropsten") + case ctx.Bool(ClassicFlag.Name) && cfg.DataDir == node.DefaultDataDir(): + cfg.DataDir = filepath.Join(node.DefaultDataDir(), "classic") case ctx.Bool(RinkebyFlag.Name) && cfg.DataDir == node.DefaultDataDir(): cfg.DataDir = filepath.Join(node.DefaultDataDir(), "rinkeby") case ctx.Bool(GoerliFlag.Name) && cfg.DataDir == node.DefaultDataDir(): @@ -1752,7 +1759,7 @@ func CheckExclusive(ctx *cli.Context, args ...interface{}) { // SetEthConfig applies eth-related command line flags to the config. func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) { // Avoid conflicting network flags - CheckExclusive(ctx, MainnetFlag, DeveloperFlag, RopstenFlag, RinkebyFlag, GoerliFlag, SepoliaFlag, KilnFlag) + CheckExclusive(ctx, MainnetFlag, ClassicFlag, DeveloperFlag, RopstenFlag, RinkebyFlag, GoerliFlag, SepoliaFlag, KilnFlag) CheckExclusive(ctx, LightServeFlag, SyncModeFlag, "light") CheckExclusive(ctx, DeveloperFlag, ExternalSignerFlag) // Can't use both ephemeral unlocked and external signer if ctx.String(GCModeFlag.Name) == "archive" && ctx.Uint64(TxLookupLimitFlag.Name) != 0 { @@ -1912,6 +1919,25 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) { } cfg.Genesis = core.DefaultGenesisBlock() SetDNSDiscoveryDefaults(cfg, params.MainnetGenesisHash) + case ctx.Bool(ClassicFlag.Name): + if !ctx.IsSet(NetworkIdFlag.Name) { + cfg.NetworkId = 1 + } + cfg.Genesis = core.DefaultClassicGenesisBlock() + + // Only configure if not already established through 
flags/config + if cfg.EthDiscoveryURLs == nil { + url := params.ClassicDNS + if cfg.SyncMode == downloader.LightSync { + url = strings.ReplaceAll(url, "all", "les") + } + cfg.EthDiscoveryURLs = []string{url} + cfg.SnapDiscoveryURLs = cfg.EthDiscoveryURLs + } + + // Set ECIP-1099 block number for Ethash config. + u := params.ECIP1099Block_Classic.Uint64() + cfg.Ethash.ECIP1099Block = &u case ctx.Bool(RopstenFlag.Name): if !ctx.IsSet(NetworkIdFlag.Name) { cfg.NetworkId = 3 @@ -2238,6 +2264,8 @@ func MakeGenesis(ctx *cli.Context) *core.Genesis { switch { case ctx.Bool(MainnetFlag.Name): genesis = core.DefaultGenesisBlock() + case ctx.Bool(ClassicFlag.Name): + genesis = core.DefaultClassicGenesisBlock() case ctx.Bool(RopstenFlag.Name): genesis = core.DefaultRopstenGenesisBlock() case ctx.Bool(SepoliaFlag.Name): diff --git a/cmd/utils/flags_classic.go b/cmd/utils/flags_classic.go new file mode 100644 index 000000000000..f5cf36662400 --- /dev/null +++ b/cmd/utils/flags_classic.go @@ -0,0 +1,14 @@ +package utils + +import ( + "github.com/ethereum/go-ethereum/internal/flags" + "github.com/urfave/cli/v2" +) + +var ( + ClassicFlag = &cli.BoolFlag{ + Name: "classic", + Usage: "Ethereum Classic (ETC) mainnet", + Category: flags.EthCategory, + } +) diff --git a/common/hasher.go b/common/hasher.go new file mode 100644 index 000000000000..87cb09a309fa --- /dev/null +++ b/common/hasher.go @@ -0,0 +1,58 @@ +package common + +import ( + "fmt" + "hash" + + "golang.org/x/crypto/sha3" +) + +// keccakState wraps sha3.state. In addition to the usual hash methods, it also supports +// Read to get a variable amount of data from the hash state. Read is faster than Sum +// because it doesn't copy the internal state, but also modifies the internal state. 
+type keccakState interface { + hash.Hash + Read([]byte) (int, error) +} + +type Hasher struct { + Sha keccakState +} + +var hasherPool = make(chan *Hasher, 128) + +func NewHasher() *Hasher { + var h *Hasher + select { + case h = <-hasherPool: + default: + h = &Hasher{Sha: sha3.NewLegacyKeccak256().(keccakState)} + } + return h +} + +func ReturnHasherToPool(h *Hasher) { + select { + case hasherPool <- h: + default: + fmt.Printf("Allowing Hasher to be garbage collected, pool is full\n") + } +} + +func HashData(data []byte) (Hash, error) { + h := NewHasher() + defer ReturnHasherToPool(h) + h.Sha.Reset() + + _, err := h.Sha.Write(data) + if err != nil { + return Hash{}, err + } + + var buf Hash + _, err = h.Sha.Read(buf[:]) + if err != nil { + return Hash{}, err + } + return buf, nil +} diff --git a/consensus/ethash/algorithm.go b/consensus/ethash/algorithm.go index d53918382283..1c0733901cb4 100644 --- a/consensus/ethash/algorithm.go +++ b/consensus/ethash/algorithm.go @@ -50,10 +50,9 @@ const ( // cacheSize returns the size of the ethash verification cache that belongs to a certain // block number. -func cacheSize(block uint64) uint64 { - epoch := int(block / epochLength) +func cacheSize(epoch uint64) uint64 { if epoch < maxEpoch { - return cacheSizes[epoch] + return cacheSizes[int(epoch)] } return calcCacheSize(epoch) } @@ -61,8 +60,8 @@ func cacheSize(block uint64) uint64 { // calcCacheSize calculates the cache size for epoch. The cache size grows linearly, // however, we always take the highest prime below the linearly growing threshold in order // to reduce the risk of accidental regularities leading to cyclic behavior. 
-func calcCacheSize(epoch int) uint64 { - size := cacheInitBytes + cacheGrowthBytes*uint64(epoch) - hashBytes +func calcCacheSize(epoch uint64) uint64 { + size := cacheInitBytes + cacheGrowthBytes*epoch - hashBytes for !new(big.Int).SetUint64(size / hashBytes).ProbablyPrime(1) { // Always accurate for n < 2^64 size -= 2 * hashBytes } @@ -71,10 +70,9 @@ func calcCacheSize(epoch int) uint64 { // datasetSize returns the size of the ethash mining dataset that belongs to a certain // block number. -func datasetSize(block uint64) uint64 { - epoch := int(block / epochLength) +func datasetSize(epoch uint64) uint64 { if epoch < maxEpoch { - return datasetSizes[epoch] + return datasetSizes[int(epoch)] } return calcDatasetSize(epoch) } @@ -82,8 +80,8 @@ func datasetSize(block uint64) uint64 { // calcDatasetSize calculates the dataset size for epoch. The dataset size grows linearly, // however, we always take the highest prime below the linearly growing threshold in order // to reduce the risk of accidental regularities leading to cyclic behavior. -func calcDatasetSize(epoch int) uint64 { - size := datasetInitBytes + datasetGrowthBytes*uint64(epoch) - mixBytes +func calcDatasetSize(epoch uint64) uint64 { + size := datasetInitBytes + datasetGrowthBytes*epoch - mixBytes for !new(big.Int).SetUint64(size / mixBytes).ProbablyPrime(1) { // Always accurate for n < 2^64 size -= 2 * mixBytes } @@ -118,13 +116,41 @@ func makeHasher(h hash.Hash) hasher { // seedHash is the seed to use for generating a verification cache and the mining // dataset. 
-func seedHash(block uint64) []byte { +func seedHash(epoch uint64, epochLength uint64) []byte { + block := calcEpochBlock(epoch, epochLength) seed := make([]byte, 32) - if block < epochLength { + if block < epochLengthDefault { + return seed + } + + h := common.NewHasher() + + for i := 0; i < int(block/epochLengthDefault); i++ { + h.Sha.Reset() + //nolint:errcheck + _, writeErr := h.Sha.Write(seed) + if writeErr != nil { + log.Warn("Failed to write data", "err", writeErr) + } + //nolint:errcheck + _, readErr := h.Sha.Read(seed) + if readErr != nil { + log.Warn("Failed to read data", "err", readErr) + } + } + + common.ReturnHasherToPool(h) + + return seed +} + +func seedHashOld(block uint64) []byte { + seed := make([]byte, 32) + if block < epochLengthDefault { return seed } keccak256 := makeHasher(sha3.NewLegacyKeccak256()) - for i := 0; i < int(block/epochLength); i++ { + for i := 0; i < int(block/epochLengthDefault); i++ { keccak256(seed, seed) } return seed @@ -136,7 +162,7 @@ func seedHash(block uint64) []byte { // algorithm from Strict Memory Hard Hashing Functions (2014). The output is a // set of 524288 64-byte values. // This method places the result into dest in machine byte order. 
-func generateCache(dest []uint32, epoch uint64, seed []byte) { +func generateCache(dest []uint32, epoch uint64, epochLength uint64, seed []byte) { // Print some debug logs to allow analysis on low end devices logger := log.New("epoch", epoch) @@ -148,7 +174,7 @@ func generateCache(dest []uint32, epoch uint64, seed []byte) { if elapsed > 3*time.Second { logFn = logger.Info } - logFn("Generated ethash verification cache", "elapsed", common.PrettyDuration(elapsed)) + logFn("Generated ethash verification cache", "epochLength", epochLength, "elapsed", common.PrettyDuration(elapsed)) }() // Convert our destination slice to a byte buffer var cache []byte @@ -174,7 +200,7 @@ func generateCache(dest []uint32, epoch uint64, seed []byte) { case <-done: return case <-time.After(3 * time.Second): - logger.Info("Generating ethash verification cache", "percentage", atomic.LoadUint32(&progress)*100/uint32(rows)/(cacheRounds+1), "elapsed", common.PrettyDuration(time.Since(start))) + logger.Info("Generating ethash verification cache", "epochLength", epochLength, "percentage", atomic.LoadUint32(&progress)*100/uint32(rows)/(cacheRounds+1), "elapsed", common.PrettyDuration(time.Since(start))) } } }() @@ -266,7 +292,7 @@ func generateDatasetItem(cache []uint32, index uint32, keccak512 hasher) []byte // generateDataset generates the entire ethash dataset for mining. // This method places the result into dest in machine byte order. 
-func generateDataset(dest []uint32, epoch uint64, cache []uint32) { +func generateDataset(dest []uint32, epoch uint64, epochLength uint64, cache []uint32) { // Print some debug logs to allow analysis on low end devices logger := log.New("epoch", epoch) @@ -278,7 +304,7 @@ func generateDataset(dest []uint32, epoch uint64, cache []uint32) { if elapsed > 3*time.Second { logFn = logger.Info } - logFn("Generated ethash verification cache", "elapsed", common.PrettyDuration(elapsed)) + logFn("Generated ethash verification dataset", "epochLength", epochLength, "elapsed", common.PrettyDuration(elapsed)) }() // Figure out whether the bytes need to be swapped for the machine @@ -324,7 +350,7 @@ func generateDataset(dest []uint32, epoch uint64, cache []uint32) { copy(dataset[index*hashBytes:], item) if status := atomic.AddUint64(&progress, 1); status%percent == 0 { - logger.Info("Generating DAG in progress", "percentage", (status*100)/(size/hashBytes), "elapsed", common.PrettyDuration(time.Since(start))) + logger.Info("Generating DAG in progress", "epochLength", epochLength, "percentage", (status*100)/(size/hashBytes), "elapsed", common.PrettyDuration(time.Since(start))) } } }(i) diff --git a/consensus/ethash/algorithm_test.go b/consensus/ethash/algorithm_test.go index 88769d277c09..c1d91cdae7a7 100644 --- a/consensus/ethash/algorithm_test.go +++ b/consensus/ethash/algorithm_test.go @@ -19,6 +19,7 @@ package ethash import ( "bytes" "encoding/binary" + "io" "math/big" "os" "reflect" @@ -44,12 +45,12 @@ func prepare(dest []uint32, src []byte) { func TestSizeCalculations(t *testing.T) { // Verify all the cache and dataset sizes from the lookup table. 
for epoch, want := range cacheSizes { - if size := calcCacheSize(epoch); size != want { + if size := calcCacheSize(uint64(epoch)); size != want { t.Errorf("cache %d: cache size mismatch: have %d, want %d", epoch, size, want) } } for epoch, want := range datasetSizes { - if size := calcDatasetSize(epoch); size != want { + if size := calcDatasetSize(uint64(epoch)); size != want { t.Errorf("dataset %d: dataset size mismatch: have %d, want %d", epoch, size, want) } } @@ -107,7 +108,7 @@ func TestCacheGeneration(t *testing.T) { } for i, tt := range tests { cache := make([]uint32, tt.size/4) - generateCache(cache, tt.epoch, seedHash(tt.epoch*epochLength+1)) + generateCache(cache, tt.epoch, epochLengthDefault, seedHash(tt.epoch, epochLengthDefault)) want := make([]uint32, tt.size/4) prepare(want, tt.cache) @@ -647,10 +648,10 @@ func TestDatasetGeneration(t *testing.T) { } for i, tt := range tests { cache := make([]uint32, tt.cacheSize/4) - generateCache(cache, tt.epoch, seedHash(tt.epoch*epochLength+1)) + generateCache(cache, tt.epoch, epochLengthDefault, seedHash(tt.epoch, epochLengthDefault)) dataset := make([]uint32, tt.datasetSize/4) - generateDataset(dataset, tt.epoch, cache) + generateDataset(dataset, tt.epoch, epochLengthDefault, cache) want := make([]uint32, tt.datasetSize/4) prepare(want, tt.dataset) @@ -666,10 +667,10 @@ func TestDatasetGeneration(t *testing.T) { func TestHashimoto(t *testing.T) { // Create the verification cache and mining dataset cache := make([]uint32, 1024/4) - generateCache(cache, 0, make([]byte, 32)) + generateCache(cache, 0, epochLengthDefault, make([]byte, 32)) dataset := make([]uint32, 32*1024/4) - generateDataset(dataset, 0, cache) + generateDataset(dataset, 0, epochLengthDefault, cache) // Create a block to verify hash := hexutil.MustDecode("0xc9149cc0386e689d789a1c2f3d5d169a61a6218ed30e74414dc736e442ef3d1f") @@ -748,26 +749,26 @@ func TestConcurrentDiskCacheGeneration(t *testing.T) { func BenchmarkCacheGeneration(b *testing.B) { for 
i := 0; i < b.N; i++ { cache := make([]uint32, cacheSize(1)/4) - generateCache(cache, 0, make([]byte, 32)) + generateCache(cache, 0, epochLengthDefault, make([]byte, 32)) } } // Benchmarks the dataset (small) generation performance. func BenchmarkSmallDatasetGeneration(b *testing.B) { cache := make([]uint32, 65536/4) - generateCache(cache, 0, make([]byte, 32)) + generateCache(cache, 0, epochLengthDefault, make([]byte, 32)) b.ResetTimer() for i := 0; i < b.N; i++ { dataset := make([]uint32, 32*65536/4) - generateDataset(dataset, 0, cache) + generateDataset(dataset, 0, epochLengthDefault, cache) } } // Benchmarks the light verification performance. func BenchmarkHashimotoLight(b *testing.B) { cache := make([]uint32, cacheSize(1)/4) - generateCache(cache, 0, make([]byte, 32)) + generateCache(cache, 0, epochLengthDefault, make([]byte, 32)) hash := hexutil.MustDecode("0xc9149cc0386e689d789a1c2f3d5d169a61a6218ed30e74414dc736e442ef3d1f") @@ -780,10 +781,10 @@ func BenchmarkHashimotoLight(b *testing.B) { // Benchmarks the full (small) verification performance. 
func BenchmarkHashimotoFullSmall(b *testing.B) { cache := make([]uint32, 65536/4) - generateCache(cache, 0, make([]byte, 32)) + generateCache(cache, 0, epochLengthDefault, make([]byte, 32)) dataset := make([]uint32, 32*65536/4) - generateDataset(dataset, 0, cache) + generateDataset(dataset, 0, epochLengthDefault, cache) hash := hexutil.MustDecode("0xc9149cc0386e689d789a1c2f3d5d169a61a6218ed30e74414dc736e442ef3d1f") @@ -796,7 +797,6 @@ func BenchmarkHashimotoFullSmall(b *testing.B) { func benchmarkHashimotoFullMmap(b *testing.B, name string, lock bool) { b.Run(name, func(b *testing.B) { tmpdir := b.TempDir() - d := &dataset{epoch: 0} d.generate(tmpdir, 1, lock, false) var hash [common.HashLength]byte @@ -813,3 +813,33 @@ func BenchmarkHashimotoFullMmap(b *testing.B) { benchmarkHashimotoFullMmap(b, "WithLock", true) benchmarkHashimotoFullMmap(b, "WithoutLock", false) } + +func BenchmarkSeedHash(b *testing.B) { + var res []byte + const repeats = 100 + for n := 0; n < repeats; n++ { + for i := uint64(0); i < uint64(b.N); i++ { + res = seedHash(i*epochLengthDefault+1, epochLengthDefault) + } + } + + _, err := io.Copy(io.Discard, bytes.NewBuffer(res)) + if err != nil { + b.Error(err) + } +} + +func BenchmarkSeedHashOld(b *testing.B) { + var res []byte + const repeats = 100 + for n := 0; n < repeats; n++ { + for i := uint64(0); i < uint64(b.N); i++ { + res = seedHashOld(i*epochLengthDefault + 1) + } + } + + _, err := io.Copy(io.Discard, bytes.NewBuffer(res)) + if err != nil { + b.Error(err) + } +} diff --git a/consensus/ethash/consensus.go b/consensus/ethash/consensus.go index 1c38b80ea59b..5a2a6ed7fb6c 100644 --- a/consensus/ethash/consensus.go +++ b/consensus/ethash/consensus.go @@ -339,6 +339,8 @@ func (ethash *Ethash) CalcDifficulty(chain consensus.ChainHeaderReader, time uin func CalcDifficulty(config *params.ChainConfig, time uint64, parent *types.Header) *big.Int { next := new(big.Int).Add(parent.Number, big1) switch { + case config.IsClassic(): + return 
CalcDifficulty_Classic(config, time, parent) case config.IsGrayGlacier(next): return calcDifficultyEip5133(time, parent) case config.IsArrowGlacier(next): @@ -563,7 +565,7 @@ func (ethash *Ethash) verifySeal(chain consensus.ChainHeaderReader, header *type if !fulldag { cache := ethash.cache(number) - size := datasetSize(number) + size := datasetSize(cache.epoch) if ethash.config.PowMode == ModeTest { size = 32 * 1024 } @@ -646,19 +648,21 @@ var ( big32 = big.NewInt(32) ) -// AccumulateRewards credits the coinbase of the given block with the mining -// reward. The total reward consists of the static block reward and rewards for -// included uncles. The coinbase of each uncle block is also rewarded. -func accumulateRewards(config *params.ChainConfig, state *state.StateDB, header *types.Header, uncles []*types.Header) { +func calculateRewards(config *params.ChainConfig, header *types.Header, uncles []*types.Header) (big.Int, []big.Int) { // Select the correct block reward based on chain progression blockReward := FrontierBlockReward - if config.IsByzantium(header.Number) { - blockReward = ByzantiumBlockReward - } - if config.IsConstantinople(header.Number) { - blockReward = ConstantinopleBlockReward + if config.IsClassic() && header.Number.Cmp(config.ECIP1017Block()) >= 0 { + return ecip1017BlockReward(header, uncles) + } else { + if config.IsByzantium(header.Number) { + blockReward = ByzantiumBlockReward + } + if config.IsConstantinople(header.Number) { + blockReward = ConstantinopleBlockReward + } } // Accumulate the rewards for the miner and any included uncles + uncleRewards := []big.Int{} reward := new(big.Int).Set(blockReward) r := new(big.Int) for _, uncle := range uncles { @@ -666,10 +670,23 @@ func accumulateRewards(config *params.ChainConfig, state *state.StateDB, header r.Sub(r, header.Number) r.Mul(r, blockReward) r.Div(r, big8) - state.AddBalance(uncle.Coinbase, r) + uncleRewards = append(uncleRewards, *r) r.Div(blockReward, big32) reward.Add(reward, r) } 
- state.AddBalance(header.Coinbase, reward) + return *reward, uncleRewards +} + +// accumulateRewards credits the coinbase of the given block with the mining +// reward. The total reward consists of the static block reward and rewards for +// included uncles. The coinbase of each uncle block is also rewarded. +func accumulateRewards(config *params.ChainConfig, state *state.StateDB, header *types.Header, uncles []*types.Header) { + minerReward, uncleRewards := calculateRewards(config, header, uncles) + for i, uncle := range uncles { + if i < len(uncleRewards) { + state.AddBalance(uncle.Coinbase, &uncleRewards[i]) + } + } + state.AddBalance(header.Coinbase, &minerReward) } diff --git a/consensus/ethash/consensus_classic.go b/consensus/ethash/consensus_classic.go new file mode 100644 index 000000000000..0d90f309970f --- /dev/null +++ b/consensus/ethash/consensus_classic.go @@ -0,0 +1,257 @@ +package ethash + +import ( + "math/big" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/params" +) + +const ( + epochLengthDefault = 30_000 + epochLengthECIP1099 = 60_000 +) + +// calcEpochLength returns the epoch length for a given block number (ECIP-1099) +func calcEpochLength(block uint64, ecip1099FBlock *uint64) uint64 { + if ecip1099FBlock != nil { + if block >= *ecip1099FBlock { + return epochLengthECIP1099 + } + } + return epochLengthDefault +} + +// calcEpoch returns the epoch for a given block number (ECIP-1099) +func calcEpoch(block uint64, epochLength uint64) uint64 { + epoch := block / epochLength + return epoch +} + +// calcEpochBlock returns the epoch start block for a given epoch (ECIP-1099) +func calcEpochBlock(epoch uint64, epochLength uint64) uint64 { + return epoch*epochLength + 1 +} + +var ( + calcDifficultyNoBombByzantium = makeDifficultyCalculatorClassic(true, true, true) + calcDifficultyNoBombHomestead = makeDifficultyCalculatorClassic(false, true, true) + calcDifficulty1010Pause 
= makeDifficultyCalculatorClassic(false, false, true) + calcDifficulty1010Explode = makeDifficultyCalculatorClassic(false, false, false) +) + +func CalcDifficulty_Classic(config *params.ChainConfig, time uint64, parent *types.Header) *big.Int { + next := new(big.Int).Add(parent.Number, big1) + switch { + case config.IsByzantium(next): + return calcDifficultyNoBombByzantium(time, parent) + case next.Cmp(config.ECIP1041Block()) >= 0: + return calcDifficultyNoBombHomestead(time, parent) + case next.Cmp(new(big.Int).Add(config.ECIP1010Block(), big.NewInt(2_000_000))) >= 0: + return calcDifficulty1010Explode(time, parent) + case next.Cmp(config.ECIP1010Block()) >= 0: + return calcDifficulty1010Pause(time, parent) + case config.IsHomestead(next): + return calcDifficultyHomestead(time, parent) + default: + return calcDifficultyFrontier(time, parent) + } +} + +func makeDifficultyCalculatorClassic(eip100b, defuse, pause bool) func(time uint64, parent *types.Header) *big.Int { + return func(time uint64, parent *types.Header) *big.Int { + // https://github.com/ethereum/EIPs/issues/100. 
+ // algorithm: + // diff = (parent_diff + + // (parent_diff / 2048 * max((2 if len(parent.uncles) else 1) - ((timestamp - parent.timestamp) // 9), -99)) + // ) + 2^(periodCount - 2) + + bigTime := new(big.Int).SetUint64(time) + bigParentTime := new(big.Int).SetUint64(parent.Time) + + // holds intermediate values to make the algo easier to read & audit + x := new(big.Int) + y := new(big.Int) + + if eip100b { + // (2 if len(parent_uncles) else 1) - (block_timestamp - parent_timestamp) // 9 + x.Sub(bigTime, bigParentTime) + x.Div(x, big9) + if parent.UncleHash == types.EmptyUncleHash { + x.Sub(big1, x) + } else { + x.Sub(big2, x) + } + // max((2 if len(parent_uncles) else 1) - (block_timestamp - parent_timestamp) // 9, -99) + if x.Cmp(bigMinus99) < 0 { + x.Set(bigMinus99) + } + // parent_diff + (parent_diff / 2048 * max((2 if len(parent.uncles) else 1) - ((timestamp - parent.timestamp) // 9), -99)) + y.Div(parent.Difficulty, params.DifficultyBoundDivisor) + x.Mul(y, x) + x.Add(parent.Difficulty, x) + + // minimum difficulty can ever be (before exponential factor) + if x.Cmp(params.MinimumDifficulty) < 0 { + x.Set(params.MinimumDifficulty) + } + } else { + // https://github.com/ethereum/EIPs/blob/master/EIPS/eip-2.md + // algorithm: + // diff = (parent_diff + + // (parent_diff / 2048 * max(1 - (block_timestamp - parent_timestamp) // 10, -99)) + // ) + 2^(periodCount - 2) + + // 1 - (block_timestamp - parent_timestamp) // 10 + x.Sub(bigTime, bigParentTime) + x.Div(x, big10) + x.Sub(big1, x) + + // max(1 - (block_timestamp - parent_timestamp) // 10, -99) + if x.Cmp(bigMinus99) < 0 { + x.Set(bigMinus99) + } + // (parent_diff + parent_diff // 2048 * max(1 - (block_timestamp - parent_timestamp) // 10, -99)) + y.Div(parent.Difficulty, params.DifficultyBoundDivisor) + x.Mul(y, x) + x.Add(parent.Difficulty, x) + + // minimum difficulty can ever be (before exponential factor) + if x.Cmp(params.MinimumDifficulty) < 0 { + x.Set(params.MinimumDifficulty) + } + } + + if defuse { + 
return x + } + + // exPeriodRef the explosion clause's reference point + exPeriodRef := new(big.Int).Add(parent.Number, common.Big1) + + if pause { + exPeriodRef.Set(params.ECIP1010Block_Classic) + } else { + // unpaused (exploded) difficulty bomb + length := int64(2_000_000) + exPeriodRef.Sub(exPeriodRef, big.NewInt(length)) + } + + // EXPLOSION + + // the 'periodRef' (from above) represents the many ways of hackishly modifying the reference number + // (ie the 'currentBlock') in order to lie to the function about what time it really is + // + // 2^(( periodRef // EDP) - 2) + // + z := new(big.Int) + z.Div(exPeriodRef, big.NewInt(100_000)) // (periodRef // EDP) + if z.Cmp(big1) > 0 { // if result large enough (not in algo explicitly) + z.Sub(z, big2) // - 2 + z.Exp(big2, z, nil) // 2^ + } else { + z.SetUint64(0) + } + x.Add(x, z) + + return x + } +} + +var ( + disinflationRateQuotient = big.NewInt(4) + disinflationRateDivisor = big.NewInt(5) + ecip1017EraLen = big.NewInt(5_000_000) +) + +// As of "Era 2" (zero-index era 1), uncle miners and winners are rewarded equally for each included block. +// So they share this function. +func getEraUncleBlockReward(era *big.Int, blockReward *big.Int) *big.Int { + return new(big.Int).Div(GetBlockWinnerRewardByEra(era, blockReward), big32) +} + +// GetBlockUncleRewardByEra gets called _for each uncle miner_ associated with a winner block's uncles. +func GetBlockUncleRewardByEra(era *big.Int, header, uncle *types.Header, blockReward *big.Int) *big.Int { + // Era 1 (index 0): + // An extra reward to the winning miner for including uncles as part of the block, in the form of an extra 1/32 (0.15625ETC) per uncle included, up to a maximum of two (2) uncles. 
+ if era.Cmp(big.NewInt(0)) == 0 { + r := new(big.Int) + r.Add(new(big.Int).SetUint64(uncle.Number.Uint64()), big8) // 2,534,998 + 8 = 2,535,006 + r.Sub(r, new(big.Int).SetUint64(header.Number.Uint64())) // 2,535,006 - 2,534,999 = 7 + r.Mul(r, blockReward) // 7 * 5e+18 = 35e+18 + r.Div(r, big8) // 35e+18 / 8 = 7/8 * 5e+18 + + return r + } + return getEraUncleBlockReward(era, blockReward) +} + +// GetBlockWinnerRewardForUnclesByEra gets called _per winner_, and accumulates rewards for each included uncle. +// Assumes uncles have been validated and limited (@ func (v *BlockValidator) VerifyUncles). +func GetBlockWinnerRewardForUnclesByEra(era *big.Int, uncles []*types.Header, blockReward *big.Int) *big.Int { + r := big.NewInt(0) + + for range uncles { + r.Add(r, getEraUncleBlockReward(era, blockReward)) // can reuse this, since 1/32 for winner's uncles remain unchanged from "Era 1" + } + return r +} + +// GetBlockWinnerRewardByEra gets a block reward at disinflation rate. +// Constants MaxBlockReward, disinflationRateQuotient, and disinflationRateDivisor assumed. +func GetBlockWinnerRewardByEra(era *big.Int, blockReward *big.Int) *big.Int { + if era.Cmp(big.NewInt(0)) == 0 { + return new(big.Int).Set(blockReward) + } + + // MaxBlockReward _r_ * (4/5)**era == MaxBlockReward * (4**era) / (5**era) + // since (q/d)**n == q**n / d**n + // qed + var q, d, r = new(big.Int), new(big.Int), new(big.Int) + + q.Exp(disinflationRateQuotient, era, nil) + d.Exp(disinflationRateDivisor, era, nil) + + r.Mul(blockReward, q) + r.Div(r, d) + + return r +} + +func ecip1017BlockReward(header *types.Header, uncles []*types.Header) (big.Int, []big.Int) { + blockReward := FrontierBlockReward + + // Ensure value 'era' is configured. + eraLen := ecip1017EraLen + era := GetBlockEra(new(big.Int).SetUint64(header.Number.Uint64()), new(big.Int).Set(eraLen)) + wr := GetBlockWinnerRewardByEra(era, blockReward) // wr "winner reward". 5, 4, 3.2, 2.56, ... 
+ wurs := GetBlockWinnerRewardForUnclesByEra(era, uncles, blockReward) // wurs "winner uncle rewards" + wr.Add(wr, wurs) + + // Reward uncle miners. + uncleRewards := make([]big.Int, len(uncles)) + for i, uncle := range uncles { + ur := GetBlockUncleRewardByEra(era, header, uncle, blockReward) + uncleRewards[i] = *ur + } + + return *wr, uncleRewards +} + +// GetBlockEra gets which "Era" a given block is within, given an era length (ecip-1017 has era=5,000,000 blocks) +// Returns a zero-index era number, so "Era 1": 0, "Era 2": 1, "Era 3": 2 ... +func GetBlockEra(blockNum, eraLength *big.Int) *big.Int { + // If genesis block or impossible negative-numbered block, return zero-val. + if blockNum.Sign() < 1 { + return new(big.Int) + } + + remainder := big.NewInt(0).Mod(big.NewInt(0).Sub(blockNum, big.NewInt(1)), eraLength) + base := big.NewInt(0).Sub(blockNum, remainder) + + d := big.NewInt(0).Div(base, eraLength) + dremainder := big.NewInt(0).Mod(d, big.NewInt(1)) + + return new(big.Int).Sub(d, dremainder) +} diff --git a/consensus/ethash/ethash.go b/consensus/ethash/ethash.go index dfe00d4b93c0..3f5d10222d13 100644 --- a/consensus/ethash/ethash.go +++ b/consensus/ethash/ethash.go @@ -168,7 +168,7 @@ func memoryMapAndGenerate(path string, size uint64, lock bool, generator func(bu // lru tracks caches or datasets by their last use time, keeping at most N of them. type lru struct { what string - new func(epoch uint64) interface{} + new func(epoch, epochLength uint64) interface{} mu sync.Mutex // Items are kept in a LRU cache, but there is a special case: // We always keep an item for (highest seen epoch) + 1 as the 'future item'. @@ -179,7 +179,7 @@ type lru struct { // newlru create a new least-recently-used cache for either the verification caches // or the mining datasets. 
-func newlru(what string, maxItems int, new func(epoch uint64) interface{}) *lru { +func newlru(what string, maxItems int, new func(epoch, epochLength uint64) interface{}) *lru { if maxItems <= 0 { maxItems = 1 } @@ -192,7 +192,7 @@ func newlru(what string, maxItems int, new func(epoch uint64) interface{}) *lru // get retrieves or creates an item for the given epoch. The first return value is always // non-nil. The second return value is non-nil if lru thinks that an item will be useful in // the near future. -func (lru *lru) get(epoch uint64) (item, future interface{}) { +func (lru *lru) get(epoch uint64, epochLength uint64, ecip1099FBlock *uint64) (item, future interface{}) { lru.mu.Lock() defer lru.mu.Unlock() @@ -203,15 +203,32 @@ func (lru *lru) get(epoch uint64) (item, future interface{}) { item = lru.futureItem } else { log.Trace("Requiring new ethash "+lru.what, "epoch", epoch) - item = lru.new(epoch) + item = lru.new(epoch, epochLength) } lru.cache.Add(epoch, item) } + + // Ensure pre-generation handles ecip-1099 changeover correctly + var nextEpoch = epoch + 1 + var nextEpochLength = epochLength + if ecip1099FBlock != nil { + nextEpochBlock := nextEpoch * epochLength + // Note that == demands that the ECIP1099 activation block is situated + // at the beginning of an epoch. + // https://github.com/ethereumclassic/ECIPs/blob/master/_specs/ecip-1099.md#implementation + if nextEpochBlock == *ecip1099FBlock && epochLength == epochLengthDefault { + nextEpoch = nextEpoch / 2 + nextEpochLength = epochLengthECIP1099 + } + } + // Update the 'future item' if epoch is larger than previously seen. - if epoch < maxEpoch-1 && lru.future < epoch+1 { - log.Trace("Requiring new future ethash "+lru.what, "epoch", epoch+1) - future = lru.new(epoch + 1) - lru.future = epoch + 1 + // Last conditional clause ('lru.future > nextEpoch') handles the ECIP1099 case where + // the next epoch is expected to be LESSER THAN that of the previous state's future epoch number. 
+ if epoch < maxEpoch-1 && lru.future != nextEpoch { + log.Trace("Requiring new future ethash "+lru.what, "epoch", nextEpoch) + future = lru.new(nextEpoch, nextEpochLength) + lru.future = nextEpoch lru.futureItem = future } return item, future @@ -219,31 +236,32 @@ func (lru *lru) get(epoch uint64) (item, future interface{}) { // cache wraps an ethash cache with some metadata to allow easier concurrent use. type cache struct { - epoch uint64 // Epoch for which this cache is relevant - dump *os.File // File descriptor of the memory mapped cache - mmap mmap.MMap // Memory map itself to unmap before releasing - cache []uint32 // The actual cache data content (may be memory mapped) - once sync.Once // Ensures the cache is generated only once + epoch uint64 // Epoch for which this cache is relevant + epochLength uint64 // Epoch length (ECIP-1099) + dump *os.File // File descriptor of the memory mapped cache + mmap mmap.MMap // Memory map itself to unmap before releasing + cache []uint32 // The actual cache data content (may be memory mapped) + once sync.Once // Ensures the cache is generated only once } // newCache creates a new ethash verification cache and returns it as a plain Go // interface to be usable in an LRU cache. -func newCache(epoch uint64) interface{} { - return &cache{epoch: epoch} +func newCache(epoch uint64, epochLength uint64) interface{} { + return &cache{epoch: epoch, epochLength: epochLength} } // generate ensures that the cache content is generated before use. func (c *cache) generate(dir string, limit int, lock bool, test bool) { c.once.Do(func() { - size := cacheSize(c.epoch*epochLength + 1) - seed := seedHash(c.epoch*epochLength + 1) + size := cacheSize(c.epoch) + seed := seedHash(c.epoch, c.epochLength) if test { size = 1024 } // If we don't store anything on disk, generate and return. 
if dir == "" { c.cache = make([]uint32, size/4) - generateCache(c.cache, c.epoch, seed) + generateCache(c.cache, c.epoch, c.epochLength, seed) return } // Disk storage is needed, this will get fancy @@ -251,8 +269,13 @@ func (c *cache) generate(dir string, limit int, lock bool, test bool) { if !isLittleEndian() { endian = ".be" } - path := filepath.Join(dir, fmt.Sprintf("cache-R%d-%x%s", algorithmRevision, seed[:8], endian)) - logger := log.New("epoch", c.epoch) + // The file path naming scheme was changed to include epoch values in the filename, + // which enables a filepath glob with scan to identify out-of-bounds caches and remove them. + // The legacy path declaration is provided below as a comment for reference. + // + // path := filepath.Join(dir, fmt.Sprintf("cache-R%d-%x%s", algorithmRevision, seed[:8], endian)) // LEGACY + path := filepath.Join(dir, fmt.Sprintf("cache-R%d-%d-%x%s", algorithmRevision, c.epoch, seed[:8], endian)) // CURRENT + logger := log.New("epoch", c.epoch, "epochLength", c.epochLength) // We're about to mmap the file, ensure that the mapping is cleaned up when the // cache becomes unused. 
@@ -267,21 +290,41 @@ func (c *cache) generate(dir string, limit int, lock bool, test bool) { } logger.Debug("Failed to load old ethash cache", "err", err) - // No previous cache available, create a new cache file to fill - c.dump, c.mmap, c.cache, err = memoryMapAndGenerate(path, size, lock, func(buffer []uint32) { generateCache(buffer, c.epoch, seed) }) + // No usable previous cache available, create a new cache file to fill + c.dump, c.mmap, c.cache, err = memoryMapAndGenerate(path, size, lock, func(buffer []uint32) { generateCache(buffer, c.epoch, c.epochLength, seed) }) if err != nil { logger.Error("Failed to generate mapped ethash cache", "err", err) c.cache = make([]uint32, size/4) - generateCache(c.cache, c.epoch, seed) + generateCache(c.cache, c.epoch, c.epochLength, seed) } - // Iterate over all previous instances and delete old ones - for ep := int(c.epoch) - limit; ep >= 0; ep-- { - seed := seedHash(uint64(ep)*epochLength + 1) - path := filepath.Join(dir, fmt.Sprintf("cache-R%d-%x%s*", algorithmRevision, seed[:8], endian)) - files, _ := filepath.Glob(path) // find also the temp files that are generated. - for _, file := range files { - os.Remove(file) + + // Iterate over all cache file instances, deleting any out of bounds (where epoch is below lower limit, or above upper limit). + matches, _ := filepath.Glob(filepath.Join(dir, fmt.Sprintf("cache-R%d*", algorithmRevision))) + for _, file := range matches { + var ar int // algorithm revision + var e uint64 // epoch + var s string // seed + if _, err := fmt.Sscanf(filepath.Base(file), "cache-R%d-%d-%s"+endian, &ar, &e, &s); err != nil { + // There is an unrecognized file in this directory. + // See if the name matches the expected pattern of the legacy naming scheme. + if _, err := fmt.Sscanf(filepath.Base(file), "cache-R%d-%s"+endian, &ar, &s); err == nil { + // This file matches the previous generation naming pattern (sans epoch). 
+ if err := os.Remove(file); err != nil { + logger.Error("Failed to remove legacy ethash cache file", "file", file, "err", err) + } else { + logger.Warn("Deleted legacy ethash cache file", "path", file) + } + } + // Else the file is unrecognized (unknown name format), leave it alone. + continue + } + if e <= c.epoch-uint64(limit) || e > c.epoch+1 { + if err := os.Remove(file); err == nil { + logger.Debug("Deleted ethash cache file", "target.epoch", e, "file", file) + } else { + logger.Error("Failed to delete ethash cache file", "target.epoch", e, "file", file, "err", err) + } } } }) @@ -298,18 +341,19 @@ func (c *cache) finalizer() { // dataset wraps an ethash dataset with some metadata to allow easier concurrent use. type dataset struct { - epoch uint64 // Epoch for which this cache is relevant - dump *os.File // File descriptor of the memory mapped cache - mmap mmap.MMap // Memory map itself to unmap before releasing - dataset []uint32 // The actual cache data content - once sync.Once // Ensures the cache is generated only once - done uint32 // Atomic flag to determine generation status + epoch uint64 // Epoch for which this cache is relevant + epochLength uint64 // Epoch length (ECIP-1099) + dump *os.File // File descriptor of the memory mapped cache + mmap mmap.MMap // Memory map itself to unmap before releasing + dataset []uint32 // The actual cache data content + once sync.Once // Ensures the cache is generated only once + done uint32 // Atomic flag to determine generation status } // newDataset creates a new ethash mining dataset and returns it as a plain Go // interface to be usable in an LRU cache. -func newDataset(epoch uint64) interface{} { - return &dataset{epoch: epoch} +func newDataset(epoch uint64, epochLength uint64) interface{} { + return &dataset{epoch: epoch, epochLength: epochLength} } // generate ensures that the dataset content is generated before use. 
@@ -317,10 +361,9 @@ func (d *dataset) generate(dir string, limit int, lock bool, test bool) { d.once.Do(func() { // Mark the dataset generated after we're done. This is needed for remote defer atomic.StoreUint32(&d.done, 1) - - csize := cacheSize(d.epoch*epochLength + 1) - dsize := datasetSize(d.epoch*epochLength + 1) - seed := seedHash(d.epoch*epochLength + 1) + csize := cacheSize(d.epoch) + dsize := datasetSize(d.epoch) + seed := seedHash(d.epoch, d.epochLength) if test { csize = 1024 dsize = 32 * 1024 @@ -328,10 +371,10 @@ func (d *dataset) generate(dir string, limit int, lock bool, test bool) { // If we don't store anything on disk, generate and return if dir == "" { cache := make([]uint32, csize/4) - generateCache(cache, d.epoch, seed) + generateCache(cache, d.epoch, d.epochLength, seed) d.dataset = make([]uint32, dsize/4) - generateDataset(d.dataset, d.epoch, cache) + generateDataset(d.dataset, d.epoch, d.epochLength, cache) return } @@ -340,7 +383,7 @@ func (d *dataset) generate(dir string, limit int, lock bool, test bool) { if !isLittleEndian() { endian = ".be" } - path := filepath.Join(dir, fmt.Sprintf("full-R%d-%x%s", algorithmRevision, seed[:8], endian)) + path := filepath.Join(dir, fmt.Sprintf("full-R%d-%d-%x%s", algorithmRevision, d.epoch, seed[:8], endian)) logger := log.New("epoch", d.epoch) // We're about to mmap the file, ensure that the mapping is cleaned up when the @@ -351,27 +394,50 @@ func (d *dataset) generate(dir string, limit int, lock bool, test bool) { var err error d.dump, d.mmap, d.dataset, err = memoryMap(path, lock) if err == nil { - logger.Debug("Loaded old ethash dataset from disk") + logger.Debug("Loaded old ethash dataset from disk", "path", path) return } logger.Debug("Failed to load old ethash dataset", "err", err) - // No previous dataset available, create a new dataset file to fill + // No usable previous dataset available, create a new dataset file to fill cache := make([]uint32, csize/4) - generateCache(cache, d.epoch, 
seed) + generateCache(cache, d.epoch, d.epochLength, seed) - d.dump, d.mmap, d.dataset, err = memoryMapAndGenerate(path, dsize, lock, func(buffer []uint32) { generateDataset(buffer, d.epoch, cache) }) + d.dump, d.mmap, d.dataset, err = memoryMapAndGenerate(path, dsize, lock, func(buffer []uint32) { generateDataset(buffer, d.epoch, d.epochLength, cache) }) if err != nil { logger.Error("Failed to generate mapped ethash dataset", "err", err) d.dataset = make([]uint32, dsize/4) - generateDataset(d.dataset, d.epoch, cache) + generateDataset(d.dataset, d.epoch, d.epochLength, cache) } - // Iterate over all previous instances and delete old ones - for ep := int(d.epoch) - limit; ep >= 0; ep-- { - seed := seedHash(uint64(ep)*epochLength + 1) - path := filepath.Join(dir, fmt.Sprintf("full-R%d-%x%s", algorithmRevision, seed[:8], endian)) - os.Remove(path) + + // Iterate over all full file instances, deleting any out of bounds (where epoch is below lower limit, or above upper limit). + matches, _ := filepath.Glob(filepath.Join(dir, fmt.Sprintf("full-R%d*", algorithmRevision))) + for _, file := range matches { + var ar int // algorithm revision + var e uint64 // epoch + var s string // seed + if _, err := fmt.Sscanf(filepath.Base(file), "full-R%d-%d-%s"+endian, &ar, &e, &s); err != nil { + // There is an unrecognized file in this directory. + // See if the name matches the expected pattern of the legacy naming scheme. + if _, err := fmt.Sscanf(filepath.Base(file), "full-R%d-%s"+endian, &ar, &s); err == nil { + // This file matches the previous generation naming pattern (sans epoch). + if err := os.Remove(file); err != nil { + logger.Error("Failed to remove legacy ethash full file", "file", file, "err", err) + } else { + logger.Warn("Deleted legacy ethash full file", "path", file) + } + } + // Else the file is unrecognized (unknown name format), leave it alone. 
+ continue + } + if e <= d.epoch-uint64(limit) || e > d.epoch+1 { + if err := os.Remove(file); err == nil { + logger.Debug("Deleted ethash full file", "target.epoch", e, "file", file) + } else { + logger.Error("Failed to delete ethash full file", "target.epoch", e, "file", file, "err", err) + } + } } }) } @@ -431,7 +497,8 @@ type Config struct { // be block header JSON objects instead of work package arrays. NotifyFull bool - Log log.Logger `toml:"-"` + Log log.Logger `toml:"-"` + ECIP1099Block *uint64 `toml:"-"` } // Ethash is a consensus engine based on proof-of-work implementing the ethash @@ -572,8 +639,9 @@ func (ethash *Ethash) StopRemoteSealer() error { // by first checking against a list of in-memory caches, then against caches // stored on disk, and finally generating one if none can be found. func (ethash *Ethash) cache(block uint64) *cache { - epoch := block / epochLength - currentI, futureI := ethash.caches.get(epoch) + epochLength := calcEpochLength(block, ethash.config.ECIP1099Block) + epoch := calcEpoch(block, epochLength) + currentI, futureI := ethash.caches.get(epoch, epochLength, ethash.config.ECIP1099Block) current := currentI.(*cache) // Wait for generation finish. @@ -595,8 +663,9 @@ func (ethash *Ethash) cache(block uint64) *cache { // generates on a background thread. func (ethash *Ethash) dataset(block uint64, async bool) *dataset { // Retrieve the requested ethash dataset - epoch := block / epochLength - currentI, futureI := ethash.datasets.get(epoch) + epochLength := calcEpochLength(block, ethash.config.ECIP1099Block) + epoch := calcEpoch(block, epochLength) + currentI, futureI := ethash.datasets.get(epoch, epochLength, ethash.config.ECIP1099Block) current := currentI.(*dataset) // If async is specified, generate everything in a background thread @@ -692,6 +761,16 @@ func (ethash *Ethash) APIs(chain consensus.ChainHeaderReader) []rpc.API { // SeedHash is the seed to use for generating a verification cache and the mining // dataset. 
-func SeedHash(block uint64) []byte { - return seedHash(block) +func SeedHash(epoch uint64, epochLength uint64) []byte { + return seedHash(epoch, epochLength) +} + +// CalcEpochLength returns the epoch length for a given block number (ECIP-1099) +func CalcEpochLength(block uint64, ecip1099FBlock *uint64) uint64 { + return calcEpochLength(block, ecip1099FBlock) +} + +// CalcEpoch returns the epoch for a given block number (ECIP-1099) +func CalcEpoch(block uint64, epochLength uint64) uint64 { + return calcEpoch(block, epochLength) } diff --git a/consensus/ethash/sealer.go b/consensus/ethash/sealer.go index ec4696390028..2eda5a0d91ee 100644 --- a/consensus/ethash/sealer.go +++ b/consensus/ethash/sealer.go @@ -347,7 +347,9 @@ func (s *remoteSealer) loop() { func (s *remoteSealer) makeWork(block *types.Block) { hash := s.ethash.SealHash(block.Header()) s.currentWork[0] = hash.Hex() - s.currentWork[1] = common.BytesToHash(SeedHash(block.NumberU64())).Hex() + epl := CalcEpochLength(block.NumberU64(), s.ethash.config.ECIP1099Block) + ep := CalcEpoch(block.NumberU64(), epl) + s.currentWork[1] = common.BytesToHash(SeedHash(ep, epl)).Hex() s.currentWork[2] = common.BytesToHash(new(big.Int).Div(two256, block.Difficulty()).Bytes()).Hex() s.currentWork[3] = hexutil.EncodeBig(block.Number()) diff --git a/consensus/ethash/sealer_test.go b/consensus/ethash/sealer_test.go index e338f7529065..d528b51f54a3 100644 --- a/consensus/ethash/sealer_test.go +++ b/consensus/ethash/sealer_test.go @@ -63,7 +63,9 @@ func TestRemoteNotify(t *testing.T) { if want := ethash.SealHash(header).Hex(); work[0] != want { t.Errorf("work packet hash mismatch: have %s, want %s", work[0], want) } - if want := common.BytesToHash(SeedHash(header.Number.Uint64())).Hex(); work[1] != want { + epl := CalcEpochLength(block.NumberU64(), ethash.config.ECIP1099Block) + ep := CalcEpoch(block.NumberU64(), epl) + if want := common.BytesToHash(SeedHash(ep, epl)).Hex(); work[1] != want { t.Errorf("work packet seed 
mismatch: have %s, want %s", work[1], want) } target := new(big.Int).Div(new(big.Int).Lsh(big.NewInt(1), 256), header.Difficulty) diff --git a/core/forkid/forkid.go b/core/forkid/forkid.go index f56ce85feeed..2540fdde1a71 100644 --- a/core/forkid/forkid.go +++ b/core/forkid/forkid.go @@ -227,12 +227,24 @@ func gatherForks(config *params.ChainConfig) []uint64 { if field.Type != reflect.TypeOf(new(big.Int)) { continue } + if config.IsClassic() && strings.Contains(field.Name, "DAO") { + continue + } // Extract the fork rule block number and aggregate it rule := conf.Field(i).Interface().(*big.Int) if rule != nil { forks = append(forks, rule.Uint64()) } } + if config.IsClassic() { + forks = append(forks, + 3_000_000, // Spurious Dragon. ETC implements only "half" of the EIPs for this fork. The later half of the EIPs happen in ETC's Byzantium (8_772_000). + 5_000_000, // ECIP1017 Monetary Policy + 5_900_000, // ECIP1010 Difficulty Bomb Disposal + 11_700_000, // ECIP1099: Etchash + 14_525_000, // Partial London; only "half" of EIPs are adopted (not EIP-1559). + ) + } // Sort the fork block numbers to permit chronological XOR for i := 0; i < len(forks); i++ { for j := i + 1; j < len(forks); j++ { diff --git a/core/genesis.go b/core/genesis.go index b5f844724741..81eda61b6e83 100644 --- a/core/genesis.go +++ b/core/genesis.go @@ -359,6 +359,18 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, genesis *Genesis, override return newcfg, stored, nil } storedData, _ := json.Marshal(storedcfg) + + // Special case: differentiate ETH vs. ETC configs. + // This is needed because ETC has the same genesis block as ETH. 
+ if newcfg.ChainID.Cmp(params.MainnetChainConfig.ChainID) == 0 && + storedcfg.ChainID.Cmp(params.ClassicChainConfig.ChainID) == 0 { + newcfg = params.ClassicChainConfig + applyOverrides(newcfg) + if err := newcfg.CheckConfigForkOrder(); err != nil { + return newcfg, stored, err + } + } + // Special case: if a private network is being used (no genesis and also no + // mainnet hash in the database), we must not apply the `configOrDefault` + // chain config as that would be AllProtocolChanges (applying any new fork diff --git a/core/genesis_classic.go b/core/genesis_classic.go new file mode 100644 index 000000000000..db6668c0810e --- /dev/null +++ b/core/genesis_classic.go @@ -0,0 +1,20 @@ +package core + +import ( + "math/big" + + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/params" +) + +// DefaultClassicGenesisBlock returns the Ethereum Classic (ETC) main net genesis block. +func DefaultClassicGenesisBlock() *Genesis { + return &Genesis{ + Config: params.ClassicChainConfig, + Nonce: 66, + ExtraData: hexutil.MustDecode("0x11bbe8db4e347b4e8c937c1c8370e4b5ed33adb3db69cbdb7a38e1e50b1b82fa"), + GasLimit: 5000, + Difficulty: big.NewInt(17179869184), + Alloc: decodePrealloc(mainnetAllocData), + } +} diff --git a/core/sender_cacher.go b/core/sender_cacher.go index 4be53619ebec..19c710f67930 100644 --- a/core/sender_cacher.go +++ b/core/sender_cacher.go @@ -18,6 +18,7 @@ package core import ( "runtime" + "sync" "github.com/ethereum/go-ethereum/core/types" ) @@ -42,6 +43,7 @@ type txSenderCacherRequest struct { type txSenderCacher struct { threads int tasks chan *txSenderCacherRequest + mu sync.Mutex } // newTxSenderCacher creates a new transaction sender background cacher and starts @@ -62,7 +64,9 @@ func newTxSenderCacher(threads int) *txSenderCacher { func (cacher *txSenderCacher) cache() { for task := range cacher.tasks { for i := 0; i < len(task.txs); i += task.inc { + cacher.mu.Lock() types.Sender(task.signer, task.txs[i]) + cacher.mu.Unlock() } } 
} diff --git a/core/state_transition.go b/core/state_transition.go index e6a15a3c1c2e..ecebf8ad89cd 100644 --- a/core/state_transition.go +++ b/core/state_transition.go @@ -335,7 +335,7 @@ func (st *StateTransition) TransitionDb() (*ExecutionResult, error) { ret, st.gas, vmerr = st.evm.Call(sender, st.to(), st.data, st.gas, st.value) } - if !rules.IsLondon { + if !rules.IsLondon && !rules.IsMystique { // Before EIP-3529: refunds were capped to gasUsed / 2 st.refundGas(params.RefundQuotient) } else { diff --git a/core/types/transaction_signing.go b/core/types/transaction_signing.go index 87f0390a6f9c..33fb0c691bc4 100644 --- a/core/types/transaction_signing.go +++ b/core/types/transaction_signing.go @@ -44,7 +44,7 @@ func MakeSigner(config *params.ChainConfig, blockNumber *big.Int) Signer { signer = NewLondonSigner(config.ChainID) case config.IsBerlin(blockNumber): signer = NewEIP2930Signer(config.ChainID) - case config.IsEIP155(blockNumber): + case config.IsProtectedSigner(blockNumber): signer = NewEIP155Signer(config.ChainID) case config.IsHomestead(blockNumber): signer = HomesteadSigner{} @@ -324,7 +324,7 @@ func (s eip2930Signer) Hash(tx *Transaction) common.Hash { // This _should_ not happen, but in case someone sends in a bad // json struct via RPC, it's probably more prudent to return an // empty hash instead of killing the node with a panic - //panic("Unsupported transaction type: %d", tx.typ) + // panic("Unsupported transaction type: %d", tx.typ) return common.Hash{} } } diff --git a/core/vm/evm.go b/core/vm/evm.go index 888f4812a590..e1ed1752f781 100644 --- a/core/vm/evm.go +++ b/core/vm/evm.go @@ -238,7 +238,7 @@ func (evm *EVM) Call(caller ContractRef, addr common.Address, input []byte, gas gas = 0 } // TODO: consider clearing up unused snapshots: - //} else { + // } else { // evm.StateDB.DiscardSnapshot(snapshot) } return ret, gas, err @@ -458,7 +458,7 @@ func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64, } // Reject code 
starting with 0xEF if EIP-3541 is enabled. - if err == nil && len(ret) >= 1 && ret[0] == 0xEF && evm.chainRules.IsLondon { + if err == nil && len(ret) >= 1 && ret[0] == 0xEF && (evm.chainRules.IsLondon || evm.chainRules.IsMystique) { err = ErrInvalidCode } diff --git a/core/vm/interpreter.go b/core/vm/interpreter.go index 312977b75588..63379b49a383 100644 --- a/core/vm/interpreter.go +++ b/core/vm/interpreter.go @@ -64,6 +64,8 @@ func NewEVMInterpreter(evm *EVM, cfg Config) *EVMInterpreter { cfg.JumpTable = &mergeInstructionSet case evm.chainRules.IsLondon: cfg.JumpTable = &londonInstructionSet + case evm.chainRules.IsMystique: + cfg.JumpTable = &mystiqueInstructionSet case evm.chainRules.IsBerlin: cfg.JumpTable = &berlinInstructionSet case evm.chainRules.IsIstanbul: @@ -72,7 +74,7 @@ cfg.JumpTable = &constantinopleInstructionSet case evm.chainRules.IsByzantium: cfg.JumpTable = &byzantiumInstructionSet - case evm.chainRules.IsEIP158: + case evm.chainRules.IsEIP158 || evm.chainRules.IsDieHard: cfg.JumpTable = &spuriousDragonInstructionSet case evm.chainRules.IsEIP150: cfg.JumpTable = &tangerineWhistleInstructionSet diff --git a/core/vm/jump_table.go b/core/vm/jump_table.go index 94229436d23c..83ac44332ea1 100644 --- a/core/vm/jump_table.go +++ b/core/vm/jump_table.go @@ -53,6 +53,7 @@ var ( constantinopleInstructionSet = newConstantinopleInstructionSet() istanbulInstructionSet = newIstanbulInstructionSet() berlinInstructionSet = newBerlinInstructionSet() + mystiqueInstructionSet = newMystiqueInstructionSet() londonInstructionSet = newLondonInstructionSet() mergeInstructionSet = newMergeInstructionSet() ) @@ -98,6 +99,15 @@ func newLondonInstructionSet() JumpTable { return validate(instructionSet) } +// newMystiqueInstructionSet returns the frontier, homestead, byzantium, +// constantinople, istanbul, petersburg and berlin instructions, plus london's +// EIP-3529 refund reduction (london's EIP-3198 BASEFEE opcode is excluded). 
+func newMystiqueInstructionSet() JumpTable { + instructionSet := newBerlinInstructionSet() + enable3529(&instructionSet) // EIP-3529: Reduction in refunds https://eips.ethereum.org/EIPS/eip-3529 + // enable3198(&instructionSet) // Base fee opcode https://eips.ethereum.org/EIPS/eip-3198 + return validate(instructionSet) +} + // newBerlinInstructionSet returns the frontier, homestead, byzantium, // contantinople, istanbul, petersburg and berlin instructions. func newBerlinInstructionSet() JumpTable { diff --git a/eth/backend.go b/eth/backend.go index ab2aaf7b6b12..64b3053fe10f 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -216,7 +216,9 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { cacheLimit := cacheConfig.TrieCleanLimit + cacheConfig.TrieDirtyLimit + cacheConfig.SnapshotLimit checkpoint := config.Checkpoint if checkpoint == nil { - checkpoint = params.TrustedCheckpoints[eth.blockchain.Genesis().Hash()] + if !config.Genesis.Config.IsClassic() { + checkpoint = params.TrustedCheckpoints[eth.blockchain.Genesis().Hash()] + } } if eth.handler, err = newHandler(&handlerConfig{ Database: chainDb, diff --git a/eth/catalyst/api_test.go b/eth/catalyst/api_test.go index e195145b73ad..401cddd4af93 100644 --- a/eth/catalyst/api_test.go +++ b/eth/catalyst/api_test.go @@ -420,7 +420,12 @@ func startEthService(t *testing.T, genesis *core.Genesis, blocks []*types.Block) t.Fatal("can't create node:", err) } - ethcfg := ðconfig.Config{Genesis: genesis, Ethash: ethash.Config{PowMode: ethash.ModeFake}, SyncMode: downloader.FullSync, TrieTimeout: time.Minute, TrieDirtyCache: 256, TrieCleanCache: 256} + var bl1099 *uint64 + if genesis.Config.IsClassic() { + u := params.ECIP1099Block_Classic.Uint64() + bl1099 = &u + } + ethcfg := ðconfig.Config{Genesis: genesis, Ethash: ethash.Config{PowMode: ethash.ModeFake, ECIP1099Block: bl1099}, SyncMode: downloader.FullSync, TrieTimeout: time.Minute, TrieDirtyCache: 256, TrieCleanCache: 256} ethservice, err := 
eth.New(n, ethcfg) if err != nil { t.Fatal("can't create eth service:", err) diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go index e9651d041c3c..d551315a3c15 100644 --- a/eth/ethconfig/config.go +++ b/eth/ethconfig/config.go @@ -244,6 +244,7 @@ func CreateConsensusEngine(stack *node.Node, ethashConfig *ethash.Config, clique DatasetsOnDisk: ethashConfig.DatasetsOnDisk, DatasetsLockMmap: ethashConfig.DatasetsLockMmap, NotifyFull: ethashConfig.NotifyFull, + ECIP1099Block: ethashConfig.ECIP1099Block, }, notify, noverify) engine.(*ethash.Ethash).SetThreads(-1) // Disable CPU mining } diff --git a/go.mod b/go.mod index eda8e663c6bb..02c93af8aed1 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,7 @@ go 1.17 require ( github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.3.0 - github.com/VictoriaMetrics/fastcache v1.6.0 + github.com/VictoriaMetrics/fastcache v1.12.0 github.com/aws/aws-sdk-go-v2 v1.2.0 github.com/aws/aws-sdk-go-v2/config v1.1.1 github.com/aws/aws-sdk-go-v2/credentials v1.1.1 @@ -78,7 +78,7 @@ require ( github.com/aws/aws-sdk-go-v2/service/sso v1.1.1 // indirect github.com/aws/aws-sdk-go-v2/service/sts v1.1.1 // indirect github.com/aws/smithy-go v1.1.0 // indirect - github.com/cespare/xxhash/v2 v2.1.1 // indirect + github.com/cespare/xxhash/v2 v2.1.2 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect github.com/crate-crypto/go-ipa v0.0.0-20220523130400-f11357ae11c7 // indirect github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 // indirect diff --git a/go.sum b/go.sum index 50c27386cbae..da5caf13125d 100644 --- a/go.sum +++ b/go.sum @@ -31,8 +31,8 @@ github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 h1:fLjPD/aNc3UIOA6tDi6QXUemppXK3P9BI7mr2hd6gx8= github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod 
h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= -github.com/VictoriaMetrics/fastcache v1.6.0 h1:C/3Oi3EiBCqufydp1neRZkqcwmEiuRT9c3fqvvgKm5o= -github.com/VictoriaMetrics/fastcache v1.6.0/go.mod h1:0qHz5QP0GMX4pfmMA/zt5RgfNuXJrTP0zS7DqpHGGTw= +github.com/VictoriaMetrics/fastcache v1.12.0 h1:vnVi/y9yKDcD9akmc4NqAoqgQhJrOwUF+j9LTgn4QDE= +github.com/VictoriaMetrics/fastcache v1.12.0/go.mod h1:tjiYeEfYXCqacuvYw/7UoDIeJaNxq6132xHICNP77w8= github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= @@ -72,8 +72,8 @@ github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk= github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= +github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= @@ -185,7 +185,6 @@ github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaS github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= 
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golangci/lint-1 v0.0.0-20181222135242-d2cdd8c08219/go.mod h1:/X8TswGSh1pIozq4ZwCfxS0WA5JGXguxk94ar/4c87Y= @@ -547,7 +546,6 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210316164454-77fc1eacc6aa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210420205809-ac73e9fd8988/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go index 538644908ba0..27e0e9ad2359 100644 --- a/internal/ethapi/api.go +++ b/internal/ethapi/api.go @@ -2012,7 +2012,14 @@ func (api *DebugAPI) SeedHash(ctx context.Context, number uint64) (string, error if block == nil { return "", fmt.Errorf("block #%d not found", number) } - return fmt.Sprintf("%#x", ethash.SeedHash(number)), nil + var bl1099 *uint64 + if api.b.ChainConfig().IsClassic() { + b := params.ECIP1099Block_Classic.Uint64() + bl1099 = &b + } + epl := ethash.CalcEpochLength(number, bl1099) + ep := 
ethash.CalcEpoch(number, epl) + return fmt.Sprintf("%#x", ethash.SeedHash(ep, epl)), nil } // ChaindbProperty returns leveldb properties of the key-value database. diff --git a/les/client.go b/les/client.go index c304bf86f8a8..6406e477da5a 100644 --- a/les/client.go +++ b/les/client.go @@ -154,7 +154,9 @@ func New(stack *node.Node, config *ethconfig.Config) (*LightEthereum, error) { checkpoint := config.Checkpoint if checkpoint == nil { - checkpoint = params.TrustedCheckpoints[genesisHash] + if !chainConfig.IsClassic() { + checkpoint = params.TrustedCheckpoints[genesisHash] + } } // Note: NewLightChain adds the trusted checkpoint so it needs an ODR with // indexers already set but not started yet diff --git a/miner/worker.go b/miner/worker.go index c3fca2159452..beabcf6110b7 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -885,7 +885,7 @@ func (w *worker) commitTransactions(env *environment, txs *types.TransactionsByP // Check whether the tx is replay protected. If we're not in the EIP155 hf // phase, start ignoring the sender until we do. - if tx.Protected() && !w.chainConfig.IsEIP155(env.header.Number) { + if tx.Protected() && !w.chainConfig.IsProtectedSigner(env.header.Number) { log.Trace("Ignoring reply protected transaction", "hash", tx.Hash(), "eip155", w.chainConfig.EIP155Block) txs.Pop() diff --git a/params/config.go b/params/config.go index 22b36b7d68e3..92e0a19d2794 100644 --- a/params/config.go +++ b/params/config.go @@ -270,17 +270,17 @@ var ( // // This configuration is intentionally not using keyed fields to force anyone // adding flags to the config to also have to set these fields. 
- AllEthashProtocolChanges = &ChainConfig{big.NewInt(1337), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, nil, nil, false, new(EthashConfig), nil} + AllEthashProtocolChanges = &ChainConfig{big.NewInt(1337), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, nil, nil, false, new(EthashConfig), nil, nil, nil, nil} // AllCliqueProtocolChanges contains every protocol change (EIPs) introduced // and accepted by the Ethereum core developers into the Clique consensus. // // This configuration is intentionally not using keyed fields to force anyone // adding flags to the config to also have to set these fields. - AllCliqueProtocolChanges = &ChainConfig{big.NewInt(1337), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, nil, nil, nil, nil, false, nil, &CliqueConfig{Period: 0, Epoch: 30000}} + AllCliqueProtocolChanges = &ChainConfig{big.NewInt(1337), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, nil, nil, nil, nil, false, nil, &CliqueConfig{Period: 0, Epoch: 30000}, nil, nil, nil} - TestChainConfig = &ChainConfig{big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, nil, nil, false, new(EthashConfig), nil} - 
NonActivatedConfig = &ChainConfig{big.NewInt(1), nil, nil, false, nil, common.Hash{}, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, false, new(EthashConfig), nil} + TestChainConfig = &ChainConfig{big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, nil, nil, false, new(EthashConfig), nil, nil, nil, nil} + NonActivatedConfig = &ChainConfig{big.NewInt(1), nil, nil, false, nil, common.Hash{}, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, false, new(EthashConfig), nil, nil, nil, nil} TestRules = TestChainConfig.Rules(new(big.Int), false) ) @@ -386,6 +386,11 @@ type ChainConfig struct { // Various consensus engines Ethash *EthashConfig `json:"ethash,omitempty"` Clique *CliqueConfig `json:"clique,omitempty"` + + // ETC fork values + ETC_EIP155Block *big.Int `json:"etcEIP155Block,omitempty"` // ETC EIP155 HF block + ETC_EIP160Block *big.Int `json:"etcEIP160Block,omitempty"` // ETC EIP160 HF block + ETC_MystiqueBlock *big.Int `json:"etcMystiqueBlock,omitempty"` // ETC Mystique HF block } // EthashConfig is the consensus engine configs for proof-of-work based sealing. @@ -503,6 +508,7 @@ func (c *ChainConfig) IsEIP150(num *big.Int) bool { } // IsEIP155 returns whether num is either equal to the EIP155 fork block or greater. +// meowsbits: DISUSED. func (c *ChainConfig) IsEIP155(num *big.Int) bool { return isForked(c.EIP155Block, num) } @@ -775,6 +781,7 @@ type Rules struct { IsByzantium, IsConstantinople, IsPetersburg, IsIstanbul bool IsBerlin, IsLondon bool IsMerge, IsShanghai, isCancun bool + IsDieHard, IsMystique bool // ETC forks } // Rules ensures c's ChainID is not nil. 
@@ -798,5 +805,8 @@ func (c *ChainConfig) Rules(num *big.Int, isMerge bool) Rules { IsMerge: isMerge, IsShanghai: c.IsShanghai(num), isCancun: c.IsCancun(num), + + IsDieHard: c.IsClassic() && isForked(c.ETC_EIP160Block, num), + IsMystique: c.IsClassic() && isForked(c.ETC_MystiqueBlock, num), } } diff --git a/params/config_classic.go b/params/config_classic.go new file mode 100644 index 000000000000..cb121b5e7aa4 --- /dev/null +++ b/params/config_classic.go @@ -0,0 +1,98 @@ +package params + +import ( + "math" + "math/big" + + "github.com/ethereum/go-ethereum/common" +) + +// ClassicChainConfig is the chain parameters to run a node on the Ethereum Classic (ETC) network. +var ClassicChainConfig = &ChainConfig{ + ChainID: big.NewInt(61), + HomesteadBlock: big.NewInt(1_150_000), + DAOForkBlock: big.NewInt(1_920_000), + DAOForkSupport: false, + EIP150Block: big.NewInt(2_500_000), + EIP150Hash: common.HexToHash("0xca12c63534f565899681965528d536c52cb05b7c48e269c2a6cb77ad864d878a"), + EIP155Block: big.NewInt(8_772_000), + EIP158Block: big.NewInt(8_772_000), + ByzantiumBlock: big.NewInt(8_772_000), + ConstantinopleBlock: big.NewInt(9_573_000), + PetersburgBlock: big.NewInt(9_573_000), + IstanbulBlock: big.NewInt(10_500_839), + MuirGlacierBlock: nil, // Difficulty Bomb Delay + BerlinBlock: big.NewInt(13_189_133), + + // LondonBlock + // ETC only takes 2/4 EIPs. + // Excluded: EIP-1559, EIP-3198. (BaseFee, BASEFEE opcode) + // Included: EIP-3541, EIP-3529. (EF-prefixed contracts, Reduction in refunds: SSTORE, SELFDESTRUCT) + // It is important that this field remain nil for ETC. + // The code uses ChainConfig.IsLondon and LondonBlock checks + // as conditions for a lot of BaseFee and EIP-1559 things. + // Hardcoded overrides for the included EIPs are made with ChainConfig.IsMystique + // and its block. 
+ LondonBlock: nil, + ArrowGlacierBlock: nil, // Difficulty Bomb Delay + GrayGlacierBlock: nil, // Difficulty Bomb Delay + TerminalTotalDifficulty: nil, // n/a + Ethash: new(EthashConfig), + ETC_EIP155Block: big.NewInt(3_000_000), + ETC_EIP160Block: big.NewInt(3_000_000), + ETC_MystiqueBlock: big.NewInt(14_525_000), +} + +// IsClassic returns true if the config's chain id is 61 (ETC). +func (c *ChainConfig) IsClassic() bool { + if c.ChainID == nil { + return false + } + return c.ChainID.Cmp(ClassicChainConfig.ChainID) == 0 +} + +// ECIP1010Block_Classic defines the block number where the ECIP-1010 difficulty bomb delay is activated, +// delaying the bomb for 2M blocks. +var ECIP1010Block_Classic = big.NewInt(3_000_000) + +func (c *ChainConfig) ECIP1010Block() *big.Int { + if c.IsClassic() { + return ECIP1010Block_Classic + } + return nil +} + +// ECIP1041Block_Classic is the ultimate difficulty bomb diffuser block number for the Ethereum Classic network. +var ECIP1041Block_Classic = big.NewInt(5_900_000) + +func (c *ChainConfig) ECIP1041Block() *big.Int { + if c.IsClassic() { + return ECIP1041Block_Classic + } + return nil +} + +// ECIP1017Block_Classic defines the block number where the ECIP-1017 monetary policy is activated, +var ECIP1017Block_Classic = big.NewInt(5_000_000) +var infinity = big.NewInt(math.MaxInt64) + +func (c *ChainConfig) ECIP1017Block() *big.Int { + if c.IsClassic() { + return ECIP1017Block_Classic + } + return infinity +} + +// ECIP1099Block_Classic defines the block number where the ECIP-1099 Etchash PoW algorithm is activated. 
+var ECIP1099Block_Classic = big.NewInt(11_700_000) + +func (c *ChainConfig) IsProtectedSigner(num *big.Int) bool { + if c.IsClassic() { + return isForked(c.ETC_EIP155Block, num) + } + return isForked(c.EIP155Block, num) +} + +const ClassicDNS = "enrtree://AJE62Q4DUX4QMMXEHCSSCSC65TDHZYSMONSD64P3WULVLSF6MRQ3K@all.classic.blockd.info" + +var ClassicBootnodes = []string{} diff --git a/tests/init.go b/tests/init.go index ef5ea4bb9a9a..2e5b0e19a9d2 100644 --- a/tests/init.go +++ b/tests/init.go @@ -230,6 +230,106 @@ var Forks = map[string]*params.ChainConfig{ MergeNetsplitBlock: big.NewInt(0), TerminalTotalDifficulty: big.NewInt(0), }, + // ETC_Atlantis :: Byzantium + "ETC_Atlantis": { + ChainID: big.NewInt(61), + HomesteadBlock: big.NewInt(0), + EIP150Block: big.NewInt(0), + EIP155Block: big.NewInt(0), + EIP158Block: big.NewInt(0), + ByzantiumBlock: big.NewInt(0), + ConstantinopleBlock: nil, + PetersburgBlock: nil, + IstanbulBlock: nil, + MuirGlacierBlock: nil, + BerlinBlock: nil, + LondonBlock: nil, + ArrowGlacierBlock: nil, + TerminalTotalDifficulty: nil, + ETC_EIP155Block: big.NewInt(0), + ETC_EIP160Block: big.NewInt(0), + ETC_MystiqueBlock: nil, + }, + // ETC_Agharta :: Constantinople+Petersburg + "ETC_Agharta": { + ChainID: big.NewInt(61), + HomesteadBlock: big.NewInt(0), + EIP150Block: big.NewInt(0), + EIP155Block: big.NewInt(0), + EIP158Block: big.NewInt(0), + ByzantiumBlock: big.NewInt(0), + ConstantinopleBlock: big.NewInt(0), + PetersburgBlock: big.NewInt(0), + IstanbulBlock: nil, + MuirGlacierBlock: nil, + BerlinBlock: nil, + LondonBlock: nil, + ArrowGlacierBlock: nil, + TerminalTotalDifficulty: nil, + ETC_EIP155Block: big.NewInt(0), + ETC_EIP160Block: big.NewInt(0), + ETC_MystiqueBlock: nil, + }, + // ETC_Phoenix :: Istanbul + "ETC_Phoenix": { + ChainID: big.NewInt(61), + HomesteadBlock: big.NewInt(0), + EIP150Block: big.NewInt(0), + EIP155Block: big.NewInt(0), + EIP158Block: big.NewInt(0), + ByzantiumBlock: big.NewInt(0), + ConstantinopleBlock: 
big.NewInt(0), + PetersburgBlock: big.NewInt(0), + IstanbulBlock: big.NewInt(0), + MuirGlacierBlock: nil, + BerlinBlock: nil, + LondonBlock: nil, + ArrowGlacierBlock: nil, + TerminalTotalDifficulty: nil, + ETC_EIP155Block: big.NewInt(0), + ETC_EIP160Block: big.NewInt(0), + ETC_MystiqueBlock: nil, + }, + // ETC_Magneto :: Berlin + "ETC_Magneto": { + ChainID: big.NewInt(61), + HomesteadBlock: big.NewInt(0), + EIP150Block: big.NewInt(0), + EIP155Block: big.NewInt(0), + EIP158Block: big.NewInt(0), + ByzantiumBlock: big.NewInt(0), + ConstantinopleBlock: big.NewInt(0), + PetersburgBlock: big.NewInt(0), + IstanbulBlock: big.NewInt(0), + MuirGlacierBlock: nil, + BerlinBlock: big.NewInt(0), + LondonBlock: nil, + ArrowGlacierBlock: nil, + TerminalTotalDifficulty: nil, + ETC_EIP155Block: big.NewInt(0), + ETC_EIP160Block: big.NewInt(0), + ETC_MystiqueBlock: nil, + }, + // ETC_Mystique :: London + "ETC_Mystique": { + ChainID: big.NewInt(61), + HomesteadBlock: big.NewInt(0), + EIP150Block: big.NewInt(0), + EIP155Block: big.NewInt(0), + EIP158Block: big.NewInt(0), + ByzantiumBlock: big.NewInt(0), + ConstantinopleBlock: big.NewInt(0), + PetersburgBlock: big.NewInt(0), + IstanbulBlock: big.NewInt(0), + MuirGlacierBlock: nil, + BerlinBlock: big.NewInt(0), + LondonBlock: nil, + ArrowGlacierBlock: nil, + TerminalTotalDifficulty: nil, + ETC_EIP155Block: big.NewInt(0), + ETC_EIP160Block: big.NewInt(0), + ETC_MystiqueBlock: big.NewInt(0), + }, } // AvailableForks returns the set of defined fork names diff --git a/tests/state_classic_test.go b/tests/state_classic_test.go new file mode 100644 index 000000000000..7a8385751b53 --- /dev/null +++ b/tests/state_classic_test.go @@ -0,0 +1,85 @@ +package tests + +import ( + "fmt" + "path/filepath" + "testing" + + "github.com/ethereum/go-ethereum/core/vm" +) + +var ( + baseDirClassic = filepath.Join(".", "testdata-etc") + stateTestDirClassic = filepath.Join(baseDirClassic, "GeneralStateTests") +) + +func TestState_Classic(t *testing.T) { + 
t.Parallel() + + st := new(testMatcher) + // Long tests: + st.slow(`^stAttackTest/ContractCreationSpam`) + st.slow(`^stBadOpcode/badOpcodes`) + st.slow(`^stPreCompiledContracts/modexp`) + st.slow(`^stQuadraticComplexityTest/`) + st.slow(`^stStaticCall/static_Call50000`) + st.slow(`^stStaticCall/static_Return50000`) + st.slow(`^stSystemOperationsTest/CallRecursiveBomb`) + st.slow(`^stTransactionTest/Opcodes_TransactionInit`) + + // Very time consuming + st.skipLoad(`^stTimeConsuming/`) + st.skipLoad(`.*vmPerformance/loop.*`) + + // Uses 1GB RAM per tested fork + st.skipLoad(`^stStaticCall/static_Call1MB`) + + st.skipLoad(`configs/`) + + // Broken tests: + // Expected failures: + // st.fails(`^stRevertTest/RevertPrecompiledTouch(_storage)?\.json/Byzantium/0`, "bug in test") + // st.fails(`^stRevertTest/RevertPrecompiledTouch(_storage)?\.json/Byzantium/3`, "bug in test") + // st.fails(`^stRevertTest/RevertPrecompiledTouch(_storage)?\.json/Constantinople/0`, "bug in test") + // st.fails(`^stRevertTest/RevertPrecompiledTouch(_storage)?\.json/Constantinople/3`, "bug in test") + // st.fails(`^stRevertTest/RevertPrecompiledTouch(_storage)?\.json/ConstantinopleFix/0`, "bug in test") + // st.fails(`^stRevertTest/RevertPrecompiledTouch(_storage)?\.json/ConstantinopleFix/3`, "bug in test") + + // For Istanbul, older tests were moved into LegacyTests + for _, dir := range []string{ + stateTestDirClassic, + } { + st.walk(t, dir, func(t *testing.T, name string, test *StateTest) { + for _, subtest := range test.Subtests() { + subtest := subtest + key := fmt.Sprintf("%s/%d", subtest.Fork, subtest.Index) + + t.Run(key+"/trie", func(t *testing.T) { + withTrace(t, test.gasLimit(subtest), func(vmconfig vm.Config) error { + _, _, err := test.Run(subtest, vmconfig, false) + if err != nil && len(test.json.Post[subtest.Fork][subtest.Index].ExpectException) > 0 { + // Ignore expected errors (TODO MariusVanDerWijden check error string) + return nil + } + return st.checkFailure(t, err) + }) + 
}) + t.Run(key+"/snap", func(t *testing.T) { + withTrace(t, test.gasLimit(subtest), func(vmconfig vm.Config) error { + snaps, statedb, err := test.Run(subtest, vmconfig, true) + if snaps != nil && statedb != nil { + if _, err := snaps.Journal(statedb.IntermediateRoot(false)); err != nil { + return err + } + } + if err != nil && len(test.json.Post[subtest.Fork][subtest.Index].ExpectException) > 0 { + // Ignore expected errors (TODO MariusVanDerWijden check error string) + return nil + } + return st.checkFailure(t, err) + }) + }) + } + }) + } +} diff --git a/tests/testdata-etc b/tests/testdata-etc new file mode 160000 index 000000000000..8183e9eebd93 --- /dev/null +++ b/tests/testdata-etc @@ -0,0 +1 @@ +Subproject commit 8183e9eebd93912cebddbe3e88f66f836a8205dd