[WIP] Fix eip 2464 #573

Open · wants to merge 16 commits into base: dev-upgrade

Changes from all commits

14 changes: 0 additions & 14 deletions .github/workflows/ci.yml
@@ -118,20 +118,6 @@ jobs:
cd cicd/devnet/terraform
terraform init ${{ env.tf_init_cli_options }}
terraform apply -var "docker_tag=dev-upgrade-${git_hash}" ${{ env.tf_apply_cli_options }}
sleep 5
source .env
for ((i=$us_east_2_start;i<$us_east_2_end;i++)); do
echo "Force deploy xdc-$i"
aws ecs update-service --region us-east-2 --cluster devnet-xdcnode-cluster --service ecs-service-xdc$i --force-new-deployment --no-cli-pager | head -n 10;
done
for ((i=$eu_west_1_start;i<$eu_west_1_end;i++)); do
echo "Force deploy xdc-$i"
aws ecs update-service --region eu-west-1 --cluster devnet-xdcnode-cluster --service ecs-service-xdc$i --force-new-deployment --no-cli-pager | head -n 10;
done
for ((i=$ap_southeast_2_start;i<$ap_southeast_2_end;i++)); do
echo "Force deploy xdc-$i"
aws ecs update-service --region ap-southeast-2 --cluster devnet-xdcnode-cluster --service ecs-service-xdc$i --force-new-deployment --no-cli-pager | head -n 10;
done

rpcnode_terraform_apply:
runs-on: ubuntu-latest
2 changes: 1 addition & 1 deletion cicd/devnet/terraform/.env
@@ -10,4 +10,4 @@ eu_west_1_end=72

# Sydney
ap_southeast_2_start=73
ap_southeast_2_end=108
ap_southeast_2_end=73
12 changes: 0 additions & 12 deletions cicd/terraform/.env
@@ -1,13 +1 @@
log_level=3

# Ohio
us_east_2_start=0
us_east_2_end=36

# Ireland
eu_west_1_start=37
eu_west_1_end=72

# Sydney
ap_southeast_2_start=73
ap_southeast_2_end=108
11 changes: 5 additions & 6 deletions common/constants.go
@@ -36,7 +36,7 @@ var TIP2019Block = big.NewInt(1)
var TIPSigning = big.NewInt(3000000)
var TIPRandomize = big.NewInt(3464000)

var TIPV2SwitchBlock = big.NewInt(99999999900)
var TIPV2SwitchBlock = big.NewInt(78678000) // Target 21st Aug 2024

var TIPIncreaseMasternodes = big.NewInt(5000000) // Upgrade MN Count at Block.
var TIPNoHalvingMNReward = big.NewInt(38383838) // hardfork no halving masternodes reward
@@ -45,10 +45,9 @@ var TIPXDCX = big.NewInt(38383838)
var TIPXDCXLending = big.NewInt(38383838)
var TIPXDCXCancellationFee = big.NewInt(38383838)
var TIPXDCXCancellationFeeTestnet = big.NewInt(38383838)
var TIPXDCXMinerDisable = big.NewInt(88999999900)
var TIPXDCXReceiverDisable = big.NewInt(99999999999)
var TIPXDCXMinerDisable = big.NewInt(78678000) // Target 21st Aug 2024
var TIPXDCXReceiverDisable = big.NewInt(78678900) // Target 21st Aug 2024, safer to release after disable miner
var Eip1559Block = big.NewInt(9999999999)
var TIPXDCXDISABLE = big.NewInt(99999999900)
var BerlinBlock = big.NewInt(76321000) // Target 19th June 2024
var LondonBlock = big.NewInt(76321000) // Target 19th June 2024
var MergeBlock = big.NewInt(76321000) // Target 19th June 2024
@@ -85,8 +84,8 @@ var BaseTopUp = big.NewInt(100)
var BaseRecall = big.NewInt(100)
var TIPTRC21Fee = big.NewInt(38383838)
var TIPTRC21FeeTestnet = big.NewInt(38383838)
var BlockNumberGas50x = big.NewInt(99999999900)
var LimitTimeFinality = uint64(30) // limit in 30 block
var BlockNumberGas50x = big.NewInt(78678000) // Target 21st Aug 2024
var LimitTimeFinality = uint64(30) // limit in 30 block

var IgnoreSignerCheckBlockArray = map[uint64]bool{
uint64(1032300): true,
1 change: 0 additions & 1 deletion consensus/XDPoS/engines/engine_v1/engine.go
@@ -154,7 +154,6 @@ func (x *XDPoS_v1) verifyHeaderWithCache(chain consensus.ChainReader, header *ty
// looking those up from the database. This is useful for concurrently verifying
// a batch of new headers.
func (x *XDPoS_v1) verifyHeader(chain consensus.ChainReader, header *types.Header, parents []*types.Header, fullVerify bool) error {
fullVerify = false
// If we're running a engine faking, accept any block as valid
if x.config.SkipV1Validation {
return nil
5 changes: 4 additions & 1 deletion consensus/XDPoS/engines/engine_v2/mining.go
@@ -41,7 +41,10 @@ func (x *XDPoS_v2) yourturn(chain consensus.ChainReader, round types.Round, pare
}

for i, s := range masterNodes {
log.Debug("[yourturn] Masternode:", "index", i, "address", s.String(), "parentBlockNum", parent.Number)
// temp remove spam log
_, _ = i, s
// log.Debug("[yourturn] Masternode:", "index", i,"address", s.String(), "parentBlockNum", parent.Number)

}

curIndex := utils.Position(masterNodes, signer)
3 changes: 2 additions & 1 deletion consensus/XDPoS/engines/engine_v2/snapshot.go
@@ -81,7 +81,8 @@ func (x *XDPoS_v2) getSnapshot(chain consensus.ChainReader, number uint64, isGap
}

gapBlockHash := chain.GetHeaderByNumber(gapBlockNum).Hash()
log.Debug("get snapshot from gap block", "number", gapBlockNum, "hash", gapBlockHash.Hex())
// temp remove spam log
// log.Debug("get snapshot from gap block", "number", gapBlockNum, "hash", gapBlockHash.Hex())

// If an in-memory SnapshotV2 was found, use that
if s, ok := x.snapshots.Get(gapBlockHash); ok {
26 changes: 22 additions & 4 deletions eth/fetcher/block_fetcher.go
@@ -94,7 +94,7 @@ type chainHeightFn func() uint64
type chainInsertFn func(types.Blocks) (int, error)

// blockInsertFn is a callback type to insert a batch of blocks into the local chain.
type blockInsertFn func(types.Block) (error)
type blockInsertFn func(types.Block) error

type blockPrepareFn func(block *types.Block) error

@@ -383,6 +383,8 @@ func (f *BlockFetcher) loop() {
f.forgetBlock(hash)
continue
}

log.Info("[2464] [loop] before f.insert", "peer", op.origin, "block", op.block.Number(), "hash", op.block.Hash())
f.insert(op.origin, op.block)
}

@@ -680,6 +682,7 @@ func (f *BlockFetcher) enqueue(peer string, block *types.Block) {
}
// Ensure the peer isn't DOSing us
count := f.queues[peer] + 1
log.Info("[2464] [enqueue]", "peer", peer, "number", block.Number(), "hash", hash, "count", count)
if count > blockLimit {
log.Debug("Discarded propagated block, exceeded allowance", "peer", peer, "number", block.Number(), "hash", hash, "limit", blockLimit)
blockBroadcastDOSMeter.Mark(1)
@@ -700,6 +703,7 @@ func (f *BlockFetcher) enqueue(peer string, block *types.Block) {
block: block,
}
f.queues[peer] = count
log.Info("[2464] [enqueue] assign count", "peer", peer, "number", block.Number(), "hash", hash, "f.queues[peer]", f.queues[peer])
f.queued[hash] = op
f.knowns.Add(hash, true)
f.queue.Push(op, -int64(block.NumberU64()))
@@ -717,9 +721,15 @@ func (f *BlockFetcher) insert(peer string, block *types.Block) {
hash := block.Hash()

// Run the import on a new thread
log.Debug("Importing propagated block", "peer", peer, "number", block.Number(), "hash", hash)
log.Debug("Importing propagated block", "peer", peer, "number", block.Number(), "hash", hash) // for sure passed here
tstart := time.Now()
log.Info("[2464] [insert] start timer")
log.Info("[2464] [insert] blabla", "elapsed", common.PrettyDuration(time.Since(tstart)))
go func() {
defer func() { f.done <- hash }()
defer func() {
log.Info("[2464] [insert] finish, sending to f.done channel", "elapsed", common.PrettyDuration(time.Since(tstart)))
f.done <- hash
}() //this doesn't trigger!

// If the parent's unknown, abort insertion
parent := f.getBlock(block.ParentHash())
@@ -733,18 +743,21 @@ func (f *BlockFetcher) insert(peer string, block *types.Block) {
// Quickly validate the header and propagate the block if it passes
switch err {
case nil:
log.Info("[2464] [insert] err nil", "elapsed", common.PrettyDuration(time.Since(tstart)))
// All ok, quickly propagate to our peers
blockBroadcastOutTimer.UpdateSince(block.ReceivedAt)
if fastBroadCast {
go f.broadcastBlock(block, true)
}

case consensus.ErrFutureBlock:
log.Info("[2464] [insert] err ErrFutureBlock", "elapsed", common.PrettyDuration(time.Since(tstart)))
delay := time.Unix(block.Time().Int64(), 0).Sub(time.Now()) // nolint: gosimple
log.Info("Receive future block", "number", block.NumberU64(), "hash", block.Hash().Hex(), "delay", delay)
time.Sleep(delay)
goto again
case consensus.ErrNoValidatorSignature:
log.Info("[2464] [insert] err ErrNoValidatorSignature", "elapsed", common.PrettyDuration(time.Since(tstart)))
newBlock := block
var errM2 error
isM2 := false
@@ -771,13 +784,15 @@ func (f *BlockFetcher) insert(peer string, block *types.Block) {
fastBroadCast = false
goto again //TODO: doublecheck if goto again logic is required
default:
log.Info("[2464] [insert] err default", "elapsed", common.PrettyDuration(time.Since(tstart)))
// Something went very wrong, drop the peer
log.Warn("Propagated block verification failed", "peer", peer, "number", block.Number(), "hash", hash, "err", err)
f.dropPeer(peer)
return
}
// Run the actual import and log any issues
if err := f.insertBlock(*block); err != nil {
log.Info("[2464] [insert] f.insertBlock ERROR", "elapsed", common.PrettyDuration(time.Since(tstart)))
log.Warn("Propagated block import failed", "peer", peer, "number", block.Number(), "hash", hash, "err", err)
return
}
@@ -850,8 +865,11 @@ func (f *BlockFetcher) forgetHash(hash common.Hash) {
// forgetBlock removes all traces of a queued block from the fetcher's internal
// state.
func (f *BlockFetcher) forgetBlock(hash common.Hash) {
if insert := f.queued[hash]; insert != nil {
insert := f.queued[hash]
log.Info("[2464] [forgetBlock]", "insert", insert)
if insert != nil {
f.queues[insert.origin]--
log.Info("[2464] [forgetBlock]", "hash", hash, "peer", insert.origin, "count", f.queues[insert.origin])
if f.queues[insert.origin] == 0 {
delete(f.queues, insert.origin)
}
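Note on the block_fetcher.go changes above: the `[2464]` log lines instrument the propagated-block import path, and the `//this doesn't trigger!` comment indicates that the deferred send on `f.done` never fires, i.e. the import goroutine appears to block before returning. The sketch below isolates that pattern (a worker signalling completion from a `defer` through a channel) so the symptom is easier to reproduce on its own; the names `importBlock` and `done` are illustrative only, not code from this PR.

```go
// Minimal, self-contained sketch (not code from this PR) of the
// defer-into-done-channel pattern that the [2464] logs are tracing.
// If the work inside the goroutine blocks forever, the deferred send
// never happens and the consumer only ever sees the timeout branch.
package main

import (
	"fmt"
	"time"
)

func importBlock(hash string, done chan<- string) {
	go func() {
		// The deferred send only runs once the goroutine returns; a
		// blocked insert keeps "done" silent, matching the
		// "this doesn't trigger!" observation in the diff.
		defer func() { done <- hash }()

		time.Sleep(50 * time.Millisecond) // stand-in for header verify + chain insert
	}()
}

func main() {
	done := make(chan string)
	importBlock("0xabc", done)

	select {
	case h := <-done:
		fmt.Println("import finished for", h)
	case <-time.After(2 * time.Second):
		fmt.Println("import appears stuck: done was never signalled")
	}
}
```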
62 changes: 50 additions & 12 deletions eth/handler.go
@@ -246,7 +246,7 @@ func NewProtocolManager(config *params.ChainConfig, mode downloader.SyncMode, ne
return blockchain.CurrentBlock().NumberU64()
}

inserter := func(block types.Block) (error) {
inserter := func(block types.Block) error {
// If sync hasn't reached the checkpoint yet, deny importing weird blocks.
//
// Ideally we would also compare the head block's timestamp and similarly reject
@@ -925,7 +925,10 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
}

}
pm.txFetcher.Enqueue(p.id, txs, msg.Code == PooledTransactionsMsg)
tstart := time.Now()
pm.txpool.AddRemotes(txs)
log.Info("[2464] [handleMst] pm.txpool.AddRemotes", "elapsed", common.PrettyDuration(time.Since(tstart)))
// pm.txFetcher.Enqueue(p.id, txs, msg.Code == PooledTransactionsMsg)

case msg.Code == OrderTxMsg:
// Transactions arrived, make sure we have a valid and fresh chain to handle them
@@ -1003,7 +1006,8 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
if !exist {
go pm.bft.Vote(p.id, &vote)
} else {
log.Debug("Discarded vote, known vote", "vote hash", vote.Hash(), "voted block hash", vote.ProposedBlockInfo.Hash.Hex(), "number", vote.ProposedBlockInfo.Number, "round", vote.ProposedBlockInfo.Round)
//temp remove spam log
// log.Debug("Discarded vote, known vote", "vote hash", vote.Hash(), "voted block hash", vote.ProposedBlockInfo.Hash.Hex(), "number", vote.ProposedBlockInfo.Number, "round", vote.ProposedBlockInfo.Round)
}

case msg.Code == TimeoutMsg:
@@ -1128,14 +1132,37 @@ func (pm *ProtocolManager) BroadcastTransactions(txs types.Transactions, propaga
}
}
for peer, hashes := range annos {
if peer.version >= eth65 { //implement
if isEth65OrHigher(peer.version) {
peer.AsyncSendPooledTransactionHashes(hashes)
} else {
peer.AsyncSendTransactions(hashes)
}
}
}

// BroadcastTxs will propagate a batch of transactions to all peers which are not known to
// already have the given transaction.
func (pm *ProtocolManager) BroadcastTxs(txs types.Transactions) {
var txset = make(map[*peer]types.Transactions)

// Broadcast transactions to a batch of peers not knowing about it
for _, tx := range txs {
peers := pm.peers.PeersWithoutTx(tx.Hash())
for _, peer := range peers {
txset[peer] = append(txset[peer], tx)
}
log.Trace("Broadcast transaction", "hash", tx.Hash(), "recipients", len(peers))
}
// FIXME include this again: peers = peers[:int(math.Sqrt(float64(len(peers))))]
for peer, txs := range txset {
hashes := []common.Hash{}
for _, tx := range txs {
hashes = append(hashes, tx.Hash())
}
peer.AsyncSendTransactions(hashes)
}
}

// BroadcastVote will propagate a Vote to all peers which are not known to
// already have the given vote.
func (pm *ProtocolManager) BroadcastVote(vote *types.Vote) {
@@ -1225,17 +1252,28 @@ func (self *ProtocolManager) minedBroadcastLoop() {
}

func (pm *ProtocolManager) txBroadcastLoop() {
// for {
// select {
// case event := <-pm.txsCh:
// // For testing purpose only, disable propagation
// if pm.broadcastTxAnnouncesOnly {
// pm.BroadcastTransactions(event.Txs, false)
// continue
// }
// pm.BroadcastTransactions(event.Txs, true) // First propagate transactions to peers
// pm.BroadcastTransactions(event.Txs, false) // Only then announce to the rest

// // Err() channel will be closed when unsubscribing.
// case <-pm.txsSub.Err():
// return
// }
// }
for {
select {
case event := <-pm.txsCh:
// For testing purpose only, disable propagation
if pm.broadcastTxAnnouncesOnly {
pm.BroadcastTransactions(event.Txs, false)
continue
}
pm.BroadcastTransactions(event.Txs, true) // First propagate transactions to peers
pm.BroadcastTransactions(event.Txs, false) // Only then announce to the rest

tstart := time.Now()
pm.BroadcastTxs(event.Txs)
log.Info("[2464] [txBroadcastLoop] pm.BroadcastTxs", "elapsed", common.PrettyDuration(time.Since(tstart)))
// Err() channel will be closed when unsubscribing.
case <-pm.txsSub.Err():
return
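Net effect of the eth/handler.go changes above: incoming transactions are pushed straight into the pool with `pm.txpool.AddRemotes`, and the broadcast loop falls back to the reinstated `BroadcastTxs`, which always sends full transaction bodies, while the eth/65 (EIP-2464) announce-then-fetch path (`txFetcher.Enqueue` plus the two-phase `BroadcastTransactions` calls) stays commented out for debugging. For reference, the sketch below shows the split that the disabled path implements, full bodies to a small subset of peers and hash announcements to the rest; the types and method names are simplified stand-ins, not this repository's API.

```go
// Simplified sketch of the EIP-2464 broadcast split that the disabled
// txBroadcastLoop path implements. Types and method names are stand-ins,
// not the handler's real API.
package broadcast

import "math"

type Tx struct{ Hash string }

type Peer struct {
	Version int // negotiated protocol version, e.g. 63, 64, 65
}

func (p *Peer) SendTransactions(txs []Tx)         {} // full bodies (direct subset and pre-eth/65 peers)
func (p *Peer) SendTransactionHashes(hs []string) {} // announcements only (eth/65, EIP-2464)

// BroadcastSplit propagates full transactions to roughly sqrt(len(peers))
// peers and announces only the hashes to the remaining eth/65 peers.
func BroadcastSplit(peers []*Peer, txs []Tx) {
	direct := int(math.Sqrt(float64(len(peers))))

	hashes := make([]string, 0, len(txs))
	for _, tx := range txs {
		hashes = append(hashes, tx.Hash)
	}

	for i, p := range peers {
		switch {
		case i < direct:
			p.SendTransactions(txs) // direct propagation to a small subset
		case p.Version >= 65:
			p.SendTransactionHashes(hashes) // announce; peers fetch bodies on demand
		default:
			p.SendTransactions(txs) // older peers cannot handle announcements
		}
	}
}
```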
26 changes: 3 additions & 23 deletions eth/protocol.go
@@ -43,32 +43,13 @@ const (
// try to follow the exact comparison from go-ethereum as much as possible (eg. version >= 63 <> isEth63OrHigher(version))

func isEth63(version int) bool {
switch {
case version == 63:
return true
case version == 100:
return true
default:
return false
}
return version == xdpos2 || version == eth63
}
func isEth64(version int) bool {
switch {
case version == 64:
return true
default:
return false
}
return version == eth64
}
func isEth65(version int) bool {
switch {
case version == 65:
return true
case version == 101:
return true
default:
return false
}
return version == xdpos22 || version == eth65
}

func isEth63OrHigher(version int) bool {
@@ -86,7 +67,6 @@ func isEth65OrHigher(version int) bool {
// protocolName is the official short name of the protocol used during capability negotiation.
const protocolName = "eth"

// ProtocolVersions are the supported versions of the eth protocol (first is primary).
var ProtocolVersions = []uint{xdpos22, xdpos2, eth65, eth64, eth63}

// protocolLengths are the number of implemented message corresponding to different protocol versions.
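The eth/protocol.go changes above collapse the old switch statements into single comparisons against named version constants; the deleted cases show that 63/64/65 are the eth protocol levels and 100/101 are the XDPoS variants treated as eth/63- and eth/65-equivalent. The sketch below restates the resulting predicates in isolation. The constant values are inferred from the removed switch cases, and the body of `isEth65OrHigher` is not shown in this diff, so its reduction to `isEth65` here is an assumption based on `ProtocolVersions` listing no higher eth version.

```go
// Sketch of the simplified version predicates. Constant values are taken
// from the switch cases removed in this diff; isEth65OrHigher is assumed
// to reduce to isEth65 because ProtocolVersions lists no higher eth version.
package protocol

const (
	eth63   = 63
	eth64   = 64
	eth65   = 65
	xdpos2  = 100 // XDPoS variant treated as eth/63-equivalent
	xdpos22 = 101 // XDPoS variant treated as eth/65-equivalent
)

func isEth63(version int) bool { return version == xdpos2 || version == eth63 }

func isEth64(version int) bool { return version == eth64 }

func isEth65(version int) bool { return version == xdpos22 || version == eth65 }

// isEth65OrHigher gates the EIP-2464 announcement path in eth/handler.go:
// eth/65 and xdpos2.2 peers get transaction-hash announcements, older
// peers get full transaction bodies.
func isEth65OrHigher(version int) bool { return isEth65(version) }
```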
2 changes: 1 addition & 1 deletion internal/ethapi/api.go
@@ -1323,7 +1323,7 @@ func DoCall(ctx context.Context, b Backend, args CallArgs, blockNrOrHash rpc.Blo
// this makes sure resources are cleaned up.
defer cancel()

block, err := b.BlockByHash(ctx, header.Hash())
block, err := b.BlockByNumberOrHash(ctx, blockNrOrHash)
if err != nil {
return nil, 0, false, err, nil
}