Commit 6ea52b8
Merge pull request #480 from c98tristan/feat/get-logs-block-hash
Support `eth_getLogs` query with `blockHash` params.
tungng98 authored Nov 24, 2024
2 parents 3b1cdbc + cbbdcff commit 6ea52b8
Showing 10 changed files with 197 additions and 83 deletions.
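In practice, the change lets a client scope `eth_getLogs` to a single block identified by hash instead of a `fromBlock`/`toBlock` range. The Go sketch below only illustrates the new request shape against a node's HTTP-RPC endpoint; the endpoint URL, block hash, and contract address are placeholders, not values taken from this commit.

package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
)

func main() {
	// eth_getLogs scoped to one block by hash instead of a from/to range.
	// The blockHash and address below are placeholder values.
	body := []byte(`{
		"jsonrpc": "2.0",
		"id": 1,
		"method": "eth_getLogs",
		"params": [{
			"blockHash": "0x1111111111111111111111111111111111111111111111111111111111111111",
			"address": "0x0000000000000000000000000000000000000000"
		}]
	}`)

	// Assumes a node serving JSON-RPC over HTTP on localhost:8545.
	resp, err := http.Post("http://localhost:8545", "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	out, _ := io.ReadAll(resp.Body)
	// Prints the logs emitted in that one block, or an error if blockHash
	// is combined with fromBlock/toBlock in the same request.
	fmt.Println(string(out))
}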
38 changes: 24 additions & 14 deletions accounts/abi/bind/backends/simulated.go
@@ -20,8 +20,6 @@ import (
"context"
"errors"
"fmt"
"github.com/tomochain/tomochain/consensus"
"github.com/tomochain/tomochain/core/rawdb"
"math/big"
"sync"
"time"
@@ -30,9 +28,11 @@ import (
"github.com/tomochain/tomochain/accounts/abi/bind"
"github.com/tomochain/tomochain/common"
"github.com/tomochain/tomochain/common/math"
"github.com/tomochain/tomochain/consensus"
"github.com/tomochain/tomochain/consensus/ethash"
"github.com/tomochain/tomochain/core"
"github.com/tomochain/tomochain/core/bloombits"
"github.com/tomochain/tomochain/core/rawdb"
"github.com/tomochain/tomochain/core/state"
"github.com/tomochain/tomochain/core/types"
"github.com/tomochain/tomochain/core/vm"
@@ -202,7 +202,7 @@ func (b *SimulatedBackend) CallContract(ctx context.Context, call tomochain.Call
return rval, err
}

//FIXME: please use copyState for this function
// FIXME: please use copyState for this function
// CallContractWithState executes a contract call at the given state.
func (b *SimulatedBackend) CallContractWithState(call tomochain.CallMsg, chain consensus.ChainContext, statedb *state.StateDB) ([]byte, error) {
// Ensure message is initialized properly.
@@ -285,7 +285,6 @@ func (b *SimulatedBackend) EstimateGas(ctx context.Context, call tomochain.CallM

snapshot := b.pendingState.Snapshot()
_, _, failed, err := b.callContract(ctx, call, b.pendingBlock, b.pendingState)
fmt.Println("EstimateGas",err,failed)
b.pendingState.RevertToSnapshot(snapshot)

if err != nil || failed {
@@ -377,17 +376,24 @@ func (b *SimulatedBackend) SendTransaction(ctx context.Context, tx *types.Transa
//
// TODO(karalabe): Deprecate when the subscription one can return past data too.
func (b *SimulatedBackend) FilterLogs(ctx context.Context, query tomochain.FilterQuery) ([]types.Log, error) {
// Initialize unset filter boundaried to run from genesis to chain head
from := int64(0)
if query.FromBlock != nil {
from = query.FromBlock.Int64()
}
to := int64(-1)
if query.ToBlock != nil {
to = query.ToBlock.Int64()
var filter *filters.Filter
if query.BlockHash != nil {
// Block filter requested, construct a single-shot filter
filter = filters.NewBlockFilter(&filterBackend{b.database, b.blockchain}, *query.BlockHash, query.Addresses, query.Topics)
} else {
// Initialize unset filter boundaried to run from genesis to chain head
from := int64(0)
if query.FromBlock != nil {
from = query.FromBlock.Int64()
}
to := int64(-1)
if query.ToBlock != nil {
to = query.ToBlock.Int64()
}
// Construct the range filter
filter = filters.NewRangeFilter(&filterBackend{b.database, b.blockchain}, from, to, query.Addresses, query.Topics)
}
// Construct and execute the filter
filter := filters.New(&filterBackend{b.database, b.blockchain}, from, to, query.Addresses, query.Topics)
// Run the filter and return all the logs

logs, err := filter.Logs(ctx)
if err != nil {
@@ -484,6 +490,10 @@ func (fb *filterBackend) HeaderByNumber(ctx context.Context, block rpc.BlockNumb
return fb.bc.GetHeaderByNumber(uint64(block.Int64())), nil
}

func (fb *filterBackend) HeaderByHash(ctx context.Context, hash common.Hash) (*types.Header, error) {
return fb.bc.GetHeaderByHash(hash), nil
}

func (fb *filterBackend) GetReceipts(ctx context.Context, hash common.Hash) (types.Receipts, error) {
return core.GetBlockReceipts(fb.db, hash, core.GetBlockNumber(fb.db, hash)), nil
}
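The simulated backend now mirrors the RPC behaviour: a query carrying `BlockHash` takes the single-shot block-filter path, anything else falls back to the range filter. The sketch below shows how a caller might exercise that path; the `logFilterer` interface and `logsInBlock` helper are illustrative names, not part of the commit, and the root-package import path is assumed to be github.com/tomochain/tomochain.

package filterexample

import (
	"context"

	tomochain "github.com/tomochain/tomochain"
	"github.com/tomochain/tomochain/common"
	"github.com/tomochain/tomochain/core/types"
)

// logFilterer is the minimal surface used here; *backends.SimulatedBackend
// satisfies it via its FilterLogs method.
type logFilterer interface {
	FilterLogs(ctx context.Context, query tomochain.FilterQuery) ([]types.Log, error)
}

// logsInBlock returns the logs emitted by addr in the block with the given hash.
// Setting BlockHash (and leaving FromBlock/ToBlock nil) selects the block filter.
func logsInBlock(ctx context.Context, b logFilterer, hash common.Hash, addr common.Address) ([]types.Log, error) {
	return b.FilterLogs(ctx, tomochain.FilterQuery{
		BlockHash: &hash,
		Addresses: []common.Address{addr},
	})
}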
4 changes: 4 additions & 0 deletions eth/api_backend.go
@@ -88,6 +88,10 @@ func (b *EthApiBackend) HeaderByNumber(ctx context.Context, blockNr rpc.BlockNum
return b.eth.blockchain.GetHeaderByNumber(uint64(blockNr)), nil
}

func (b *EthApiBackend) HeaderByHash(ctx context.Context, hash common.Hash) (*types.Header, error) {
return b.eth.blockchain.GetHeaderByHash(hash), nil
}

func (b *EthApiBackend) BlockByNumber(ctx context.Context, blockNr rpc.BlockNumber) (*types.Block, error) {
// Pending block is only known by the miner
if blockNr == rpc.PendingBlockNumber {
81 changes: 50 additions & 31 deletions eth/filters/api.go
@@ -268,14 +268,8 @@ func (api *PublicFilterAPI) Logs(ctx context.Context, crit FilterCriteria) (*rpc
}

// FilterCriteria represents a request to create a new filter.
//
// TODO(karalabe): Kill this in favor of ethereum.FilterQuery.
type FilterCriteria struct {
FromBlock *big.Int
ToBlock *big.Int
Addresses []common.Address
Topics [][]common.Hash
}
// Same as ethereum.FilterQuery but with UnmarshalJSON() method.
type FilterCriteria ethereum.FilterQuery

// NewFilter creates a new filter and returns the filter id. It can be
// used to retrieve logs when the state changes. This method cannot be
@@ -326,15 +320,23 @@ func (api *PublicFilterAPI) NewFilter(crit FilterCriteria) (rpc.ID, error) {
//
// https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_getlogs
func (api *PublicFilterAPI) GetLogs(ctx context.Context, crit FilterCriteria) ([]*types.Log, error) {
// Convert the RPC block numbers into internal representations
if crit.FromBlock == nil {
crit.FromBlock = big.NewInt(rpc.LatestBlockNumber.Int64())
}
if crit.ToBlock == nil {
crit.ToBlock = big.NewInt(rpc.LatestBlockNumber.Int64())
var filter *Filter
if crit.BlockHash != nil {
// Block filter requested, construct a single-shot filter
filter = NewBlockFilter(api.backend, *crit.BlockHash, crit.Addresses, crit.Topics)
} else {
// Convert the RPC block numbers into internal representations
begin := rpc.LatestBlockNumber.Int64()
if crit.FromBlock != nil {
begin = crit.FromBlock.Int64()
}
end := rpc.LatestBlockNumber.Int64()
if crit.ToBlock != nil {
end = crit.ToBlock.Int64()
}
// Construct the range filter
filter = NewRangeFilter(api.backend, begin, end, crit.Addresses, crit.Topics)
}
// Create and run the filter to get all the logs
filter := New(api.backend, crit.FromBlock.Int64(), crit.ToBlock.Int64(), crit.Addresses, crit.Topics)

logs, err := filter.Logs(ctx)
if err != nil {
@@ -373,16 +375,24 @@ func (api *PublicFilterAPI) GetFilterLogs(ctx context.Context, id rpc.ID) ([]*ty
return nil, fmt.Errorf("filter not found")
}

begin := rpc.LatestBlockNumber.Int64()
if f.crit.FromBlock != nil {
begin = f.crit.FromBlock.Int64()
}
end := rpc.LatestBlockNumber.Int64()
if f.crit.ToBlock != nil {
end = f.crit.ToBlock.Int64()
var filter *Filter
if f.crit.BlockHash != nil {
// Block filter requested, construct a single-shot filter
filter = NewBlockFilter(api.backend, *f.crit.BlockHash, f.crit.Addresses, f.crit.Topics)
} else {
// Convert the RPC block numbers into internal representations
begin := rpc.LatestBlockNumber.Int64()
if f.crit.FromBlock != nil {
begin = f.crit.FromBlock.Int64()
}
end := rpc.LatestBlockNumber.Int64()
if f.crit.ToBlock != nil {
end = f.crit.ToBlock.Int64()
}
// Construct the range filter
filter = NewRangeFilter(api.backend, begin, end, f.crit.Addresses, f.crit.Topics)
}
// Create and run the filter to get all the logs
filter := New(api.backend, begin, end, f.crit.Addresses, f.crit.Topics)
// Run the filter and return all the logs

logs, err := filter.Logs(ctx)
if err != nil {
@@ -446,7 +456,8 @@ func returnLogs(logs []*types.Log) []*types.Log {
// UnmarshalJSON sets *args fields with given data.
func (args *FilterCriteria) UnmarshalJSON(data []byte) error {
type input struct {
From *rpc.BlockNumber `json:"fromBlock"`
BlockHash *common.Hash `json:"blockHash"`
FromBlock *rpc.BlockNumber `json:"fromBlock"`
ToBlock *rpc.BlockNumber `json:"toBlock"`
Addresses interface{} `json:"address"`
Topics []interface{} `json:"topics"`
@@ -457,12 +468,20 @@ func (args *FilterCriteria) UnmarshalJSON(data []byte) error {
return err
}

if raw.From != nil {
args.FromBlock = big.NewInt(raw.From.Int64())
}
if raw.BlockHash != nil {
if raw.FromBlock != nil || raw.ToBlock != nil {
// BlockHash is mutually exclusive with FromBlock/ToBlock criteria
return fmt.Errorf("cannot specify both BlockHash and FromBlock/ToBlock, choose one or the other")
}
args.BlockHash = raw.BlockHash
} else {
if raw.FromBlock != nil {
args.FromBlock = big.NewInt(raw.FromBlock.Int64())
}

if raw.ToBlock != nil {
args.ToBlock = big.NewInt(raw.ToBlock.Int64())
if raw.ToBlock != nil {
args.ToBlock = big.NewInt(raw.ToBlock.Int64())
}
}

args.Addresses = []common.Address{}
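With the stricter `UnmarshalJSON`, a request may carry either `blockHash` or a `fromBlock`/`toBlock` range, never both. A minimal sketch of how that surfaces to a caller of the fork's eth/filters package follows; the hash literal is a placeholder, and the exact wording of the returned error is taken from the diff above.

package filterexample

import (
	"encoding/json"
	"fmt"

	"github.com/tomochain/tomochain/eth/filters"
)

func demoUnmarshal() {
	// Accepted: blockHash on its own selects exactly one block.
	var byHash filters.FilterCriteria
	ok := json.Unmarshal([]byte(`{"blockHash":"0x2222222222222222222222222222222222222222222222222222222222222222"}`), &byHash)
	fmt.Println("blockHash only:", ok) // <nil>

	// Rejected: blockHash combined with a block range.
	var mixed filters.FilterCriteria
	bad := json.Unmarshal([]byte(`{"blockHash":"0x2222222222222222222222222222222222222222222222222222222222222222","fromBlock":"0x1"}`), &mixed)
	fmt.Println("blockHash + fromBlock:", bad) // cannot specify both BlockHash and FromBlock/ToBlock, choose one or the other
}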
2 changes: 1 addition & 1 deletion eth/filters/api_test.go
@@ -56,7 +56,7 @@ func TestUnmarshalJSONNewFilterArgs(t *testing.T) {

// from, to block number
var test1 FilterCriteria
vector := fmt.Sprintf(`{"fromBlock":"0x%x","toBlock":"0x%x"}`, fromBlock, toBlock)
vector := fmt.Sprintf(`{"fromBlock":"%v","toBlock":"%v"}`, fromBlock, toBlock)
if err := json.Unmarshal([]byte(vector), &test1); err != nil {
t.Fatal(err)
}
14 changes: 7 additions & 7 deletions eth/filters/bench_test.go
@@ -20,14 +20,14 @@ import (
"bytes"
"context"
"fmt"
"github.com/tomochain/tomochain/core/rawdb"
"testing"
"time"

"github.com/tomochain/tomochain/common"
"github.com/tomochain/tomochain/common/bitutil"
"github.com/tomochain/tomochain/core"
"github.com/tomochain/tomochain/core/bloombits"
"github.com/tomochain/tomochain/core/rawdb"
"github.com/tomochain/tomochain/core/types"
"github.com/tomochain/tomochain/ethdb"
"github.com/tomochain/tomochain/event"
@@ -68,7 +68,7 @@ func benchmarkBloomBits(b *testing.B, sectionSize uint64) {
benchDataDir := node.DefaultDataDir() + "/geth/chaindata"
fmt.Println("Running bloombits benchmark section size:", sectionSize)

db, err := rawdb.NewLevelDBDatabase(benchDataDir, 128, 1024,"")
db, err := rawdb.NewLevelDBDatabase(benchDataDir, 128, 1024, "")
if err != nil {
b.Fatalf("error opening database at %v: %v", benchDataDir, err)
}
@@ -130,13 +130,13 @@ func benchmarkBloomBits(b *testing.B, sectionSize uint64) {
for i := 0; i < benchFilterCnt; i++ {
if i%20 == 0 {
db.Close()
db, _ = rawdb.NewLevelDBDatabase(benchDataDir, 128, 1024,"")
db, _ = rawdb.NewLevelDBDatabase(benchDataDir, 128, 1024, "")
backend = &testBackend{mux, db, cnt, new(event.Feed), new(event.Feed), new(event.Feed), new(event.Feed)}
}
var addr common.Address
addr[0] = byte(i)
addr[1] = byte(i / 256)
filter := New(backend, 0, int64(cnt*sectionSize-1), []common.Address{addr}, nil)
filter := NewRangeFilter(backend, 0, int64(cnt*sectionSize-1), []common.Address{addr}, nil)
if _, err := filter.Logs(context.Background()); err != nil {
b.Error("filter.Find error:", err)
}
@@ -148,7 +148,7 @@ func benchmarkBloomBits(b *testing.B, sectionSize uint64) {
}

func forEachKey(db ethdb.Database, startPrefix, endPrefix []byte, fn func(key []byte)) {
it := db.NewIterator(startPrefix,nil)
it := db.NewIterator(startPrefix, nil)
for it.Next() {
key := it.Key()
cmpLen := len(key)
@@ -176,7 +176,7 @@ func clearBloomBits(db ethdb.Database) {
func BenchmarkNoBloomBits(b *testing.B) {
benchDataDir := node.DefaultDataDir() + "/geth/chaindata"
fmt.Println("Running benchmark without bloombits")
db, err := rawdb.NewLevelDBDatabase(benchDataDir, 128, 1024,"")
db, err := rawdb.NewLevelDBDatabase(benchDataDir, 128, 1024, "")
if err != nil {
b.Fatalf("error opening database at %v: %v", benchDataDir, err)
}
@@ -192,7 +192,7 @@ func BenchmarkNoBloomBits(b *testing.B) {
start := time.Now()
mux := new(event.TypeMux)
backend := &testBackend{mux, db, 0, new(event.Feed), new(event.Feed), new(event.Feed), new(event.Feed)}
filter := New(backend, 0, int64(headNum), []common.Address{{}}, nil)
filter := NewRangeFilter(backend, 0, int64(headNum), []common.Address{{}}, nil)
filter.Logs(context.Background())
d := time.Since(start)
fmt.Println("Finished running filter benchmarks")
(Diffs for the remaining changed files are not shown.)
