diff --git a/.circleci/config.yml b/.circleci/config.yml
index 9c51bc48f06..7ad79354942 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -3,7 +3,7 @@ version: 2
 defaults: &defaults
   working_directory: /go/src/github.com/tendermint/tendermint
   docker:
-    - image: circleci/golang:1.12.0
+    - image: circleci/golang
   environment:
     GOBIN: /tmp/workspace/bin

@@ -14,6 +14,9 @@ docs_update_config: &docs_update_config
   environment:
     AWS_REGION: us-east-1

+release_management_docker: &release_management_docker
+  machine: true
+
 jobs:
   setup_dependencies:
     <<: *defaults
@@ -192,7 +195,7 @@ jobs:
           name: run localnet and exit on failure
           command: |
             set -x
-            docker run --rm -v "$PWD":/go/src/github.com/tendermint/tendermint -w /go/src/github.com/tendermint/tendermint golang:1.11.4 make build-linux
+            docker run --rm -v "$PWD":/go/src/github.com/tendermint/tendermint -w /go/src/github.com/tendermint/tendermint golang make build-linux
             make localnet-start &
             ./scripts/localnet-blocks-test.sh 40 5 10 localhost
@@ -256,6 +259,105 @@ jobs:
             echo "Website build started"
           fi

+  prepare_build:
+    <<: *defaults
+    steps:
+      - checkout
+      - run:
+          name: Get next release number
+          command: |
+            export LAST_TAG="`git describe --tags --abbrev=0 --match "${CIRCLE_BRANCH}.*"`"
+            echo "Last tag: ${LAST_TAG}"
+            if [ -z "${LAST_TAG}" ]; then
+              export LAST_TAG="${CIRCLE_BRANCH}"
+              echo "Last tag not found. Possibly fresh branch or feature branch. Setting ${LAST_TAG} as tag."
+            fi
+            export NEXT_TAG="`python -u scripts/release_management/bump-semver.py --version "${LAST_TAG}"`"
+            echo "Next tag: ${NEXT_TAG}"
+            echo "export CIRCLE_TAG=\"${NEXT_TAG}\"" > release-version.source
+      - run:
+          name: Build dependencies
+          command: |
+            make get_tools get_vendor_deps
+      - persist_to_workspace:
+          root: .
+          paths:
+            - "release-version.source"
+      - save_cache:
+          key: v1-release-deps-{{ .Branch }}-{{ .Revision }}
+          paths:
+            - "vendor"
+
+  build_artifacts:
+    <<: *defaults
+    parallelism: 4
+    steps:
+      - checkout
+      - restore_cache:
+          keys:
+            - v1-release-deps-{{ .Branch }}-{{ .Revision }}
+      - attach_workspace:
+          at: /tmp/workspace
+      - run:
+          name: Build artifact
+          command: |
+            # Setting CIRCLE_TAG because we do not tag the release ourselves.
+            source /tmp/workspace/release-version.source
+            if test ${CIRCLE_NODE_INDEX:-0} == 0 ;then export GOOS=linux GOARCH=amd64 && export OUTPUT=build/tendermint_${GOOS}_${GOARCH} && make build && python -u scripts/release_management/zip-file.py ;fi
+            if test ${CIRCLE_NODE_INDEX:-0} == 1 ;then export GOOS=darwin GOARCH=amd64 && export OUTPUT=build/tendermint_${GOOS}_${GOARCH} && make build && python -u scripts/release_management/zip-file.py ;fi
+            if test ${CIRCLE_NODE_INDEX:-0} == 2 ;then export GOOS=windows GOARCH=amd64 && export OUTPUT=build/tendermint_${GOOS}_${GOARCH} && make build && python -u scripts/release_management/zip-file.py ;fi
+            if test ${CIRCLE_NODE_INDEX:-0} == 3 ;then export GOOS=linux GOARCH=arm && export OUTPUT=build/tendermint_${GOOS}_${GOARCH} && make build && python -u scripts/release_management/zip-file.py ;fi
+      - persist_to_workspace:
+          root: build
+          paths:
+            - "*.zip"
+            - "tendermint_linux_amd64"
+
+  release_artifacts:
+    <<: *defaults
+    steps:
+      - checkout
+      - attach_workspace:
+          at: /tmp/workspace
+      - run:
+          name: Deploy to GitHub
+          command: |
+            # Setting CIRCLE_TAG because we do not tag the release ourselves.
+            source /tmp/workspace/release-version.source
+            echo "---"
+            ls -la /tmp/workspace/*.zip
+            echo "---"
+            python -u scripts/release_management/sha-files.py
+            echo "---"
+            cat /tmp/workspace/SHA256SUMS
+            echo "---"
+            export RELEASE_ID="`python -u scripts/release_management/github-draft.py`"
+            echo "Release ID: ${RELEASE_ID}"
+            # TODO: parallelize uploads
+            export GOOS=linux GOARCH=amd64 && python -u scripts/release_management/github-upload.py --id "${RELEASE_ID}"
+            export GOOS=darwin GOARCH=amd64 && python -u scripts/release_management/github-upload.py --id "${RELEASE_ID}"
+            export GOOS=windows GOARCH=amd64 && python -u scripts/release_management/github-upload.py --id "${RELEASE_ID}"
+            export GOOS=linux GOARCH=arm && python -u scripts/release_management/github-upload.py --id "${RELEASE_ID}"
+            python -u scripts/release_management/github-upload.py --file "/tmp/workspace/SHA256SUMS" --id "${RELEASE_ID}"
+            python -u scripts/release_management/github-publish.py --id "${RELEASE_ID}"
+
+  release_docker:
+    <<: *release_management_docker
+    steps:
+      - checkout
+      - attach_workspace:
+          at: /tmp/workspace
+      - run:
+          name: Deploy to Docker Hub
+          command: |
+            # Setting CIRCLE_TAG because we do not tag the release ourselves.
+            source /tmp/workspace/release-version.source
+            cp /tmp/workspace/tendermint_linux_amd64 DOCKER/tendermint
+            docker build --label="tendermint" --tag="tendermint/tendermint:${CIRCLE_TAG}" --tag="tendermint/tendermint:latest" "DOCKER"
+            docker login -u "${DOCKERHUB_USER}" --password-stdin <<< "${DOCKERHUB_PASS}"
+            docker push "tendermint/tendermint"
+            docker logout
+
 workflows:
   version: 2
   test-suite:
@@ -292,3 +394,25 @@ workflows:
       - upload_coverage:
          requires:
            - test_cover
+  release:
+    jobs:
+      - prepare_build
+      - build_artifacts:
+          requires:
+            - prepare_build
+      - release_artifacts:
+          requires:
+            - prepare_build
+            - build_artifacts
+          filters:
+            branches:
+              only:
+                - /v[0-9]+\.[0-9]+/
+      - release_docker:
+          requires:
+            - prepare_build
+            - build_artifacts
+          filters:
+            branches:
+              only:
+                - /v[0-9]+\.[0-9]+/
diff --git a/CHANGELOG.md b/CHANGELOG.md
index f0ba675ae43..057b2e7be0a 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,149 @@
 # Changelog

+## v0.31.4
+
+*April 12th, 2019*
+
+This release fixes a regression from v0.31.3 which used the peer's `SocketAddr` to add the peer to
+the address book. This swallowed the peer's self-reported port, which is important in case of reconnect.
+It brings back `NetAddress()` to `NodeInfo` and uses it instead of `SocketAddr` for adding peers.
+Additionally, it improves response time on the `/validators` and `/status` RPC endpoints.
+As a side effect, it makes these RPC endpoints more difficult to DoS and fixes a performance degradation in `ExecCommitBlock`.
+Also, it contains an [ADR](https://github.com/tendermint/tendermint/pull/3539) that proposes decoupling the
+responsibility for peer behaviour from the `p2p.Switch` (by @brapse).
+
+Special thanks to external contributors on this release:
+@brapse, @guagualvcha, @mdyring
+
+### IMPROVEMENTS:
+- [p2p] [\#3463](https://github.com/tendermint/tendermint/pull/3463) Do not log "Can't add peer's address to addrbook" error for a private peer
+- [p2p] [\#3547](https://github.com/tendermint/tendermint/pull/3547) Fix a couple of annoying typos (@mdyring)
+
+### BUG FIXES:
+
+- [docs] [\#3514](https://github.com/tendermint/tendermint/issues/3514) Fix block.Header.Time description (@melekes)
+- [p2p] [\#2716](https://github.com/tendermint/tendermint/issues/2716) Check if we're already connected to peer right before dialing it (@melekes)
+- [p2p] [\#3545](https://github.com/tendermint/tendermint/issues/3545) Add back `NetAddress()` to `NodeInfo` and use it instead of peer's `SocketAddr()` when adding a peer to the `PEXReactor` (potential fix for [\#3532](https://github.com/tendermint/tendermint/issues/3532))
+- [state] [\#3438](https://github.com/tendermint/tendermint/pull/3438)
+  Persist validators every 100000 blocks even if no changes to the set
+  occurred (@guagualvcha). This
+  1) Prevents a possible DoS attack using the `/validators` or `/status` RPC
+  endpoints. Previously, response time grew linearly with height if no
+  changes were made to the validator set.
+  2) Fixes a performance degradation in `ExecCommitBlock` where we call
+  `LoadValidators` for each `Evidence` in the block.
+
+## v0.31.3
+
+*April 1st, 2019*
+
+This release includes two security-sensitive fixes: it ensures generated private
+keys are valid, and it prevents certain DNS lookups that would cause the node to
+panic if the lookup failed.
+
+### BREAKING CHANGES:
+* Go API
+  - [crypto/secp256k1] [\#3439](https://github.com/tendermint/tendermint/issues/3439)
+    The `secp256k1.GenPrivKeySecp256k1` function has changed to guarantee that it returns a valid key, which means it
+    will return a different private key than in previous versions for the same secret.
+
+### BUG FIXES:
+
+- [crypto/secp256k1] [\#3439](https://github.com/tendermint/tendermint/issues/3439)
+  Ensure generated private keys are valid by randomly sampling until a valid key is found.
+  Previously, it was possible (though rare!) to generate keys that exceeded the curve order.
+  Such keys would lead to invalid signatures.
+- [p2p] [\#3522](https://github.com/tendermint/tendermint/issues/3522) Memoize
+  socket address in peer connections to avoid DNS lookups. Previously, failed
+  DNS lookups could cause the node to panic.
+
+## v0.31.2
+
+*March 30th, 2019*
+
+This release fixes a regression from v0.31.1 where Tendermint panics under
+mempool load for external ABCI apps.
+
+Special thanks to external contributors on this release:
+@guagualvcha
+
+### BREAKING CHANGES:
+
+* CLI/RPC/Config
+
+* Apps
+
+* Go API
+  - [libs/autofile] [\#3504](https://github.com/tendermint/tendermint/issues/3504) Remove unused code in autofile package.
+    Deleted functions: `Group.Search`, `Group.FindLast`, `GroupReader.ReadLine`, `GroupReader.PushLine`, `MakeSimpleSearchFunc` (@guagualvcha)
+
+* Blockchain Protocol
+
+* P2P Protocol
+
+### FEATURES:
+
+### IMPROVEMENTS:
+
+- [circle] [\#3497](https://github.com/tendermint/tendermint/issues/3497) Move release management to CircleCI
+
+### BUG FIXES:
+
+- [mempool] [\#3512](https://github.com/tendermint/tendermint/issues/3512) Fix panic from concurrent access to txsMap, a regression for external ABCI apps introduced in v0.31.1
+
+## v0.31.1
+
+*March 27th, 2019*
+
+This release contains a major improvement for the mempool that reduces the amount of sent data by about 30%
+(see some numbers below).
+It also fixes a memory leak in the mempool and adds TLS support to the RPC server by providing a certificate and key in the config.
+
+Special thanks to external contributors on this release:
+@brapse, @guagualvcha, @HaoyangLiu, @needkane, @TraceBundy
+
+### BREAKING CHANGES:
+
+* CLI/RPC/Config
+
+* Apps
+
+* Go API
+  - [crypto] [\#3426](https://github.com/tendermint/tendermint/pull/3426) Remove `Ripemd160` helper method (@needkane)
+  - [libs/common] [\#3429](https://github.com/tendermint/tendermint/pull/3429) Remove `RepeatTimer` (also `TimerMaker` and `Ticker` interface)
+  - [rpc/client] [\#3458](https://github.com/tendermint/tendermint/issues/3458) Include `NetworkClient` interface into `Client` interface
+  - [types] [\#3448](https://github.com/tendermint/tendermint/issues/3448) Remove method `PB2TM.ConsensusParams`
+
+* Blockchain Protocol
+
+* P2P Protocol
+
+### FEATURES:
+
+ - [rpc] [\#3419](https://github.com/tendermint/tendermint/issues/3419) Start HTTPS server if `rpc.tls_cert_file` and `rpc.tls_key_file` are provided in the config (@guagualvcha)
+
+### IMPROVEMENTS:
+
+- [docs] [\#3140](https://github.com/tendermint/tendermint/issues/3140) Formalize proposer election algorithm properties
+- [docs] [\#3482](https://github.com/tendermint/tendermint/issues/3482) Fix broken links (@brapse)
+- [mempool] [\#2778](https://github.com/tendermint/tendermint/issues/2778) No longer send txs back to the peers who sent them to you.
+Also, limit to 65536 active peers.
+This vastly improves the bandwidth consumption of nodes.
+For instance, for a 4-node localnet, in a test sending 250-byte txs for 120 sec
at 500 txs/sec (total of 15MB):
+  - total bytes received from 1st node:
+    - before: 42793967 (43MB)
+    - after: 30003256 (30MB)
+  - total bytes sent to 1st node:
+    - before: 30569339 (30MB)
+    - after: 19304964 (19MB)
+- [p2p] [\#3475](https://github.com/tendermint/tendermint/issues/3475) Simplify `GetSelectionWithBias` for addressbook (@guagualvcha)
+- [rpc/lib/client] [\#3430](https://github.com/tendermint/tendermint/issues/3430) Disable compression for HTTP client to prevent GZIP-bomb DoS attacks (@guagualvcha)
+
+### BUG FIXES:
+
+- [blockchain] [\#2699](https://github.com/tendermint/tendermint/issues/2699) Update the maxHeight when a peer is removed
+- [mempool] [\#3478](https://github.com/tendermint/tendermint/issues/3478) Fix memory leak related to `broadcastTxRoutine` (@HaoyangLiu)
+
 ## v0.31.0

 *March 16th, 2019*
diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md
index 37ae3a51061..bcb2af5391b 100644
--- a/CHANGELOG_PENDING.md
+++ b/CHANGELOG_PENDING.md
@@ -1,4 +1,4 @@
-## v0.32.0
+## v0.31.5

 **
diff --git a/DOCKER/Dockerfile b/DOCKER/Dockerfile
index 4a855f425cb..6a7f289f54c 100644
--- a/DOCKER/Dockerfile
+++ b/DOCKER/Dockerfile
@@ -1,5 +1,5 @@
-FROM alpine:3.7
-MAINTAINER Greg Szabo
+FROM alpine:3.9
+LABEL maintainer="hello@tendermint.com"

 # Tendermint will be looking for the genesis file in /tendermint/config/genesis.json
 # (unless you change `genesis_file` in config.toml). You can put your config.toml and
diff --git a/Makefile b/Makefile
index 79ae6aaba75..7c2ce1d9cf2 100644
--- a/Makefile
+++ b/Makefile
@@ -6,6 +6,7 @@ GOTOOLS = \
 	github.com/square/certstrap
 GOBIN?=${GOPATH}/bin
 PACKAGES=$(shell go list ./...)
+OUTPUT?=build/tendermint

 INCLUDE = -I=. -I=${GOPATH}/src -I=${GOPATH}/src/github.com/gogo/protobuf/protobuf
 BUILD_TAGS?='tendermint'
@@ -19,13 +20,13 @@ check: check_tools get_vendor_deps
 ### Build Tendermint

 build:
-	CGO_ENABLED=0 go build $(BUILD_FLAGS) -tags $(BUILD_TAGS) -o build/tendermint ./cmd/tendermint/
+	CGO_ENABLED=0 go build $(BUILD_FLAGS) -tags $(BUILD_TAGS) -o $(OUTPUT) ./cmd/tendermint/

 build_c:
-	CGO_ENABLED=1 go build $(BUILD_FLAGS) -tags "$(BUILD_TAGS) gcc" -o build/tendermint ./cmd/tendermint/
+	CGO_ENABLED=1 go build $(BUILD_FLAGS) -tags "$(BUILD_TAGS) gcc" -o $(OUTPUT) ./cmd/tendermint/

 build_race:
-	CGO_ENABLED=0 go build -race $(BUILD_FLAGS) -tags $(BUILD_TAGS) -o build/tendermint ./cmd/tendermint
+	CGO_ENABLED=0 go build -race $(BUILD_FLAGS) -tags $(BUILD_TAGS) -o $(OUTPUT) ./cmd/tendermint

 install:
 	CGO_ENABLED=0 go install $(BUILD_FLAGS) -tags $(BUILD_TAGS) ./cmd/tendermint
@@ -109,7 +110,7 @@ draw_deps:

 get_deps_bin_size:
 	@# Copy of build recipe with additional flags to perform binary size analysis
-	$(eval $(shell go build -work -a $(BUILD_FLAGS) -tags $(BUILD_TAGS) -o build/tendermint ./cmd/tendermint/ 2>&1))
+	$(eval $(shell go build -work -a $(BUILD_FLAGS) -tags $(BUILD_TAGS) -o $(OUTPUT) ./cmd/tendermint/ 2>&1))
 	@find $(WORK) -type f -name "*.a" | xargs -I{} du -hxs "{}" | sort -rh | sed -e s:${WORK}/::g > deps_bin_size.log
 	@echo "Results can be found here: $(CURDIR)/deps_bin_size.log"

@@ -261,7 +262,7 @@ check_dep:
 ### Docker image

 build-docker:
-	cp build/tendermint DOCKER/tendermint
+	cp $(OUTPUT) DOCKER/tendermint
 	docker build --label=tendermint --tag="tendermint/tendermint" DOCKER
 	rm -rf DOCKER/tendermint

diff --git a/abci/client/socket_client.go b/abci/client/socket_client.go
index b1ce9ff2190..f1c920921ae 100644
--- a/abci/client/socket_client.go
+++ b/abci/client/socket_client.go
@@ -26,16 +26,17 @@ var _ Client = (*socketClient)(nil)
 type socketClient struct {
 	cmn.BaseService

-	reqQueue   chan *ReqRes
-	flushTimer *cmn.ThrottleTimer
+	addr        string
 	mustConnect bool
+	conn        net.Conn
+
+	reqQueue   chan *ReqRes
+	flushTimer *cmn.ThrottleTimer

 	mtx     sync.Mutex
-	addr    string
-	conn    net.Conn
 	err     error
-	reqSent *list.List
-	resCb   func(*types.Request, *types.Response) // listens to all callbacks
+	reqSent *list.List                            // list of requests sent, waiting for response
+	resCb   func(*types.Request, *types.Response) // called on all requests, if set.

 }

@@ -86,6 +87,7 @@ func (cli *socketClient) OnStop() {
 	cli.mtx.Lock()
 	defer cli.mtx.Unlock()
 	if cli.conn != nil {
+		// does this really need a mutex?
 		cli.conn.Close()
 	}

@@ -207,12 +209,15 @@ func (cli *socketClient) didRecvResponse(res *types.Response) error {
 	reqres.Done()            // Release waiters
 	cli.reqSent.Remove(next) // Pop first item from linked list

-	// Notify reqRes listener if set
+	// Notify reqRes listener if set (request-specific callback).
+	// NOTE: it is possible this callback isn't set on the reqres object at
+	// this point, in which case it will be called later, when it is set.
+	// TODO: should we move this after the resCb call so the order is always consistent?
 	if cb := reqres.GetCallback(); cb != nil {
 		cb(res)
 	}

-	// Notify client listener if set
+	// Notify client listener if set (global callback).
 	if cli.resCb != nil {
 		cli.resCb(reqres.Request, res)
 	}
diff --git a/blockchain/pool.go b/blockchain/pool.go
index 2cb7dda96e2..c842c0d130e 100644
--- a/blockchain/pool.go
+++ b/blockchain/pool.go
@@ -69,7 +69,7 @@ type BlockPool struct {
 	height int64 // the lowest key in requesters.
 	// peers
 	peers         map[p2p.ID]*bpPeer
-	maxPeerHeight int64
+	maxPeerHeight int64 // the biggest reported height

 	// atomic
 	numPending int32 // number of requests pending assignment or block response
@@ -78,6 +78,8 @@ type BlockPool struct {
 	errorsCh   chan<- peerError
 }

+// NewBlockPool returns a new BlockPool with the height equal to start. Block
+// requests and errors will be sent to requestsCh and errorsCh accordingly.
 func NewBlockPool(start int64, requestsCh chan<- BlockRequest, errorsCh chan<- peerError) *BlockPool {
 	bp := &BlockPool{
 		peers: make(map[p2p.ID]*bpPeer),
@@ -93,15 +95,15 @@ func NewBlockPool(start int64, requestsCh chan<- BlockRequest, errorsCh chan<- p
 	return bp
 }

+// OnStart implements cmn.Service by spawning the requesters routine and
+// recording the pool's start time.
 func (pool *BlockPool) OnStart() error {
 	go pool.makeRequestersRoutine()
 	pool.startTime = time.Now()
 	return nil
 }

-func (pool *BlockPool) OnStop() {}
-
-// Run spawns requesters as needed.
+// spawns requesters as needed
 func (pool *BlockPool) makeRequestersRoutine() {
 	for {
 		if !pool.IsRunning() {
@@ -150,6 +152,8 @@ func (pool *BlockPool) removeTimedoutPeers() {
 	}
 }

+// GetStatus returns the pool's height, the number of pending requests, and
+// the number of requesters.
 func (pool *BlockPool) GetStatus() (height int64, numPending int32, lenRequesters int) {
 	pool.mtx.Lock()
 	defer pool.mtx.Unlock()
@@ -157,6 +161,7 @@ func (pool *BlockPool) GetStatus() (height int64, numPending int32, lenRequester
 	return pool.height, atomic.LoadInt32(&pool.numPending), len(pool.requesters)
 }

+// IsCaughtUp returns true if this node is caught up, false otherwise.
 // TODO: relax conditions, prevent abuse.
 func (pool *BlockPool) IsCaughtUp() bool {
 	pool.mtx.Lock()
@@ -170,8 +175,9 @@ func (pool *BlockPool) IsCaughtUp() bool {

 	// Some conditions to determine if we're caught up.
 	// Ensures we've either received a block or waited some amount of time,
-	// and that we're synced to the highest known height. Note we use maxPeerHeight - 1
-	// because to sync block H requires block H+1 to verify the LastCommit.
+	// and that we're synced to the highest known height.
+	// Note we use maxPeerHeight - 1 because to sync block H requires block H+1
+	// to verify the LastCommit.
 	receivedBlockOrTimedOut := pool.height > 0 || time.Since(pool.startTime) > 5*time.Second
 	ourChainIsLongestAmongPeers := pool.maxPeerHeight == 0 || pool.height >= (pool.maxPeerHeight-1)
 	isCaughtUp := receivedBlockOrTimedOut && ourChainIsLongestAmongPeers
@@ -260,14 +266,14 @@ func (pool *BlockPool) AddBlock(peerID p2p.ID, block *types.Block, blockSize int
 	}
 }

-// MaxPeerHeight returns the highest height reported by a peer.
+// MaxPeerHeight returns the highest reported height.
 func (pool *BlockPool) MaxPeerHeight() int64 {
 	pool.mtx.Lock()
 	defer pool.mtx.Unlock()
 	return pool.maxPeerHeight
 }

-// Sets the peer's alleged blockchain height.
+// SetPeerHeight sets the peer's alleged blockchain height.
 func (pool *BlockPool) SetPeerHeight(peerID p2p.ID, height int64) {
 	pool.mtx.Lock()
 	defer pool.mtx.Unlock()
@@ -286,6 +292,8 @@ func (pool *BlockPool) SetPeerHeight(peerID p2p.ID, height int64) {
 	}
 }

+// RemovePeer removes the peer with peerID from the pool. If there's no peer
+// with peerID, the function is a no-op.
 func (pool *BlockPool) RemovePeer(peerID p2p.ID) {
 	pool.mtx.Lock()
 	defer pool.mtx.Unlock()
@@ -299,10 +307,32 @@ func (pool *BlockPool) removePeer(peerID p2p.ID) {
 			requester.redo(peerID)
 		}
 	}
-	if p, exist := pool.peers[peerID]; exist && p.timeout != nil {
-		p.timeout.Stop()
+
+	peer, ok := pool.peers[peerID]
+	if ok {
+		if peer.timeout != nil {
+			peer.timeout.Stop()
+		}
+
+		delete(pool.peers, peerID)
+
+		// Find a new peer with the biggest height and update maxPeerHeight if the
+		// peer's height was the biggest.
+		if peer.height == pool.maxPeerHeight {
+			pool.updateMaxPeerHeight()
+		}
+	}
+}
+
+// If no peers are left, maxPeerHeight is set to 0.
+func (pool *BlockPool) updateMaxPeerHeight() {
+	var max int64
+	for _, peer := range pool.peers {
+		if peer.height > max {
+			max = peer.height
+		}
 	}
-	delete(pool.peers, peerID)
+	pool.maxPeerHeight = max
 }

 // Pick an available peer with at least the given minHeight.
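For reference, a minimal sketch of how a consumer might drive the `BlockPool` API documented above. The channel sizes, `startHeight`, and the consumer loop are illustrative assumptions, not part of this diff:

```go
requestsCh := make(chan BlockRequest, 1000) // pool emits block requests here
errorsCh := make(chan peerError, 1000)      // pool reports misbehaving peers here

pool := NewBlockPool(startHeight, requestsCh, errorsCh)
if err := pool.Start(); err != nil {
	panic(err)
}
defer pool.Stop()

go func() {
	for {
		select {
		case req := <-requestsCh:
			_ = req // ask peer req.PeerID for the block at req.Height
		case err := <-errorsCh:
			_ = err // stop or penalize the offending peer
		}
	}
}()
```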
diff --git a/blockchain/pool_test.go b/blockchain/pool_test.go
index 75a03f631c1..01d7dba2052 100644
--- a/blockchain/pool_test.go
+++ b/blockchain/pool_test.go
@@ -1,12 +1,15 @@
 package blockchain

 import (
+	"fmt"
 	"testing"
 	"time"

+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
 	cmn "github.com/tendermint/tendermint/libs/common"
 	"github.com/tendermint/tendermint/libs/log"
-
 	"github.com/tendermint/tendermint/p2p"
 	"github.com/tendermint/tendermint/types"
 )

@@ -39,7 +42,9 @@ func (p testPeer) runInputRoutine() {
 func (p testPeer) simulateInput(input inputData) {
 	block := &types.Block{Header: types.Header{Height: input.request.Height}}
 	input.pool.AddBlock(input.request.PeerID, block, 123)
-	input.t.Logf("Added block from peer %v (height: %v)", input.request.PeerID, input.request.Height)
+	// TODO: uncommenting this creates a race which is detected by: https://github.com/golang/go/blob/2bd767b1022dd3254bcec469f0ee164024726486/src/testing/testing.go#L854-L856
+	// see: https://github.com/tendermint/tendermint/issues/3390#issue-418379890
+	// input.t.Logf("Added block from peer %v (height: %v)", input.request.PeerID, input.request.Height)
 }

 type testPeers map[p2p.ID]testPeer
@@ -66,7 +71,7 @@ func makePeers(numPeers int, minHeight, maxHeight int64) testPeers {
 	return peers
 }

-func TestBasic(t *testing.T) {
+func TestBlockPoolBasic(t *testing.T) {
 	start := int64(42)
 	peers := makePeers(10, start+1, 1000)
 	errorsCh := make(chan peerError, 1000)
@@ -122,7 +127,7 @@ func TestBasic(t *testing.T) {
 	}
 }

-func TestTimeout(t *testing.T) {
+func TestBlockPoolTimeout(t *testing.T) {
 	start := int64(42)
 	peers := makePeers(10, start+1, 1000)
 	errorsCh := make(chan peerError, 1000)
@@ -180,3 +185,40 @@ func TestTimeout(t *testing.T) {
 		}
 	}
 }
+
+func TestBlockPoolRemovePeer(t *testing.T) {
+	peers := make(testPeers, 10)
+	for i := 0; i < 10; i++ {
+		peerID := p2p.ID(fmt.Sprintf("%d", i+1))
+		height := int64(i + 1)
+		peers[peerID] = testPeer{peerID, height, make(chan inputData)}
+	}
+	requestsCh := make(chan BlockRequest)
+	errorsCh := make(chan peerError)
+
+	pool := NewBlockPool(1, requestsCh, errorsCh)
+	pool.SetLogger(log.TestingLogger())
+	err := pool.Start()
+	require.NoError(t, err)
+	defer pool.Stop()
+
+	// add peers
+	for peerID, peer := range peers {
+		pool.SetPeerHeight(peerID, peer.height)
+	}
+	assert.EqualValues(t, 10, pool.MaxPeerHeight())
+
+	// remove a non-existing peer
+	assert.NotPanics(t, func() { pool.RemovePeer(p2p.ID("Superman")) })
+
+	// remove the peer with the biggest height
+	pool.RemovePeer(p2p.ID("10"))
+	assert.EqualValues(t, 9, pool.MaxPeerHeight())
+
+	// remove all peers
+	for peerID := range peers {
+		pool.RemovePeer(peerID)
+	}
+
+	assert.EqualValues(t, 0, pool.MaxPeerHeight())
+}
diff --git a/config/config.go b/config/config.go
index d9628f38376..5ad5e6d59c5 100644
--- a/config/config.go
+++ b/config/config.go
@@ -339,6 +339,20 @@ type RPCConfig struct {
 	// global HTTP write timeout, which applies to all connections and endpoints.
 	// See https://github.com/tendermint/tendermint/issues/3435
 	TimeoutBroadcastTxCommit time.Duration `mapstructure:"timeout_broadcast_tx_commit"`
+
+	// The name of a file containing the certificate used to create the HTTPS server.
+	//
+	// If the certificate is signed by a certificate authority,
+	// the certFile should be the concatenation of the server's certificate, any intermediates,
+	// and the CA's certificate.
+	//
+	// NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create the HTTPS server. Otherwise, the HTTP server is run.
+	TLSCertFile string `mapstructure:"tls_cert_file"`
+
+	// The name of a file containing the matching private key used to create the HTTPS server.
+	//
+	// NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create the HTTPS server. Otherwise, the HTTP server is run.
+	TLSKeyFile string `mapstructure:"tls_key_file"`
 }

 // DefaultRPCConfig returns a default configuration for the RPC server
@@ -357,6 +371,9 @@ func DefaultRPCConfig() *RPCConfig {
 		MaxSubscriptionClients:    100,
 		MaxSubscriptionsPerClient: 5,
 		TimeoutBroadcastTxCommit:  10 * time.Second,
+
+		TLSCertFile: "",
+		TLSKeyFile:  "",
 	}
 }

@@ -395,6 +412,18 @@ func (cfg *RPCConfig) IsCorsEnabled() bool {
 	return len(cfg.CORSAllowedOrigins) != 0
 }

+func (cfg RPCConfig) KeyFile() string {
+	return rootify(filepath.Join(defaultConfigDir, cfg.TLSKeyFile), cfg.RootDir)
+}
+
+func (cfg RPCConfig) CertFile() string {
+	return rootify(filepath.Join(defaultConfigDir, cfg.TLSCertFile), cfg.RootDir)
+}
+
+func (cfg RPCConfig) IsTLSEnabled() bool {
+	return cfg.TLSCertFile != "" && cfg.TLSKeyFile != ""
+}
+
 //-----------------------------------------------------------------------------
 // P2PConfig

diff --git a/config/toml.go b/config/toml.go
index 331c3d0d7a3..63bda09e529 100644
--- a/config/toml.go
+++ b/config/toml.go
@@ -181,6 +181,17 @@ max_subscriptions_per_client = {{ .RPC.MaxSubscriptionsPerClient }}
 # See https://github.com/tendermint/tendermint/issues/3435
 timeout_broadcast_tx_commit = "{{ .RPC.TimeoutBroadcastTxCommit }}"

+# The name of a file containing the certificate used to create the HTTPS server.
+# If the certificate is signed by a certificate authority,
+# the certFile should be the concatenation of the server's certificate, any intermediates,
+# and the CA's certificate.
+# NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create the HTTPS server. Otherwise, the HTTP server is run.
+tls_cert_file = "{{ .RPC.TLSCertFile }}"
+
+# The name of a file containing the matching private key used to create the HTTPS server.
+# NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create the HTTPS server. Otherwise, the HTTP server is run.
+tls_key_file = "{{ .RPC.TLSKeyFile }}"
+
 ##### peer to peer configuration options #####
 [p2p]

diff --git a/consensus/replay.go b/consensus/replay.go
index c8ab8a33185..e47d4892a57 100644
--- a/consensus/replay.go
+++ b/consensus/replay.go
@@ -324,7 +324,7 @@ func (h *Handshaker) ReplayBlocks(
 		}

 		if res.ConsensusParams != nil {
-			state.ConsensusParams = types.PB2TM.ConsensusParams(res.ConsensusParams, state.ConsensusParams.Block.TimeIotaMs)
+			state.ConsensusParams = state.ConsensusParams.Update(res.ConsensusParams)
 		}
 		sm.SaveState(h.stateDB, state)
 	}
diff --git a/consensus/state_test.go b/consensus/state_test.go
index a4d01e5844a..fc1e3e949e5 100644
--- a/consensus/state_test.go
+++ b/consensus/state_test.go
@@ -14,7 +14,7 @@ import (
 	cmn "github.com/tendermint/tendermint/libs/common"
 	"github.com/tendermint/tendermint/libs/log"
 	tmpubsub "github.com/tendermint/tendermint/libs/pubsub"
-	p2pdummy "github.com/tendermint/tendermint/p2p/dummy"
+	p2pmock "github.com/tendermint/tendermint/p2p/mock"
 	"github.com/tendermint/tendermint/types"
 )

@@ -1548,7 +1548,7 @@ func TestStateHalt1(t *testing.T) {
 func TestStateOutputsBlockPartsStats(t *testing.T) {
 	// create dummy peer
 	cs, _ := randConsensusState(1)
-	peer := p2pdummy.NewPeer()
+	peer := p2pmock.NewPeer(nil)

 	// 1) new block part
 	parts := types.NewPartSetFromData(cmn.RandBytes(100), 10)
@@ -1591,7 +1591,7 @@ func TestStateOutputsBlockPartsStats(t *testing.T) {
 func TestStateOutputVoteStats(t *testing.T) {
 	cs, vss := randConsensusState(2)
 	// create dummy peer
-	peer := p2pdummy.NewPeer()
+	peer := p2pmock.NewPeer(nil)

 	vote := signVote(vss[1], types.PrecommitType, []byte("test"), types.PartSetHeader{})

diff --git a/crypto/doc.go b/crypto/doc.go
index 41b3f302159..95ae0af1812 100644
--- a/crypto/doc.go
+++ b/crypto/doc.go
@@ -37,9 +37,6 @@
 // sum := crypto.Sha256([]byte("This is Tendermint"))
 // fmt.Printf("%x\n", sum)

-// Ripemd160
-// sum := crypto.Ripemd160([]byte("This is consensus"))
-// fmt.Printf("%x\n", sum)
 package crypto

 // TODO: Add more docs in here
diff --git a/crypto/example_test.go b/crypto/example_test.go
index 904e1c6109c..f1d0013d483 100644
--- a/crypto/example_test.go
+++ b/crypto/example_test.go
@@ -26,10 +26,3 @@ func ExampleSha256() {
 	// Output:
 	// f91afb642f3d1c87c17eb01aae5cb65c242dfdbe7cf1066cc260f4ce5d33b94e
 }
-
-func ExampleRipemd160() {
-	sum := crypto.Ripemd160([]byte("This is Tendermint"))
-	fmt.Printf("%x\n", sum)
-	// Output:
-	// 051e22663e8f0fd2f2302f1210f954adff009005
-}
diff --git a/crypto/hash.go b/crypto/hash.go
index c1fb41f7a5d..e1d22523f27 100644
--- a/crypto/hash.go
+++ b/crypto/hash.go
@@ -2,8 +2,6 @@ package crypto

 import (
 	"crypto/sha256"
-
-	"golang.org/x/crypto/ripemd160"
 )

 func Sha256(bytes []byte) []byte {
@@ -11,9 +9,3 @@ func Sha256(bytes []byte) []byte {
 	hasher.Write(bytes)
 	return hasher.Sum(nil)
 }
-
-func Ripemd160(bytes []byte) []byte {
-	hasher := ripemd160.New()
-	hasher.Write(bytes)
-	return hasher.Sum(nil)
-}
diff --git a/crypto/secp256k1/secp256k1.go b/crypto/secp256k1/secp256k1.go
index 78857c45c5a..fc64a0b0ff0 100644
--- a/crypto/secp256k1/secp256k1.go
+++ b/crypto/secp256k1/secp256k1.go
@@ -6,6 +6,7 @@ import (
 	"crypto/subtle"
 	"fmt"
 	"io"
+	"math/big"

 	"golang.org/x/crypto/ripemd160"

@@ -65,32 +66,61 @@ func (privKey PrivKeySecp256k1) Equals(other crypto.PrivKey) bool {
 }

 // GenPrivKey generates a new ECDSA private key on curve secp256k1 private key.
-// It uses OS randomness in conjunction with the current global random seed
-// in tendermint/libs/common to generate the private key.
+// It uses OS randomness to generate the private key.
 func GenPrivKey() PrivKeySecp256k1 {
 	return genPrivKey(crypto.CReader())
 }

 // genPrivKey generates a new secp256k1 private key using the provided reader.
 func genPrivKey(rand io.Reader) PrivKeySecp256k1 {
-	privKeyBytes := [32]byte{}
-	_, err := io.ReadFull(rand, privKeyBytes[:])
-	if err != nil {
-		panic(err)
+	var privKeyBytes [32]byte
+	d := new(big.Int)
+	for {
+		privKeyBytes = [32]byte{}
+		_, err := io.ReadFull(rand, privKeyBytes[:])
+		if err != nil {
+			panic(err)
+		}
+
+		d.SetBytes(privKeyBytes[:])
+		// break if we found a valid point (i.e. > 0 and < N == curveOrder)
+		isValidFieldElement := 0 < d.Sign() && d.Cmp(secp256k1.S256().N) < 0
+		if isValidFieldElement {
+			break
+		}
 	}
-	// crypto.CRandBytes is guaranteed to be 32 bytes long, so it can be
-	// casted to PrivKeySecp256k1.
+
 	return PrivKeySecp256k1(privKeyBytes)
 }

+var one = new(big.Int).SetInt64(1)
+
 // GenPrivKeySecp256k1 hashes the secret with SHA2, and uses
 // that 32 byte output to create the private key.
+//
+// It makes sure the private key is a valid field element by setting:
+//
+// c = sha256(secret)
+// k = (c mod (n − 1)) + 1, where n = curve order.
+//
 // NOTE: secret should be the output of a KDF like bcrypt,
 // if it's derived from user input.
 func GenPrivKeySecp256k1(secret []byte) PrivKeySecp256k1 {
-	privKey32 := sha256.Sum256(secret)
-	// sha256.Sum256() is guaranteed to be 32 bytes long, so it can be
-	// casted to PrivKeySecp256k1.
+	secHash := sha256.Sum256(secret)
+	// to guarantee that we have a valid field element, we use the approach of:
+	// "Suite B Implementer’s Guide to FIPS 186-3", A.2.1
+	// https://apps.nsa.gov/iaarchive/library/ia-guidance/ia-solutions-for-classified/algorithm-guidance/suite-b-implementers-guide-to-fips-186-3-ecdsa.cfm
+	// see also https://github.com/golang/go/blob/0380c9ad38843d523d9c9804fe300cb7edd7cd3c/src/crypto/ecdsa/ecdsa.go#L89-L101
+	fe := new(big.Int).SetBytes(secHash[:])
+	n := new(big.Int).Sub(secp256k1.S256().N, one)
+	fe.Mod(fe, n)
+	fe.Add(fe, one)
+
+	feB := fe.Bytes()
+	var privKey32 [32]byte
+	// copy feB over to fixed 32 byte privKey32 and pad (if necessary)
+	copy(privKey32[32-len(feB):32], feB)
+
 	return PrivKeySecp256k1(privKey32)
 }
diff --git a/crypto/secp256k1/secp256k1_cgo_test.go b/crypto/secp256k1/secp256k1_cgo_test.go
new file mode 100644
index 00000000000..edb207b53d1
--- /dev/null
+++ b/crypto/secp256k1/secp256k1_cgo_test.go
@@ -0,0 +1,39 @@
+// +build libsecp256k1
+
+package secp256k1
+
+import (
+	"testing"
+
+	"github.com/magiconair/properties/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestPrivKeySecp256k1SignVerify(t *testing.T) {
+	msg := []byte("A.1.2 ECC Key Pair Generation by Testing Candidates")
+	priv := GenPrivKey()
+	tests := []struct {
+		name             string
+		privKey          PrivKeySecp256k1
+		wantSignErr      bool
+		wantVerifyPasses bool
+	}{
+		{name: "valid sign-verify round", privKey: priv, wantSignErr: false, wantVerifyPasses: true},
+		{name: "invalid private key", privKey: [32]byte{}, wantSignErr: true, wantVerifyPasses: false},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			got, err := tt.privKey.Sign(msg)
+			if tt.wantSignErr {
+				require.Error(t, err)
+				t.Logf("Got error: %s", err)
+				return
+			}
+			require.NoError(t, err)
+			require.NotNil(t, got)
+
+			pub := tt.privKey.PubKey()
+			assert.Equal(t, tt.wantVerifyPasses, pub.VerifyBytes(msg, got))
+		})
+	}
+}
diff --git a/crypto/secp256k1/secp256k1_internal_test.go b/crypto/secp256k1/secp256k1_internal_test.go
new file mode 100644
index 00000000000..305f12020d5
--- /dev/null
+++ b/crypto/secp256k1/secp256k1_internal_test.go
@@ -0,0 +1,45 @@
+package secp256k1
+
+import (
+	"bytes"
+	"math/big"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+
+	underlyingSecp256k1 "github.com/btcsuite/btcd/btcec"
+)
+
+func Test_genPrivKey(t *testing.T) {
+
+	empty := make([]byte, 32)
+	oneB := big.NewInt(1).Bytes()
+	onePadded := make([]byte, 32)
+	copy(onePadded[32-len(oneB):32], oneB)
+	t.Logf("one padded: %v, len=%v", onePadded, len(onePadded))
+
+	validOne := append(empty, onePadded...)
+	tests := []struct {
+		name        string
+		notSoRand   []byte
+		shouldPanic bool
+	}{
+		{"empty bytes (panics because 1st 32 bytes are zero and 0 is not a valid field element)", empty, true},
+		{"curve order: N", underlyingSecp256k1.S256().N.Bytes(), true},
+		{"valid because 0 < 1 < N", validOne, false},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if tt.shouldPanic {
+				require.Panics(t, func() {
+					genPrivKey(bytes.NewReader(tt.notSoRand))
+				})
+				return
+			}
+			got := genPrivKey(bytes.NewReader(tt.notSoRand))
+			fe := new(big.Int).SetBytes(got[:])
+			require.True(t, fe.Cmp(underlyingSecp256k1.S256().N) < 0)
+			require.True(t, fe.Sign() > 0)
+		})
+	}
+}
diff --git a/crypto/secp256k1/secp256k1_nocgo_test.go b/crypto/secp256k1/secp256k1_nocgo_test.go
index a06a0e3d195..17cb758151a 100644
--- a/crypto/secp256k1/secp256k1_nocgo_test.go
+++ b/crypto/secp256k1/secp256k1_nocgo_test.go
@@ -6,7 +6,6 @@ import (
 	"testing"

 	secp256k1 "github.com/btcsuite/btcd/btcec"
-
 	"github.com/stretchr/testify/require"
 )

diff --git a/crypto/secp256k1/secpk256k1_test.go b/crypto/secp256k1/secp256k1_test.go
similarity index 74%
rename from crypto/secp256k1/secpk256k1_test.go
rename to crypto/secp256k1/secp256k1_test.go
index 0f0b5adce9c..2488b539997 100644
--- a/crypto/secp256k1/secpk256k1_test.go
+++ b/crypto/secp256k1/secp256k1_test.go
@@ -2,6 +2,7 @@ package secp256k1_test

 import (
 	"encoding/hex"
+	"math/big"
 	"testing"

 	"github.com/btcsuite/btcutil/base58"
@@ -84,3 +85,28 @@ func TestSecp256k1LoadPrivkeyAndSerializeIsIdentity(t *testing.T) {
 		require.Equal(t, privKeyBytes[:], serializedBytes)
 	}
 }
+
+func TestGenPrivKeySecp256k1(t *testing.T) {
+	// curve order N
+	N := underlyingSecp256k1.S256().N
+	tests := []struct {
+		name   string
+		secret []byte
+	}{
+		{"empty secret", []byte{}},
+		{"some long secret", []byte("We live in a society exquisitely dependent on science and technology, in which hardly anyone knows anything about science and technology.")},
+		{"another seed used in cosmos tests #1", []byte{0}},
+		{"another seed used in cosmos tests #2", []byte("mySecret")},
+		{"another seed used in cosmos tests #3", []byte("")},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			gotPrivKey := secp256k1.GenPrivKeySecp256k1(tt.secret)
+			require.NotNil(t, gotPrivKey)
+			// interpret as a big.Int and make sure it is a valid field element:
+			fe := new(big.Int).SetBytes(gotPrivKey[:])
+			require.True(t, fe.Cmp(N) < 0)
+			require.True(t, fe.Sign() > 0)
+		})
+	}
+}
diff --git a/docs/architecture/adr-037-peer-behaviour.md b/docs/architecture/adr-037-peer-behaviour.md
new file mode 100644
index 00000000000..36b024482d4
--- /dev/null
+++ b/docs/architecture/adr-037-peer-behaviour.md
@@ -0,0 +1,145 @@
+# ADR 037: Peer Behaviour Interface
+
+## Changelog
+* 07-03-2019: Initial draft
+
+## Context
+
+The responsibility for signaling and acting upon peer behaviour lacks a single
+owning component and is heavily coupled with the
+network stack[1](#references). Reactors
+maintain a reference to the `p2p.Switch` which they use to call
+`switch.StopPeerForError(...)` when a peer misbehaves and
+`switch.MarkAsGood(...)` when a peer contributes in some meaningful way.
+While the switch handles `StopPeerForError` internally, the `MarkAsGood`
+method delegates to another component, `p2p.AddrBook`. This scheme of delegation
+across the Switch obscures the responsibility for handling peer behaviour
+and ties the reactors into a larger dependency graph when testing.
+
+## Decision
+
+Introduce a `PeerBehaviour` interface and concrete implementations which
+provide methods for reactors to signal peer behaviour without directly
+coupling to `p2p.Switch`. Introduce an `ErrPeer` type to provide
+concrete reasons for stopping peers.
+
+### Implementation Changes
+
+`PeerBehaviour` then becomes an interface for signaling peer errors as well
+as for marking peers as `good`.
+
+XXX: It might be better to pass `p2p.ID` instead of the whole peer, but as
+a first draft we maintain the underlying implementation as much as
+possible.
+
+```go
+type PeerBehaviour interface {
+    Errored(peer Peer, reason ErrPeer)
+    MarkPeerAsGood(peer Peer)
+}
+```
+
+Instead of signaling peers to stop with arbitrary reasons
+(`reason interface{}`), we introduce a concrete error type `ErrPeer`:
+
+```go
+type ErrPeer int
+
+const (
+    ErrPeerUnknown = iota
+    ErrPeerBadMessage
+    ErrPeerMessageOutOfOrder
+    ...
+)
+```
+
+As a first iteration we provide a concrete implementation which wraps
+the switch:
+
+```go
+type SwitchedPeerBehaviour struct {
+    sw *Switch
+}
+
+func (spb *SwitchedPeerBehaviour) Errored(peer Peer, reason ErrPeer) {
+    spb.sw.StopPeerForError(peer, reason)
+}
+
+func (spb *SwitchedPeerBehaviour) MarkPeerAsGood(peer Peer) {
+    spb.sw.MarkPeerAsGood(peer)
+}
+
+func NewSwitchedPeerBehaviour(sw *Switch) *SwitchedPeerBehaviour {
+    return &SwitchedPeerBehaviour{
+        sw: sw,
+    }
+}
+```
+
+Reactors, which are often difficult to unit test[2](#references), could use an
+implementation which exposes the signals produced by the reactor in
+manufactured scenarios:
+
+```go
+type PeerErrors map[Peer][]ErrPeer
+type GoodPeers map[Peer]bool
+
+type StorePeerBehaviour struct {
+    pe PeerErrors
+    gp GoodPeers
+}
+
+func NewStorePeerBehaviour() *StorePeerBehaviour {
+    return &StorePeerBehaviour{
+        pe: make(PeerErrors),
+        gp: GoodPeers{},
+    }
+}
+
+func (spb *StorePeerBehaviour) Errored(peer Peer, reason ErrPeer) {
+    if _, ok := spb.pe[peer]; !ok {
+        spb.pe[peer] = []ErrPeer{reason}
+    } else {
+        spb.pe[peer] = append(spb.pe[peer], reason)
+    }
+}
+
+func (spb *StorePeerBehaviour) GetPeerErrors() PeerErrors {
+    return spb.pe
+}
+
+func (spb *StorePeerBehaviour) MarkPeerAsGood(peer Peer) {
+    if _, ok := spb.gp[peer]; !ok {
+        spb.gp[peer] = true
+    }
+}
+
+func (spb *StorePeerBehaviour) GetGoodPeers() GoodPeers {
+    return spb.gp
+}
+```
+
+## Status
+
+Proposed
+
+## Consequences
+
+### Positive
+
+    * De-couple signaling from acting upon peer behaviour.
+    * Reduce the coupling between reactors, the Switch, and the network
+      stack.
+    * The responsibility for managing peer behaviour can be migrated to
+      a single component instead of being split between the switch and the
+      address book.
+
+### Negative
+
+    * The first iteration will simply wrap the Switch and introduce a
+      level of indirection.
+
+### Neutral
+
+## References
+
+1. Issue [#2067](https://github.com/tendermint/tendermint/issues/2067): P2P Refactor
+2. PR: [#3506](https://github.com/tendermint/tendermint/pull/3506): ADR 036: Blockchain Reactor Refactor
diff --git a/docs/networks/docker-compose.md b/docs/networks/docker-compose.md
index 7e4adde8d4d..8db49af5e98 100644
--- a/docs/networks/docker-compose.md
+++ b/docs/networks/docker-compose.md
@@ -4,7 +4,7 @@ With Docker Compose, you can spin up local testnets with a single command.

 ## Requirements

-1. [Install tendermint](/docs/introduction/install.md)
+1. [Install tendermint](../introduction/install.md)
 2. [Install docker](https://docs.docker.com/engine/installation/)
 3. [Install docker-compose](https://docs.docker.com/compose/install/)

diff --git a/docs/spec/abci/abci.md b/docs/spec/abci/abci.md
index c696c938471..c65d96ec1b5 100644
--- a/docs/spec/abci/abci.md
+++ b/docs/spec/abci/abci.md
@@ -347,8 +347,10 @@ Commit are included in the header of the next block.
 - `Version (Version)`: Version of the blockchain and the application
 - `ChainID (string)`: ID of the blockchain
 - `Height (int64)`: Height of the block in the chain
-- `Time (google.protobuf.Timestamp)`: Time of the block. It is the proposer's
-  local time when block was created.
+- `Time (google.protobuf.Timestamp)`: Time of the previous block.
+  For heights > 1, it's the weighted median of the timestamps of the valid
+  votes in the block.LastCommit.
+  For height == 1, it's genesis time.
 - `NumTxs (int32)`: Number of transactions in the block
 - `TotalTxs (int64)`: Total number of transactions in the blockchain until now

diff --git a/docs/spec/abci/apps.md b/docs/spec/abci/apps.md
index ca6abe7f84e..47e62eed935 100644
--- a/docs/spec/abci/apps.md
+++ b/docs/spec/abci/apps.md
@@ -31,8 +31,13 @@ states to the latest committed state at once.

 When `Commit` completes, it unlocks the mempool.

-Note that it is not possible to send transactions to Tendermint during `Commit` - if your app
-tries to send a `/broadcast_tx` to Tendermint during Commit, it will deadlock.
+WARNING: if the ABCI app logic processing the `Commit` message sends a
+`/broadcast_tx_sync` or `/broadcast_tx_commit` and waits for the response
+before proceeding, it will deadlock. Executing those `broadcast_tx` calls
+involves acquiring a lock that is held during the `Commit` call, so it's not
+possible. If you make the call to the `broadcast_tx` endpoints concurrently,
+that's no problem, it just can't be part of the sequential logic of the
+`Commit` function.

 ### Consensus Connection
diff --git a/docs/spec/blockchain/blockchain.md b/docs/spec/blockchain/blockchain.md
index 00cccfc2e39..cd31c5dc1e6 100644
--- a/docs/spec/blockchain/blockchain.md
+++ b/docs/spec/blockchain/blockchain.md
@@ -103,7 +103,7 @@ type PartSetHeader struct {
 }
 ```

-See [MerkleRoot](/docs/spec/blockchain/encoding.md#MerkleRoot) for details.
+See [MerkleRoot](./encoding.md#MerkleRoot) for details.

 ## Time

@@ -163,7 +163,7 @@ a _precommit_ has `vote.Type == 2`.

 Signatures in Tendermint are raw bytes representing the underlying signature.

-See the [signature spec](/docs/spec/blockchain/encoding.md#key-types) for more.
+See the [signature spec](./encoding.md#key-types) for more.

 ## EvidenceData

@@ -190,7 +190,7 @@ type DuplicateVoteEvidence struct {
 }
 ```

-See the [pubkey spec](/docs/spec/blockchain/encoding.md#key-types) for more.
+See the [pubkey spec](./encoding.md#key-types) for more.

 ## Validation

@@ -209,7 +209,7 @@ the current version of the `state` corresponds to the state after executing
 transactions from the `prevBlock`.
 Elements of an object are accessed as expected, ie. `block.Header`.
-See the [definition of `State`](/docs/spec/blockchain/state.md).
+See the [definition of `State`](./state.md).

 ### Header

@@ -244,7 +244,7 @@ The height is an incrementing integer. The first block has `block.Header.Height

 ### Time

 ```
-block.Header.Timestamp >= prevBlock.Header.Timestamp + 1 ms
+block.Header.Timestamp >= prevBlock.Header.Timestamp + state.consensusParams.Block.TimeIotaMs
 block.Header.Timestamp == MedianTime(block.LastCommit, state.LastValidators)
 ```

@@ -332,6 +332,7 @@ block.ValidatorsHash == MerkleRoot(state.Validators)

 MerkleRoot of the current validator set that is committing the block.
 This can be used to validate the `LastCommit` included in the next block.
+Note the validators are sorted by their address before computing the MerkleRoot.

 ### NextValidatorsHash

@@ -342,6 +343,7 @@ block.NextValidatorsHash == MerkleRoot(state.NextValidators)
 MerkleRoot of the next validator set that will be the validator set that
 commits the next block.
 This is included so that the current validator set gets a chance to sign the
 next validator sets Merkle root.
+Note the validators are sorted by their address before computing the MerkleRoot.

 ### ConsensusHash
diff --git a/docs/spec/blockchain/encoding.md b/docs/spec/blockchain/encoding.md
index e8258e4a917..bde580a14ce 100644
--- a/docs/spec/blockchain/encoding.md
+++ b/docs/spec/blockchain/encoding.md
@@ -339,6 +339,6 @@ type CanonicalVote struct {
 The field ordering and the fixed sized encoding for the first three fields
 is optimized to ease parsing of SignBytes in HSMs. It creates fixed offsets for
 relevant fields that need to be read in this context.
-For more details, see the [signing spec](/docs/spec/consensus/signing.md).
+For more details, see the [signing spec](../consensus/signing.md).
 Also, see the motivating discussion in
 [#1622](https://github.com/tendermint/tendermint/issues/1622).
diff --git a/docs/spec/consensus/abci.md b/docs/spec/consensus/abci.md
index 82b88161e23..226d228997b 100644
--- a/docs/spec/consensus/abci.md
+++ b/docs/spec/consensus/abci.md
@@ -1 +1 @@
-[Moved](/docs/spec/software/abci.md)
+[Moved](../software/abci.md)
diff --git a/docs/spec/consensus/wal.md b/docs/spec/consensus/wal.md
index 589680f993c..6146ab9c0bb 100644
--- a/docs/spec/consensus/wal.md
+++ b/docs/spec/consensus/wal.md
@@ -1 +1 @@
-[Moved](/docs/spec/software/wal.md)
+[Moved](../software/wal.md)
diff --git a/docs/spec/reactors/consensus/proposer-selection.md b/docs/spec/reactors/consensus/proposer-selection.md
index b5e0b35afbc..6cb596ec06e 100644
--- a/docs/spec/reactors/consensus/proposer-selection.md
+++ b/docs/spec/reactors/consensus/proposer-selection.md
@@ -2,45 +2,290 @@

 This document specifies the Proposer Selection Procedure that is used in Tendermint to choose a round proposer.
 As Tendermint is “leader-based protocol”, the proposer selection is critical for its correct functioning.

-Let denote with `proposer_p(h,r)` a process returned by the Proposer Selection Procedure at the process p, at height h
-and round r. Then the Proposer Selection procedure should fulfill the following properties:

-`Agreement`: Given a validator set V, and two honest validators,
-p and q, for each height h, and each round r,
-proposer_p(h,r) = proposer_q(h,r)
+At a given block height, the proposer selection algorithm runs with the same validator set at each round.
+Between heights, an updated validator set may be specified by the application as part of the ABCIResponses' EndBlock.
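+
+For context, a sketch of how an application might signal such an update from ABCI's `EndBlock` (illustrative only; it assumes the `github.com/tendermint/tendermint/abci/types` package, and `pk` stands in for the new validator's public key):
+
+```go
+func (app *App) EndBlock(req types.RequestEndBlock) types.ResponseEndBlock {
+	return types.ResponseEndBlock{
+		ValidatorUpdates: []types.ValidatorUpdate{
+			{PubKey: pk, Power: 10}, // power 0 would remove the validator
+		},
+	}
+}
+```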
-`Liveness`: In every consecutive sequence of rounds of size K (K is system parameter), at least a
-single round has an honest proposer.
+## Requirements for Proposer Selection

-`Fairness`: The proposer selection is proportional to the validator voting power, i.e., a validator with more
-voting power is selected more frequently, proportional to its power. More precisely, given a set of processes
-with the total voting power N, during a sequence of rounds of size N, every process is proposer in a number of rounds
-equal to its voting power.
+This section covers the requirements, with Rx denoting mandatory and Ox denoting optional requirements.
+The following requirements must be met by the Proposer Selection procedure:

-We now look at a few particular cases to understand better how fairness should be implemented.
-If we have 4 processes with the following voting power distribution (p0,4), (p1, 2), (p2, 2), (p3, 2) at some round r,
-we have the following sequence of proposer selections in the following rounds:
+#### R1: Determinism
+Given a validator set `V`, and two honest validators `p` and `q`, for each height `h` and each round `r` the following must hold:

-`p0, p1, p2, p3, p0, p0, p1, p2, p3, p0, p0, p1, p2, p3, p0, p0, p1, p2, p3, p0, etc`
+    `proposer_p(h,r) = proposer_q(h,r)`

-Let consider now the following scenario where a total voting power of faulty processes is aggregated in a single process
-p0: (p0,3), (p1, 1), (p2, 1), (p3, 1), (p4, 1), (p5, 1), (p6, 1), (p7, 1).
-In this case the sequence of proposer selections looks like this:
+where `proposer_p(h,r)` is the proposer returned by the Proposer Selection Procedure at process `p`, at height `h` and round `r`.

-`p0, p1, p2, p3, p0, p4, p5, p6, p7, p0, p0, p1, p2, p3, p0, p4, p5, p6, p7, p0, etc`
+#### R2: Fairness
+Given a validator set with total voting power P and a sequence S of elections: in any sub-sequence of S with length C*P, a validator v must be elected as proposer P/VP(v) times, i.e. with frequency:

-In this case, we see that a number of rounds coordinated by a faulty process is proportional to its voting power.
-We consider also the case where we have voting power uniformly distributed among processes, i.e., we have 10 processes
-each with voting power of 1. And let consider that there are 3 faulty processes with consecutive addresses,
-for example the first 3 processes are faulty. Then the sequence looks like this:
+    f(v) ~ VP(v) / P

-`p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, etc`
+where C is a tolerance factor for validator set changes with the following values:
+- C == 1 if there are no validator set changes
+- C ~ k when there are validator changes

-In this case, we have 3 consecutive rounds with a faulty proposer.
-One special case we consider is the case where a single honest process p0 has most of the voting power, for example:
-(p0,100), (p1, 2), (p2, 3), (p3, 4). Then the sequence of proposer selection looks like this:
+*[this needs more work]*

-p0, p0, p0, p0, p0, p0, p0, p0, p0, p0, p0, p0, p0, p1, p0, p0, p0, p0, p0, etc
+### Basic Algorithm

-This basically means that almost all rounds have the same proposer. But in this case, the process p0 has anyway enough
-voting power to decide whatever he wants, so the fact that he coordinates almost all rounds seems correct.
+At its core, the proposer selection procedure uses a weighted round-robin algorithm.
+
+A model that gives a good intuition of how the selection algorithm works, and why it is fair, is that of a priority queue.
+The validators move ahead in this queue according to their voting power (the higher the voting power the faster a validator moves towards the head of the queue). When the algorithm runs, the following happens:
+- all validators move "ahead" according to their powers: for each validator, increase the priority by the voting power
+- first in the queue becomes the proposer: select the validator with highest priority
+- move the proposer back in the queue: decrease the proposer's priority by the total voting power
+
+Notation:
+- vset - the validator set
+- n - the number of validators
+- VP(i) - voting power of validator i
+- A(i) - accumulated priority for validator i
+- P - total voting power of set
+- avg - average of all validator priorities
+- prop - proposer
+
+A simple view of the Selection Algorithm:
+
+```
+    def ProposerSelection (vset):
+
+        // compute priorities and elect proposer
+        for each validator i in vset:
+            A(i) += VP(i)
+        prop = max(A)
+        A(prop) -= P
+```
+
+### Stable Set
+
+Consider the validator set:
+
+Validator | p1| p2
+----------|---|---
+VP        | 1 | 3
+
+Assuming no validator changes, the following table shows the proposer priority computation over a few runs. Four runs of the selection procedure are shown; starting with the 5th, the same values are computed.
+Each row shows the priority queue and each process's place in it. The proposer is the closest to the head, the rightmost validator. As priorities are updated, the validators move right in the queue. The proposer moves left as its priority is reduced after election.
+
+|Priority Run   | -2| -1| 0   | 1 | 2   | 3 | 4 | 5 | Alg step
+|---------------|---|---|-----|---|-----|---|---|---|--------
+|               |   |   |p1,p2|   |     |   |   |   |Initialized to 0
+|run 1          |   |   |     | p1|     | p2|   |   |A(i)+=VP(i)
+|               |   | p2|     | p1|     |   |   |   |A(p2)-= P
+|run 2          |   |   |     |   |p1,p2|   |   |   |A(i)+=VP(i)
+|               | p1|   |     |   | p2  |   |   |   |A(p1)-= P
+|run 3          |   | p1|     |   |     |   |   | p2|A(i)+=VP(i)
+|               |   | p1|     | p2|     |   |   |   |A(p2)-= P
+|run 4          |   |   | p1  |   |     |   | p2|   |A(i)+=VP(i)
+|               |   |   |p1,p2|   |     |   |   |   |A(p2)-= P
+
+It can be shown that:
+- At the end of each run k+1 the sum of the priorities is the same as at the end of run k. If a new set's priorities are initialized to 0 then the sum of priorities will be 0 at each run while there are no changes.
+- The max distance between priorities is (n-1) * P. *[formal proof not finished]*
+
+### Validator Set Changes
+Between proposer selection runs the validator set may change. Some changes have implications for the proposer election.
+
+#### Voting Power Change
+Consider again the earlier example and assume that the voting power of p1 is changed to 4:
+
+Validator | p1| p2
+----------|---|---
+VP        | 4 | 3
+
+Let's also assume that before this change the proposer priorities were as shown in the first row (last run). As can be seen, the selection could run again, without changes, as before.
+
+|Priority Run  | -2 | -1 | 0  | 1  | 2  | Comment
+|--------------|----|----|----|----|----|--------
+| last run     |    | p2 |    | p1 |    |__update VP(p1)__
+| next run     |    |    |    |    | p2 |A(i)+=VP(i)
+|              | p1 |    |    |    | p2 |A(p1)-= P
+
+However, when a validator changes power from a high to a low value, some other validators remain far back in the queue for a long time. This scenario is considered again in the Proposer Priority Range section.
+
+As before:
+- At the end of each run k+1 the sum of the priorities is the same as at the end of run k.
+- The max distance between priorities is (n-1) * P.
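+
+The basic algorithm above is compact enough to execute directly. Below is a runnable Go sketch (illustrative only, not the production implementation; ties are broken here by slice order, whereas the real implementation breaks ties by validator address):
+
+```go
+package main
+
+import "fmt"
+
+type val struct {
+	name string
+	vp   int64 // voting power VP(i)
+	a    int64 // accumulated priority A(i)
+}
+
+// one election of the weighted round-robin described above
+func proposerSelection(vset []*val) *val {
+	var p int64
+	for _, v := range vset {
+		v.a += v.vp // A(i) += VP(i)
+		p += v.vp
+	}
+	prop := vset[0]
+	for _, v := range vset[1:] {
+		if v.a > prop.a {
+			prop = v
+		}
+	}
+	prop.a -= p // A(prop) -= P
+	return prop
+}
+
+func main() {
+	vset := []*val{{name: "p1", vp: 1}, {name: "p2", vp: 3}}
+	for i := 0; i < 8; i++ {
+		fmt.Print(proposerSelection(vset).name, " ")
+	}
+	// prints: p2 p1 p2 p2 p2 p1 p2 p2 — matching the stable set table above
+}
+```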
+
+#### Validator Removal
+Consider a new example with set:
+
+Validator | p1 | p2 | p3 |
+--------- |----|----|----|
+VP        | 1  | 2  | 3  |
+
+Let's assume that after the last run the proposer priorities were as shown in the first row, with their sum being 0. After p2 is removed, at the end of the next proposer selection run (penultimate row) the sum of priorities is -2 (minus the priority of the removed process).
+
+The procedure could continue without modifications. However, after a sufficiently large number of modifications in the validator set, the priority values would migrate towards maximum or minimum allowed values causing truncations due to overflow detection.
+For this reason, the selection procedure adds another __new step__ that centers the current priority values such that the priority sum remains close to 0.
+
+|Priority Run    |-3 | -2 | -1 | 0  | 1  | 2  | 4 |Comment
+|----------------|---|----|----|----|----|----|---|--------
+| last run       |p3 |    |    |    | p1 | p2 |   |__remove p2__
+| next run       |   |    |    |    |    |    |   |
+| __new step__   |   | p3 |    |    |    | p1 |   |A(i) -= avg, avg = -1
+|                |   |    |    |    | p3 | p1 |   |A(i)+=VP(i)
+|                |   |    | p1 |    | p3 |    |   |A(p1)-= P
+
+The modified selection algorithm is:
+
+    def ProposerSelection (vset):
+
+        // center priorities around zero
+        avg = sum(A(i) for i in vset)/len(vset)
+        for each validator i in vset:
+            A(i) -= avg
+
+        // compute priorities and elect proposer
+        for each validator i in vset:
+            A(i) += VP(i)
+        prop = max(A)
+        A(prop) -= P
+
+Observations:
+- The sum of priorities is now close to 0. Due to integer division the sum is an integer in (-n, n), where n is the number of validators.
+
+#### New Validator
+When a new validator is added, the same problem as the one described for removal appears: the sum of priorities in the new set is not zero. This is fixed with the centering step introduced above.
+
+One other issue that needs to be addressed is the following. A validator V that has just been elected is moved to the end of the queue. If the validator set is large and/or other validators have significantly higher power, V will have to wait many runs to be elected. If V removes and re-adds itself to the set, it would make a significant (albeit unfair) "jump" ahead in the queue.
+
+In order to prevent this, when a new validator is added, its initial priority is set to:
+
+    A(V) = -1.125 * P
+
+where P is the total voting power of the set including V.
+
+The current implementation uses the penalty factor of 1.125 because it provides a small punishment that is efficient to calculate. See [here](https://github.com/tendermint/tendermint/pull/2785#discussion_r235038971) for more details.
+
+If we consider the validator set where p3 has just been added:
+
+Validator | p1 | p2 | p3
+----------|----|----|---
+VP        | 1  | 3  | 8
+
+then p3 will start with proposer priority:
+
+    A(p3) = -1.125 * (1 + 3 + 8) ~ -13
+
+Note that since the current computation uses integer division, there is a penalty loss when the sum of the voting power is less than 8.
+
+In the next run, p3 will still be ahead in the queue, elected as proposer and moved back in the queue.
+
+|Priority   Run |-13 | -9 | -5 | -2 | -1 | 0 | 1 | 2 | 5 | 6 | 7 |Alg step
+|---------------|----|----|----|----|----|---|---|---|---|---|---|--------
+|last run       |    |    |    | p2 |    |   |   | p1|   |   |   |__add p3__
+|               | p3 |    |    | p2 |    |   |   | p1|   |   |   |A(p3) = -13
+|next run       |    | p3 |    |    |    |   |   | p2|   | p1|   |A(i) -= avg, avg = -4
+|               |    |    |    |    | p3 |   |   |   | p2|   | p1|A(i)+=VP(i)
+|               |    |    | p1 |    | p3 |   |   |   | p2|   |   |A(p1)-=P
+
+### Proposer Priority Range
+With the introduction of centering, some interesting cases occur. Low-power validators that join early in a set that includes high-power validator(s) benefit from subsequent additions to the set. This is because these early validators run through more right-shift operations during centering, operations that increase their priority.
+
+As an example, consider the set where p2 is added after p1, with priority -1.125 * 80k = -90k. After the selection procedure runs once:
+
+Validator | p1  | p2  | Comment
+----------|-----|-----|---
+VP        | 80k | 10  |
+A         | 0   |-90k | __added p2__
+A         | 45k |-45k | __run selection__
+
+Then execute the following steps:
+
+1. Add a new validator p3:
+
+Validator | p1  | p2 | p3
+----------|-----|----|----
+VP        | 80k | 10 | 10
+
+2. Run selection once. The notation '..p'/'p..' indicates a small deviation from the priority of the corresponding column.
+
+|Priority  Run | -90k..| -60k | -45k | -15k | 0 | 45k | 75k  | 155k | Comment
+|--------------|-------|------|------|------|---|-----|------|------|---------
+| last run     | p3    |      | p2   |      |   | p1  |      |      | __added p3__
+| next run     |       |      |      |      |   |     |      |      |
+| *right_shift*|       | p3   |      | p2   |   |     | p1   |      | A(i) -= avg, avg = -30k
+|              |       | ..p3 |      | ..p2 |   |     |      | p1   | A(i)+=VP(i)
+|              |       | ..p3 |      | ..p2 |   |     | p1.. |      | A(p1)-=P, P=80k+20
+
+3. Remove p1 and run selection once:
+
+Validator | p3    | p2    | Comment
+----------|-------|-------|--------
+VP        | 10    | 10    |
+A         |-60k   |-15k   |
+A         |-22.5k | 22.5k | __run selection__
+
+At this point, while the total voting power is 20, the distance between priorities is 45k. It will take 4500 runs for p3 to catch up with p2.
+
+In order to prevent these types of scenarios, the selection algorithm performs scaling of priorities such that the difference between the min and max values is smaller than two times the total voting power.
+
+The modified selection algorithm is:
+
+    def ProposerSelection (vset):
+
+        // scale the priority values
+        diff = max(A)-min(A)
+        threshold = 2 * P
+        if diff > threshold:
+            scale = diff/threshold
+            for each validator i in vset:
+                A(i) = A(i)/scale
+
+        // center priorities around zero
+        avg = sum(A(i) for i in vset)/len(vset)
+        for each validator i in vset:
+            A(i) -= avg
+
+        // compute priorities and elect proposer
+        for each validator i in vset:
+            A(i) += VP(i)
+        prop = max(A)
+        A(prop) -= P
+
+Observations:
+- With this modification, the maximum distance between priorities becomes 2 * P.
+
+Note also that even during steady state the priority range may increase beyond 2 * P. The scaling introduced here helps to keep the range bounded.
+
+### Wrinkles
+
+#### Validator Power Overflow Conditions
+The validator voting power is a positive number stored as an int64. When a validator is added, the `1.125 * P` computation must not overflow. As a consequence, the code handling validator updates (add and update) checks for overflow conditions, making sure the total voting power is never larger than the largest int64 `MAX` with the property that `1.125 * MAX` is still within the bounds of int64. A fatal error is returned when an overflow condition is detected.
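+
+Pulling the above together, here is a minimal executable sketch of the modified procedure plus this overflow guard (Python for illustration only, not the Go implementation; one caveat is that Python's integer division floors toward negative infinity while Go truncates toward zero, which can shift intermediate values by one). Running it on the p2/p3 state from step 3 shows the priority gap collapsing in a single run:
+
+```
+MAX_INT64 = (1 << 63) - 1
+
+def validate_total_voting_power(total_vp):
+    # guard: 1.125 * P, computed here as P + P/8, must still fit in an int64
+    if total_vp + total_vp // 8 > MAX_INT64:
+        raise OverflowError("total voting power too large")
+
+def proposer_selection(vp, priorities):
+    total_vp = sum(vp.values())
+    validate_total_voting_power(total_vp)
+
+    # scale the priorities so that max - min stays close to 2 * P
+    diff = max(priorities.values()) - min(priorities.values())
+    threshold = 2 * total_vp
+    if diff > threshold:
+        scale = diff // threshold
+        for v in priorities:
+            priorities[v] //= scale
+
+    # center priorities around zero
+    avg = sum(priorities.values()) // len(priorities)
+    for v in priorities:
+        priorities[v] -= avg
+
+    # compute priorities and elect proposer
+    for v in vp:
+        priorities[v] += vp[v]
+    proposer = max(priorities, key=priorities.get)
+    priorities[proposer] -= total_vp
+    return proposer
+
+# step 3 above: p1 removed, leaving a 45k gap between p3 and p2
+vp = {"p3": 10, "p2": 10}
+priorities = {"p3": -60000, "p2": -15000}
+proposer_selection(vp, priorities)
+print(priorities)  # the gap shrinks to about 2 * P instead of persisting for thousands of runs
+```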
+
+#### Proposer Priority Overflow/Underflow Handling
+The proposer priority is stored as an int64. The selection algorithm performs additions and subtractions on these values, and in the case of overflow or underflow it limits the values to:
+
+    MaxInt64 = 1 << 63 - 1
+    MinInt64 = -1 << 63
+
+### Requirement Fulfillment Claims
+__[R1]__
+
+The proposer algorithm is deterministic, giving consistent results across executions with the same transactions and validator set modifications.
+[WIP - needs more detail]
+
+__[R2]__
+
+Given a set of processes with total voting power P, during a sequence of elections of length P, the number of times any process is selected as proposer is equal to its voting power. The sequence of the P proposers then repeats. If we consider the validator set:
+
+Validator | p1| p2
+----------|---|---
+VP        | 1 | 3
+
+With no other changes to the validator set, the current implementation of proposer selection generates the sequence:
+`p2, p1, p2, p2, p2, p1, p2, p2,...` or [`p2, p1, p2, p2`]*
+A sequence that starts with any circular permutation of the [`p2, p1, p2, p2`] sub-sequence would also provide the same degree of fairness. In fact, these circular permutations appear in a sliding window (over the generated sequence) of size equal to the length of the sub-sequence.
+
+Assigning priorities to each validator based on voting power, and updating them at each run, ensures the fairness of the proposer selection. In addition, every time a validator is elected as proposer, its priority is decreased by the total voting power.
+
+Intuitively, a process v jumps ahead in the queue at most (max(A) - min(A))/VP(v) times until it reaches the head and is elected. The frequency is then:
+
+    f(v) ~ VP(v)/(max(A)-min(A)) = 1/k * VP(v)/P
+
+For the current implementation, this means v should be proposer at least VP(v) times out of k * P runs, with scaling factor k=2.
diff --git a/docs/spec/reactors/mempool/reactor.md b/docs/spec/reactors/mempool/reactor.md
index fa25eeb3eae..d349fc7cc23 100644
--- a/docs/spec/reactors/mempool/reactor.md
+++ b/docs/spec/reactors/mempool/reactor.md
@@ -12,3 +12,11 @@ for details.
 
 Sending incorrectly encoded data or data exceeding `maxMsgSize` will result
 in stopping the peer.
+
+The mempool will not send a tx back to the peer it received it from.
+
+The reactor assigns a `uint16` number to each peer and maintains a map from
+p2p.ID to `uint16`. Each mempool transaction carries a list of all the senders
+(`[]uint16`). The list is updated every time the mempool receives a transaction
+it has already seen. `uint16` assumes that a node will never have over 65535
+active peers (0 is reserved for an unknown source, e.g. RPC).
diff --git a/docs/tendermint-core/configuration.md b/docs/tendermint-core/configuration.md
index aa275c7a14e..d19c272fcad 100644
--- a/docs/tendermint-core/configuration.md
+++ b/docs/tendermint-core/configuration.md
@@ -127,6 +127,17 @@ max_subscriptions_per_client = 5
 # See https://github.com/tendermint/tendermint/issues/3435
 timeout_broadcast_tx_commit = "10s"
 
+# The name of a file containing the certificate that is used to create the HTTPS server.
+# If the certificate is signed by a certificate authority,
+# the certFile should be the concatenation of the server's certificate, any intermediates,
+# and the CA's certificate.
+# NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create an HTTPS server. Otherwise, an HTTP server is run.
+tls_cert_file = "" + +# The name of a file containing matching private key that is used to create the HTTPS server. +# NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server. Otherwise, HTTP server is run. +tls_key_file = "" + ##### peer to peer configuration options ##### [p2p] diff --git a/libs/autofile/group.go b/libs/autofile/group.go index d1ea0de75aa..ce73466e41d 100644 --- a/libs/autofile/group.go +++ b/libs/autofile/group.go @@ -331,172 +331,6 @@ func (g *Group) NewReader(index int) (*GroupReader, error) { return r, nil } -// Returns -1 if line comes after, 0 if found, 1 if line comes before. -type SearchFunc func(line string) (int, error) - -// Searches for the right file in Group, then returns a GroupReader to start -// streaming lines. -// Returns true if an exact match was found, otherwise returns the next greater -// line that starts with prefix. -// CONTRACT: Caller must close the returned GroupReader -func (g *Group) Search(prefix string, cmp SearchFunc) (*GroupReader, bool, error) { - g.mtx.Lock() - minIndex, maxIndex := g.minIndex, g.maxIndex - g.mtx.Unlock() - // Now minIndex/maxIndex may change meanwhile, - // but it shouldn't be a big deal - // (maybe we'll want to limit scanUntil though) - - for { - curIndex := (minIndex + maxIndex + 1) / 2 - - // Base case, when there's only 1 choice left. - if minIndex == maxIndex { - r, err := g.NewReader(maxIndex) - if err != nil { - return nil, false, err - } - match, err := scanUntil(r, prefix, cmp) - if err != nil { - r.Close() - return nil, false, err - } - return r, match, err - } - - // Read starting roughly at the middle file, - // until we find line that has prefix. - r, err := g.NewReader(curIndex) - if err != nil { - return nil, false, err - } - foundIndex, line, err := scanNext(r, prefix) - r.Close() - if err != nil { - return nil, false, err - } - - // Compare this line to our search query. - val, err := cmp(line) - if err != nil { - return nil, false, err - } - if val < 0 { - // Line will come later - minIndex = foundIndex - } else if val == 0 { - // Stroke of luck, found the line - r, err := g.NewReader(foundIndex) - if err != nil { - return nil, false, err - } - match, err := scanUntil(r, prefix, cmp) - if !match { - panic("Expected match to be true") - } - if err != nil { - r.Close() - return nil, false, err - } - return r, true, err - } else { - // We passed it - maxIndex = curIndex - 1 - } - } - -} - -// Scans and returns the first line that starts with 'prefix' -// Consumes line and returns it. -func scanNext(r *GroupReader, prefix string) (int, string, error) { - for { - line, err := r.ReadLine() - if err != nil { - return 0, "", err - } - if !strings.HasPrefix(line, prefix) { - continue - } - index := r.CurIndex() - return index, line, nil - } -} - -// Returns true iff an exact match was found. -// Pushes line, does not consume it. -func scanUntil(r *GroupReader, prefix string, cmp SearchFunc) (bool, error) { - for { - line, err := r.ReadLine() - if err != nil { - return false, err - } - if !strings.HasPrefix(line, prefix) { - continue - } - val, err := cmp(line) - if err != nil { - return false, err - } - if val < 0 { - continue - } else if val == 0 { - r.PushLine(line) - return true, nil - } else { - r.PushLine(line) - return false, nil - } - } -} - -// Searches backwards for the last line in Group with prefix. -// Scans each file forward until the end to find the last match. 
-func (g *Group) FindLast(prefix string) (match string, found bool, err error) { - g.mtx.Lock() - minIndex, maxIndex := g.minIndex, g.maxIndex - g.mtx.Unlock() - - r, err := g.NewReader(maxIndex) - if err != nil { - return "", false, err - } - defer r.Close() - - // Open files from the back and read -GROUP_LOOP: - for i := maxIndex; i >= minIndex; i-- { - err := r.SetIndex(i) - if err != nil { - return "", false, err - } - // Scan each line and test whether line matches - for { - line, err := r.ReadLine() - if err == io.EOF { - if found { - return match, found, nil - } - continue GROUP_LOOP - } else if err != nil { - return "", false, err - } - if strings.HasPrefix(line, prefix) { - match = line - found = true - } - if r.CurIndex() > i { - if found { - return match, found, nil - } - continue GROUP_LOOP - } - } - } - - return -} - // GroupInfo holds information about the group. type GroupInfo struct { MinIndex int // index of the first file in the group, including head @@ -654,48 +488,6 @@ func (gr *GroupReader) Read(p []byte) (n int, err error) { } } -// ReadLine reads a line (without delimiter). -// just return io.EOF if no new lines found. -func (gr *GroupReader) ReadLine() (string, error) { - gr.mtx.Lock() - defer gr.mtx.Unlock() - - // From PushLine - if gr.curLine != nil { - line := string(gr.curLine) - gr.curLine = nil - return line, nil - } - - // Open file if not open yet - if gr.curReader == nil { - err := gr.openFile(gr.curIndex) - if err != nil { - return "", err - } - } - - // Iterate over files until line is found - var linePrefix string - for { - bytesRead, err := gr.curReader.ReadBytes('\n') - if err == io.EOF { - // Open the next file - if err1 := gr.openFile(gr.curIndex + 1); err1 != nil { - return "", err1 - } - if len(bytesRead) > 0 && bytesRead[len(bytesRead)-1] == byte('\n') { - return linePrefix + string(bytesRead[:len(bytesRead)-1]), nil - } - linePrefix += string(bytesRead) - continue - } else if err != nil { - return "", err - } - return linePrefix + string(bytesRead[:len(bytesRead)-1]), nil - } -} - // IF index > gr.Group.maxIndex, returns io.EOF // CONTRACT: caller should hold gr.mtx func (gr *GroupReader) openFile(index int) error { @@ -725,20 +517,6 @@ func (gr *GroupReader) openFile(index int) error { return nil } -// PushLine makes the given line the current one, so the next time somebody -// calls ReadLine, this line will be returned. -// panics if called twice without calling ReadLine. -func (gr *GroupReader) PushLine(line string) { - gr.mtx.Lock() - defer gr.mtx.Unlock() - - if gr.curLine == nil { - gr.curLine = []byte(line) - } else { - panic("PushLine failed, already have line") - } -} - // CurIndex returns cursor's file index. func (gr *GroupReader) CurIndex() int { gr.mtx.Lock() @@ -753,32 +531,3 @@ func (gr *GroupReader) SetIndex(index int) error { defer gr.mtx.Unlock() return gr.openFile(index) } - -//-------------------------------------------------------------------------------- - -// A simple SearchFunc that assumes that the marker is of form -// . -// For example, if prefix is '#HEIGHT:', the markers of expected to be of the form: -// -// #HEIGHT:1 -// ... -// #HEIGHT:2 -// ... 
-func MakeSimpleSearchFunc(prefix string, target int) SearchFunc { - return func(line string) (int, error) { - if !strings.HasPrefix(line, prefix) { - return -1, fmt.Errorf("Marker line did not have prefix: %v", prefix) - } - i, err := strconv.Atoi(line[len(prefix):]) - if err != nil { - return -1, fmt.Errorf("Failed to parse marker line: %v", err.Error()) - } - if target < i { - return 1, nil - } else if target == i { - return 0, nil - } else { - return -1, nil - } - } -} diff --git a/libs/autofile/group_test.go b/libs/autofile/group_test.go index 68870df87c4..c300aba7179 100644 --- a/libs/autofile/group_test.go +++ b/libs/autofile/group_test.go @@ -1,13 +1,9 @@ package autofile import ( - "errors" - "fmt" "io" "io/ioutil" "os" - "strconv" - "strings" "testing" "github.com/stretchr/testify/assert" @@ -106,107 +102,6 @@ func TestCheckHeadSizeLimit(t *testing.T) { destroyTestGroup(t, g) } -func TestSearch(t *testing.T) { - g := createTestGroupWithHeadSizeLimit(t, 10*1000) - - // Create some files in the group that have several INFO lines in them. - // Try to put the INFO lines in various spots. - for i := 0; i < 100; i++ { - // The random junk at the end ensures that this INFO linen - // is equally likely to show up at the end. - _, err := g.Head.Write([]byte(fmt.Sprintf("INFO %v %v\n", i, cmn.RandStr(123)))) - require.NoError(t, err, "Failed to write to head") - g.checkHeadSizeLimit() - for j := 0; j < 10; j++ { - _, err1 := g.Head.Write([]byte(cmn.RandStr(123) + "\n")) - require.NoError(t, err1, "Failed to write to head") - g.checkHeadSizeLimit() - } - } - - // Create a search func that searches for line - makeSearchFunc := func(target int) SearchFunc { - return func(line string) (int, error) { - parts := strings.Split(line, " ") - if len(parts) != 3 { - return -1, errors.New("Line did not have 3 parts") - } - i, err := strconv.Atoi(parts[1]) - if err != nil { - return -1, errors.New("Failed to parse INFO: " + err.Error()) - } - if target < i { - return 1, nil - } else if target == i { - return 0, nil - } else { - return -1, nil - } - } - } - - // Now search for each number - for i := 0; i < 100; i++ { - gr, match, err := g.Search("INFO", makeSearchFunc(i)) - require.NoError(t, err, "Failed to search for line, tc #%d", i) - assert.True(t, match, "Expected Search to return exact match, tc #%d", i) - line, err := gr.ReadLine() - require.NoError(t, err, "Failed to read line after search, tc #%d", i) - if !strings.HasPrefix(line, fmt.Sprintf("INFO %v ", i)) { - t.Fatalf("Failed to get correct line, tc #%d", i) - } - // Make sure we can continue to read from there. - cur := i + 1 - for { - line, err := gr.ReadLine() - if err == io.EOF { - if cur == 99+1 { - // OK! - break - } else { - t.Fatalf("Got EOF after the wrong INFO #, tc #%d", i) - } - } else if err != nil { - t.Fatalf("Error reading line, tc #%d, err:\n%s", i, err) - } - if !strings.HasPrefix(line, "INFO ") { - continue - } - if !strings.HasPrefix(line, fmt.Sprintf("INFO %v ", cur)) { - t.Fatalf("Unexpected INFO #. Expected %v got:\n%v, tc #%d", cur, line, i) - } - cur++ - } - gr.Close() - } - - // Now search for something that is too small. - // We should get the first available line. 
- { - gr, match, err := g.Search("INFO", makeSearchFunc(-999)) - require.NoError(t, err, "Failed to search for line") - assert.False(t, match, "Expected Search to not return exact match") - line, err := gr.ReadLine() - require.NoError(t, err, "Failed to read line after search") - if !strings.HasPrefix(line, "INFO 0 ") { - t.Error("Failed to fetch correct line, which is the earliest INFO") - } - err = gr.Close() - require.NoError(t, err, "Failed to close GroupReader") - } - - // Now search for something that is too large. - // We should get an EOF error. - { - gr, _, err := g.Search("INFO", makeSearchFunc(999)) - assert.Equal(t, io.EOF, err) - assert.Nil(t, gr) - } - - // Cleanup - destroyTestGroup(t, g) -} - func TestRotateFile(t *testing.T) { g := createTestGroupWithHeadSizeLimit(t, 0) g.WriteLine("Line 1") @@ -237,100 +132,6 @@ func TestRotateFile(t *testing.T) { destroyTestGroup(t, g) } -func TestFindLast1(t *testing.T) { - g := createTestGroupWithHeadSizeLimit(t, 0) - - g.WriteLine("Line 1") - g.WriteLine("Line 2") - g.WriteLine("# a") - g.WriteLine("Line 3") - g.FlushAndSync() - g.RotateFile() - g.WriteLine("Line 4") - g.WriteLine("Line 5") - g.WriteLine("Line 6") - g.WriteLine("# b") - g.FlushAndSync() - - match, found, err := g.FindLast("#") - assert.NoError(t, err) - assert.True(t, found) - assert.Equal(t, "# b", match) - - // Cleanup - destroyTestGroup(t, g) -} - -func TestFindLast2(t *testing.T) { - g := createTestGroupWithHeadSizeLimit(t, 0) - - g.WriteLine("Line 1") - g.WriteLine("Line 2") - g.WriteLine("Line 3") - g.FlushAndSync() - g.RotateFile() - g.WriteLine("# a") - g.WriteLine("Line 4") - g.WriteLine("Line 5") - g.WriteLine("# b") - g.WriteLine("Line 6") - g.FlushAndSync() - - match, found, err := g.FindLast("#") - assert.NoError(t, err) - assert.True(t, found) - assert.Equal(t, "# b", match) - - // Cleanup - destroyTestGroup(t, g) -} - -func TestFindLast3(t *testing.T) { - g := createTestGroupWithHeadSizeLimit(t, 0) - - g.WriteLine("Line 1") - g.WriteLine("# a") - g.WriteLine("Line 2") - g.WriteLine("# b") - g.WriteLine("Line 3") - g.FlushAndSync() - g.RotateFile() - g.WriteLine("Line 4") - g.WriteLine("Line 5") - g.WriteLine("Line 6") - g.FlushAndSync() - - match, found, err := g.FindLast("#") - assert.NoError(t, err) - assert.True(t, found) - assert.Equal(t, "# b", match) - - // Cleanup - destroyTestGroup(t, g) -} - -func TestFindLast4(t *testing.T) { - g := createTestGroupWithHeadSizeLimit(t, 0) - - g.WriteLine("Line 1") - g.WriteLine("Line 2") - g.WriteLine("Line 3") - g.FlushAndSync() - g.RotateFile() - g.WriteLine("Line 4") - g.WriteLine("Line 5") - g.WriteLine("Line 6") - g.FlushAndSync() - - match, found, err := g.FindLast("#") - assert.NoError(t, err) - assert.False(t, found) - assert.Empty(t, match) - - // Cleanup - destroyTestGroup(t, g) -} - func TestWrite(t *testing.T) { g := createTestGroupWithHeadSizeLimit(t, 0) diff --git a/libs/common/repeat_timer.go b/libs/common/repeat_timer.go deleted file mode 100644 index 5d049738dd3..00000000000 --- a/libs/common/repeat_timer.go +++ /dev/null @@ -1,232 +0,0 @@ -package common - -import ( - "sync" - "time" -) - -// Used by RepeatTimer the first time, -// and every time it's Reset() after Stop(). -type TickerMaker func(dur time.Duration) Ticker - -// Ticker is a basic ticker interface. -type Ticker interface { - - // Never changes, never closes. - Chan() <-chan time.Time - - // Stopping a stopped Ticker will panic. 
- Stop() -} - -//---------------------------------------- -// defaultTicker - -var _ Ticker = (*defaultTicker)(nil) - -type defaultTicker time.Ticker - -func defaultTickerMaker(dur time.Duration) Ticker { - ticker := time.NewTicker(dur) - return (*defaultTicker)(ticker) -} - -// Implements Ticker -func (t *defaultTicker) Chan() <-chan time.Time { - return t.C -} - -// Implements Ticker -func (t *defaultTicker) Stop() { - ((*time.Ticker)(t)).Stop() -} - -//---------------------------------------- -// LogicalTickerMaker - -// Construct a TickerMaker that always uses `source`. -// It's useful for simulating a deterministic clock. -func NewLogicalTickerMaker(source chan time.Time) TickerMaker { - return func(dur time.Duration) Ticker { - return newLogicalTicker(source, dur) - } -} - -type logicalTicker struct { - source <-chan time.Time - ch chan time.Time - quit chan struct{} -} - -func newLogicalTicker(source <-chan time.Time, interval time.Duration) Ticker { - lt := &logicalTicker{ - source: source, - ch: make(chan time.Time), - quit: make(chan struct{}), - } - go lt.fireRoutine(interval) - return lt -} - -// We need a goroutine to read times from t.source -// and fire on t.Chan() when `interval` has passed. -func (t *logicalTicker) fireRoutine(interval time.Duration) { - source := t.source - - // Init `lasttime` - lasttime := time.Time{} - select { - case lasttime = <-source: - case <-t.quit: - return - } - // Init `lasttime` end - - for { - select { - case newtime := <-source: - elapsed := newtime.Sub(lasttime) - if interval <= elapsed { - // Block for determinism until the ticker is stopped. - select { - case t.ch <- newtime: - case <-t.quit: - return - } - // Reset timeleft. - // Don't try to "catch up" by sending more. - // "Ticker adjusts the intervals or drops ticks to make up for - // slow receivers" - https://golang.org/pkg/time/#Ticker - lasttime = newtime - } - case <-t.quit: - return // done - } - } -} - -// Implements Ticker -func (t *logicalTicker) Chan() <-chan time.Time { - return t.ch // immutable -} - -// Implements Ticker -func (t *logicalTicker) Stop() { - close(t.quit) // it *should* panic when stopped twice. -} - -//--------------------------------------------------------------------- - -/* - RepeatTimer repeatedly sends a struct{}{} to `.Chan()` after each `dur` - period. (It's good for keeping connections alive.) - A RepeatTimer must be stopped, or it will keep a goroutine alive. -*/ -type RepeatTimer struct { - name string - ch chan time.Time - tm TickerMaker - - mtx sync.Mutex - dur time.Duration - ticker Ticker - quit chan struct{} -} - -// NewRepeatTimer returns a RepeatTimer with a defaultTicker. -func NewRepeatTimer(name string, dur time.Duration) *RepeatTimer { - return NewRepeatTimerWithTickerMaker(name, dur, defaultTickerMaker) -} - -// NewRepeatTimerWithTicker returns a RepeatTimer with the given ticker -// maker. -func NewRepeatTimerWithTickerMaker(name string, dur time.Duration, tm TickerMaker) *RepeatTimer { - var t = &RepeatTimer{ - name: name, - ch: make(chan time.Time), - tm: tm, - dur: dur, - ticker: nil, - quit: nil, - } - t.reset() - return t -} - -// receive ticks on ch, send out on t.ch -func (t *RepeatTimer) fireRoutine(ch <-chan time.Time, quit <-chan struct{}) { - for { - select { - case tick := <-ch: - select { - case t.ch <- tick: - case <-quit: - return - } - case <-quit: // NOTE: `t.quit` races. 
- return - } - } -} - -func (t *RepeatTimer) Chan() <-chan time.Time { - return t.ch -} - -func (t *RepeatTimer) Stop() { - t.mtx.Lock() - defer t.mtx.Unlock() - - t.stop() -} - -// Wait the duration again before firing. -func (t *RepeatTimer) Reset() { - t.mtx.Lock() - defer t.mtx.Unlock() - - t.reset() -} - -//---------------------------------------- -// Misc. - -// CONTRACT: (non-constructor) caller should hold t.mtx. -func (t *RepeatTimer) reset() { - if t.ticker != nil { - t.stop() - } - t.ticker = t.tm(t.dur) - t.quit = make(chan struct{}) - go t.fireRoutine(t.ticker.Chan(), t.quit) -} - -// CONTRACT: caller should hold t.mtx. -func (t *RepeatTimer) stop() { - if t.ticker == nil { - /* - Similar to the case of closing channels twice: - https://groups.google.com/forum/#!topic/golang-nuts/rhxMiNmRAPk - Stopping a RepeatTimer twice implies that you do - not know whether you are done or not. - If you're calling stop on a stopped RepeatTimer, - you probably have race conditions. - */ - panic("Tried to stop a stopped RepeatTimer") - } - t.ticker.Stop() - t.ticker = nil - /* - From https://golang.org/pkg/time/#Ticker: - "Stop the ticker to release associated resources" - "After Stop, no more ticks will be sent" - So we shouldn't have to do the below. - - select { - case <-t.ch: - // read off channel if there's anything there - default: - } - */ - close(t.quit) -} diff --git a/libs/common/repeat_timer_test.go b/libs/common/repeat_timer_test.go deleted file mode 100644 index f2a7b16c3bb..00000000000 --- a/libs/common/repeat_timer_test.go +++ /dev/null @@ -1,136 +0,0 @@ -package common - -import ( - "sync" - "testing" - "time" - - "github.com/fortytw2/leaktest" - "github.com/stretchr/testify/assert" -) - -func TestDefaultTicker(t *testing.T) { - ticker := defaultTickerMaker(time.Millisecond * 10) - <-ticker.Chan() - ticker.Stop() -} - -func TestRepeatTimer(t *testing.T) { - - ch := make(chan time.Time, 100) - mtx := new(sync.Mutex) - - // tick() fires from start to end - // (exclusive) in milliseconds with incr. - // It locks on mtx, so subsequent calls - // run in series. - tick := func(startMs, endMs, incrMs time.Duration) { - mtx.Lock() - go func() { - for tMs := startMs; tMs < endMs; tMs += incrMs { - lt := time.Time{} - lt = lt.Add(tMs * time.Millisecond) - ch <- lt - } - mtx.Unlock() - }() - } - - // tock consumes Ticker.Chan() events and checks them against the ms in "timesMs". - tock := func(t *testing.T, rt *RepeatTimer, timesMs []int64) { - - // Check against timesMs. - for _, timeMs := range timesMs { - tyme := <-rt.Chan() - sinceMs := tyme.Sub(time.Time{}) / time.Millisecond - assert.Equal(t, timeMs, int64(sinceMs)) - } - - // TODO detect number of running - // goroutines to ensure that - // no other times will fire. - // See https://github.com/tendermint/tendermint/libs/issues/120. - time.Sleep(time.Millisecond * 100) - done := true - select { - case <-rt.Chan(): - done = false - default: - } - assert.True(t, done) - } - - tm := NewLogicalTickerMaker(ch) - rt := NewRepeatTimerWithTickerMaker("bar", time.Second, tm) - - /* NOTE: Useful for debugging deadlocks... 
- go func() { - time.Sleep(time.Second * 3) - trace := make([]byte, 102400) - count := runtime.Stack(trace, true) - fmt.Printf("Stack of %d bytes: %s\n", count, trace) - }() - */ - - tick(0, 1000, 10) - tock(t, rt, []int64{}) - tick(1000, 2000, 10) - tock(t, rt, []int64{1000}) - tick(2005, 5000, 10) - tock(t, rt, []int64{2005, 3005, 4005}) - tick(5001, 5999, 1) - // Read 5005 instead of 5001 because - // it's 1 second greater than 4005. - tock(t, rt, []int64{5005}) - tick(6000, 7005, 1) - tock(t, rt, []int64{6005}) - tick(7033, 8032, 1) - tock(t, rt, []int64{7033}) - - // After a reset, nothing happens - // until two ticks are received. - rt.Reset() - tock(t, rt, []int64{}) - tick(8040, 8041, 1) - tock(t, rt, []int64{}) - tick(9555, 9556, 1) - tock(t, rt, []int64{9555}) - - // After a stop, nothing more is sent. - rt.Stop() - tock(t, rt, []int64{}) - - // Another stop panics. - assert.Panics(t, func() { rt.Stop() }) -} - -func TestRepeatTimerReset(t *testing.T) { - // check that we are not leaking any go-routines - defer leaktest.Check(t)() - - timer := NewRepeatTimer("test", 20*time.Millisecond) - defer timer.Stop() - - // test we don't receive tick before duration ms. - select { - case <-timer.Chan(): - t.Fatal("did not expect to receive tick") - default: - } - - timer.Reset() - - // test we receive tick after Reset is called - select { - case <-timer.Chan(): - // all good - case <-time.After(40 * time.Millisecond): - t.Fatal("expected to receive tick after reset") - } - - // just random calls - for i := 0; i < 100; i++ { - time.Sleep(time.Duration(RandIntn(40)) * time.Millisecond) - timer.Reset() - } -} diff --git a/libs/common/service.go b/libs/common/service.go index 96a5e632af0..21fb0df3ecb 100644 --- a/libs/common/service.go +++ b/libs/common/service.go @@ -209,7 +209,7 @@ func (bs *BaseService) Wait() { <-bs.quit } -// String implements Servce by returning a string representation of the service. +// String implements Service by returning a string representation of the service. 
func (bs *BaseService) String() string { return bs.name } diff --git a/mempool/bench_test.go b/mempool/bench_test.go index 8936f8dfbec..0cd394cd60f 100644 --- a/mempool/bench_test.go +++ b/mempool/bench_test.go @@ -26,6 +26,19 @@ func BenchmarkReap(b *testing.B) { } } +func BenchmarkCheckTx(b *testing.B) { + app := kvstore.NewKVStoreApplication() + cc := proxy.NewLocalClientCreator(app) + mempool, cleanup := newMempoolWithApp(cc) + defer cleanup() + + for i := 0; i < b.N; i++ { + tx := make([]byte, 8) + binary.BigEndian.PutUint64(tx, uint64(i)) + mempool.CheckTx(tx, nil) + } +} + func BenchmarkCacheInsertTime(b *testing.B) { cache := newMapTxCache(b.N) txs := make([][]byte, b.N) diff --git a/mempool/cache_test.go b/mempool/cache_test.go new file mode 100644 index 00000000000..ea9f63fd67b --- /dev/null +++ b/mempool/cache_test.go @@ -0,0 +1,101 @@ +package mempool + +import ( + "crypto/rand" + "crypto/sha256" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/tendermint/tendermint/abci/example/kvstore" + "github.com/tendermint/tendermint/proxy" + "github.com/tendermint/tendermint/types" +) + +func TestCacheRemove(t *testing.T) { + cache := newMapTxCache(100) + numTxs := 10 + txs := make([][]byte, numTxs) + for i := 0; i < numTxs; i++ { + // probability of collision is 2**-256 + txBytes := make([]byte, 32) + rand.Read(txBytes) // nolint: gosec + txs[i] = txBytes + cache.Push(txBytes) + // make sure its added to both the linked list and the map + require.Equal(t, i+1, len(cache.map_)) + require.Equal(t, i+1, cache.list.Len()) + } + for i := 0; i < numTxs; i++ { + cache.Remove(txs[i]) + // make sure its removed from both the map and the linked list + require.Equal(t, numTxs-(i+1), len(cache.map_)) + require.Equal(t, numTxs-(i+1), cache.list.Len()) + } +} + +func TestCacheAfterUpdate(t *testing.T) { + app := kvstore.NewKVStoreApplication() + cc := proxy.NewLocalClientCreator(app) + mempool, cleanup := newMempoolWithApp(cc) + defer cleanup() + + // reAddIndices & txsInCache can have elements > numTxsToCreate + // also assumes max index is 255 for convenience + // txs in cache also checks order of elements + tests := []struct { + numTxsToCreate int + updateIndices []int + reAddIndices []int + txsInCache []int + }{ + {1, []int{}, []int{1}, []int{1, 0}}, // adding new txs works + {2, []int{1}, []int{}, []int{1, 0}}, // update doesn't remove tx from cache + {2, []int{2}, []int{}, []int{2, 1, 0}}, // update adds new tx to cache + {2, []int{1}, []int{1}, []int{1, 0}}, // re-adding after update doesn't make dupe + } + for tcIndex, tc := range tests { + for i := 0; i < tc.numTxsToCreate; i++ { + tx := types.Tx{byte(i)} + err := mempool.CheckTx(tx, nil) + require.NoError(t, err) + } + + updateTxs := []types.Tx{} + for _, v := range tc.updateIndices { + tx := types.Tx{byte(v)} + updateTxs = append(updateTxs, tx) + } + mempool.Update(int64(tcIndex), updateTxs, nil, nil) + + for _, v := range tc.reAddIndices { + tx := types.Tx{byte(v)} + _ = mempool.CheckTx(tx, nil) + } + + cache := mempool.cache.(*mapTxCache) + node := cache.list.Front() + counter := 0 + for node != nil { + require.NotEqual(t, len(tc.txsInCache), counter, + "cache larger than expected on testcase %d", tcIndex) + + nodeVal := node.Value.([sha256.Size]byte) + expectedBz := sha256.Sum256([]byte{byte(tc.txsInCache[len(tc.txsInCache)-counter-1])}) + // Reference for reading the errors: + // >>> sha256('\x00').hexdigest() + // '6e340b9cffb37a989ca544e6bb780a2c78901d3fb33738768511a30617afa01d' + // >>> 
sha256('\x01').hexdigest() + // '4bf5122f344554c53bde2ebb8cd2b7e3d1600ad631c385a5d7cce23c7785459a' + // >>> sha256('\x02').hexdigest() + // 'dbc1b4c900ffe48d575b5da5c638040125f65db0fe3e24494b76ea986457d986' + + require.Equal(t, expectedBz, nodeVal, "Equality failed on index %d, tc %d", counter, tcIndex) + counter++ + node = node.Next() + } + require.Equal(t, len(tc.txsInCache), counter, + "cache smaller than expected on testcase %d", tcIndex) + mempool.Flush() + } +} diff --git a/mempool/mempool.go b/mempool/mempool.go index 41ee59cb4c5..a5b14466ac9 100644 --- a/mempool/mempool.go +++ b/mempool/mempool.go @@ -31,6 +31,14 @@ type PreCheckFunc func(types.Tx) error // transaction doesn't require more gas than available for the block. type PostCheckFunc func(types.Tx, *abci.ResponseCheckTx) error +// TxInfo are parameters that get passed when attempting to add a tx to the +// mempool. +type TxInfo struct { + // We don't use p2p.ID here because it's too big. The gain is to store max 2 + // bytes with each tx to identify the sender rather than 20 bytes. + PeerID uint16 +} + /* The mempool pushes new txs onto the proxyAppConn. @@ -141,6 +149,11 @@ func TxID(tx []byte) string { return fmt.Sprintf("%X", types.Tx(tx).Hash()) } +// txKey is the fixed length array sha256 hash used as the key in maps. +func txKey(tx types.Tx) [sha256.Size]byte { + return sha256.Sum256(tx) +} + // Mempool is an ordered in-memory pool for transactions before they are proposed in a consensus // round. Transaction validity is checked using the CheckTx abci message before the transaction is // added to the pool. The Mempool uses a concurrent list structure for storing transactions that @@ -148,20 +161,30 @@ func TxID(tx []byte) string { type Mempool struct { config *cfg.MempoolConfig - proxyMtx sync.Mutex - proxyAppConn proxy.AppConnMempool - txs *clist.CList // concurrent linked-list of good txs - height int64 // the last block Update()'d to - rechecking int32 // for re-checking filtered txs on Update() - recheckCursor *clist.CElement // next expected response - recheckEnd *clist.CElement // re-checking stops here + proxyMtx sync.Mutex + proxyAppConn proxy.AppConnMempool + txs *clist.CList // concurrent linked-list of good txs + preCheck PreCheckFunc + postCheck PostCheckFunc + + // Track whether we're rechecking txs. + // These are not protected by a mutex and are expected to be mutated + // in serial (ie. by abci responses which are called in serial). + recheckCursor *clist.CElement // next expected response + recheckEnd *clist.CElement // re-checking stops here + + // notify listeners (ie. consensus) when txs are available notifiedTxsAvailable bool txsAvailable chan struct{} // fires once for each height, when the mempool is not empty - preCheck PreCheckFunc - postCheck PostCheckFunc + + // Map for quick access to txs to record sender in CheckTx. + // txsMap: txKey -> CElement + txsMap sync.Map // Atomic integers - txsBytes int64 // see TxsBytes + height int64 // the last block Update()'d to + rechecking int32 // for re-checking filtered txs on Update() + txsBytes int64 // total size of mempool, in bytes // Keep a cache of already-seen txs. // This reduces the pressure on the proxyApp. 
@@ -201,7 +224,7 @@ func NewMempool( } else { mempool.cache = nopTxCache{} } - proxyAppConn.SetResponseCallback(mempool.resCb) + proxyAppConn.SetResponseCallback(mempool.globalCb) for _, option := range options { option(mempool) } @@ -286,8 +309,8 @@ func (mem *Mempool) TxsBytes() int64 { return atomic.LoadInt64(&mem.txsBytes) } -// FlushAppConn flushes the mempool connection to ensure async resCb calls are -// done e.g. from CheckTx. +// FlushAppConn flushes the mempool connection to ensure async reqResCb calls are +// done. E.g. from CheckTx. func (mem *Mempool) FlushAppConn() error { return mem.proxyAppConn.FlushSync() } @@ -304,6 +327,7 @@ func (mem *Mempool) Flush() { e.DetachPrev() } + mem.txsMap = sync.Map{} _ = atomic.SwapInt64(&mem.txsBytes, 0) } @@ -327,6 +351,13 @@ func (mem *Mempool) TxsWaitChan() <-chan struct{} { // It gets called from another goroutine. // CONTRACT: Either cb will get called, or err returned. func (mem *Mempool) CheckTx(tx types.Tx, cb func(*abci.Response)) (err error) { + return mem.CheckTxWithInfo(tx, cb, TxInfo{PeerID: UnknownPeerID}) +} + +// CheckTxWithInfo performs the same operation as CheckTx, but with extra meta data about the tx. +// Currently this metadata is the peer who sent it, +// used to prevent the tx from being gossiped back to them. +func (mem *Mempool) CheckTxWithInfo(tx types.Tx, cb func(*abci.Response), txInfo TxInfo) (err error) { mem.proxyMtx.Lock() // use defer to unlock mutex because application (*local client*) might panic defer mem.proxyMtx.Unlock() @@ -357,6 +388,19 @@ func (mem *Mempool) CheckTx(tx types.Tx, cb func(*abci.Response)) (err error) { // CACHE if !mem.cache.Push(tx) { + // Record a new sender for a tx we've already seen. + // Note it's possible a tx is still in the cache but no longer in the mempool + // (eg. after committing a block, txs are removed from mempool but not cache), + // so we only record the sender for txs still in the mempool. + if e, ok := mem.txsMap.Load(txKey(tx)); ok { + memTx := e.(*clist.CElement).Value.(*mempoolTx) + if _, loaded := memTx.senders.LoadOrStore(txInfo.PeerID, true); loaded { + // TODO: consider punishing peer for dups, + // its non-trivial since invalid txs can become valid, + // but they can spam the same tx with little cost to them atm. + } + } + return ErrTxInCache } // END CACHE @@ -379,29 +423,90 @@ func (mem *Mempool) CheckTx(tx types.Tx, cb func(*abci.Response)) (err error) { if err = mem.proxyAppConn.Error(); err != nil { return err } + reqRes := mem.proxyAppConn.CheckTxAsync(tx) - if cb != nil { - reqRes.SetCallback(cb) - } + reqRes.SetCallback(mem.reqResCb(tx, txInfo.PeerID, cb)) return nil } -// ABCI callback function -func (mem *Mempool) resCb(req *abci.Request, res *abci.Response) { +// Global callback that will be called after every ABCI response. +// Having a single global callback avoids needing to set a callback for each request. +// However, processing the checkTx response requires the peerID (so we can track which txs we heard from who), +// and peerID is not included in the ABCI request, so we have to set request-specific callbacks that +// include this information. If we're not in the midst of a recheck, this function will just return, +// so the request specific callback can do the work. +// When rechecking, we don't need the peerID, so the recheck callback happens here. 
+func (mem *Mempool) globalCb(req *abci.Request, res *abci.Response) { if mem.recheckCursor == nil { - mem.resCbNormal(req, res) - } else { - mem.metrics.RecheckTimes.Add(1) - mem.resCbRecheck(req, res) + return } + + mem.metrics.RecheckTimes.Add(1) + mem.resCbRecheck(req, res) + + // update metrics mem.metrics.Size.Set(float64(mem.Size())) } -func (mem *Mempool) resCbNormal(req *abci.Request, res *abci.Response) { +// Request specific callback that should be set on individual reqRes objects +// to incorporate local information when processing the response. +// This allows us to track the peer that sent us this tx, so we can avoid sending it back to them. +// NOTE: alternatively, we could include this information in the ABCI request itself. +// +// External callers of CheckTx, like the RPC, can also pass an externalCb through here that is called +// when all other response processing is complete. +// +// Used in CheckTxWithInfo to record PeerID who sent us the tx. +func (mem *Mempool) reqResCb(tx []byte, peerID uint16, externalCb func(*abci.Response)) func(res *abci.Response) { + return func(res *abci.Response) { + if mem.recheckCursor != nil { + // this should never happen + panic("recheck cursor is not nil in reqResCb") + } + + mem.resCbFirstTime(tx, peerID, res) + + // update metrics + mem.metrics.Size.Set(float64(mem.Size())) + + // passed in by the caller of CheckTx, eg. the RPC + if externalCb != nil { + externalCb(res) + } + } +} + +// Called from: +// - resCbFirstTime (lock not held) if tx is valid +func (mem *Mempool) addTx(memTx *mempoolTx) { + e := mem.txs.PushBack(memTx) + mem.txsMap.Store(txKey(memTx.tx), e) + atomic.AddInt64(&mem.txsBytes, int64(len(memTx.tx))) + mem.metrics.TxSizeBytes.Observe(float64(len(memTx.tx))) +} + +// Called from: +// - Update (lock held) if tx was committed +// - resCbRecheck (lock not held) if tx was invalidated +func (mem *Mempool) removeTx(tx types.Tx, elem *clist.CElement, removeFromCache bool) { + mem.txs.Remove(elem) + elem.DetachPrev() + mem.txsMap.Delete(txKey(tx)) + atomic.AddInt64(&mem.txsBytes, int64(-len(tx))) + + if removeFromCache { + mem.cache.Remove(tx) + } +} + +// callback, which is called after the app checked the tx for the first time. +// +// The case where the app checks the tx for the second and subsequent times is +// handled by the resCbRecheck callback. +func (mem *Mempool) resCbFirstTime(tx []byte, peerID uint16, res *abci.Response) { switch r := res.Value.(type) { case *abci.Response_CheckTx: - tx := req.GetCheckTx().Tx var postCheckErr error if mem.postCheck != nil { postCheckErr = mem.postCheck(tx, r.CheckTx) @@ -412,15 +517,14 @@ func (mem *Mempool) resCbNormal(req *abci.Request, res *abci.Response) { gasWanted: r.CheckTx.GasWanted, tx: tx, } - mem.txs.PushBack(memTx) - atomic.AddInt64(&mem.txsBytes, int64(len(tx))) + memTx.senders.Store(peerID, true) + mem.addTx(memTx) mem.logger.Info("Added good transaction", "tx", TxID(tx), "res", r, "height", memTx.height, "total", mem.Size(), ) - mem.metrics.TxSizeBytes.Observe(float64(len(tx))) mem.notifyTxsAvailable() } else { // ignore bad transaction @@ -434,6 +538,10 @@ func (mem *Mempool) resCbNormal(req *abci.Request, res *abci.Response) { } } +// callback, which is called after the app rechecked the tx. +// +// The case where the app checks the tx for the first time is handled by the +// resCbFirstTime callback. 
func (mem *Mempool) resCbRecheck(req *abci.Request, res *abci.Response) { switch r := res.Value.(type) { case *abci.Response_CheckTx: @@ -454,12 +562,8 @@ func (mem *Mempool) resCbRecheck(req *abci.Request, res *abci.Response) { } else { // Tx became invalidated due to newly committed block. mem.logger.Info("Tx is no longer valid", "tx", TxID(tx), "res", r, "err", postCheckErr) - mem.txs.Remove(mem.recheckCursor) - atomic.AddInt64(&mem.txsBytes, int64(-len(tx))) - mem.recheckCursor.DetachPrev() - - // remove from cache (it might be good later) - mem.cache.Remove(tx) + // NOTE: we remove tx from the cache because it might be good later + mem.removeTx(tx, mem.recheckCursor, true) } if mem.recheckCursor == mem.recheckEnd { mem.recheckCursor = nil @@ -627,12 +731,9 @@ func (mem *Mempool) removeTxs(txs types.Txs) []types.Tx { memTx := e.Value.(*mempoolTx) // Remove the tx if it's already in a block. if _, ok := txsMap[string(memTx.tx)]; ok { - // remove from clist - mem.txs.Remove(e) - atomic.AddInt64(&mem.txsBytes, int64(-len(memTx.tx))) - e.DetachPrev() - // NOTE: we don't remove committed txs from the cache. + mem.removeTx(memTx.tx, e, false) + continue } txsLeft = append(txsLeft, memTx.tx) @@ -650,7 +751,7 @@ func (mem *Mempool) recheckTxs(txs []types.Tx) { mem.recheckEnd = mem.txs.Back() // Push txs to proxyAppConn - // NOTE: resCb() may be called concurrently. + // NOTE: globalCb may be called concurrently. for _, tx := range txs { mem.proxyAppConn.CheckTxAsync(tx) } @@ -664,6 +765,10 @@ type mempoolTx struct { height int64 // height that this tx had been validated in gasWanted int64 // amount of gas this tx states it will require tx types.Tx // + + // ids of peers who've sent us this tx (as a map for quick lookups). + // senders: PeerID -> bool + senders sync.Map } // Height returns the height for this transaction @@ -679,13 +784,13 @@ type txCache interface { Remove(tx types.Tx) } -// mapTxCache maintains a cache of transactions. This only stores -// the hash of the tx, due to memory concerns. +// mapTxCache maintains a LRU cache of transactions. This only stores the hash +// of the tx, due to memory concerns. type mapTxCache struct { mtx sync.Mutex size int map_ map[[sha256.Size]byte]*list.Element - list *list.List // to remove oldest tx when cache gets too big + list *list.List } var _ txCache = (*mapTxCache)(nil) @@ -707,14 +812,14 @@ func (cache *mapTxCache) Reset() { cache.mtx.Unlock() } -// Push adds the given tx to the cache and returns true. It returns false if tx -// is already in the cache. +// Push adds the given tx to the cache and returns true. It returns +// false if tx is already in the cache. func (cache *mapTxCache) Push(tx types.Tx) bool { cache.mtx.Lock() defer cache.mtx.Unlock() // Use the tx hash in the cache - txHash := sha256.Sum256(tx) + txHash := txKey(tx) if moved, exists := cache.map_[txHash]; exists { cache.list.MoveToBack(moved) return false @@ -728,15 +833,15 @@ func (cache *mapTxCache) Push(tx types.Tx) bool { cache.list.Remove(popped) } } - cache.list.PushBack(txHash) - cache.map_[txHash] = cache.list.Back() + e := cache.list.PushBack(txHash) + cache.map_[txHash] = e return true } // Remove removes the given tx from the cache. 
func (cache *mapTxCache) Remove(tx types.Tx) { cache.mtx.Lock() - txHash := sha256.Sum256(tx) + txHash := txKey(tx) popped := cache.map_[txHash] delete(cache.map_, txHash) if popped != nil { diff --git a/mempool/mempool_test.go b/mempool/mempool_test.go index 5928fbc563c..d5f25396d33 100644 --- a/mempool/mempool_test.go +++ b/mempool/mempool_test.go @@ -6,17 +6,20 @@ import ( "encoding/binary" "fmt" "io/ioutil" + mrand "math/rand" "os" "path/filepath" "testing" "time" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" + amino "github.com/tendermint/go-amino" + "github.com/tendermint/tendermint/abci/example/counter" "github.com/tendermint/tendermint/abci/example/kvstore" + abciserver "github.com/tendermint/tendermint/abci/server" abci "github.com/tendermint/tendermint/abci/types" cfg "github.com/tendermint/tendermint/config" cmn "github.com/tendermint/tendermint/libs/common" @@ -63,8 +66,9 @@ func ensureFire(t *testing.T, ch <-chan struct{}, timeoutMS int) { } } -func checkTxs(t *testing.T, mempool *Mempool, count int) types.Txs { +func checkTxs(t *testing.T, mempool *Mempool, count int, peerID uint16) types.Txs { txs := make(types.Txs, count) + txInfo := TxInfo{PeerID: peerID} for i := 0; i < count; i++ { txBytes := make([]byte, 20) txs[i] = txBytes @@ -72,7 +76,7 @@ func checkTxs(t *testing.T, mempool *Mempool, count int) types.Txs { if err != nil { t.Error(err) } - if err := mempool.CheckTx(txBytes, nil); err != nil { + if err := mempool.CheckTxWithInfo(txBytes, nil, txInfo); err != nil { // Skip invalid txs. // TestMempoolFilters will fail otherwise. It asserts a number of txs // returned. @@ -92,7 +96,7 @@ func TestReapMaxBytesMaxGas(t *testing.T) { defer cleanup() // Ensure gas calculation behaves as expected - checkTxs(t, mempool, 1) + checkTxs(t, mempool, 1, UnknownPeerID) tx0 := mempool.TxsFront().Value.(*mempoolTx) // assert that kv store has gas wanted = 1. require.Equal(t, app.CheckTx(tx0.tx).GasWanted, int64(1), "KVStore had a gas value neq to 1") @@ -126,7 +130,7 @@ func TestReapMaxBytesMaxGas(t *testing.T) { {20, 20000, 30, 20}, } for tcIndex, tt := range tests { - checkTxs(t, mempool, tt.numTxsToCreate) + checkTxs(t, mempool, tt.numTxsToCreate, UnknownPeerID) got := mempool.ReapMaxBytesMaxGas(tt.maxBytes, tt.maxGas) assert.Equal(t, tt.expectedNumTxs, len(got), "Got %d txs, expected %d, tc #%d", len(got), tt.expectedNumTxs, tcIndex) @@ -167,7 +171,7 @@ func TestMempoolFilters(t *testing.T) { } for tcIndex, tt := range tests { mempool.Update(1, emptyTxArr, tt.preFilter, tt.postFilter) - checkTxs(t, mempool, tt.numTxsToCreate) + checkTxs(t, mempool, tt.numTxsToCreate, UnknownPeerID) require.Equal(t, tt.expectedNumTxs, mempool.Size(), "mempool had the incorrect size, on test case %d", tcIndex) mempool.Flush() } @@ -198,7 +202,7 @@ func TestTxsAvailable(t *testing.T) { ensureNoFire(t, mempool.TxsAvailable(), timeoutMS) // send a bunch of txs, it should only fire once - txs := checkTxs(t, mempool, 100) + txs := checkTxs(t, mempool, 100, UnknownPeerID) ensureFire(t, mempool.TxsAvailable(), timeoutMS) ensureNoFire(t, mempool.TxsAvailable(), timeoutMS) @@ -213,7 +217,7 @@ func TestTxsAvailable(t *testing.T) { ensureNoFire(t, mempool.TxsAvailable(), timeoutMS) // send a bunch more txs. we already fired for this height so it shouldnt fire again - moreTxs := checkTxs(t, mempool, 50) + moreTxs := checkTxs(t, mempool, 50, UnknownPeerID) ensureNoFire(t, mempool.TxsAvailable(), timeoutMS) // now call update with all the txs. 
it should not fire as there are no txs left @@ -224,7 +228,7 @@ func TestTxsAvailable(t *testing.T) { ensureNoFire(t, mempool.TxsAvailable(), timeoutMS) // send a bunch more txs, it should only fire once - checkTxs(t, mempool, 100) + checkTxs(t, mempool, 100, UnknownPeerID) ensureFire(t, mempool.TxsAvailable(), timeoutMS) ensureNoFire(t, mempool.TxsAvailable(), timeoutMS) } @@ -340,28 +344,6 @@ func TestSerialReap(t *testing.T) { reapCheck(600) } -func TestCacheRemove(t *testing.T) { - cache := newMapTxCache(100) - numTxs := 10 - txs := make([][]byte, numTxs) - for i := 0; i < numTxs; i++ { - // probability of collision is 2**-256 - txBytes := make([]byte, 32) - rand.Read(txBytes) - txs[i] = txBytes - cache.Push(txBytes) - // make sure its added to both the linked list and the map - require.Equal(t, i+1, len(cache.map_)) - require.Equal(t, i+1, cache.list.Len()) - } - for i := 0; i < numTxs; i++ { - cache.Remove(txs[i]) - // make sure its removed from both the map and the linked list - require.Equal(t, numTxs-(i+1), len(cache.map_)) - require.Equal(t, numTxs-(i+1), cache.list.Len()) - } -} - func TestMempoolCloseWAL(t *testing.T) { // 1. Create the temporary directory for mempool and WAL testing. rootDir, err := ioutil.TempDir("", "mempool-test") @@ -530,6 +512,54 @@ func TestMempoolTxsBytes(t *testing.T) { assert.EqualValues(t, 0, mempool.TxsBytes()) } +// This will non-deterministically catch some concurrency failures like +// https://github.com/tendermint/tendermint/issues/3509 +// TODO: all of the tests should probably also run using the remote proxy app +// since otherwise we're not actually testing the concurrency of the mempool here! +func TestMempoolRemoteAppConcurrency(t *testing.T) { + sockPath := fmt.Sprintf("unix:///tmp/echo_%v.sock", cmn.RandStr(6)) + app := kvstore.NewKVStoreApplication() + cc, server := newRemoteApp(t, sockPath, app) + defer server.Stop() + config := cfg.ResetTestRoot("mempool_test") + mempool, cleanup := newMempoolWithAppAndConfig(cc, config) + defer cleanup() + + // generate small number of txs + nTxs := 10 + txLen := 200 + txs := make([]types.Tx, nTxs) + for i := 0; i < nTxs; i++ { + txs[i] = cmn.RandBytes(txLen) + } + + // simulate a group of peers sending them over and over + N := config.Mempool.Size + maxPeers := 5 + for i := 0; i < N; i++ { + peerID := mrand.Intn(maxPeers) + txNum := mrand.Intn(nTxs) + tx := txs[int(txNum)] + + // this will err with ErrTxInCache many times ... 
+		mempool.CheckTxWithInfo(tx, nil, TxInfo{PeerID: uint16(peerID)})
+	}
+	err := mempool.FlushAppConn()
+	require.NoError(t, err)
+}
+
+// caller must close server
+func newRemoteApp(t *testing.T, addr string, app abci.Application) (clientCreator proxy.ClientCreator, server cmn.Service) {
+	clientCreator = proxy.NewRemoteClientCreator(addr, "socket", true)
+
+	// Start server
+	server = abciserver.NewSocketServer(addr, app)
+	server.SetLogger(log.TestingLogger().With("module", "abci-server"))
+	if err := server.Start(); err != nil {
+		t.Fatalf("Error starting socket server: %v", err.Error())
+	}
+	return clientCreator, server
+}
 func checksumIt(data []byte) string {
 	h := sha256.New()
 	h.Write(data)
diff --git a/mempool/reactor.go b/mempool/reactor.go
index ff87f05067f..e1376b2879e 100644
--- a/mempool/reactor.go
+++ b/mempool/reactor.go
@@ -2,14 +2,16 @@ package mempool
 
 import (
 	"fmt"
+	"math"
 	"reflect"
+	"sync"
 	"time"
 
 	amino "github.com/tendermint/go-amino"
 
-	"github.com/tendermint/tendermint/libs/clist"
-	"github.com/tendermint/tendermint/libs/log"
 	cfg "github.com/tendermint/tendermint/config"
+	"github.com/tendermint/tendermint/libs/clist"
+	"github.com/tendermint/tendermint/libs/log"
 	"github.com/tendermint/tendermint/p2p"
 	"github.com/tendermint/tendermint/types"
 )
@@ -21,13 +23,85 @@ const (
 	maxTxSize = maxMsgSize - 8 // account for amino overhead of TxMessage
 
 	peerCatchupSleepIntervalMS = 100 // If peer is behind, sleep this amount
+
+	// UnknownPeerID is the peer ID to use when running CheckTx when there is
+	// no peer (e.g. RPC)
+	UnknownPeerID uint16 = 0
+
+	maxActiveIDs = math.MaxUint16
 )
 
 // MempoolReactor handles mempool tx broadcasting amongst peers.
+// It maintains a map from peer ID to counter, to prevent gossiping txs back to
+// the peers they were received from.
 type MempoolReactor struct {
 	p2p.BaseReactor
 	config  *cfg.MempoolConfig
 	Mempool *Mempool
+	ids     *mempoolIDs
+}
+
+type mempoolIDs struct {
+	mtx       sync.RWMutex
+	peerMap   map[p2p.ID]uint16
+	nextID    uint16              // assumes that a node will never have over 65535 active peers
+	activeIDs map[uint16]struct{} // used to check if a given peerID key is used, the value doesn't matter
+}
+
+// ReserveForPeer searches for the next unused ID and assigns it to the
+// peer.
+func (ids *mempoolIDs) ReserveForPeer(peer p2p.Peer) {
+	ids.mtx.Lock()
+	defer ids.mtx.Unlock()
+
+	curID := ids.nextPeerID()
+	ids.peerMap[peer.ID()] = curID
+	ids.activeIDs[curID] = struct{}{}
+}
+
+// nextPeerID returns the next unused peer ID to use.
+// This assumes that ids's mutex is already locked.
+func (ids *mempoolIDs) nextPeerID() uint16 {
+	if len(ids.activeIDs) == maxActiveIDs {
+		panic(fmt.Sprintf("node has maximum %d active IDs and wanted to get one more", maxActiveIDs))
+	}
+
+	_, idExists := ids.activeIDs[ids.nextID]
+	for idExists {
+		ids.nextID++
+		_, idExists = ids.activeIDs[ids.nextID]
+	}
+	curID := ids.nextID
+	ids.nextID++
+	return curID
+}
+
+// Reclaim returns the ID reserved for the peer back to the unused pool.
+func (ids *mempoolIDs) Reclaim(peer p2p.Peer) {
+	ids.mtx.Lock()
+	defer ids.mtx.Unlock()
+
+	removedID, ok := ids.peerMap[peer.ID()]
+	if ok {
+		delete(ids.activeIDs, removedID)
+		delete(ids.peerMap, peer.ID())
+	}
+}
+
+// GetForPeer returns the ID reserved for the peer.
+func (ids *mempoolIDs) GetForPeer(peer p2p.Peer) uint16 {
+	ids.mtx.RLock()
+	defer ids.mtx.RUnlock()
+
+	return ids.peerMap[peer.ID()]
+}
+
+func newMempoolIDs() *mempoolIDs {
+	return &mempoolIDs{
+		peerMap:   make(map[p2p.ID]uint16),
+		activeIDs: map[uint16]struct{}{0: {}},
+		nextID:    1, // reserve unknownPeerID(0) for mempoolReactor.BroadcastTx
+	}
 }
 
 // NewMempoolReactor returns a new MempoolReactor with the given config and mempool.
@@ -35,6 +109,7 @@ func NewMempoolReactor(config *cfg.MempoolConfig, mempool *Mempool) *MempoolReac
 	memR := &MempoolReactor{
 		config:  config,
 		Mempool: mempool,
+		ids:     newMempoolIDs(),
 	}
 	memR.BaseReactor = *p2p.NewBaseReactor("MempoolReactor", memR)
 	return memR
@@ -68,11 +143,13 @@ func (memR *MempoolReactor) GetChannels() []*p2p.ChannelDescriptor {
 // AddPeer implements Reactor.
 // It starts a broadcast routine ensuring all txs are forwarded to the given peer.
 func (memR *MempoolReactor) AddPeer(peer p2p.Peer) {
+	memR.ids.ReserveForPeer(peer)
 	go memR.broadcastTxRoutine(peer)
 }
 
 // RemovePeer implements Reactor.
 func (memR *MempoolReactor) RemovePeer(peer p2p.Peer, reason interface{}) {
+	memR.ids.Reclaim(peer)
 	// broadcast routine checks if peer is gone and returns
 }
 
@@ -89,7 +166,8 @@ func (memR *MempoolReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
 
 	switch msg := msg.(type) {
 	case *TxMessage:
-		err := memR.Mempool.CheckTx(msg.Tx, nil)
+		peerID := memR.ids.GetForPeer(src)
+		err := memR.Mempool.CheckTxWithInfo(msg.Tx, nil, TxInfo{PeerID: peerID})
 		if err != nil {
 			memR.Logger.Info("Could not check tx", "tx", TxID(msg.Tx), "err", err)
 		}
@@ -110,8 +188,13 @@ func (memR *MempoolReactor) broadcastTxRoutine(peer p2p.Peer) {
 		return
 	}
 
+	peerID := memR.ids.GetForPeer(peer)
 	var next *clist.CElement
 	for {
+		// In case both next.NextWaitChan() and peer.Quit() fire at the same time,
+		// check that the reactor and the peer are still running before continuing.
+		if !memR.IsRunning() || !peer.IsRunning() {
+			return
+		}
 		// This happens because the CElement we were looking at got garbage
 		// collected (removed). That is, .NextWait() returned nil. Go ahead and
 		// start from the beginning.
@@ -146,12 +229,15 @@ func (memR *MempoolReactor) broadcastTxRoutine(peer p2p.Peer) { continue } - // send memTx - msg := &TxMessage{Tx: memTx.tx} - success := peer.Send(MempoolChannel, cdc.MustMarshalBinaryBare(msg)) - if !success { - time.Sleep(peerCatchupSleepIntervalMS * time.Millisecond) - continue + // ensure peer hasn't already sent us this tx + if _, ok := memTx.senders.Load(peerID); !ok { + // send memTx + msg := &TxMessage{Tx: memTx.tx} + success := peer.Send(MempoolChannel, cdc.MustMarshalBinaryBare(msg)) + if !success { + time.Sleep(peerCatchupSleepIntervalMS * time.Millisecond) + continue + } } select { diff --git a/mempool/reactor_test.go b/mempool/reactor_test.go index 51d130187f0..c9cf498098d 100644 --- a/mempool/reactor_test.go +++ b/mempool/reactor_test.go @@ -2,21 +2,21 @@ package mempool import ( "fmt" + "net" "sync" "testing" "time" "github.com/fortytw2/leaktest" + "github.com/go-kit/kit/log/term" "github.com/pkg/errors" "github.com/stretchr/testify/assert" - "github.com/go-kit/kit/log/term" - "github.com/tendermint/tendermint/abci/example/kvstore" - "github.com/tendermint/tendermint/libs/log" - cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/p2p" + "github.com/tendermint/tendermint/p2p/mock" "github.com/tendermint/tendermint/proxy" "github.com/tendermint/tendermint/types" ) @@ -102,6 +102,12 @@ func _waitForTxs(t *testing.T, wg *sync.WaitGroup, txs types.Txs, reactorIdx int wg.Done() } +// ensure no txs on reactor after some timeout +func ensureNoTxs(t *testing.T, reactor *MempoolReactor, timeout time.Duration) { + time.Sleep(timeout) // wait for the txs in all mempools + assert.Zero(t, reactor.Mempool.Size()) +} + const ( NUM_TXS = 1000 TIMEOUT = 120 * time.Second // ridiculously high because CircleCI is slow @@ -124,10 +130,26 @@ func TestReactorBroadcastTxMessage(t *testing.T) { // send a bunch of txs to the first reactor's mempool // and wait for them all to be received in the others - txs := checkTxs(t, reactors[0].Mempool, NUM_TXS) + txs := checkTxs(t, reactors[0].Mempool, NUM_TXS, UnknownPeerID) waitForTxs(t, txs, reactors) } +func TestReactorNoBroadcastToSender(t *testing.T) { + config := cfg.TestConfig() + const N = 2 + reactors := makeAndConnectMempoolReactors(config, N) + defer func() { + for _, r := range reactors { + r.Stop() + } + }() + + // send a bunch of txs to the first reactor's mempool, claiming it came from peer + // ensure peer gets no txs + checkTxs(t, reactors[0].Mempool, NUM_TXS, 1) + ensureNoTxs(t, reactors[1], 100*time.Millisecond) +} + func TestBroadcastTxForPeerStopsWhenPeerStops(t *testing.T) { if testing.Short() { t.Skip("skipping test in short mode.") @@ -169,3 +191,36 @@ func TestBroadcastTxForPeerStopsWhenReactorStops(t *testing.T) { // i.e. 
broadcastTxRoutine finishes when reactor is stopped leaktest.CheckTimeout(t, 10*time.Second)() } + +func TestMempoolIDsBasic(t *testing.T) { + ids := newMempoolIDs() + + peer := mock.NewPeer(net.IP{127, 0, 0, 1}) + + ids.ReserveForPeer(peer) + assert.EqualValues(t, 1, ids.GetForPeer(peer)) + ids.Reclaim(peer) + + ids.ReserveForPeer(peer) + assert.EqualValues(t, 2, ids.GetForPeer(peer)) + ids.Reclaim(peer) +} + +func TestMempoolIDsPanicsIfNodeRequestsOvermaxActiveIDs(t *testing.T) { + if testing.Short() { + return + } + + // 0 is already reserved for UnknownPeerID + ids := newMempoolIDs() + + for i := 0; i < maxActiveIDs-1; i++ { + peer := mock.NewPeer(net.IP{127, 0, 0, 1}) + ids.ReserveForPeer(peer) + } + + assert.Panics(t, func() { + peer := mock.NewPeer(net.IP{127, 0, 0, 1}) + ids.ReserveForPeer(peer) + }) +} diff --git a/node/node.go b/node/node.go index 22735792166..b2843ac8a29 100644 --- a/node/node.go +++ b/node/node.go @@ -490,7 +490,7 @@ func NewNode(config *cfg.Config, addrBook := pex.NewAddrBook(config.P2P.AddrBookFile(), config.P2P.AddrBookStrict) // Add ourselves to addrbook to prevent dialing ourselves - addrBook.AddOurAddress(nodeInfo.NetAddress()) + addrBook.AddOurAddress(sw.NetAddress()) addrBook.SetLogger(p2pLogger.With("book", config.P2P.AddrBookFile())) if config.P2P.PexReactor { @@ -499,6 +499,12 @@ func NewNode(config *cfg.Config, &pex.PEXReactorConfig{ Seeds: splitAndTrimEmpty(config.P2P.Seeds, ",", " "), SeedMode: config.P2P.SeedMode, + // See consensus/reactor.go: blocksToContributeToBecomeGoodPeer 10000 + // blocks assuming 10s blocks ~ 28 hours. + // TODO (melekes): make it dynamic based on the actual block latencies + // from the live network. + // https://github.com/tendermint/tendermint/issues/3523 + SeedDisconnectWaitPeriod: 28 * time.Hour, }) pexReactor.SetLogger(logger.With("module", "pex")) sw.AddReactor("PEX", pexReactor) @@ -716,13 +722,24 @@ func (n *Node) startRPC() ([]net.Listener, error) { }) rootHandler = corsMiddleware.Handler(mux) } + if n.config.RPC.IsTLSEnabled() { + go rpcserver.StartHTTPAndTLSServer( + listener, + rootHandler, + n.config.RPC.CertFile(), + n.config.RPC.KeyFile(), + rpcLogger, + config, + ) + } else { + go rpcserver.StartHTTPServer( + listener, + rootHandler, + rpcLogger, + config, + ) + } - go rpcserver.StartHTTPServer( - listener, - rootHandler, - rpcLogger, - config, - ) listeners[i] = listener } diff --git a/p2p/conn/connection.go b/p2p/conn/connection.go index c1e90ab76ad..e0ce062ab75 100644 --- a/p2p/conn/connection.go +++ b/p2p/conn/connection.go @@ -95,13 +95,13 @@ type MConnection struct { stopMtx sync.Mutex flushTimer *cmn.ThrottleTimer // flush writes as necessary but throttled. 
- pingTimer *cmn.RepeatTimer // send pings periodically + pingTimer *time.Ticker // send pings periodically // close conn if pong is not received in pongTimeout pongTimer *time.Timer pongTimeoutCh chan bool // true - timeout, false - peer sent pong - chStatsTimer *cmn.RepeatTimer // update channel stats periodically + chStatsTimer *time.Ticker // update channel stats periodically created time.Time // time of creation @@ -201,9 +201,9 @@ func (c *MConnection) OnStart() error { return err } c.flushTimer = cmn.NewThrottleTimer("flush", c.config.FlushThrottle) - c.pingTimer = cmn.NewRepeatTimer("ping", c.config.PingInterval) + c.pingTimer = time.NewTicker(c.config.PingInterval) c.pongTimeoutCh = make(chan bool, 1) - c.chStatsTimer = cmn.NewRepeatTimer("chStats", updateStats) + c.chStatsTimer = time.NewTicker(updateStats) c.quitSendRoutine = make(chan struct{}) c.doneSendRoutine = make(chan struct{}) go c.sendRoutine() @@ -401,11 +401,11 @@ FOR_LOOP: // NOTE: flushTimer.Set() must be called every time // something is written to .bufConnWriter. c.flush() - case <-c.chStatsTimer.Chan(): + case <-c.chStatsTimer.C: for _, channel := range c.channels { channel.updateStats() } - case <-c.pingTimer.Chan(): + case <-c.pingTimer.C: c.Logger.Debug("Send Ping") _n, err = cdc.MarshalBinaryLengthPrefixedWriter(c.bufConnWriter, PacketPing{}) if err != nil { diff --git a/p2p/dummy/peer.go b/p2p/dummy/peer.go deleted file mode 100644 index 57edafc6769..00000000000 --- a/p2p/dummy/peer.go +++ /dev/null @@ -1,100 +0,0 @@ -package dummy - -import ( - "net" - - cmn "github.com/tendermint/tendermint/libs/common" - p2p "github.com/tendermint/tendermint/p2p" - tmconn "github.com/tendermint/tendermint/p2p/conn" -) - -type peer struct { - cmn.BaseService - kv map[string]interface{} -} - -var _ p2p.Peer = (*peer)(nil) - -// NewPeer creates new dummy peer. -func NewPeer() *peer { - p := &peer{ - kv: make(map[string]interface{}), - } - p.BaseService = *cmn.NewBaseService(nil, "peer", p) - - return p -} - -// FlushStop just calls Stop. -func (p *peer) FlushStop() { - p.Stop() -} - -// ID always returns dummy. -func (p *peer) ID() p2p.ID { - return p2p.ID("dummy") -} - -// IsOutbound always returns false. -func (p *peer) IsOutbound() bool { - return false -} - -// IsPersistent always returns false. -func (p *peer) IsPersistent() bool { - return false -} - -// NodeInfo always returns empty node info. -func (p *peer) NodeInfo() p2p.NodeInfo { - return p2p.DefaultNodeInfo{} -} - -// RemoteIP always returns localhost. -func (p *peer) RemoteIP() net.IP { - return net.ParseIP("127.0.0.1") -} - -// Addr always returns tcp://localhost:8800. -func (p *peer) RemoteAddr() net.Addr { - return &net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: 8800} -} - -// CloseConn always returns nil. -func (p *peer) CloseConn() error { - return nil -} - -// Status always returns empry connection status. -func (p *peer) Status() tmconn.ConnectionStatus { - return tmconn.ConnectionStatus{} -} - -// Send does not do anything and just returns true. -func (p *peer) Send(byte, []byte) bool { - return true -} - -// TrySend does not do anything and just returns true. -func (p *peer) TrySend(byte, []byte) bool { - return true -} - -// Set records value under key specified in the map. -func (p *peer) Set(key string, value interface{}) { - p.kv[key] = value -} - -// Get returns a value associated with the key. Nil is returned if no value -// found. 
-func (p *peer) Get(key string) interface{} { - if value, ok := p.kv[key]; ok { - return value - } - return nil -} - -// OriginalAddr always returns nil. -func (p *peer) OriginalAddr() *p2p.NetAddress { - return nil -} diff --git a/p2p/errors.go b/p2p/errors.go index 706150945b8..3650a7a0a8e 100644 --- a/p2p/errors.go +++ b/p2p/errors.go @@ -103,7 +103,7 @@ type ErrSwitchDuplicatePeerID struct { } func (e ErrSwitchDuplicatePeerID) Error() string { - return fmt.Sprintf("Duplicate peer ID %v", e.ID) + return fmt.Sprintf("duplicate peer ID %v", e.ID) } // ErrSwitchDuplicatePeerIP to be raised whena a peer is connecting with a known @@ -113,7 +113,7 @@ type ErrSwitchDuplicatePeerIP struct { } func (e ErrSwitchDuplicatePeerIP) Error() string { - return fmt.Sprintf("Duplicate peer IP %v", e.IP.String()) + return fmt.Sprintf("duplicate peer IP %v", e.IP.String()) } // ErrSwitchConnectToSelf to be raised when trying to connect to itself. @@ -122,7 +122,7 @@ type ErrSwitchConnectToSelf struct { } func (e ErrSwitchConnectToSelf) Error() string { - return fmt.Sprintf("Connect to self: %v", e.Addr) + return fmt.Sprintf("connect to self: %v", e.Addr) } type ErrSwitchAuthenticationFailure struct { @@ -132,7 +132,7 @@ type ErrSwitchAuthenticationFailure struct { func (e ErrSwitchAuthenticationFailure) Error() string { return fmt.Sprintf( - "Failed to authenticate peer. Dialed %v, but got peer with ID %s", + "failed to authenticate peer. Dialed %v, but got peer with ID %s", e.Dialed, e.Got, ) @@ -152,7 +152,7 @@ type ErrNetAddressNoID struct { } func (e ErrNetAddressNoID) Error() string { - return fmt.Sprintf("Address (%s) does not contain ID", e.Addr) + return fmt.Sprintf("address (%s) does not contain ID", e.Addr) } type ErrNetAddressInvalid struct { @@ -161,7 +161,7 @@ type ErrNetAddressInvalid struct { } func (e ErrNetAddressInvalid) Error() string { - return fmt.Sprintf("Invalid address (%s): %v", e.Addr, e.Err) + return fmt.Sprintf("invalid address (%s): %v", e.Addr, e.Err) } type ErrNetAddressLookup struct { @@ -170,5 +170,15 @@ type ErrNetAddressLookup struct { } func (e ErrNetAddressLookup) Error() string { - return fmt.Sprintf("Error looking up host (%s): %v", e.Addr, e.Err) + return fmt.Sprintf("error looking up host (%s): %v", e.Addr, e.Err) +} + +// ErrCurrentlyDialingOrExistingAddress indicates that we're currently +// dialing this address or it belongs to an existing peer. +type ErrCurrentlyDialingOrExistingAddress struct { + Addr string +} + +func (e ErrCurrentlyDialingOrExistingAddress) Error() string { + return fmt.Sprintf("connection with %s has been established or dialed", e.Addr) } diff --git a/p2p/mock/peer.go b/p2p/mock/peer.go new file mode 100644 index 00000000000..bdcf012de28 --- /dev/null +++ b/p2p/mock/peer.go @@ -0,0 +1,68 @@ +package mock + +import ( + "net" + + "github.com/tendermint/tendermint/crypto/ed25519" + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/p2p" + "github.com/tendermint/tendermint/p2p/conn" +) + +type Peer struct { + *cmn.BaseService + ip net.IP + id p2p.ID + addr *p2p.NetAddress + kv map[string]interface{} + Outbound, Persistent bool +} + +// NewPeer creates and starts a new mock peer. If the ip +// is nil, random routable address is used. 
+func NewPeer(ip net.IP) *Peer { + var netAddr *p2p.NetAddress + if ip == nil { + _, netAddr = p2p.CreateRoutableAddr() + } else { + netAddr = p2p.NewNetAddressIPPort(ip, 26656) + } + nodeKey := p2p.NodeKey{PrivKey: ed25519.GenPrivKey()} + netAddr.ID = nodeKey.ID() + mp := &Peer{ + ip: ip, + id: nodeKey.ID(), + addr: netAddr, + kv: make(map[string]interface{}), + } + mp.BaseService = cmn.NewBaseService(nil, "MockPeer", mp) + mp.Start() + return mp +} + +func (mp *Peer) FlushStop() { mp.Stop() } +func (mp *Peer) TrySend(chID byte, msgBytes []byte) bool { return true } +func (mp *Peer) Send(chID byte, msgBytes []byte) bool { return true } +func (mp *Peer) NodeInfo() p2p.NodeInfo { + return p2p.DefaultNodeInfo{ + ID_: mp.addr.ID, + ListenAddr: mp.addr.DialString(), + } +} +func (mp *Peer) Status() conn.ConnectionStatus { return conn.ConnectionStatus{} } +func (mp *Peer) ID() p2p.ID { return mp.id } +func (mp *Peer) IsOutbound() bool { return mp.Outbound } +func (mp *Peer) IsPersistent() bool { return mp.Persistent } +func (mp *Peer) Get(key string) interface{} { + if value, ok := mp.kv[key]; ok { + return value + } + return nil +} +func (mp *Peer) Set(key string, value interface{}) { + mp.kv[key] = value +} +func (mp *Peer) RemoteIP() net.IP { return mp.ip } +func (mp *Peer) SocketAddr() *p2p.NetAddress { return mp.addr } +func (mp *Peer) RemoteAddr() net.Addr { return &net.TCPAddr{IP: mp.ip, Port: 8800} } +func (mp *Peer) CloseConn() error { return nil } diff --git a/p2p/node_info.go b/p2p/node_info.go index 699fd7f1e39..8195471ed71 100644 --- a/p2p/node_info.go +++ b/p2p/node_info.go @@ -23,14 +23,13 @@ func MaxNodeInfoSize() int { // NodeInfo exposes basic info of a node // and determines if we're compatible. type NodeInfo interface { + ID() ID nodeInfoAddress nodeInfoTransport } -// nodeInfoAddress exposes just the core info of a node. type nodeInfoAddress interface { - ID() ID - NetAddress() *NetAddress + NetAddress() (*NetAddress, error) } // nodeInfoTransport validates a nodeInfo and checks @@ -215,20 +214,9 @@ OUTER_LOOP: // it includes the authenticated peer ID and the self-reported // ListenAddr. Note that the ListenAddr is not authenticated and // may not match that address actually dialed if its an outbound peer. -func (info DefaultNodeInfo) NetAddress() *NetAddress { +func (info DefaultNodeInfo) NetAddress() (*NetAddress, error) { idAddr := IDAddressString(info.ID(), info.ListenAddr) - netAddr, err := NewNetAddressString(idAddr) - if err != nil { - switch err.(type) { - case ErrNetAddressLookup: - // XXX If the peer provided a host name and the lookup fails here - // we're out of luck. 
- // TODO: use a NetAddress in DefaultNodeInfo - default: - panic(err) // everything should be well formed by now - } - } - return netAddr + return NewNetAddressString(idAddr) } //----------------------------------------------------------- diff --git a/p2p/peer.go b/p2p/peer.go index 73332a2aa8f..fab3b42d46c 100644 --- a/p2p/peer.go +++ b/p2p/peer.go @@ -29,7 +29,7 @@ type Peer interface { NodeInfo() NodeInfo // peer's info Status() tmconn.ConnectionStatus - OriginalAddr() *NetAddress // original address for outbound peers + SocketAddr() *NetAddress // actual address of the socket Send(byte, []byte) bool TrySend(byte, []byte) bool @@ -46,7 +46,7 @@ type peerConn struct { persistent bool conn net.Conn // source connection - originalAddr *NetAddress // nil for inbound connections + socketAddr *NetAddress // cached RemoteIP() ip net.IP @@ -55,14 +55,14 @@ type peerConn struct { func newPeerConn( outbound, persistent bool, conn net.Conn, - originalAddr *NetAddress, + socketAddr *NetAddress, ) peerConn { return peerConn{ - outbound: outbound, - persistent: persistent, - conn: conn, - originalAddr: originalAddr, + outbound: outbound, + persistent: persistent, + conn: conn, + socketAddr: socketAddr, } } @@ -223,13 +223,12 @@ func (p *peer) NodeInfo() NodeInfo { return p.nodeInfo } -// OriginalAddr returns the original address, which was used to connect with -// the peer. Returns nil for inbound peers. -func (p *peer) OriginalAddr() *NetAddress { - if p.peerConn.outbound { - return p.peerConn.originalAddr - } - return nil +// SocketAddr returns the address of the socket. +// For outbound peers, it's the address dialed (after DNS resolution). +// For inbound peers, it's the address returned by the underlying connection +// (not what's reported in the peer's NodeInfo). +func (p *peer) SocketAddr() *NetAddress { + return p.peerConn.socketAddr } // Status returns the peer's ConnectionStatus. 
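The NodeInfo change above turns NetAddress() into an (address, error) pair because the self-reported ListenAddr may contain a hostname whose lookup can fail at runtime, which previously ended in a panic. A runnable standard-library sketch of why the error return is needed; parseIDAddr is a hypothetical helper standing in for IDAddressString/NewNetAddressString:

package main

import (
	"errors"
	"fmt"
	"net"
	"strings"
)

// parseIDAddr splits "id@host:port" and resolves the host, mirroring the
// id-at-address format used by the address book.
func parseIDAddr(s string) (id string, addr *net.TCPAddr, err error) {
	parts := strings.SplitN(s, "@", 2)
	if len(parts) != 2 || parts[0] == "" {
		return "", nil, errors.New("address does not contain ID")
	}
	host, port, err := net.SplitHostPort(parts[1])
	if err != nil {
		return "", nil, err
	}
	// The self-reported ListenAddr may be a hostname; this lookup is the
	// step that can fail and is why NetAddress() now returns an error.
	ips, err := net.LookupIP(host)
	if err != nil {
		return "", nil, err
	}
	addr, err = net.ResolveTCPAddr("tcp", net.JoinHostPort(ips[0].String(), port))
	return parts[0], addr, err
}

func main() {
	id, addr, err := parseIDAddr("deadbeef@127.0.0.1:26656")
	fmt.Println(id, addr, err) // deadbeef 127.0.0.1:26656 <nil>

	_, _, err = parseIDAddr("deadbeef@no.such.host.invalid:26656")
	fmt.Println(err) // the lookup error surfaces to the caller instead of panicking
}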
diff --git a/p2p/peer_set_test.go b/p2p/peer_set_test.go index 1d2372fb087..4bacb07d0e1 100644 --- a/p2p/peer_set_test.go +++ b/p2p/peer_set_test.go @@ -29,7 +29,7 @@ func (mp *mockPeer) IsPersistent() bool { return true } func (mp *mockPeer) Get(s string) interface{} { return s } func (mp *mockPeer) Set(string, interface{}) {} func (mp *mockPeer) RemoteIP() net.IP { return mp.ip } -func (mp *mockPeer) OriginalAddr() *NetAddress { return nil } +func (mp *mockPeer) SocketAddr() *NetAddress { return nil } func (mp *mockPeer) RemoteAddr() net.Addr { return &net.TCPAddr{IP: mp.ip, Port: 8800} } func (mp *mockPeer) CloseConn() error { return nil } diff --git a/p2p/peer_test.go b/p2p/peer_test.go index 90be311317c..bf61beb4fa6 100644 --- a/p2p/peer_test.go +++ b/p2p/peer_test.go @@ -109,25 +109,27 @@ func testOutboundPeerConn( persistent bool, ourNodePrivKey crypto.PrivKey, ) (peerConn, error) { + + var pc peerConn conn, err := testDial(addr, config) if err != nil { - return peerConn{}, cmn.ErrorWrap(err, "Error creating peer") + return pc, cmn.ErrorWrap(err, "Error creating peer") } - pc, err := testPeerConn(conn, config, true, persistent, ourNodePrivKey, addr) + pc, err = testPeerConn(conn, config, true, persistent, ourNodePrivKey, addr) if err != nil { if cerr := conn.Close(); cerr != nil { - return peerConn{}, cmn.ErrorWrap(err, cerr.Error()) + return pc, cmn.ErrorWrap(err, cerr.Error()) } - return peerConn{}, err + return pc, err } // ensure dialed ID matches connection ID if addr.ID != pc.ID() { if cerr := conn.Close(); cerr != nil { - return peerConn{}, cmn.ErrorWrap(err, cerr.Error()) + return pc, cmn.ErrorWrap(err, cerr.Error()) } - return peerConn{}, ErrSwitchAuthenticationFailure{addr, pc.ID()} + return pc, ErrSwitchAuthenticationFailure{addr, pc.ID()} } return pc, nil diff --git a/p2p/pex/addrbook.go b/p2p/pex/addrbook.go index 3cda9ac7473..85dd05248ff 100644 --- a/p2p/pex/addrbook.go +++ b/p2p/pex/addrbook.go @@ -9,6 +9,7 @@ import ( "encoding/binary" "fmt" "math" + "math/rand" "net" "sync" "time" @@ -54,7 +55,7 @@ type AddrBook interface { PickAddress(biasTowardsNewAddrs int) *p2p.NetAddress // Mark address - MarkGood(*p2p.NetAddress) + MarkGood(p2p.ID) MarkAttempt(*p2p.NetAddress) MarkBad(*p2p.NetAddress) @@ -65,8 +66,7 @@ type AddrBook interface { // Send a selection of addresses with bias GetSelectionWithBias(biasTowardsNewAddrs int) []*p2p.NetAddress - // TODO: remove - ListOfKnownAddresses() []*knownAddress + Size() int // Persist to disk Save() @@ -253,7 +253,7 @@ func (a *addrBook) PickAddress(biasTowardsNewAddrs int) *p2p.NetAddress { bookSize := a.size() if bookSize <= 0 { if bookSize < 0 { - a.Logger.Error("Addrbook size less than 0", "nNew", a.nNew, "nOld", a.nOld) + panic(fmt.Sprintf("Addrbook size %d (new: %d + old: %d) is less than 0", a.nNew+a.nOld, a.nNew, a.nOld)) } return nil } @@ -296,11 +296,11 @@ func (a *addrBook) PickAddress(biasTowardsNewAddrs int) *p2p.NetAddress { // MarkGood implements AddrBook - it marks the peer as good and // moves it into an "old" bucket. 
-func (a *addrBook) MarkGood(addr *p2p.NetAddress) { +func (a *addrBook) MarkGood(id p2p.ID) { a.mtx.Lock() defer a.mtx.Unlock() - ka := a.addrLookup[addr.ID] + ka := a.addrLookup[id] if ka == nil { return } @@ -338,7 +338,7 @@ func (a *addrBook) GetSelection() []*p2p.NetAddress { bookSize := a.size() if bookSize <= 0 { if bookSize < 0 { - a.Logger.Error("Addrbook size less than 0", "nNew", a.nNew, "nOld", a.nOld) + panic(fmt.Sprintf("Addrbook size %d (new: %d + old: %d) is less than 0", a.nNew+a.nOld, a.nNew, a.nOld)) } return nil } @@ -388,7 +388,7 @@ func (a *addrBook) GetSelectionWithBias(biasTowardsNewAddrs int) []*p2p.NetAddre bookSize := a.size() if bookSize <= 0 { if bookSize < 0 { - a.Logger.Error("Addrbook size less than 0", "nNew", a.nNew, "nOld", a.nOld) + panic(fmt.Sprintf("Addrbook size %d (new: %d + old: %d) is less than 0", a.nNew+a.nOld, a.nNew, a.nOld)) } return nil } @@ -405,104 +405,14 @@ func (a *addrBook) GetSelectionWithBias(biasTowardsNewAddrs int) []*p2p.NetAddre bookSize*getSelectionPercent/100) numAddresses = cmn.MinInt(maxGetSelection, numAddresses) - selection := make([]*p2p.NetAddress, numAddresses) - - oldBucketToAddrsMap := make(map[int]map[string]struct{}) - var oldIndex int - newBucketToAddrsMap := make(map[int]map[string]struct{}) - var newIndex int - - // initialize counters used to count old and new added addresses. - // len(oldBucketToAddrsMap) cannot be used as multiple addresses can endup in the same bucket. - var oldAddressesAdded int - var newAddressesAdded int - // number of new addresses that, if possible, should be in the beginning of the selection - numRequiredNewAdd := percentageOfNum(biasTowardsNewAddrs, numAddresses) - - selectionIndex := 0 -ADDRS_LOOP: - for selectionIndex < numAddresses { - // biasedTowardsOldAddrs indicates if the selection can switch to old addresses - biasedTowardsOldAddrs := selectionIndex >= numRequiredNewAdd - // An old addresses is selected if: - // - the bias is for old and old addressees are still available or, - // - there are no new addresses or all new addresses have been selected. 
-		// numAddresses <= a.nOld + a.nNew therefore it is guaranteed that there are enough
-		// addresses to fill the selection
-		pickFromOldBucket :=
-			(biasedTowardsOldAddrs && oldAddressesAdded < a.nOld) ||
-				a.nNew == 0 || newAddressesAdded >= a.nNew
-
-		bucket := make(map[string]*knownAddress)
-
-		// loop until we pick a random non-empty bucket
-		for len(bucket) == 0 {
-			if pickFromOldBucket {
-				oldIndex = a.rand.Intn(len(a.bucketsOld))
-				bucket = a.bucketsOld[oldIndex]
-			} else {
-				newIndex = a.rand.Intn(len(a.bucketsNew))
-				bucket = a.bucketsNew[newIndex]
-			}
-		}
-
-		// pick a random index
-		randIndex := a.rand.Intn(len(bucket))
-
-		// loop over the map to return that index
-		var selectedAddr *p2p.NetAddress
-		for _, ka := range bucket {
-			if randIndex == 0 {
-				selectedAddr = ka.Addr
-				break
-			}
-			randIndex--
-		}
-
-		// if we have selected the address before, restart the loop
-		// otherwise, record it and continue
-		if pickFromOldBucket {
-			if addrsMap, ok := oldBucketToAddrsMap[oldIndex]; ok {
-				if _, ok = addrsMap[selectedAddr.String()]; ok {
-					continue ADDRS_LOOP
-				}
-			} else {
-				oldBucketToAddrsMap[oldIndex] = make(map[string]struct{})
-			}
-			oldBucketToAddrsMap[oldIndex][selectedAddr.String()] = struct{}{}
-			oldAddressesAdded++
-		} else {
-			if addrsMap, ok := newBucketToAddrsMap[newIndex]; ok {
-				if _, ok = addrsMap[selectedAddr.String()]; ok {
-					continue ADDRS_LOOP
-				}
-			} else {
-				newBucketToAddrsMap[newIndex] = make(map[string]struct{})
-			}
-			newBucketToAddrsMap[newIndex][selectedAddr.String()] = struct{}{}
-			newAddressesAdded++
-		}
-
-		selection[selectionIndex] = selectedAddr
-		selectionIndex++
-	}
-
+	// If there are not enough old addresses, fill the rest of the selection with new ones.
+	numRequiredNewAdd := cmn.MaxInt(percentageOfNum(biasTowardsNewAddrs, numAddresses), numAddresses-a.nOld)
+	selection := a.randomPickAddresses(bucketTypeNew, numRequiredNewAdd)
+	selection = append(selection, a.randomPickAddresses(bucketTypeOld, numAddresses-len(selection))...)
 	return selection
 }
 
-// ListOfKnownAddresses returns the new and old addresses.
-func (a *addrBook) ListOfKnownAddresses() []*knownAddress {
-	a.mtx.Lock()
-	defer a.mtx.Unlock()
-
-	addrs := []*knownAddress{}
-	for _, addr := range a.addrLookup {
-		addrs = append(addrs, addr.copy())
-	}
-	return addrs
-}
-
 //------------------------------------------------
 
 // Size returns the number of addresses in the book.
@@ -550,8 +460,7 @@ func (a *addrBook) getBucket(bucketType byte, bucketIdx int) map[string]*knownAd
 	case bucketTypeOld:
 		return a.bucketsOld[bucketIdx]
 	default:
-		cmn.PanicSanity("Should not happen")
-		return nil
+		panic("Invalid bucket type")
 	}
 }
 
@@ -677,16 +586,16 @@ func (a *addrBook) addAddress(addr, src *p2p.NetAddress) error {
 		return ErrAddrBookNilAddr{addr, src}
 	}
 
-	if a.routabilityStrict && !addr.Routable() {
-		return ErrAddrBookNonRoutable{addr}
+	if !addr.HasID() {
+		return ErrAddrBookInvalidAddrNoID{addr}
 	}
 
-	if !addr.Valid() {
-		return ErrAddrBookInvalidAddr{addr}
+	if _, ok := a.privateIDs[addr.ID]; ok {
+		return ErrAddrBookPrivate{addr}
 	}
 
-	if !addr.HasID() {
-		return ErrAddrBookInvalidAddrNoID{addr}
+	if _, ok := a.privateIDs[src.ID]; ok {
+		return ErrAddrBookPrivateSrc{src}
 	}
 
 	// TODO: we should track ourAddrs by ID and by IP:PORT and refuse both.
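The GetSelectionWithBias rewrite above reduces the bias logic to one line of arithmetic: take biasTowardsNewAddrs percent of the target from the new buckets, but never ask the old buckets for more addresses than they hold. A sketch of just that split, with illustrative names (split, pickNew, pickOld are not the PR's identifiers):

package main

import "fmt"

func minInt(a, b int) int {
	if a < b {
		return a
	}
	return b
}

func maxInt(a, b int) int {
	if a > b {
		return a
	}
	return b
}

// split mirrors: numRequiredNewAdd = max(bias% of num, num - nOld),
// clamped to what actually exists in each kind of bucket.
func split(num, nOld, nNew, biasPercent int) (pickNew, pickOld int) {
	pickNew = maxInt(num*biasPercent/100, num-nOld)
	pickNew = minInt(pickNew, nNew) // cannot pick more new addresses than exist
	pickOld = minInt(num-pickNew, nOld)
	return
}

func main() {
	fmt.Println(split(10, 100, 100, 30)) // 3 7 (plenty of both kinds)
	fmt.Println(split(10, 2, 100, 30))   // 8 2 (old buckets nearly empty, new ones fill the gap)
}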
@@ -694,12 +603,12 @@ func (a *addrBook) addAddress(addr, src *p2p.NetAddress) error { return ErrAddrBookSelf{addr} } - if _, ok := a.privateIDs[addr.ID]; ok { - return ErrAddrBookPrivate{addr} + if a.routabilityStrict && !addr.Routable() { + return ErrAddrBookNonRoutable{addr} } - if _, ok := a.privateIDs[src.ID]; ok { - return ErrAddrBookPrivateSrc{src} + if !addr.Valid() { + return ErrAddrBookInvalidAddr{addr} } ka := a.addrLookup[addr.ID] @@ -726,6 +635,44 @@ func (a *addrBook) addAddress(addr, src *p2p.NetAddress) error { return nil } +func (a *addrBook) randomPickAddresses(bucketType byte, num int) []*p2p.NetAddress { + var buckets []map[string]*knownAddress + switch bucketType { + case bucketTypeNew: + buckets = a.bucketsNew + case bucketTypeOld: + buckets = a.bucketsOld + default: + panic("unexpected bucketType") + } + total := 0 + for _, bucket := range buckets { + total = total + len(bucket) + } + addresses := make([]*knownAddress, 0, total) + for _, bucket := range buckets { + for _, ka := range bucket { + addresses = append(addresses, ka) + } + } + selection := make([]*p2p.NetAddress, 0, num) + chosenSet := make(map[string]bool, num) + rand.Shuffle(total, func(i, j int) { + addresses[i], addresses[j] = addresses[j], addresses[i] + }) + for _, addr := range addresses { + if chosenSet[addr.Addr.String()] { + continue + } + chosenSet[addr.Addr.String()] = true + selection = append(selection, addr.Addr) + if len(selection) >= num { + return selection + } + } + return selection +} + // Make space in the new buckets by expiring the really bad entries. // If no bad entries are available we remove the oldest. func (a *addrBook) expireNew(bucketIdx int) { diff --git a/p2p/pex/addrbook_test.go b/p2p/pex/addrbook_test.go index 9effa5d0e90..13bac28c866 100644 --- a/p2p/pex/addrbook_test.go +++ b/p2p/pex/addrbook_test.go @@ -41,7 +41,7 @@ func TestAddrBookPickAddress(t *testing.T) { assert.NotNil(t, addr, "expected an address") // pick an address when we only have old address - book.MarkGood(addrSrc.addr) + book.MarkGood(addrSrc.addr.ID) addr = book.PickAddress(0) assert.NotNil(t, addr, "expected an address") addr = book.PickAddress(50) @@ -126,7 +126,7 @@ func TestAddrBookPromoteToOld(t *testing.T) { // Promote half of them for i, addrSrc := range randAddrs { if i%2 == 0 { - book.MarkGood(addrSrc.addr) + book.MarkGood(addrSrc.addr.ID) } } @@ -330,7 +330,7 @@ func TestAddrBookGetSelectionWithBias(t *testing.T) { randAddrsLen := len(randAddrs) for i, addrSrc := range randAddrs { if int((float64(i)/float64(randAddrsLen))*100) >= 20 { - book.MarkGood(addrSrc.addr) + book.MarkGood(addrSrc.addr.ID) } } @@ -435,12 +435,12 @@ func TestPrivatePeers(t *testing.T) { func testAddrBookAddressSelection(t *testing.T, bookSize int) { // generate all combinations of old (m) and new addresses - for nOld := 0; nOld <= bookSize; nOld++ { - nNew := bookSize - nOld - dbgStr := fmt.Sprintf("book of size %d (new %d, old %d)", bookSize, nNew, nOld) + for nBookOld := 0; nBookOld <= bookSize; nBookOld++ { + nBookNew := bookSize - nBookOld + dbgStr := fmt.Sprintf("book of size %d (new %d, old %d)", bookSize, nBookNew, nBookOld) // create book and get selection - book, fname := createAddrBookWithMOldAndNNewAddrs(t, nOld, nNew) + book, fname := createAddrBookWithMOldAndNNewAddrs(t, nBookOld, nBookNew) defer deleteTempFile(fname) addrs := book.GetSelectionWithBias(biasToSelectNewPeers) assert.NotNil(t, addrs, "%s - expected a non-nil selection", dbgStr) @@ -460,27 +460,25 @@ func testAddrBookAddressSelection(t 
*testing.T, bookSize int) { // Given: // n - num new addrs, m - num old addrs // k - num new addrs expected in the beginning (based on bias %) - // i=min(n, k), aka expFirstNew + // i=min(n, max(k,r-m)), aka expNew // j=min(m, r-i), aka expOld // // We expect this layout: - // indices: 0...i-1 i...i+j-1 i+j...r - // addresses: N0..Ni-1 O0..Oj-1 Ni... + // indices: 0...i-1 i...i+j-1 + // addresses: N0..Ni-1 O0..Oj-1 // // There is at least one partition and at most three. var ( - k = percentageOfNum(biasToSelectNewPeers, nAddrs) - expFirstNew = cmn.MinInt(nNew, k) - expOld = cmn.MinInt(nOld, nAddrs-expFirstNew) - expNew = nAddrs - expOld - expLastNew = expNew - expFirstNew + k = percentageOfNum(biasToSelectNewPeers, nAddrs) + expNew = cmn.MinInt(nNew, cmn.MaxInt(k, nAddrs-nBookOld)) + expOld = cmn.MinInt(nOld, nAddrs-expNew) ) // Verify that the number of old and new addresses are as expected - if nNew < expNew || nNew > expNew { + if nNew != expNew { t.Fatalf("%s - expected new addrs %d, got %d", dbgStr, expNew, nNew) } - if nOld < expOld || nOld > expOld { + if nOld != expOld { t.Fatalf("%s - expected old addrs %d, got %d", dbgStr, expOld, nOld) } @@ -499,15 +497,12 @@ func testAddrBookAddressSelection(t *testing.T, bookSize int) { case expOld == 0: // all new addresses expSeqLens = []int{nAddrs} expSeqTypes = []int{1} - case expFirstNew == 0: // all old addresses + case expNew == 0: // all old addresses expSeqLens = []int{nAddrs} expSeqTypes = []int{2} - case nAddrs-expFirstNew-expOld == 0: // new addresses, old addresses - expSeqLens = []int{expFirstNew, expOld} + case nAddrs-expNew-expOld == 0: // new addresses, old addresses + expSeqLens = []int{expNew, expOld} expSeqTypes = []int{1, 2} - default: // new addresses, old addresses, new addresses - expSeqLens = []int{expFirstNew, expOld, expLastNew} - expSeqTypes = []int{1, 2, 1} } assert.Equal(t, expSeqLens, seqLens, @@ -574,7 +569,7 @@ func createAddrBookWithMOldAndNNewAddrs(t *testing.T, nOld, nNew int) (book *add randAddrs := randNetAddressPairs(t, nOld) for _, addr := range randAddrs { book.AddAddress(addr.addr, addr.src) - book.MarkGood(addr.addr) + book.MarkGood(addr.addr.ID) } randAddrs = randNetAddressPairs(t, nNew) diff --git a/p2p/pex/errors.go b/p2p/pex/errors.go index 1f44ceee7e0..543056af53f 100644 --- a/p2p/pex/errors.go +++ b/p2p/pex/errors.go @@ -30,6 +30,10 @@ func (err ErrAddrBookPrivate) Error() string { return fmt.Sprintf("Cannot add private peer with address %v", err.Addr) } +func (err ErrAddrBookPrivate) PrivateAddr() bool { + return true +} + type ErrAddrBookPrivateSrc struct { Src *p2p.NetAddress } @@ -38,6 +42,10 @@ func (err ErrAddrBookPrivateSrc) Error() string { return fmt.Sprintf("Cannot add peer coming from private peer with address %v", err.Src) } +func (err ErrAddrBookPrivateSrc) PrivateAddr() bool { + return true +} + type ErrAddrBookNilAddr struct { Addr *p2p.NetAddress Src *p2p.NetAddress diff --git a/p2p/pex/file.go b/p2p/pex/file.go index 33fec033695..d4a516850e0 100644 --- a/p2p/pex/file.go +++ b/p2p/pex/file.go @@ -16,16 +16,15 @@ type addrBookJSON struct { } func (a *addrBook) saveToFile(filePath string) { - a.Logger.Info("Saving AddrBook to file", "size", a.Size()) - a.mtx.Lock() defer a.mtx.Unlock() - // Compile Addrs - addrs := []*knownAddress{} + + a.Logger.Info("Saving AddrBook to file", "size", a.size()) + + addrs := make([]*knownAddress, 0, len(a.addrLookup)) for _, ka := range a.addrLookup { addrs = append(addrs, ka) } - aJSON := &addrBookJSON{ Key: a.key, Addrs: addrs, diff --git 
a/p2p/pex/known_address.go b/p2p/pex/known_address.go
index 5673dec1197..acde385bcfb 100644
--- a/p2p/pex/known_address.go
+++ b/p2p/pex/known_address.go
@@ -33,18 +33,6 @@ func (ka *knownAddress) ID() p2p.ID {
 	return ka.Addr.ID
 }
 
-func (ka *knownAddress) copy() *knownAddress {
-	return &knownAddress{
-		Addr:        ka.Addr,
-		Src:         ka.Src,
-		Attempts:    ka.Attempts,
-		LastAttempt: ka.LastAttempt,
-		LastSuccess: ka.LastSuccess,
-		BucketType:  ka.BucketType,
-		Buckets:     ka.Buckets,
-	}
-}
-
 func (ka *knownAddress) isOld() bool {
 	return ka.BucketType == bucketTypeOld
 }
diff --git a/p2p/pex/pex_reactor.go b/p2p/pex/pex_reactor.go
index 01d1d8db588..c24ee983cfc 100644
--- a/p2p/pex/pex_reactor.go
+++ b/p2p/pex/pex_reactor.go
@@ -3,7 +3,6 @@ package pex
 import (
 	"fmt"
 	"reflect"
-	"sort"
 	"sync"
 	"time"
 
@@ -35,16 +34,11 @@ const (
 
 	// Seed/Crawler constants
 
-	// We want seeds to only advertise good peers. Therefore they should wait at
-	// least as long as we expect it to take for a peer to become good before
-	// disconnecting.
-	// see consensus/reactor.go: blocksToContributeToBecomeGoodPeer
-	// 10000 blocks assuming 1s blocks ~ 2.7 hours.
-	defaultSeedDisconnectWaitPeriod = 3 * time.Hour
+	// minTimeBetweenCrawls is a minimum time between attempts to crawl a peer.
+	minTimeBetweenCrawls = 2 * time.Minute
 
-	defaultCrawlPeerInterval = 2 * time.Minute // don't redial for this. TODO: back-off. what for?
-
-	defaultCrawlPeersPeriod = 30 * time.Second // check some peers every this
+	// crawlPeerPeriod is how often we crawl another batch of known peers.
+	crawlPeerPeriod = 30 * time.Second
 
 	maxAttemptsToDial = 16 // ~ 35h in total (last attempt - 18h)
 
@@ -77,6 +71,9 @@ type PEXReactor struct {
 	seedAddrs []*p2p.NetAddress
 
 	attemptsToDial sync.Map // address (string) -> {number of attempts (int), last time dialed (time.Time)}
+
+	// seed/crawler mode fields
+	crawlPeerInfos map[p2p.ID]crawlPeerInfo
 }
 
 func (r *PEXReactor) minReceiveRequestInterval() time.Duration {
@@ -90,6 +87,11 @@ type PEXReactorConfig struct {
 	// Seed/Crawler mode
 	SeedMode bool
 
+	// We want seeds to only advertise good peers. Therefore they should wait at
+	// least as long as we expect it to take for a peer to become good before
+	// disconnecting.
+	SeedDisconnectWaitPeriod time.Duration
+
 	// Seeds is a list of addresses reactor may use
 	// if it can't connect to peers in the addrbook.
 	Seeds []string
@@ -108,6 +110,7 @@ func NewPEXReactor(b AddrBook, config *PEXReactorConfig) *PEXReactor {
 		ensurePeersPeriod:    defaultEnsurePeersPeriod,
 		requestsSent:         cmn.NewCMap(),
 		lastReceivedRequests: cmn.NewCMap(),
+		crawlPeerInfos:       make(map[p2p.ID]crawlPeerInfo),
 	}
 	r.BaseReactor = *p2p.NewBaseReactor("PEXReactor", r)
 	return r
@@ -167,12 +170,18 @@ func (r *PEXReactor) AddPeer(p Peer) {
 		}
 	} else {
 		// inbound peer is its own source
-		addr := p.NodeInfo().NetAddress()
+		addr, err := p.NodeInfo().NetAddress()
+		if err != nil {
+			r.Logger.Error("Failed to get peer NetAddress", "err", err, "peer", p)
+			return
+		}
+
+		// Make it explicit that addr and src are the same for an inbound peer.
 		src := addr
 
 		// add to book. dont RequestAddrs right away because
 		// we don't trust inbound as much - let ensurePeersRoutine handle it.
- err := r.book.AddAddress(addr, src) + err = r.book.AddAddress(addr, src) r.logErrAddrBook(err) } } @@ -309,7 +318,10 @@ func (r *PEXReactor) ReceiveAddrs(addrs []*p2p.NetAddress, src Peer) error { } r.requestsSent.Delete(id) - srcAddr := src.NodeInfo().NetAddress() + srcAddr, err := src.NodeInfo().NetAddress() + if err != nil { + return err + } for _, netAddr := range addrs { // Validate netAddr. Disconnect from a peer if it sends us invalid data. if netAddr == nil { @@ -363,9 +375,9 @@ func (r *PEXReactor) ensurePeersRoutine() { ) // Randomize first round of communication to avoid thundering herd. - // If no potential peers are present directly start connecting so we guarantee - // swift setup with the help of configured seeds. - if r.hasPotentialPeers() { + // If no peers are present directly start connecting so we guarantee swift + // setup with the help of configured seeds. + if r.nodeHasSomePeersOrDialingAny() { time.Sleep(time.Duration(jitter)) } @@ -493,23 +505,26 @@ func (r *PEXReactor) dialPeer(addr *p2p.NetAddress) { err := r.Switch.DialPeerWithAddress(addr, false) if err != nil { + if _, ok := err.(p2p.ErrCurrentlyDialingOrExistingAddress); ok { + return + } + r.Logger.Error("Dialing failed", "addr", addr, "err", err, "attempts", attempts) - // TODO: detect more "bad peer" scenarios + markAddrInBookBasedOnErr(addr, r.book, err) if _, ok := err.(p2p.ErrSwitchAuthenticationFailure); ok { - r.book.MarkBad(addr) r.attemptsToDial.Delete(addr.DialString()) } else { - r.book.MarkAttempt(addr) // FIXME: if the addr is going to be removed from the addrbook (hard to // tell at this point), we need to Delete it from attemptsToDial, not // record another attempt. // record attempt r.attemptsToDial.Store(addr.DialString(), _attemptsToDial{attempts + 1, time.Now()}) } - } else { - // cleanup any history - r.attemptsToDial.Delete(addr.DialString()) + return } + + // cleanup any history + r.attemptsToDial.Delete(addr.DialString()) } // checkSeeds checks that addresses are well formed. @@ -568,101 +583,92 @@ func (r *PEXReactor) AttemptsToDial(addr *p2p.NetAddress) int { // from peers, except other seed nodes. func (r *PEXReactor) crawlPeersRoutine() { // Do an initial crawl - r.crawlPeers() + r.crawlPeers(r.book.GetSelection()) // Fire periodically - ticker := time.NewTicker(defaultCrawlPeersPeriod) + ticker := time.NewTicker(crawlPeerPeriod) for { select { case <-ticker.C: r.attemptDisconnects() - r.crawlPeers() + r.crawlPeers(r.book.GetSelection()) + r.cleanupCrawlPeerInfos() case <-r.Quit(): return } } } -// hasPotentialPeers indicates if there is a potential peer to connect to, by -// consulting the Switch as well as the AddrBook. -func (r *PEXReactor) hasPotentialPeers() bool { +// nodeHasSomePeersOrDialingAny returns true if the node is connected to some +// peers or dialing them currently. +func (r *PEXReactor) nodeHasSomePeersOrDialingAny() bool { out, in, dial := r.Switch.NumPeers() - - return out+in+dial > 0 && len(r.book.ListOfKnownAddresses()) > 0 + return out+in+dial > 0 } -// crawlPeerInfo handles temporary data needed for the -// network crawling performed during seed/crawler mode. +// crawlPeerInfo handles temporary data needed for the network crawling +// performed during seed/crawler mode. 
type crawlPeerInfo struct { - // The listening address of a potential peer we learned about - Addr *p2p.NetAddress - - // The last time we attempt to reach this address - LastAttempt time.Time - - // The last time we successfully reached this address - LastSuccess time.Time + Addr *p2p.NetAddress `json:"addr"` + // The last time we crawled the peer or attempted to do so. + LastCrawled time.Time `json:"last_crawled"` } -// oldestFirst implements sort.Interface for []crawlPeerInfo -// based on the LastAttempt field. -type oldestFirst []crawlPeerInfo - -func (of oldestFirst) Len() int { return len(of) } -func (of oldestFirst) Swap(i, j int) { of[i], of[j] = of[j], of[i] } -func (of oldestFirst) Less(i, j int) bool { return of[i].LastAttempt.Before(of[j].LastAttempt) } +// crawlPeers will crawl the network looking for new peer addresses. +func (r *PEXReactor) crawlPeers(addrs []*p2p.NetAddress) { + now := time.Now() -// getPeersToCrawl returns addresses of potential peers that we wish to validate. -// NOTE: The status information is ordered as described above. -func (r *PEXReactor) getPeersToCrawl() []crawlPeerInfo { - // TODO: be more selective - addrs := r.book.ListOfKnownAddresses() - of := make(oldestFirst, 0, len(addrs)) for _, addr := range addrs { - if len(addr.ID()) == 0 { - continue // dont use peers without id - } - - of = append(of, crawlPeerInfo{ - Addr: addr.Addr, - LastAttempt: addr.LastAttempt, - LastSuccess: addr.LastSuccess, - }) - } - sort.Sort(of) - return of -} - -// crawlPeers will crawl the network looking for new peer addresses. (once) -func (r *PEXReactor) crawlPeers() { - peerInfos := r.getPeersToCrawl() + peerInfo, ok := r.crawlPeerInfos[addr.ID] - now := time.Now() - // Use addresses we know of to reach additional peers - for _, pi := range peerInfos { - // Do not attempt to connect with peers we recently dialed - if now.Sub(pi.LastAttempt) < defaultCrawlPeerInterval { + // Do not attempt to connect with peers we recently crawled. + if ok && now.Sub(peerInfo.LastCrawled) < minTimeBetweenCrawls { continue } - // Otherwise, attempt to connect with the known address - err := r.Switch.DialPeerWithAddress(pi.Addr, false) + + // Record crawling attempt. 
+ r.crawlPeerInfos[addr.ID] = crawlPeerInfo{ + Addr: addr, + LastCrawled: now, + } + + err := r.Switch.DialPeerWithAddress(addr, false) if err != nil { - r.book.MarkAttempt(pi.Addr) + if _, ok := err.(p2p.ErrCurrentlyDialingOrExistingAddress); ok { + continue + } + + r.Logger.Error("Dialing failed", "addr", addr, "err", err) + markAddrInBookBasedOnErr(addr, r.book, err) continue } - // Ask for more addresses - peer := r.Switch.Peers().Get(pi.Addr.ID) + + peer := r.Switch.Peers().Get(addr.ID) if peer != nil { r.RequestAddrs(peer) } } } +func (r *PEXReactor) cleanupCrawlPeerInfos() { + for id, info := range r.crawlPeerInfos { + // If we did not crawl a peer for 24 hours, it means the peer was removed + // from the addrbook => remove + // + // 10000 addresses / maxGetSelection = 40 cycles to get all addresses in + // the ideal case, + // 40 * crawlPeerPeriod ~ 20 minutes + if time.Since(info.LastCrawled) > 24*time.Hour { + delete(r.crawlPeerInfos, id) + } + } +} + // attemptDisconnects checks if we've been with each peer long enough to disconnect func (r *PEXReactor) attemptDisconnects() { for _, peer := range r.Switch.Peers().List() { - if peer.Status().Duration < defaultSeedDisconnectWaitPeriod { + if peer.Status().Duration < r.config.SeedDisconnectWaitPeriod { continue } if peer.IsPersistent() { @@ -672,6 +678,16 @@ func (r *PEXReactor) attemptDisconnects() { } } +func markAddrInBookBasedOnErr(addr *p2p.NetAddress, book AddrBook, err error) { + // TODO: detect more "bad peer" scenarios + switch err.(type) { + case p2p.ErrSwitchAuthenticationFailure: + book.MarkBad(addr) + default: + book.MarkAttempt(addr) + } +} + //----------------------------------------------------------------------------- // Messages diff --git a/p2p/pex/pex_reactor_test.go b/p2p/pex/pex_reactor_test.go index 4f4ccb03948..077f07a60f5 100644 --- a/p2p/pex/pex_reactor_test.go +++ b/p2p/pex/pex_reactor_test.go @@ -3,7 +3,6 @@ package pex import ( "fmt" "io/ioutil" - "net" "os" "path/filepath" "testing" @@ -12,14 +11,10 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/tendermint/tendermint/crypto" - "github.com/tendermint/tendermint/crypto/ed25519" - cmn "github.com/tendermint/tendermint/libs/common" - "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/p2p" - "github.com/tendermint/tendermint/p2p/conn" + "github.com/tendermint/tendermint/p2p/mock" ) var ( @@ -101,7 +96,7 @@ func TestPEXReactorRunning(t *testing.T) { } addOtherNodeAddrToAddrBook := func(switchIndex, otherSwitchIndex int) { - addr := switches[otherSwitchIndex].NodeInfo().NetAddress() + addr := switches[otherSwitchIndex].NetAddress() books[switchIndex].AddAddress(addr, addr) } @@ -132,7 +127,7 @@ func TestPEXReactorReceive(t *testing.T) { r.RequestAddrs(peer) size := book.Size() - addrs := []*p2p.NetAddress{peer.NodeInfo().NetAddress()} + addrs := []*p2p.NetAddress{peer.SocketAddr()} msg := cdc.MustMarshalBinaryBare(&pexAddrsMessage{Addrs: addrs}) r.Receive(PexChannel, peer, msg) assert.Equal(t, size+1, book.Size()) @@ -148,7 +143,7 @@ func TestPEXReactorRequestMessageAbuse(t *testing.T) { sw := createSwitchAndAddReactors(r) sw.SetAddrBook(book) - peer := newMockPeer() + peer := mock.NewPeer(nil) p2p.AddPeerToSwitch(sw, peer) assert.True(t, sw.Peers().Has(peer.ID())) @@ -178,7 +173,7 @@ func TestPEXReactorAddrsMessageAbuse(t *testing.T) { sw := createSwitchAndAddReactors(r) 
sw.SetAddrBook(book) - peer := newMockPeer() + peer := mock.NewPeer(nil) p2p.AddPeerToSwitch(sw, peer) assert.True(t, sw.Peers().Has(peer.ID())) @@ -189,7 +184,7 @@ func TestPEXReactorAddrsMessageAbuse(t *testing.T) { assert.True(t, r.requestsSent.Has(id)) assert.True(t, sw.Peers().Has(peer.ID())) - addrs := []*p2p.NetAddress{peer.NodeInfo().NetAddress()} + addrs := []*p2p.NetAddress{peer.SocketAddr()} msg := cdc.MustMarshalBinaryBare(&pexAddrsMessage{Addrs: addrs}) // receive some addrs. should clear the request @@ -209,36 +204,36 @@ func TestCheckSeeds(t *testing.T) { defer os.RemoveAll(dir) // nolint: errcheck // 1. test creating peer with no seeds works - peer := testCreateDefaultPeer(dir, 0) - require.Nil(t, peer.Start()) - peer.Stop() + peerSwitch := testCreateDefaultPeer(dir, 0) + require.Nil(t, peerSwitch.Start()) + peerSwitch.Stop() // 2. create seed seed := testCreateSeed(dir, 1, []*p2p.NetAddress{}, []*p2p.NetAddress{}) // 3. test create peer with online seed works - peer = testCreatePeerWithSeed(dir, 2, seed) - require.Nil(t, peer.Start()) - peer.Stop() + peerSwitch = testCreatePeerWithSeed(dir, 2, seed) + require.Nil(t, peerSwitch.Start()) + peerSwitch.Stop() // 4. test create peer with all seeds having unresolvable DNS fails badPeerConfig := &PEXReactorConfig{ Seeds: []string{"ed3dfd27bfc4af18f67a49862f04cc100696e84d@bad.network.addr:26657", "d824b13cb5d40fa1d8a614e089357c7eff31b670@anotherbad.network.addr:26657"}, } - peer = testCreatePeerWithConfig(dir, 2, badPeerConfig) - require.Error(t, peer.Start()) - peer.Stop() + peerSwitch = testCreatePeerWithConfig(dir, 2, badPeerConfig) + require.Error(t, peerSwitch.Start()) + peerSwitch.Stop() // 5. test create peer with one good seed address succeeds badPeerConfig = &PEXReactorConfig{ Seeds: []string{"ed3dfd27bfc4af18f67a49862f04cc100696e84d@bad.network.addr:26657", "d824b13cb5d40fa1d8a614e089357c7eff31b670@anotherbad.network.addr:26657", - seed.NodeInfo().NetAddress().String()}, + seed.NetAddress().String()}, } - peer = testCreatePeerWithConfig(dir, 2, badPeerConfig) - require.Nil(t, peer.Start()) - peer.Stop() + peerSwitch = testCreatePeerWithConfig(dir, 2, badPeerConfig) + require.Nil(t, peerSwitch.Start()) + peerSwitch.Stop() } func TestPEXReactorUsesSeedsIfNeeded(t *testing.T) { @@ -268,12 +263,13 @@ func TestConnectionSpeedForPeerReceivedFromSeed(t *testing.T) { defer os.RemoveAll(dir) // nolint: errcheck // 1. create peer - peer := testCreateDefaultPeer(dir, 1) - require.Nil(t, peer.Start()) - defer peer.Stop() + peerSwitch := testCreateDefaultPeer(dir, 1) + require.Nil(t, peerSwitch.Start()) + defer peerSwitch.Stop() // 2. 
Create seed which knows about the peer - seed := testCreateSeed(dir, 2, []*p2p.NetAddress{peer.NodeInfo().NetAddress()}, []*p2p.NetAddress{peer.NodeInfo().NetAddress()}) + peerAddr := peerSwitch.NetAddress() + seed := testCreateSeed(dir, 2, []*p2p.NetAddress{peerAddr}, []*p2p.NetAddress{peerAddr}) require.Nil(t, seed.Start()) defer seed.Stop() @@ -289,31 +285,41 @@ func TestConnectionSpeedForPeerReceivedFromSeed(t *testing.T) { assertPeersWithTimeout(t, []*p2p.Switch{secondPeer}, 10*time.Millisecond, 1*time.Second, 2) } -func TestPEXReactorCrawlStatus(t *testing.T) { - pexR, book := createReactor(&PEXReactorConfig{SeedMode: true}) +func TestPEXReactorSeedMode(t *testing.T) { + // directory to store address books + dir, err := ioutil.TempDir("", "pex_reactor") + require.Nil(t, err) + defer os.RemoveAll(dir) // nolint: errcheck + + pexR, book := createReactor(&PEXReactorConfig{SeedMode: true, SeedDisconnectWaitPeriod: 10 * time.Millisecond}) defer teardownReactor(book) - // Seed/Crawler mode uses data from the Switch sw := createSwitchAndAddReactors(pexR) sw.SetAddrBook(book) + err = sw.Start() + require.NoError(t, err) + defer sw.Stop() - // Create a peer, add it to the peer set and the addrbook. - peer := p2p.CreateRandomPeer(false) - p2p.AddPeerToSwitch(pexR.Switch, peer) - addr1 := peer.NodeInfo().NetAddress() - pexR.book.AddAddress(addr1, addr1) + assert.Zero(t, sw.Peers().Size()) + + peerSwitch := testCreateDefaultPeer(dir, 1) + require.NoError(t, peerSwitch.Start()) + defer peerSwitch.Stop() - // Add a non-connected address to the book. - _, addr2 := p2p.CreateRoutableAddr() - pexR.book.AddAddress(addr2, addr1) + // 1. Test crawlPeers dials the peer + pexR.crawlPeers([]*p2p.NetAddress{peerSwitch.NetAddress()}) + assert.Equal(t, 1, sw.Peers().Size()) + assert.True(t, sw.Peers().Has(peerSwitch.NodeInfo().ID())) - // Get some peerInfos to crawl - peerInfos := pexR.getPeersToCrawl() + // 2. attemptDisconnects should not disconnect because of wait period + pexR.attemptDisconnects() + assert.Equal(t, 1, sw.Peers().Size()) - // Make sure it has the proper number of elements - assert.Equal(t, 2, len(peerInfos)) + time.Sleep(100 * time.Millisecond) - // TODO: test + // 3. attemptDisconnects should disconnect after wait period + pexR.attemptDisconnects() + assert.Equal(t, 0, sw.Peers().Size()) } // connect a peer to a seed, wait a bit, then stop it. @@ -364,7 +370,7 @@ func TestPEXReactorSeedModeFlushStop(t *testing.T) { reactor := switches[0].Reactors()["pex"].(*PEXReactor) peerID := switches[1].NodeInfo().ID() - err = switches[1].DialPeerWithAddress(switches[0].NodeInfo().NetAddress(), false) + err = switches[1].DialPeerWithAddress(switches[0].NetAddress(), false) assert.NoError(t, err) // sleep up to a second while waiting for the peer to send us a message. 
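TestPEXReactorSeedMode above exercises the new SeedDisconnectWaitPeriod: a seed keeps a crawled peer around for the wait period, then hangs up on it. The disconnect rule, reduced to a runnable sketch with stand-in types (peerStub is illustrative; the real code reads peer.Status().Duration and peer.IsPersistent()):

package main

import (
	"fmt"
	"time"
)

type peerStub struct {
	id         string
	duration   time.Duration // how long the connection has been up
	persistent bool
}

// attemptDisconnects returns the IDs of peers that would be dropped:
// connected longer than the wait period and not persistent.
func attemptDisconnects(peers []peerStub, wait time.Duration) (dropped []string) {
	for _, p := range peers {
		if p.duration < wait {
			continue // still within the grace period
		}
		if p.persistent {
			continue // never drop persistent peers
		}
		dropped = append(dropped, p.id)
	}
	return
}

func main() {
	peers := []peerStub{
		{"fresh", 5 * time.Second, false},
		{"old", 29 * time.Hour, false},
		{"oldPersistent", 29 * time.Hour, true},
	}
	fmt.Println(attemptDisconnects(peers, 28*time.Hour)) // [old]
}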
@@ -402,7 +408,7 @@ func TestPEXReactorDoesNotAddPrivatePeersToAddrBook(t *testing.T) { pexR.RequestAddrs(peer) size := book.Size() - addrs := []*p2p.NetAddress{peer.NodeInfo().NetAddress()} + addrs := []*p2p.NetAddress{peer.SocketAddr()} msg := cdc.MustMarshalBinaryBare(&pexAddrsMessage{Addrs: addrs}) pexR.Receive(PexChannel, peer, msg) assert.Equal(t, size, book.Size()) @@ -418,8 +424,8 @@ func TestPEXReactorDialPeer(t *testing.T) { sw := createSwitchAndAddReactors(pexR) sw.SetAddrBook(book) - peer := newMockPeer() - addr := peer.NodeInfo().NetAddress() + peer := mock.NewPeer(nil) + addr := peer.SocketAddr() assert.Equal(t, 0, pexR.AttemptsToDial(addr)) @@ -444,44 +450,6 @@ func TestPEXReactorDialPeer(t *testing.T) { } } -type mockPeer struct { - *cmn.BaseService - pubKey crypto.PubKey - addr *p2p.NetAddress - outbound, persistent bool -} - -func newMockPeer() mockPeer { - _, netAddr := p2p.CreateRoutableAddr() - mp := mockPeer{ - addr: netAddr, - pubKey: ed25519.GenPrivKey().PubKey(), - } - mp.BaseService = cmn.NewBaseService(nil, "MockPeer", mp) - mp.Start() - return mp -} - -func (mp mockPeer) FlushStop() { mp.Stop() } -func (mp mockPeer) ID() p2p.ID { return mp.addr.ID } -func (mp mockPeer) IsOutbound() bool { return mp.outbound } -func (mp mockPeer) IsPersistent() bool { return mp.persistent } -func (mp mockPeer) NodeInfo() p2p.NodeInfo { - return p2p.DefaultNodeInfo{ - ID_: mp.addr.ID, - ListenAddr: mp.addr.DialString(), - } -} -func (mockPeer) RemoteIP() net.IP { return net.ParseIP("127.0.0.1") } -func (mockPeer) Status() conn.ConnectionStatus { return conn.ConnectionStatus{} } -func (mockPeer) Send(byte, []byte) bool { return false } -func (mockPeer) TrySend(byte, []byte) bool { return false } -func (mockPeer) Set(string, interface{}) {} -func (mockPeer) Get(string) interface{} { return nil } -func (mockPeer) OriginalAddr() *p2p.NetAddress { return nil } -func (mockPeer) RemoteAddr() net.Addr { return &net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: 8800} } -func (mockPeer) CloseConn() error { return nil } - func assertPeersWithTimeout( t *testing.T, switches []*p2p.Switch, @@ -571,7 +539,7 @@ func testCreateSeed(dir string, id int, knownAddrs, srcAddrs []*p2p.NetAddress) book.SetLogger(log.TestingLogger()) for j := 0; j < len(knownAddrs); j++ { book.AddAddress(knownAddrs[j], srcAddrs[j]) - book.MarkGood(knownAddrs[j]) + book.MarkGood(knownAddrs[j].ID) } sw.SetAddrBook(book) @@ -590,7 +558,7 @@ func testCreateSeed(dir string, id int, knownAddrs, srcAddrs []*p2p.NetAddress) // Starting and stopping the peer is left to the caller func testCreatePeerWithSeed(dir string, id int, seed *p2p.Switch) *p2p.Switch { conf := &PEXReactorConfig{ - Seeds: []string{seed.NodeInfo().NetAddress().String()}, + Seeds: []string{seed.NetAddress().String()}, } return testCreatePeerWithConfig(dir, id, conf) } diff --git a/p2p/switch.go b/p2p/switch.go index a07f70ce976..afd7d9656d6 100644 --- a/p2p/switch.go +++ b/p2p/switch.go @@ -6,6 +6,8 @@ import ( "sync" "time" + "github.com/pkg/errors" + "github.com/tendermint/tendermint/config" cmn "github.com/tendermint/tendermint/libs/common" "github.com/tendermint/tendermint/p2p/conn" @@ -46,7 +48,7 @@ type AddrBook interface { AddAddress(addr *NetAddress, src *NetAddress) error AddOurAddress(*NetAddress) OurAddress(*NetAddress) bool - MarkGood(*NetAddress) + MarkGood(ID) RemoveAddress(*NetAddress) HasAddress(*NetAddress) bool Save() @@ -86,6 +88,12 @@ type Switch struct { metrics *Metrics } +// NetAddress returns the address the switch is listening on. 
+func (sw *Switch) NetAddress() *NetAddress { + addr := sw.transport.NetAddress() + return &addr +} + // SwitchOption sets an optional parameter on the Switch. type SwitchOption func(*Switch) @@ -234,21 +242,26 @@ func (sw *Switch) OnStop() { // // NOTE: Broadcast uses goroutines, so order of broadcast may not be preserved. func (sw *Switch) Broadcast(chID byte, msgBytes []byte) chan bool { - successChan := make(chan bool, len(sw.peers.List())) sw.Logger.Debug("Broadcast", "channel", chID, "msgBytes", fmt.Sprintf("%X", msgBytes)) + + peers := sw.peers.List() var wg sync.WaitGroup - for _, peer := range sw.peers.List() { - wg.Add(1) - go func(peer Peer) { + wg.Add(len(peers)) + successChan := make(chan bool, len(peers)) + + for _, peer := range peers { + go func(p Peer) { defer wg.Done() - success := peer.Send(chID, msgBytes) + success := p.Send(chID, msgBytes) successChan <- success }(peer) } + go func() { wg.Wait() close(successChan) }() + return successChan } @@ -284,13 +297,7 @@ func (sw *Switch) StopPeerForError(peer Peer, reason interface{}) { sw.stopAndRemovePeer(peer, reason) if peer.IsPersistent() { - addr := peer.OriginalAddr() - if addr == nil { - // FIXME: persistent peers can't be inbound right now. - // self-reported address for inbound persistent peers - addr = peer.NodeInfo().NetAddress() - } - go sw.reconnectToPeer(addr) + go sw.reconnectToPeer(peer.SocketAddr()) } } @@ -334,14 +341,11 @@ func (sw *Switch) reconnectToPeer(addr *NetAddress) { return } - if sw.IsDialingOrExistingAddress(addr) { - sw.Logger.Debug("Peer connection has been established or dialed while we waiting next try", "addr", addr) - return - } - err := sw.DialPeerWithAddress(addr, true) if err == nil { return // success + } else if _, ok := err.(ErrCurrentlyDialingOrExistingAddress); ok { + return } sw.Logger.Info("Error reconnecting to peer. Trying again", "tries", i, "err", err, "addr", addr) @@ -360,9 +364,12 @@ func (sw *Switch) reconnectToPeer(addr *NetAddress) { // sleep an exponentially increasing amount sleepIntervalSeconds := math.Pow(reconnectBackOffBaseSeconds, float64(i)) sw.randomSleep(time.Duration(sleepIntervalSeconds) * time.Second) + err := sw.DialPeerWithAddress(addr, true) if err == nil { return // success + } else if _, ok := err.(ErrCurrentlyDialingOrExistingAddress); ok { + return } sw.Logger.Info("Error reconnecting to peer. Trying again", "tries", i, "err", err, "addr", addr) } @@ -378,13 +385,22 @@ func (sw *Switch) SetAddrBook(addrBook AddrBook) { // like contributed to consensus. func (sw *Switch) MarkPeerAsGood(peer Peer) { if sw.addrBook != nil { - sw.addrBook.MarkGood(peer.NodeInfo().NetAddress()) + sw.addrBook.MarkGood(peer.ID()) } } //--------------------------------------------------------------------- // Dialing +type privateAddr interface { + PrivateAddr() bool +} + +func isPrivateAddr(err error) bool { + te, ok := errors.Cause(err).(privateAddr) + return ok && te.PrivateAddr() +} + // DialPeersAsync dials a list of peers asynchronously in random order (optionally, making them persistent). // Used to dial peers from config on startup or from unsafe-RPC (trusted sources). // TODO: remove addrBook arg since it's now set on the switch @@ -395,7 +411,7 @@ func (sw *Switch) DialPeersAsync(addrBook AddrBook, peers []string, persistent b sw.Logger.Error("Error in peer's address", "err", err) } - ourAddr := sw.nodeInfo.NetAddress() + ourAddr := sw.NetAddress() // TODO: this code feels like it's in the wrong place. 
// The integration tests depend on the addrBook being saved @@ -407,7 +423,11 @@ func (sw *Switch) DialPeersAsync(addrBook AddrBook, peers []string, persistent b // do not add our address or ID if !netAddr.Same(ourAddr) { if err := addrBook.AddAddress(netAddr, ourAddr); err != nil { - sw.Logger.Error("Can't add peer's address to addrbook", "err", err) + if isPrivateAddr(err) { + sw.Logger.Debug("Won't add peer's address to addrbook", "err", err) + } else { + sw.Logger.Error("Can't add peer's address to addrbook", "err", err) + } } } } @@ -430,15 +450,10 @@ func (sw *Switch) DialPeersAsync(addrBook AddrBook, peers []string, persistent b sw.randomSleep(0) - if sw.IsDialingOrExistingAddress(addr) { - sw.Logger.Debug("Ignore attempt to connect to an existing peer", "addr", addr) - return - } - err := sw.DialPeerWithAddress(addr, persistent) if err != nil { switch err.(type) { - case ErrSwitchConnectToSelf, ErrSwitchDuplicatePeerID: + case ErrSwitchConnectToSelf, ErrSwitchDuplicatePeerID, ErrCurrentlyDialingOrExistingAddress: sw.Logger.Debug("Error dialing peer", "err", err) default: sw.Logger.Error("Error dialing peer", "err", err) @@ -449,11 +464,20 @@ func (sw *Switch) DialPeersAsync(addrBook AddrBook, peers []string, persistent b return nil } -// DialPeerWithAddress dials the given peer and runs sw.addPeer if it connects and authenticates successfully. -// If `persistent == true`, the switch will always try to reconnect to this peer if the connection ever fails. +// DialPeerWithAddress dials the given peer and runs sw.addPeer if it connects +// and authenticates successfully. +// If `persistent == true`, the switch will always try to reconnect to this +// peer if the connection ever fails. +// If we're currently dialing this address or it belongs to an existing peer, +// ErrCurrentlyDialingOrExistingAddress is returned. func (sw *Switch) DialPeerWithAddress(addr *NetAddress, persistent bool) error { + if sw.IsDialingOrExistingAddress(addr) { + return ErrCurrentlyDialingOrExistingAddress{addr.String()} + } + sw.dialing.Set(string(addr.ID), addr) defer sw.dialing.Delete(string(addr.ID)) + return sw.addOutboundPeerWithConfig(addr, sw.config, persistent) } @@ -531,7 +555,7 @@ func (sw *Switch) acceptRoutine() { if in >= sw.config.MaxNumInboundPeers { sw.Logger.Info( "Ignoring inbound connection: already have enough inbound peers", - "address", p.NodeInfo().NetAddress().String(), + "address", p.SocketAddr(), "have", in, "max", sw.config.MaxNumInboundPeers, ) @@ -648,7 +672,7 @@ func (sw *Switch) addPeer(p Peer) error { return err } - p.SetLogger(sw.Logger.With("peer", p.NodeInfo().NetAddress())) + p.SetLogger(sw.Logger.With("peer", p.SocketAddr())) // Handle the shut down case where the switch has stopped but we're // concurrently trying to add a peer. 
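The reworked Switch.Broadcast above collects per-peer results on a buffered channel and closes it once every send goroutine has finished, so callers can simply range over the channel. The same pattern in isolation (send is an illustrative stand-in for peer.Send):

package main

import (
	"fmt"
	"sync"
)

// broadcast fans a send out to every peer and returns a channel that yields
// one success flag per peer, then closes.
func broadcast(peers []string, send func(peer string) bool) chan bool {
	successChan := make(chan bool, len(peers))
	var wg sync.WaitGroup
	wg.Add(len(peers))

	for _, peer := range peers {
		go func(p string) {
			defer wg.Done()
			successChan <- send(p)
		}(peer)
	}

	go func() {
		wg.Wait()
		close(successChan) // lets `for range successChan` terminate
	}()

	return successChan
}

func main() {
	ok := 0
	for success := range broadcast([]string{"a", "b", "c"}, func(string) bool { return true }) {
		if success {
			ok++
		}
	}
	fmt.Println(ok, "sends succeeded")
}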
diff --git a/p2p/switch_test.go b/p2p/switch_test.go index d5dd178b671..bf105e0fa23 100644 --- a/p2p/switch_test.go +++ b/p2p/switch_test.go @@ -161,10 +161,6 @@ func assertMsgReceivedWithTimeout(t *testing.T, msgBytes []byte, channel byte, r func TestSwitchFiltersOutItself(t *testing.T) { s1 := MakeSwitch(cfg, 1, "127.0.0.1", "123.123.123", initSwitchFunc) - // addr := s1.NodeInfo().NetAddress() - - // // add ourselves like we do in node.go#427 - // s1.addrBook.AddOurAddress(addr) // simulate s1 having a public IP by creating a remote peer with the same ID rp := &remotePeer{PrivKey: s1.nodeKey.PrivKey, Config: cfg} @@ -498,7 +494,7 @@ func TestSwitchAcceptRoutine(t *testing.T) { rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg} remotePeers = append(remotePeers, rp) rp.Start() - c, err := rp.Dial(sw.NodeInfo().NetAddress()) + c, err := rp.Dial(sw.NetAddress()) require.NoError(t, err) // spawn a reading routine to prevent connection from closing go func(c net.Conn) { @@ -517,7 +513,7 @@ func TestSwitchAcceptRoutine(t *testing.T) { // 2. check we close new connections if we already have MaxNumInboundPeers peers rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg} rp.Start() - conn, err := rp.Dial(sw.NodeInfo().NetAddress()) + conn, err := rp.Dial(sw.NetAddress()) require.NoError(t, err) // check conn is closed one := make([]byte, 1) @@ -537,6 +533,10 @@ type errorTransport struct { acceptErr error } +func (et errorTransport) NetAddress() NetAddress { + panic("not implemented") +} + func (et errorTransport) Accept(c peerConfig) (Peer, error) { return nil, et.acceptErr } @@ -626,7 +626,7 @@ func (book *addrBookMock) OurAddress(addr *NetAddress) bool { _, ok := book.ourAddrs[addr.String()] return ok } -func (book *addrBookMock) MarkGood(*NetAddress) {} +func (book *addrBookMock) MarkGood(ID) {} func (book *addrBookMock) HasAddress(addr *NetAddress) bool { _, ok := book.addrs[addr.String()] return ok diff --git a/p2p/test_util.go b/p2p/test_util.go index 2d320df8594..f8020924cf1 100644 --- a/p2p/test_util.go +++ b/p2p/test_util.go @@ -23,7 +23,7 @@ type mockNodeInfo struct { } func (ni mockNodeInfo) ID() ID { return ni.addr.ID } -func (ni mockNodeInfo) NetAddress() *NetAddress { return ni.addr } +func (ni mockNodeInfo) NetAddress() (*NetAddress, error) { return ni.addr, nil } func (ni mockNodeInfo) Validate() error { return nil } func (ni mockNodeInfo) CompatibleWith(other NodeInfo) error { return nil } @@ -35,7 +35,8 @@ func CreateRandomPeer(outbound bool) *peer { addr, netAddr := CreateRoutableAddr() p := &peer{ peerConn: peerConn{ - outbound: outbound, + outbound: outbound, + socketAddr: netAddr, }, nodeInfo: mockNodeInfo{netAddr}, mconn: &conn.MConnection{}, @@ -174,10 +175,15 @@ func MakeSwitch( PrivKey: ed25519.GenPrivKey(), } nodeInfo := testNodeInfo(nodeKey.ID(), fmt.Sprintf("node%d", i)) + addr, err := NewNetAddressString( + IDAddressString(nodeKey.ID(), nodeInfo.(DefaultNodeInfo).ListenAddr), + ) + if err != nil { + panic(err) + } t := NewMultiplexTransport(nodeInfo, nodeKey, MConnConfig(cfg)) - addr := nodeInfo.NetAddress() if err := t.Listen(*addr); err != nil { panic(err) } @@ -214,7 +220,7 @@ func testPeerConn( cfg *config.P2PConfig, outbound, persistent bool, ourNodePrivKey crypto.PrivKey, - originalAddr *NetAddress, + socketAddr *NetAddress, ) (pc peerConn, err error) { conn := rawConn @@ -231,12 +237,7 @@ func testPeerConn( } // Only the information we already have - return peerConn{ - outbound: outbound, - persistent: persistent, - conn: conn, - 
originalAddr: originalAddr, - }, nil + return newPeerConn(outbound, persistent, conn, socketAddr), nil } //---------------------------------------------------------------- diff --git a/p2p/transport.go b/p2p/transport.go index d36065ab1c5..ebf77c9f4b8 100644 --- a/p2p/transport.go +++ b/p2p/transport.go @@ -24,6 +24,7 @@ type IPResolver interface { // accept is the container to carry the upgraded connection and NodeInfo from an // asynchronously running routine to the Accept method. type accept struct { + netAddr *NetAddress conn net.Conn nodeInfo NodeInfo err error @@ -47,6 +48,9 @@ type peerConfig struct { // the transport. Each transport is also responsible to filter establishing // peers specific to its domain. type Transport interface { + // Listening address. + NetAddress() NetAddress + // Accept returns a newly connected Peer. Accept(peerConfig) (Peer, error) @@ -115,6 +119,7 @@ func MultiplexTransportResolver(resolver IPResolver) MultiplexTransportOption { // MultiplexTransport accepts and dials tcp connections and upgrades them to // multiplexed peers. type MultiplexTransport struct { + netAddr NetAddress listener net.Listener acceptc chan accept @@ -161,6 +166,11 @@ func NewMultiplexTransport( } } +// NetAddress implements Transport. +func (mt *MultiplexTransport) NetAddress() NetAddress { + return mt.netAddr +} + // Accept implements Transport. func (mt *MultiplexTransport) Accept(cfg peerConfig) (Peer, error) { select { @@ -173,7 +183,7 @@ func (mt *MultiplexTransport) Accept(cfg peerConfig) (Peer, error) { cfg.outbound = false - return mt.wrapPeer(a.conn, a.nodeInfo, cfg, nil), nil + return mt.wrapPeer(a.conn, a.nodeInfo, cfg, a.netAddr), nil case <-mt.closec: return nil, ErrTransportClosed{} } @@ -224,6 +234,7 @@ func (mt *MultiplexTransport) Listen(addr NetAddress) error { return err } + mt.netAddr = addr mt.listener = ln go mt.acceptPeers() @@ -258,15 +269,21 @@ func (mt *MultiplexTransport) acceptPeers() { var ( nodeInfo NodeInfo secretConn *conn.SecretConnection + netAddr *NetAddress ) err := mt.filterConn(c) if err == nil { secretConn, nodeInfo, err = mt.upgrade(c, nil) + if err == nil { + addr := c.RemoteAddr() + id := PubKeyToID(secretConn.RemotePubKey()) + netAddr = NewNetAddress(id, addr) + } } select { - case mt.acceptc <- accept{secretConn, nodeInfo, err}: + case mt.acceptc <- accept{netAddr, secretConn, nodeInfo, err}: // Make the upgraded peer available. case <-mt.closec: // Give up if the transport was closed. 
@@ -347,7 +364,7 @@ func (mt *MultiplexTransport) upgrade( if err != nil { return nil, nil, ErrRejected{ conn: c, - err: fmt.Errorf("secrect conn failed: %v", err), + err: fmt.Errorf("secret conn failed: %v", err), isAuthFailure: true, } } @@ -360,7 +377,7 @@ func (mt *MultiplexTransport) upgrade( conn: c, id: connID, err: fmt.Errorf( - "conn.ID (%v) dialed ID (%v) missmatch", + "conn.ID (%v) dialed ID (%v) mismatch", connID, dialedID, ), @@ -392,7 +409,7 @@ func (mt *MultiplexTransport) upgrade( conn: c, id: connID, err: fmt.Errorf( - "conn.ID (%v) NodeInfo.ID (%v) missmatch", + "conn.ID (%v) NodeInfo.ID (%v) mismatch", connID, nodeInfo.ID(), ), @@ -426,14 +443,14 @@ func (mt *MultiplexTransport) wrapPeer( c net.Conn, ni NodeInfo, cfg peerConfig, - dialedAddr *NetAddress, + socketAddr *NetAddress, ) Peer { peerConn := newPeerConn( cfg.outbound, cfg.persistent, c, - dialedAddr, + socketAddr, ) p := newPeer( diff --git a/p2p/transport_test.go b/p2p/transport_test.go index 81f9d1b8e9a..35fd9c66b97 100644 --- a/p2p/transport_test.go +++ b/p2p/transport_test.go @@ -8,6 +8,8 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/p2p/conn" ) @@ -142,43 +144,23 @@ func TestTransportMultiplexConnFilterTimeout(t *testing.T) { func TestTransportMultiplexAcceptMultiple(t *testing.T) { mt := testSetupMultiplexTransport(t) + id, addr := mt.nodeKey.ID(), mt.listener.Addr().String() + laddr, err := NewNetAddressStringWithOptionalID(IDAddressString(id, addr)) + require.NoError(t, err) var ( - seed = rand.New(rand.NewSource(time.Now().UnixNano())) - errc = make(chan error, seed.Intn(64)+64) + seed = rand.New(rand.NewSource(time.Now().UnixNano())) + nDialers = seed.Intn(64) + 64 + errc = make(chan error, nDialers) ) // Setup dialers. - for i := 0; i < cap(errc); i++ { - go func() { - var ( - pv = ed25519.GenPrivKey() - dialer = newMultiplexTransport( - testNodeInfo(PubKeyToID(pv.PubKey()), defaultNodeName), - NodeKey{ - PrivKey: pv, - }, - ) - ) - addr, err := NewNetAddressStringWithOptionalID(IDAddressString(mt.nodeKey.ID(), mt.listener.Addr().String())) - if err != nil { - errc <- err - return - } - - _, err = dialer.Dial(*addr, peerConfig{}) - if err != nil { - errc <- err - return - } - - // Signal that the connection was established. - errc <- nil - }() + for i := 0; i < nDialers; i++ { + go testDialer(*laddr, errc) } // Catch connection errors. - for i := 0; i < cap(errc); i++ { + for i := 0; i < nDialers; i++ { if err := <-errc; err != nil { t.Fatal(err) } @@ -216,6 +198,27 @@ func TestTransportMultiplexAcceptMultiple(t *testing.T) { } } +func testDialer(dialAddr NetAddress, errc chan error) { + var ( + pv = ed25519.GenPrivKey() + dialer = newMultiplexTransport( + testNodeInfo(PubKeyToID(pv.PubKey()), defaultNodeName), + NodeKey{ + PrivKey: pv, + }, + ) + ) + + _, err := dialer.Dial(dialAddr, peerConfig{}) + if err != nil { + errc <- err + return + } + + // Signal that the connection was established. 
+ errc <- nil +} + func TestTransportMultiplexAcceptNonBlocking(t *testing.T) { mt := testSetupMultiplexTransport(t) @@ -591,6 +594,7 @@ func TestTransportHandshake(t *testing.T) { } } +// create listener func testSetupMultiplexTransport(t *testing.T) *MultiplexTransport { var ( pv = ed25519.GenPrivKey() diff --git a/rpc/client/httpclient.go b/rpc/client/httpclient.go index e982292e7d4..55c7b4f17a2 100644 --- a/rpc/client/httpclient.go +++ b/rpc/client/httpclient.go @@ -52,11 +52,7 @@ func NewHTTP(remote, wsEndpoint string) *HTTP { } } -var ( - _ Client = (*HTTP)(nil) - _ NetworkClient = (*HTTP)(nil) - _ EventsClient = (*HTTP)(nil) -) +var _ Client = (*HTTP)(nil) func (c *HTTP) Status() (*ctypes.ResultStatus, error) { result := new(ctypes.ResultStatus) diff --git a/rpc/client/interface.go b/rpc/client/interface.go index 605d84ba25c..8f9ed937224 100644 --- a/rpc/client/interface.go +++ b/rpc/client/interface.go @@ -72,17 +72,15 @@ type StatusClient interface { type Client interface { cmn.Service ABCIClient - SignClient + EventsClient HistoryClient + NetworkClient + SignClient StatusClient - EventsClient } // NetworkClient is general info about the network state. May not // be needed usually. -// -// Not included in the Client interface, but generally implemented -// by concrete implementations. type NetworkClient interface { NetInfo() (*ctypes.ResultNetInfo, error) DumpConsensusState() (*ctypes.ResultDumpConsensusState, error) diff --git a/rpc/client/localclient.go b/rpc/client/localclient.go index 976c9892a8a..d57ced311cf 100644 --- a/rpc/client/localclient.go +++ b/rpc/client/localclient.go @@ -58,11 +58,7 @@ func NewLocal(node *nm.Node) *Local { } } -var ( - _ Client = (*Local)(nil) - _ NetworkClient = (*Local)(nil) - _ EventsClient = (*Local)(nil) -) +var _ Client = (*Local)(nil) // SetLogger allows to set a logger on the client. func (c *Local) SetLogger(l log.Logger) { diff --git a/rpc/client/mock/client.go b/rpc/client/mock/client.go index 9c0eb75b828..c2e19b6d4fa 100644 --- a/rpc/client/mock/client.go +++ b/rpc/client/mock/client.go @@ -108,6 +108,18 @@ func (c Client) NetInfo() (*ctypes.ResultNetInfo, error) { return core.NetInfo(&rpctypes.Context{}) } +func (c Client) ConsensusState() (*ctypes.ResultConsensusState, error) { + return core.ConsensusState(&rpctypes.Context{}) +} + +func (c Client) DumpConsensusState() (*ctypes.ResultDumpConsensusState, error) { + return core.DumpConsensusState(&rpctypes.Context{}) +} + +func (c Client) Health() (*ctypes.ResultHealth, error) { + return core.Health(&rpctypes.Context{}) +} + func (c Client) DialSeeds(seeds []string) (*ctypes.ResultDialSeeds, error) { return core.UnsafeDialSeeds(&rpctypes.Context{}, seeds) } diff --git a/rpc/core/consensus.go b/rpc/core/consensus.go index b8a91f107b5..ad23a461c24 100644 --- a/rpc/core/consensus.go +++ b/rpc/core/consensus.go @@ -10,6 +10,8 @@ import ( // Get the validator set at the given block height. // If no height is provided, it will fetch the current validator set. +// Note the validators are sorted by their address - this is the canonical +// order for the validators in the set as used in computing their Merkle root. // // ```shell // curl 'localhost:26657/validators' @@ -216,7 +218,7 @@ func DumpConsensusState(ctx *rpctypes.Context) (*ctypes.ResultDumpConsensusState } peerStates[i] = ctypes.PeerStateInfo{ // Peer basic info. - NodeAddress: peer.NodeInfo().NetAddress().String(), + NodeAddress: peer.SocketAddr().String(), // Peer consensus state. 
 			PeerState: peerStateJSON,
 		}
diff --git a/rpc/lib/client/http_client.go b/rpc/lib/client/http_client.go
index 97b8dfe7b7e..cfa26e89c67 100644
--- a/rpc/lib/client/http_client.go
+++ b/rpc/lib/client/http_client.go
@@ -74,7 +74,9 @@ func makeHTTPClient(remoteAddr string) (string, *http.Client) {
 	protocol, address, dialer := makeHTTPDialer(remoteAddr)
 	return protocol + "://" + address, &http.Client{
 		Transport: &http.Transport{
-			Dial: dialer,
+			// Set to true to prevent GZIP-bomb DoS attacks
+			DisableCompression: true,
+			Dial:               dialer,
 		},
 	}
 }
diff --git a/scripts/release_management/README.md b/scripts/release_management/README.md
new file mode 100644
index 00000000000..e92f1ccf657
--- /dev/null
+++ b/scripts/release_management/README.md
@@ -0,0 +1,65 @@
+# Release management scripts
+
+## Overview
+The scripts in this folder are used for release management in CircleCI. Although the scripts are fully configurable through input parameters,
+the defaults are tailored to CircleCI execution.
+
+# Build scripts
+These scripts help during the build process. They prepare the release files.
+
+## bump-semver.py
+Bumps the semantic version of the input `--version`. Versions are expected in vMAJOR.MINOR.PATCH format or vMAJOR.MINOR format.
+
+In vMAJOR.MINOR format, the result will be patch version 0 of that version, for example `v1.2 -> v1.2.0`.
+
+In vMAJOR.MINOR.PATCH format, the result will be a bumped PATCH version, for example `v1.2.3 -> v1.2.4`.
+
+If the PATCH number contains letters, it is considered a development version, in which case the result is the non-development version of that number.
+The patch number will not be bumped; only the "-dev" or similar trailing text will be removed. For example: `v1.2.6-rc1 -> v1.2.6`.
+
+## zip-file.py
+Specialized ZIP command for release management. Special features:
+1. Uses the Python ZIP libraries, so the `zip` command does not need to be installed.
+1. Can only zip one file.
+1. Optionally takes the file version, Go OS and architecture as parameters.
+1. By default, all inputs and outputs are formatted exactly how CircleCI needs them.
+
+By default, the command will try to ZIP the file at `build/tendermint_${GOOS}_${GOARCH}`.
+This can be changed with the `--file` input parameter.
+
+By default, the command will output the ZIP file to `build/tendermint_${CIRCLE_TAG}_${GOOS}_${GOARCH}.zip`.
+This can be changed with the `--destination` (folder), `--version`, `--goos` and `--goarch` input parameters respectively.
+
+## sha-files.py
+Specialized `shasum` command for release management. Special features:
+1. Reads all ZIP files in the given folder.
+1. By default, all inputs and outputs are formatted exactly how CircleCI needs them.
+
+By default, the command will look up all ZIP files in the `build/` folder.
+
+By default, the command will output results into the `build/SHA256SUMS` file.
+
+# GitHub management
+Uploading build results to GitHub requires at least these steps:
+1. Create a new draft release on GitHub with its description
+2. Upload all binaries to the release
+3. Publish the release
+The scripts below help with these steps.
+
+## github-draft.py
+Creates a draft GitHub release and fills its body with a link to the relevant CHANGELOG.md section. The version number can be changed by the `--version` parameter.
+
+By default, the command will use the tendermint/tendermint organization/repo, which can be changed using the `--org` and `--repo` parameters.
+
+By default, the command will get the version number from the `${CIRCLE_TAG}` variable.
+
+Returns the GitHub release ID.
+
+## github-upload.py
+Uploads a file to a GitHub release. The release is defined by the mandatory `--id` (release ID) input parameter.
+
+By default, the command will upload the file `/tmp/workspace/tendermint_${CIRCLE_TAG}_${GOOS}_${GOARCH}.zip`. This can be changed by the `--file` input parameter.
+
+## github-publish.py
+Publishes a GitHub release. The release is defined by the mandatory `--id` (release ID) input parameter.
+
diff --git a/scripts/release_management/bump-semver.py b/scripts/release_management/bump-semver.py
new file mode 100755
index 00000000000..b13a1034291
--- /dev/null
+++ b/scripts/release_management/bump-semver.py
@@ -0,0 +1,37 @@
+#!/usr/bin/env python
+
+# Bump the release number of a semantic version number and print it. --version is required.
+# Version is
+# - vA.B.C, in which case vA.B.C+1 will be returned
+# - vA.B.C-devorwhatnot in which case vA.B.C will be returned
+# - vA.B in which case vA.B.0 will be returned
+
+import re
+import argparse
+
+
+def semver(ver):
+    if re.match(r'v[0-9]+\.[0-9]+', ver) is None:
+        ver = "v0.0"
+        #raise argparse.ArgumentTypeError('--version must be a semantic version number with major, minor and patch numbers')
+    return ver
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--version", help="Version number to bump, e.g.: v1.0.0", required=True, type=semver)
+    args = parser.parse_args()
+
+    found = re.match(r'(v[0-9]+\.[0-9]+)(\.(.+))?', args.version)
+    majorminorprefix = found.group(1)
+    patch = found.group(3)
+    if patch is None:
+        patch = "0-new"
+
+    if re.match(r'[0-9]+$', patch) is None:
+        patchfound = re.match(r'([0-9]+)', patch)
+        patch = int(patchfound.group(1))
+    else:
+        patch = int(patch) + 1
+
+    print("{0}.{1}".format(majorminorprefix, patch))
diff --git a/scripts/release_management/github-draft.py b/scripts/release_management/github-draft.py
new file mode 100755
index 00000000000..8a189d53e30
--- /dev/null
+++ b/scripts/release_management/github-draft.py
@@ -0,0 +1,61 @@
+#!/usr/bin/env python
+
+# Create a draft release on GitHub. By default in the tendermint/tendermint repo.
+# Optimized for CircleCI
+
+import argparse
+import httplib
+import json
+import os
+from base64 import b64encode
+
+def request(org, repo, data):
+    user_and_pass = b64encode(b"{0}:{1}".format(os.environ['GITHUB_USERNAME'], os.environ['GITHUB_TOKEN'])).decode("ascii")
+    headers = {
+        'User-Agent': 'tenderbot',
+        'Accept': 'application/vnd.github.v3+json',
+        'Authorization': 'Basic %s' % user_and_pass
+    }
+
+    conn = httplib.HTTPSConnection('api.github.com', timeout=5)
+    conn.request('POST', '/repos/{0}/{1}/releases'.format(org, repo), data, headers)
+    response = conn.getresponse()
+    if response.status < 200 or response.status > 299:
+        print("{0}: {1}".format(response.status, response.reason))
+        conn.close()
+        raise IOError(response.reason)
+    responsedata = response.read()
+    conn.close()
+    return json.loads(responsedata)
+
+
+def create_draft(org, repo, branch, version):
+    draft = {
+        'tag_name': version,
+        'target_commitish': '{0}'.format(branch),
+        'name': '{0} (WARNING: ALPHA SOFTWARE)'.format(version),
+        'body': 'https://github.com/{0}/{1}/blob/{2}/CHANGELOG.md#{3}'.format(org, repo, branch, version.replace('.', '')),
+        'draft': True,
+        'prerelease': False
+    }
+    data = json.dumps(draft)
+    return request(org, repo, data)
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--org", default="tendermint", help="GitHub organization")
+    parser.add_argument("--repo", default="tendermint", help="GitHub repository")
+    parser.add_argument("--branch", default=os.environ.get('CIRCLE_BRANCH'), help="Branch to build from, e.g.: v1.0")
+    parser.add_argument("--version", default=os.environ.get('CIRCLE_TAG'), help="Version number for binary, e.g.: v1.0.0")
+    args = parser.parse_args()
+
+    if not os.environ.has_key('GITHUB_USERNAME'):
+        raise parser.error('environment variable GITHUB_USERNAME is required')
+
+    if not os.environ.has_key('GITHUB_TOKEN'):
+        raise parser.error('environment variable GITHUB_TOKEN is required')
+
+    release = create_draft(args.org, args.repo, args.branch, args.version)
+
+    print(release["id"])
+
diff --git a/scripts/release_management/github-openpr.py b/scripts/release_management/github-openpr.py
new file mode 100755
index 00000000000..af0434f02dd
--- /dev/null
+++ b/scripts/release_management/github-openpr.py
@@ -0,0 +1,52 @@
+#!/usr/bin/env python
+
+# Open a pull request from one branch into another. --head and --base are required.
+# Optimized for CircleCI
+
+import json
+import os
+import argparse
+import httplib
+from base64 import b64encode
+
+
+def request(org, repo, data):
+    user_and_pass = b64encode(b"{0}:{1}".format(os.environ['GITHUB_USERNAME'], os.environ['GITHUB_TOKEN'])).decode("ascii")
+    headers = {
+        'User-Agent': 'tenderbot',
+        'Accept': 'application/vnd.github.v3+json',
+        'Authorization': 'Basic %s' % user_and_pass
+    }
+
+    conn = httplib.HTTPSConnection('api.github.com', timeout=5)
+    conn.request('POST', '/repos/{0}/{1}/pulls'.format(org, repo), data, headers)
+    response = conn.getresponse()
+    if response.status < 200 or response.status > 299:
+        print(response)
+        conn.close()
+        raise IOError(response.reason)
+    responsedata = response.read()
+    conn.close()
+    return json.loads(responsedata)
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--org", default="tendermint", help="GitHub organization. Defaults to tendermint.")
+    parser.add_argument("--repo", default="tendermint", help="GitHub repository. Defaults to tendermint.")
+    parser.add_argument("--head", help="The name of the branch where your changes are implemented.", required=True)
+    parser.add_argument("--base", help="The name of the branch you want the changes pulled into.", required=True)
+    parser.add_argument("--title", default="Security release {0}".format(os.environ.get('CIRCLE_TAG')), help="The title of the pull request.")
+    args = parser.parse_args()
+
+    if not os.environ.has_key('GITHUB_USERNAME'):
+        raise parser.error('GITHUB_USERNAME not set.')
+
+    if not os.environ.has_key('GITHUB_TOKEN'):
+        raise parser.error('GITHUB_TOKEN not set.')
+
+    if os.environ.get('CIRCLE_TAG') is None:
+        raise parser.error('CIRCLE_TAG not set.')
+
+    result = request(args.org, args.repo, data=json.dumps({'title': "{0}".format(args.title), 'head': "{0}".format(args.head), 'base': "{0}".format(args.base), 'body': ""}))
+    print(result['html_url'])
diff --git a/scripts/release_management/github-public-newbranch.bash b/scripts/release_management/github-public-newbranch.bash
new file mode 100644
index 00000000000..ca2fa131412
--- /dev/null
+++ b/scripts/release_management/github-public-newbranch.bash
@@ -0,0 +1,28 @@
+#!/bin/bash
+
+# github-public-newbranch.bash - create a public branch from the security repository
+
+set -euo pipefail
+
+# Create new branch
+BRANCH="${CIRCLE_TAG:-v0.0.0}-security-`date -u +%Y%m%d%H%M%S`"
+# Check if the patch release already exists as a branch
+if [ -n "`git branch | grep "${BRANCH}"`" ]; then
+  echo "WARNING: Branch ${BRANCH} already exists."
+else
+  echo "Creating branch ${BRANCH}."
+  git branch "${BRANCH}"
+fi
+
+# ... and check it out
+git checkout "${BRANCH}"
+
+# Add entry to public repository
+git remote add tendermint-origin git@github.com:tendermint/tendermint.git
+
+# Push branch and tag to public repository
+git push tendermint-origin
+git push tendermint-origin --tags
+
+# Create a PR from the public branch to the assumed release branch in public (the release branch has to exist)
+python -u scripts/release_management/github-openpr.py --head "${BRANCH}" --base "${BRANCH%.*}"
diff --git a/scripts/release_management/github-publish.py b/scripts/release_management/github-publish.py
new file mode 100755
index 00000000000..31071aecdb8
--- /dev/null
+++ b/scripts/release_management/github-publish.py
@@ -0,0 +1,53 @@
+#!/usr/bin/env python
+
+# Publish an existing GitHub draft release. --id required.
+# Optimized for CircleCI + +import json +import os +import argparse +import httplib +from base64 import b64encode + + +def request(org, repo, id, data): + user_and_pass = b64encode(b"{0}:{1}".format(os.environ['GITHUB_USERNAME'], os.environ['GITHUB_TOKEN'])).decode("ascii") + headers = { + 'User-Agent': 'tenderbot', + 'Accept': 'application/vnd.github.v3+json', + 'Authorization': 'Basic %s' % user_and_pass + } + + conn = httplib.HTTPSConnection('api.github.com', timeout=5) + conn.request('POST', '/repos/{0}/{1}/releases/{2}'.format(org,repo,id), data, headers) + response = conn.getresponse() + if response.status < 200 or response.status > 299: + print(response) + conn.close() + raise IOError(response.reason) + responsedata = response.read() + conn.close() + return json.loads(responsedata) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--org", default="tendermint", help="GitHub organization") + parser.add_argument("--repo", default="tendermint", help="GitHub repository") + parser.add_argument("--id", help="GitHub release ID", required=True, type=int) + parser.add_argument("--version", default=os.environ.get('CIRCLE_TAG'), help="Version number for the release, e.g.: v1.0.0") + args = parser.parse_args() + + if not os.environ.has_key('GITHUB_USERNAME'): + raise parser.error('GITHUB_USERNAME not set.') + + if not os.environ.has_key('GITHUB_TOKEN'): + raise parser.error('GITHUB_TOKEN not set.') + + try: + result = request(args.org, args.repo, args.id, data=json.dumps({'draft':False,'tag_name':"{0}".format(args.version)})) + except IOError as e: + print(e) + result = request(args.org, args.repo, args.id, data=json.dumps({'draft':False,'tag_name':"{0}-autorelease".format(args.version)})) + + print(result['name']) diff --git a/scripts/release_management/github-upload.py b/scripts/release_management/github-upload.py new file mode 100755 index 00000000000..77c76a75544 --- /dev/null +++ b/scripts/release_management/github-upload.py @@ -0,0 +1,68 @@ +#!/usr/bin/env python + +# Upload a file to a GitHub draft release. --id and --file are required. 
+# Optimized for CircleCI
+
+import json
+import os
+import re
+import argparse
+import mimetypes
+import httplib
+from base64 import b64encode
+
+
+def request(baseurl, path, mimetype, mimeencoding, data):
+    user_and_pass = b64encode(b"{0}:{1}".format(os.environ['GITHUB_USERNAME'], os.environ['GITHUB_TOKEN'])).decode("ascii")
+
+    headers = {
+        'User-Agent': 'tenderbot',
+        'Accept': 'application/vnd.github.v3.raw+json',
+        'Authorization': 'Basic %s' % user_and_pass,
+        'Content-Type': mimetype,
+        'Content-Encoding': mimeencoding
+    }
+
+    conn = httplib.HTTPSConnection(baseurl, timeout=5)
+    conn.request('POST', path, data, headers)
+    response = conn.getresponse()
+    if response.status < 200 or response.status > 299:
+        print(response)
+        conn.close()
+        raise IOError(response.reason)
+    responsedata = response.read()
+    conn.close()
+    return json.loads(responsedata)
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--id", help="GitHub release ID", required=True, type=int)
+    parser.add_argument("--file", default="/tmp/workspace/tendermint_{0}_{1}_{2}.zip".format(os.environ.get('CIRCLE_TAG'), os.environ.get('GOOS'), os.environ.get('GOARCH')), help="File to upload")
+    parser.add_argument("--return-id-only", help="Return only the release ID after upload to GitHub.", action='store_true')
+    args = parser.parse_args()
+
+    if not os.environ.has_key('GITHUB_USERNAME'):
+        raise parser.error('GITHUB_USERNAME not set.')
+
+    if not os.environ.has_key('GITHUB_TOKEN'):
+        raise parser.error('GITHUB_TOKEN not set.')
+
+    mimetypes.init()
+    filename = os.path.basename(args.file)
+    mimetype, mimeencoding = mimetypes.guess_type(filename, strict=False)
+    if mimetype is None:
+        mimetype = 'application/zip'
+    if mimeencoding is None:
+        mimeencoding = 'utf8'
+
+    with open(args.file, 'rb') as f:
+        asset = f.read()
+
+    result = request('uploads.github.com', '/repos/tendermint/tendermint/releases/{0}/assets?name={1}'.format(args.id, filename), mimetype, mimeencoding, asset)
+
+    if args.return_id_only:
+        print(result['id'])
+    else:
+        print(result['browser_download_url'])
+
diff --git a/scripts/release_management/sha-files.py b/scripts/release_management/sha-files.py
new file mode 100755
index 00000000000..2a9ee0d594d
--- /dev/null
+++ b/scripts/release_management/sha-files.py
@@ -0,0 +1,35 @@
+#!/usr/bin/env python
+
+# Create SHA256 summaries from all ZIP files in a folder
+# Optimized for CircleCI
+
+import re
+import os
+import argparse
+import zipfile
+import hashlib
+
+
+BLOCKSIZE = 65536
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--folder", default="/tmp/workspace", help="Folder to scan for ZIP files")
+    parser.add_argument("--shafile", default="/tmp/workspace/SHA256SUMS", help="SHA256 summaries file")
+    args = parser.parse_args()
+
+    for filename in os.listdir(args.folder):
+        if re.search(r'\.zip$', filename) is None:
+            continue
+        if not os.path.isfile(os.path.join(args.folder, filename)):
+            continue
+        with open(args.shafile, 'a+') as shafile:
+            hasher = hashlib.sha256()
+            with open(os.path.join(args.folder, filename), 'rb') as f:
+                buf = f.read(BLOCKSIZE)
+                while len(buf) > 0:
+                    hasher.update(buf)
+                    buf = f.read(BLOCKSIZE)
+            shafile.write("{0} {1}\n".format(hasher.hexdigest(), filename))
+
diff --git a/scripts/release_management/zip-file.py b/scripts/release_management/zip-file.py
new file mode 100755
index 00000000000..5d2f5b2c816
--- /dev/null
+++ b/scripts/release_management/zip-file.py
@@ -0,0 +1,44 @@
+#!/usr/bin/env python
+
+# ZIP one file as "tendermint" into a ZIP like tendermint_VERSION_OS_ARCH.zip
+# Use environment variables CIRCLE_TAG, GOOS and GOARCH for easy input parameters.
+# Optimized for CircleCI
+
+import os
+import argparse
+import zipfile
+import hashlib
+
+
+BLOCKSIZE = 65536
+
+
+def zip_asset(file, destination, arcname, version, goos, goarch):
+    filename = os.path.basename(file)
+    output = "{0}/{1}_{2}_{3}_{4}.zip".format(destination, arcname, version, goos, goarch)
+
+    with zipfile.ZipFile(output, 'w') as f:
+        f.write(filename=file, arcname=arcname)
+        f.comment = filename
+    return output
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--file", default="build/tendermint_{0}_{1}".format(os.environ.get('GOOS'), os.environ.get('GOARCH')), help="File to zip")
+    parser.add_argument("--destination", default="build", help="Destination folder for files")
+    parser.add_argument("--version", default=os.environ.get('CIRCLE_TAG'), help="Version number for binary, e.g.: v1.0.0")
+    parser.add_argument("--goos", default=os.environ.get('GOOS'), help="GOOS parameter")
+    parser.add_argument("--goarch", default=os.environ.get('GOARCH'), help="GOARCH parameter")
+    args = parser.parse_args()
+
+    if args.version is None:
+        raise parser.error("argument --version is required")
+    if args.goos is None:
+        raise parser.error("argument --goos is required")
+    if args.goarch is None:
+        raise parser.error("argument --goarch is required")
+
+    file = zip_asset(args.file, args.destination, "tendermint", args.version, args.goos, args.goarch)
+    print(file)
+
diff --git a/state/services.go b/state/services.go
index 02c3aa7d179..07d12c5a10c 100644
--- a/state/services.go
+++ b/state/services.go
@@ -23,6 +23,7 @@ type Mempool interface {
 	Size() int
 	CheckTx(types.Tx, func(*abci.Response)) error
+	CheckTxWithInfo(types.Tx, func(*abci.Response), mempool.TxInfo) error
 	ReapMaxBytesMaxGas(maxBytes, maxGas int64) types.Txs
 	Update(int64, types.Txs, mempool.PreCheckFunc, mempool.PostCheckFunc) error
 	Flush()
@@ -37,11 +38,17 @@ type MockMempool struct{}
 
 var _ Mempool = MockMempool{}
 
-func (MockMempool) Lock() {}
-func (MockMempool) Unlock() {}
-func (MockMempool) Size() int { return 0 }
-func (MockMempool) CheckTx(_ types.Tx, _ func(*abci.Response)) error { return nil }
-func (MockMempool) ReapMaxBytesMaxGas(_, _ int64) types.Txs { return types.Txs{} }
+func (MockMempool) Lock()     {}
+func (MockMempool) Unlock()   {}
+func (MockMempool) Size() int { return 0 }
+func (MockMempool) CheckTx(_ types.Tx, _ func(*abci.Response)) error {
+	return nil
+}
+func (MockMempool) CheckTxWithInfo(_ types.Tx, _ func(*abci.Response),
+	_ mempool.TxInfo) error {
+	return nil
+}
+func (MockMempool) ReapMaxBytesMaxGas(_, _ int64) types.Txs { return types.Txs{} }
 func (MockMempool) Update(
 	_ int64,
 	_ types.Txs,
diff --git a/state/store.go b/state/store.go
index 6b01a829575..73116b43fc6 100644
--- a/state/store.go
+++ b/state/store.go
@@ -9,6 +9,14 @@ import (
 	"github.com/tendermint/tendermint/types"
 )
 
+const (
+	// persist validators every valSetCheckpointInterval blocks to avoid
+	// LoadValidators taking too much time.
+ // https://github.com/tendermint/tendermint/pull/3438 + // 100000 results in ~ 100ms to get 100 validators (see BenchmarkLoadValidators) + valSetCheckpointInterval = 100000 +) + //------------------------------------------------------------------------ func calcValidatorsKey(height int64) []byte { @@ -182,25 +190,38 @@ func LoadValidators(db dbm.DB, height int64) (*types.ValidatorSet, error) { if valInfo == nil { return nil, ErrNoValSetForHeight{height} } - if valInfo.ValidatorSet == nil { - valInfo2 := loadValidatorsInfo(db, valInfo.LastHeightChanged) + lastStoredHeight := lastStoredHeightFor(height, valInfo.LastHeightChanged) + valInfo2 := loadValidatorsInfo(db, lastStoredHeight) if valInfo2 == nil { - panic( - fmt.Sprintf( - "Couldn't find validators at height %d as last changed from height %d", - valInfo.LastHeightChanged, - height, - ), - ) + // TODO (melekes): remove the below if condition in the 0.33 major + // release and just panic. Old chains might panic otherwise if they + // haven't saved validators at intermediate (%valSetCheckpointInterval) + // height yet. + // https://github.com/tendermint/tendermint/issues/3543 + valInfo2 = loadValidatorsInfo(db, valInfo.LastHeightChanged) + lastStoredHeight = valInfo.LastHeightChanged + if valInfo2 == nil { + panic( + fmt.Sprintf("Couldn't find validators at height %d (height %d was originally requested)", + lastStoredHeight, + height, + ), + ) + } } - valInfo2.ValidatorSet.IncrementProposerPriority(int(height - valInfo.LastHeightChanged)) // mutate + valInfo2.ValidatorSet.IncrementProposerPriority(int(height - lastStoredHeight)) // mutate valInfo = valInfo2 } return valInfo.ValidatorSet, nil } +func lastStoredHeightFor(height, lastHeightChanged int64) int64 { + checkpointHeight := height - height%valSetCheckpointInterval + return cmn.MaxInt64(checkpointHeight, lastHeightChanged) +} + // CONTRACT: Returned ValidatorsInfo can be mutated. func loadValidatorsInfo(db dbm.DB, height int64) *ValidatorsInfo { buf := db.Get(calcValidatorsKey(height)) @@ -221,10 +242,10 @@ func loadValidatorsInfo(db dbm.DB, height int64) *ValidatorsInfo { } // saveValidatorsInfo persists the validator set. -// `height` is the effective height for which the validator is responsible for signing. -// It should be called from s.Save(), right before the state itself is persisted. -// If the validator set did not change after processing the latest block, -// only the last height for which the validators changed is persisted. +// +// `height` is the effective height for which the validator is responsible for +// signing. It should be called from s.Save(), right before the state itself is +// persisted. func saveValidatorsInfo(db dbm.DB, height, lastHeightChanged int64, valSet *types.ValidatorSet) { if lastHeightChanged > height { panic("LastHeightChanged cannot be greater than ValidatorsInfo height") @@ -232,7 +253,9 @@ func saveValidatorsInfo(db dbm.DB, height, lastHeightChanged int64, valSet *type valInfo := &ValidatorsInfo{ LastHeightChanged: lastHeightChanged, } - if lastHeightChanged == height { + // Only persist validator set if it was updated or checkpoint height (see + // valSetCheckpointInterval) is reached. 
+ if height == lastHeightChanged || height%valSetCheckpointInterval == 0 { valInfo.ValidatorSet = valSet } db.Set(calcValidatorsKey(height), valInfo.Bytes()) diff --git a/state/store_test.go b/state/store_test.go new file mode 100644 index 00000000000..dd48cae718b --- /dev/null +++ b/state/store_test.go @@ -0,0 +1,67 @@ +package state + +import ( + "fmt" + "os" + "testing" + + "github.com/stretchr/testify/assert" + + cfg "github.com/tendermint/tendermint/config" + dbm "github.com/tendermint/tendermint/libs/db" + "github.com/tendermint/tendermint/types" +) + +func TestSaveValidatorsInfo(t *testing.T) { + // test we persist validators every valSetCheckpointInterval blocks + stateDB := dbm.NewMemDB() + val, _ := types.RandValidator(true, 10) + vals := types.NewValidatorSet([]*types.Validator{val}) + + // TODO(melekes): remove in 0.33 release + // https://github.com/tendermint/tendermint/issues/3543 + saveValidatorsInfo(stateDB, 1, 1, vals) + saveValidatorsInfo(stateDB, 2, 1, vals) + assert.NotPanics(t, func() { + _, err := LoadValidators(stateDB, 2) + if err != nil { + panic(err) + } + }) + //ENDREMOVE + + saveValidatorsInfo(stateDB, valSetCheckpointInterval, 1, vals) + + loadedVals, err := LoadValidators(stateDB, valSetCheckpointInterval) + assert.NoError(t, err) + assert.NotZero(t, loadedVals.Size()) +} + +func BenchmarkLoadValidators(b *testing.B) { + const valSetSize = 100 + + config := cfg.ResetTestRoot("state_") + defer os.RemoveAll(config.RootDir) + dbType := dbm.DBBackendType(config.DBBackend) + stateDB := dbm.NewDB("state", dbType, config.DBDir()) + state, err := LoadStateFromDBOrGenesisFile(stateDB, config.GenesisFile()) + if err != nil { + b.Fatal(err) + } + state.Validators = genValSet(valSetSize) + state.NextValidators = state.Validators.CopyIncrementProposerPriority(1) + SaveState(stateDB, state) + + for i := 10; i < 10000000000; i *= 10 { // 10, 100, 1000, ... + saveValidatorsInfo(stateDB, int64(i), state.LastHeightValidatorsChanged, state.NextValidators) + + b.Run(fmt.Sprintf("height=%d", i), func(b *testing.B) { + for n := 0; n < b.N; n++ { + _, err := LoadValidators(stateDB, int64(i)) + if err != nil { + b.Fatal(err) + } + } + }) + } +} diff --git a/tools/tm-signer-harness/internal/test_harness.go b/tools/tm-signer-harness/internal/test_harness.go index 00548913378..7fefdfb4201 100644 --- a/tools/tm-signer-harness/internal/test_harness.go +++ b/tools/tm-signer-harness/internal/test_harness.go @@ -198,8 +198,8 @@ func (th *TestHarness) TestSignProposal() error { hash := tmhash.Sum([]byte("hash")) prop := &types.Proposal{ Type: types.ProposalType, - Height: 12345, - Round: 23456, + Height: 100, + Round: 0, POLRound: -1, BlockID: types.BlockID{ Hash: hash, @@ -240,8 +240,8 @@ func (th *TestHarness) TestSignVote() error { hash := tmhash.Sum([]byte("hash")) vote := &types.Vote{ Type: voteType, - Height: 12345, - Round: 23456, + Height: 101, + Round: 0, BlockID: types.BlockID{ Hash: hash, PartsHeader: types.PartSetHeader{ diff --git a/types/protobuf.go b/types/protobuf.go index e10b9186954..c87e82c0a81 100644 --- a/types/protobuf.go +++ b/types/protobuf.go @@ -220,36 +220,3 @@ func (pb2tm) ValidatorUpdates(vals []abci.ValidatorUpdate) ([]*Validator, error) } return tmVals, nil } - -// BlockParams.TimeIotaMs is not exposed to the application. Therefore a caller -// must provide it. 
-func (pb2tm) ConsensusParams(csp *abci.ConsensusParams, blockTimeIotaMs int64) ConsensusParams { - params := ConsensusParams{ - Block: BlockParams{}, - Evidence: EvidenceParams{}, - Validator: ValidatorParams{}, - } - - // we must defensively consider any structs may be nil - if csp.Block != nil { - params.Block = BlockParams{ - MaxBytes: csp.Block.MaxBytes, - MaxGas: csp.Block.MaxGas, - TimeIotaMs: blockTimeIotaMs, - } - } - - if csp.Evidence != nil { - params.Evidence = EvidenceParams{ - MaxAge: csp.Evidence.MaxAge, - } - } - - if csp.Validator != nil { - params.Validator = ValidatorParams{ - PubKeyTypes: csp.Validator.PubKeyTypes, - } - } - - return params -} diff --git a/types/protobuf_test.go b/types/protobuf_test.go index 152c92d12ff..64caa3f4c11 100644 --- a/types/protobuf_test.go +++ b/types/protobuf_test.go @@ -64,7 +64,7 @@ func TestABCIValidators(t *testing.T) { func TestABCIConsensusParams(t *testing.T) { cp := DefaultConsensusParams() abciCP := TM2PB.ConsensusParams(cp) - cp2 := PB2TM.ConsensusParams(abciCP, cp.Block.TimeIotaMs) + cp2 := cp.Update(abciCP) assert.Equal(t, *cp, cp2) } diff --git a/types/validator_set.go b/types/validator_set.go index 3d31cf7d076..36ce67f061e 100644 --- a/types/validator_set.go +++ b/types/validator_set.go @@ -31,7 +31,8 @@ const ( // ValidatorSet represent a set of *Validator at a given height. // The validators can be fetched by address or index. // The index is in order of .Address, so the indices are fixed -// for all rounds of a given blockchain height. +// for all rounds of a given blockchain height - ie. the validators +// are sorted by their address. // On the other hand, the .ProposerPriority of each validator and // the designated .GetProposer() of a set changes every round, // upon calling .IncrementProposerPriority(). diff --git a/version/version.go b/version/version.go index b2202206cf5..9ba38de6cfe 100644 --- a/version/version.go +++ b/version/version.go @@ -20,7 +20,7 @@ const ( // Must be a string because scripts like dist.sh read this file. // XXX: Don't change the name of this variable or you will break // automation :) - TMCoreSemVer = "0.31.0" + TMCoreSemVer = "0.31.4" // ABCISemVer is the semantic version of the ABCI library ABCISemVer = "0.16.0"
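A closing note on the validator-set checkpointing introduced in `state/store.go` above: `LoadValidators` now loads from the more recent of the last checkpoint height and `LastHeightChanged`, then replays proposer-priority increments only for the remaining gap. A self-contained sketch of that arithmetic (restating `lastStoredHeightFor` without the `cmn.MaxInt64` helper):

package main

import "fmt"

// Validators are persisted at least every valSetCheckpointInterval blocks,
// matching the constant added in state/store.go.
const valSetCheckpointInterval = 100000

// lastStoredHeightFor mirrors the helper above: the set is stored either at
// the height it last changed or at a checkpoint, whichever is more recent.
func lastStoredHeightFor(height, lastHeightChanged int64) int64 {
	checkpointHeight := height - height%valSetCheckpointInterval
	if checkpointHeight > lastHeightChanged {
		return checkpointHeight
	}
	return lastHeightChanged
}

func main() {
	// Set last changed at height 150000, request height 250123: the
	// checkpoint at 200000 is more recent, so only 50123 proposer-priority
	// increments are replayed instead of 100123.
	fmt.Println(lastStoredHeightFor(250123, 150000)) // 200000

	// If the set changed after the last checkpoint, the change height wins.
	fmt.Println(lastStoredHeightFor(250123, 220000)) // 220000
}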