diff --git a/cmd/clef/main.go b/cmd/clef/main.go
index 802e118e70..088701eeee 100644
--- a/cmd/clef/main.go
+++ b/cmd/clef/main.go
@@ -533,7 +533,12 @@ func DefaultConfigDir() string {
if runtime.GOOS == "darwin" {
return filepath.Join(home, "Library", "Signer")
} else if runtime.GOOS == "windows" {
- return filepath.Join(home, "AppData", "Roaming", "Signer")
+ appdata := os.Getenv("APPDATA")
+ if appdata != "" {
+ return filepath.Join(appdata, "Signer")
+ } else {
+ return filepath.Join(home, "AppData", "Roaming", "Signer")
+ }
} else {
return filepath.Join(home, ".clef")
}
diff --git a/core/blockchain.go b/core/blockchain.go
index feb9fb9426..b6605e66c9 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -456,7 +456,7 @@ func (bc *BlockChain) repair(head **types.Block) error {
if block == nil {
return fmt.Errorf("missing block %d [%x]", (*head).NumberU64()-1, (*head).ParentHash())
}
- (*head) = block
+ *head = block
}
}
diff --git a/core/blockchain_test.go b/core/blockchain_test.go
index 0372a9848d..a16b3ba8af 100644
--- a/core/blockchain_test.go
+++ b/core/blockchain_test.go
@@ -1412,7 +1412,7 @@ func benchmarkLargeNumberOfValueToNonexisting(b *testing.B, numTxs, numBlocks in
}
b.StopTimer()
if got := chain.CurrentBlock().Transactions().Len(); got != numTxs*numBlocks {
- b.Fatalf("Transactions were not included, expected %d, got %d", (numTxs * numBlocks), got)
+ b.Fatalf("Transactions were not included, expected %d, got %d", numTxs*numBlocks, got)
}
}
diff --git a/core/vm/gas.go b/core/vm/gas.go
index bba7058c7e..dba1b3a02f 100644
--- a/core/vm/gas.go
+++ b/core/vm/gas.go
@@ -30,10 +30,6 @@ const (
GasMidStep uint64 = 8
GasSlowStep uint64 = 10
GasExtStep uint64 = 20
-
- GasReturn uint64 = 0
- GasStop uint64 = 0
- GasContractByte uint64 = 200
)
// calcGas returns the actual gas cost of the call.
diff --git a/crypto/signature_cgo.go b/crypto/signature_cgo.go
index 340bfc221e..aadf028d26 100644
--- a/crypto/signature_cgo.go
+++ b/crypto/signature_cgo.go
@@ -14,7 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-// +build !nacl,!js,!nocgo
+// +build !nacl,!js,cgo
package crypto
diff --git a/crypto/signature_nocgo.go b/crypto/signature_nocgo.go
index e8fa18ed47..90d072cda7 100644
--- a/crypto/signature_nocgo.go
+++ b/crypto/signature_nocgo.go
@@ -14,7 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-// +build nacl js nocgo
+// +build nacl js !cgo
package crypto
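
A reading aid for the two constraint lines above, since old-style build tags are easy to misparse: within a "+build" line, commas mean AND, spaces mean OR, and "!" negates. The "cgo" tag is set by the Go toolchain itself whenever cgo is enabled, which is the point of the change: users no longer need to remember a project-specific "nocgo" tag.

    // signature_cgo.go:   // +build !nacl,!js,cgo  -> !nacl && !js && cgo
    // signature_nocgo.go: // +build nacl js !cgo   -> nacl || js || !cgo
    //
    // The toolchain derives the "cgo" tag from CGO_ENABLED, so:
    //
    //    CGO_ENABLED=0 go build ./crypto/...   # compiles signature_nocgo.go
    //    CGO_ENABLED=1 go build ./crypto/...   # compiles signature_cgo.go
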
diff --git a/eth/api_tracer.go b/eth/api_tracer.go
index 7aa48b2563..a529ea118e 100644
--- a/eth/api_tracer.go
+++ b/eth/api_tracer.go
@@ -526,13 +526,7 @@ func (api *PrivateDebugAPI) traceBlock(ctx context.Context, block *types.Block,
func (api *PrivateDebugAPI) standardTraceBlockToFile(ctx context.Context, block *types.Block, config *StdTraceConfig) ([]string, error) {
// If we're tracing a single transaction, make sure it's present
if config != nil && config.TxHash != (common.Hash{}) {
- var exists bool
- for _, tx := range block.Transactions() {
- if exists = (tx.Hash() == config.TxHash); exists {
- break
- }
- }
- if !exists {
+ if !containsTx(block, config.TxHash) {
return nil, fmt.Errorf("transaction %#x not found in block", config.TxHash)
}
}
@@ -625,6 +619,17 @@ func (api *PrivateDebugAPI) standardTraceBlockToFile(ctx context.Context, block
return dumps, nil
}
+// containsTx reports whether the transaction with a certain hash
+// is contained within the specified block.
+func containsTx(block *types.Block, hash common.Hash) bool {
+ for _, tx := range block.Transactions() {
+ if tx.Hash() == hash {
+ return true
+ }
+ }
+ return false
+}
+
// computeStateDB retrieves the state database associated with a certain block.
// If no state is locally available for the given block, a number of blocks are
// attempted to be reexecuted to generate the desired state.
diff --git a/eth/config.go b/eth/config.go
index f71b8dfee4..aca9b5e68b 100644
--- a/eth/config.go
+++ b/eth/config.go
@@ -68,8 +68,15 @@ func init() {
home = user.HomeDir
}
}
- if runtime.GOOS == "windows" {
- DefaultConfig.Ethash.DatasetDir = filepath.Join(home, "AppData", "Ethash")
+ if runtime.GOOS == "darwin" {
+ DefaultConfig.Ethash.DatasetDir = filepath.Join(home, "Library", "Ethash")
+ } else if runtime.GOOS == "windows" {
+ localappdata := os.Getenv("LOCALAPPDATA")
+ if localappdata != "" {
+ DefaultConfig.Ethash.DatasetDir = filepath.Join(localappdata, "Ethash")
+ } else {
+ DefaultConfig.Ethash.DatasetDir = filepath.Join(home, "AppData", "Local", "Ethash")
+ }
} else {
DefaultConfig.Ethash.DatasetDir = filepath.Join(home, ".ethash")
}
diff --git a/node/defaults.go b/node/defaults.go
index cea4997cb4..73b262429d 100644
--- a/node/defaults.go
+++ b/node/defaults.go
@@ -58,11 +58,20 @@ func DefaultDataDir() string {
// Try to place the data folder in the user's home dir
home := homeDir()
if home != "" {
- if runtime.GOOS == "darwin" {
+ switch runtime.GOOS {
+ case "darwin":
return filepath.Join(home, "Library", "Ethereum")
- } else if runtime.GOOS == "windows" {
- return filepath.Join(home, "AppData", "Roaming", "Ethereum")
- } else {
+ case "windows":
+ // We used to put everything in %HOME%\AppData\Roaming, but this caused
+ // problems with non-typical setups. If this fallback location exists and
+ // is non-empty, use it; otherwise do the right thing and check %LOCALAPPDATA%.
+ fallback := filepath.Join(home, "AppData", "Roaming", "Ethereum")
+ appdata := windowsAppData()
+ if appdata == "" || isNonEmptyDir(fallback) {
+ return fallback
+ }
+ return filepath.Join(appdata, "Ethereum")
+ default:
return filepath.Join(home, ".ethereum")
}
}
@@ -70,6 +79,26 @@ func DefaultDataDir() string {
return ""
}
+func windowsAppData() string {
+ if v := os.Getenv("LOCALAPPDATA"); v != "" {
+ return v // Vista+
+ }
+ if v := os.Getenv("APPDATA"); v != "" {
+ return filepath.Join(v, "Local")
+ }
+ return ""
+}
+
+func isNonEmptyDir(dir string) bool {
+ f, err := os.Open(dir)
+ if err != nil {
+ return false
+ }
+ names, _ := f.Readdir(1)
+ f.Close()
+ return len(names) > 0
+}
+
func homeDir() string {
if home := os.Getenv("HOME"); home != "" {
return home
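
The Windows branch plus its two helpers are easiest to read as a single function. A standalone sketch under that assumption (defaultWindowsDataDir is an illustrative name, not part of the node package; the helpers are copied from the hunk above so the sketch is self-contained):

    package main

    import (
        "fmt"
        "os"
        "path/filepath"
    )

    func windowsAppData() string {
        if v := os.Getenv("LOCALAPPDATA"); v != "" {
            return v // Vista+
        }
        if v := os.Getenv("APPDATA"); v != "" {
            return filepath.Join(v, "Local")
        }
        return ""
    }

    func isNonEmptyDir(dir string) bool {
        f, err := os.Open(dir)
        if err != nil {
            return false
        }
        names, _ := f.Readdir(1) // one entry is enough to prove non-emptiness
        f.Close()
        return len(names) > 0
    }

    // defaultWindowsDataDir keeps the legacy Roaming directory whenever it
    // already holds data; only fresh installs move to %LOCALAPPDATA%.
    func defaultWindowsDataDir(home string) string {
        fallback := filepath.Join(home, "AppData", "Roaming", "Ethereum")
        if appdata := windowsAppData(); appdata != "" && !isNonEmptyDir(fallback) {
            return filepath.Join(appdata, "Ethereum")
        }
        return fallback
    }

    func main() {
        fmt.Println(defaultWindowsDataDir(`C:\Users\alice`))
    }
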
diff --git a/p2p/discover/udp.go b/p2p/discover/udp.go
index df9a3065fa..e386af363d 100644
--- a/p2p/discover/udp.go
+++ b/p2p/discover/udp.go
@@ -54,6 +54,11 @@ const (
ntpFailureThreshold = 32 // Continuous timeouts after which to check NTP
ntpWarningCooldown = 10 * time.Minute // Minimum amount of time to pass before repeating NTP warning
driftThreshold = 10 * time.Second // Allowed clock drift before warning user
+
+ // Discovery packets are defined to be no larger than 1280 bytes.
+ // Packets larger than this size will be cut at the end and treated
+ // as invalid because their hash won't match.
+ maxPacketSize = 1280
)
// RPC packet types
@@ -496,7 +501,7 @@ var (
headSpace = make([]byte, headSize)
// Neighbors replies are sent across multiple packets to
- // stay below the 1280 byte limit. We compute the maximum number
+ // stay below the packet size limit. We compute the maximum number
// of entries by stuffing a packet until it grows too large.
maxNeighbors int
)
@@ -511,7 +516,7 @@ func init() {
// If this ever happens, it will be caught by the unit tests.
panic("cannot encode: " + err.Error())
}
- if headSize+size+1 >= 1280 {
+ if headSize+size+1 >= maxPacketSize {
maxNeighbors = n
break
}
@@ -562,10 +567,7 @@ func (t *udp) readLoop(unhandled chan<- ReadPacket) {
defer close(unhandled)
}
- // Discovery packets are defined to be no larger than 1280 bytes.
- // Packets larger than this size will be cut at the end and treated
- // as invalid because their hash won't match.
- buf := make([]byte, 1280)
+ buf := make([]byte, maxPacketSize)
for {
nbytes, from, err := t.conn.ReadFromUDP(buf)
if netutil.IsTemporaryError(err) {
@@ -715,7 +717,7 @@ func (req *findnode) handle(t *udp, from *net.UDPAddr, fromID enode.ID, mac []by
t.tab.mutex.Unlock()
// Send neighbors in chunks with at most maxNeighbors per packet
- // to stay below the 1280 byte limit.
+ // to stay below the packet size limit.
p := neighbors{Expiration: uint64(time.Now().Add(expiration).Unix())}
var sent bool
for _, n := range closest {
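
The init() shown above sizes maxNeighbors empirically, stuffing a packet until encoding it would cross maxPacketSize, instead of deriving the count from the RLP layout. A standalone sketch of that probing pattern; the entry type is a simplified stand-in for the real rpcNode, and the headSize value is assumed from udp.go:

    package main

    import (
        "fmt"

        "github.com/ethereum/go-ethereum/rlp"
    )

    const (
        maxPacketSize = 1280
        headSize      = 97 // macSize (32) + sigSize (65), assumed from udp.go
    )

    // entry stands in for the node records carried by a neighbors packet;
    // the real code probes with a maximal rpcNode instead.
    type entry struct {
        IP  [16]byte // worst case: a full IPv6 address
        UDP uint16
        TCP uint16
        ID  [64]byte
    }

    func main() {
        var list []entry
        for n := 1; ; n++ {
            list = append(list, entry{UDP: ^uint16(0), TCP: ^uint16(0)})
            size, _, err := rlp.EncodeToReader(list)
            if err != nil {
                panic(err) // cannot happen for this static type
            }
            if headSize+size+1 >= maxPacketSize {
                fmt.Println("entries that fit below the limit:", n-1)
                break
            }
        }
    }
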
diff --git a/p2p/simulations/network_test.go b/p2p/simulations/network_test.go
index 8b644ffb0f..01cd1000de 100644
--- a/p2p/simulations/network_test.go
+++ b/p2p/simulations/network_test.go
@@ -193,7 +193,7 @@ OUTER:
connEventCount = nodeCount
-OUTER_TWO:
+OuterTwo:
for {
select {
case <-ctx.Done():
@@ -211,7 +211,7 @@ OUTER_TWO:
connEventCount--
log.Debug("ev", "count", connEventCount)
if connEventCount == 0 {
- break OUTER_TWO
+ break OuterTwo
}
}
}
diff --git a/rpc/websocket.go b/rpc/websocket.go
index b8e067a5f2..c5383667d9 100644
--- a/rpc/websocket.go
+++ b/rpc/websocket.go
@@ -124,6 +124,13 @@ func wsHandshakeValidator(allowedOrigins []string) func(*websocket.Config, *http
log.Debug(fmt.Sprintf("Allowed origin(s) for WS RPC interface %v", origins.ToSlice()))
f := func(cfg *websocket.Config, req *http.Request) error {
+ // Skip origin verification if no Origin header is present. The origin check
+ // is supposed to protect against browser-based attacks, and browsers always
+ // set Origin. Non-browser software can put anything in the Origin header, so
+ // checking it there provides no additional security.
+ if _, ok := req.Header["Origin"]; !ok {
+ return nil
+ }
// Verify origin against whitelist.
origin := strings.ToLower(req.Header.Get("Origin"))
if allowAllOrigins || origins.Contains(origin) {
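
The guard uses a map lookup rather than req.Header.Get("Origin") because Get returns "" both when the header is absent and when it is present but empty; only the map lookup distinguishes a non-browser client from a browser sending an empty origin. A quick standalone check:

    package main

    import (
        "fmt"
        "net/http"
    )

    func main() {
        req, _ := http.NewRequest("GET", "http://localhost:8546", nil)

        _, present := req.Header["Origin"]
        fmt.Println(present, req.Header.Get("Origin")) // false "" -> skip the check

        req.Header.Set("Origin", "http://evil.example")
        _, present = req.Header["Origin"]
        fmt.Println(present, req.Header.Get("Origin")) // true http://evil.example -> verify against whitelist
    }
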
diff --git a/swarm/network/stream/delivery.go b/swarm/network/stream/delivery.go
index fae6994f0c..02c5f222c5 100644
--- a/swarm/network/stream/delivery.go
+++ b/swarm/network/stream/delivery.go
@@ -27,6 +27,7 @@ import (
"github.com/ethereum/go-ethereum/swarm/network"
"github.com/ethereum/go-ethereum/swarm/spancontext"
"github.com/ethereum/go-ethereum/swarm/storage"
+ "github.com/ethereum/go-ethereum/swarm/tracing"
opentracing "github.com/opentracing/opentracing-go"
)
@@ -143,7 +144,7 @@ func (d *Delivery) handleRetrieveRequestMsg(ctx context.Context, sp *Peer, req *
var osp opentracing.Span
ctx, osp = spancontext.StartSpan(
ctx,
- "retrieve.request")
+ "stream.handle.retrieve")
s, err := sp.getServer(NewStream(swarmChunkServerStreamName, "", true))
if err != nil {
@@ -207,17 +208,19 @@ type ChunkDeliveryMsgRetrieval ChunkDeliveryMsg
// defines a chunk delivery for syncing (without accounting)
type ChunkDeliveryMsgSyncing ChunkDeliveryMsg
-// TODO: Fix context SNAFU
+// A chunk delivery msg is the response to a retrieve request msg.
func (d *Delivery) handleChunkDeliveryMsg(ctx context.Context, sp *Peer, req *ChunkDeliveryMsg) error {
- var osp opentracing.Span
- ctx, osp = spancontext.StartSpan(
- ctx,
- "chunk.delivery")
processReceivedChunksCount.Inc(1)
+ // retrieve the span for the originating retrieve request
+ spanId := fmt.Sprintf("stream.send.request.%v.%v", sp.ID(), req.Addr)
+ span := tracing.ShiftSpanByKey(spanId)
+
go func() {
- defer osp.Finish()
+ if span != nil {
+ defer span.(opentracing.Span).Finish()
+ }
req.peer = sp
err := d.chunkStore.Put(ctx, storage.NewChunk(req.Addr, req.SData))
@@ -233,7 +236,9 @@ func (d *Delivery) handleChunkDeliveryMsg(ctx context.Context, sp *Peer, req *Ch
return nil
}
-// RequestFromPeers sends a chunk retrieve request to
+// RequestFromPeers sends a chunk retrieve request to a peer.
+// The most eligible peer that hasn't already been sent to is chosen.
+// TODO: define "eligible"
func (d *Delivery) RequestFromPeers(ctx context.Context, req *network.Request) (*enode.ID, chan struct{}, error) {
requestFromPeersCount.Inc(1)
var sp *Peer
@@ -268,11 +273,15 @@ func (d *Delivery) RequestFromPeers(ctx context.Context, req *network.Request) (
}
}
+ // Setting this value in the context creates a new span that can persist across
+ // the sendpriority queue and the network roundtrip; the span is finished only
+ // when the delivery is handled (or times out).
+ ctx = context.WithValue(ctx, tracing.StoreLabelId, "stream.send.request")
+ ctx = context.WithValue(ctx, tracing.StoreLabelMeta, fmt.Sprintf("%v.%v", sp.ID(), req.Addr))
err := sp.SendPriority(ctx, &RetrieveRequestMsg{
Addr: req.Addr,
SkipCheck: req.SkipCheck,
HopCount: req.HopCount,
- }, Top, "request.from.peers")
+ }, Top)
if err != nil {
return nil, nil, err
}
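
The save key written on the request path and the lookup key rebuilt in handleChunkDeliveryMsg must match byte for byte, or the span is never found and finished. A minimal reproduction of the key construction, with placeholder peer and address strings:

    package main

    import "fmt"

    func main() {
        peerID, addr := "peer1", "c0ffee" // placeholders for sp.ID() and req.Addr

        // Requesting side: StoreLabelId + "." + StoreLabelMeta
        saveKey := "stream.send.request" + "." + fmt.Sprintf("%v.%v", peerID, addr)

        // Delivering side (handleChunkDeliveryMsg):
        lookupKey := fmt.Sprintf("stream.send.request.%v.%v", peerID, addr)

        fmt.Println(saveKey == lookupKey) // true: the span is found and finished
    }
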
diff --git a/swarm/network/stream/messages.go b/swarm/network/stream/messages.go
index de4e8a3bb4..b293724cc7 100644
--- a/swarm/network/stream/messages.go
+++ b/swarm/network/stream/messages.go
@@ -300,7 +300,7 @@ func (p *Peer) handleOfferedHashesMsg(ctx context.Context, req *OfferedHashesMsg
return
}
log.Trace("sending want batch", "peer", p.ID(), "stream", msg.Stream, "from", msg.From, "to", msg.To)
- err := p.SendPriority(ctx, msg, c.priority, "")
+ err := p.SendPriority(ctx, msg, c.priority)
if err != nil {
log.Warn("SendPriority error", "err", err)
}
diff --git a/swarm/network/stream/peer.go b/swarm/network/stream/peer.go
index 68da8f44ad..c59799e08a 100644
--- a/swarm/network/stream/peer.go
+++ b/swarm/network/stream/peer.go
@@ -31,6 +31,7 @@ import (
"github.com/ethereum/go-ethereum/swarm/spancontext"
"github.com/ethereum/go-ethereum/swarm/state"
"github.com/ethereum/go-ethereum/swarm/storage"
+ "github.com/ethereum/go-ethereum/swarm/tracing"
opentracing "github.com/opentracing/opentracing-go"
)
@@ -65,7 +66,6 @@ type Peer struct {
// on creating a new client in offered hashes handler.
clientParams map[Stream]*clientParams
quit chan struct{}
- spans sync.Map
}
type WrappedPriorityMsg struct {
@@ -83,16 +83,10 @@ func NewPeer(peer *protocols.Peer, streamer *Registry) *Peer {
clients: make(map[Stream]*client),
clientParams: make(map[Stream]*clientParams),
quit: make(chan struct{}),
- spans: sync.Map{},
}
ctx, cancel := context.WithCancel(context.Background())
go p.pq.Run(ctx, func(i interface{}) {
wmsg := i.(WrappedPriorityMsg)
- defer p.spans.Delete(wmsg.Context)
- sp, ok := p.spans.Load(wmsg.Context)
- if ok {
- defer sp.(opentracing.Span).Finish()
- }
err := p.Send(wmsg.Context, wmsg.Msg)
if err != nil {
log.Error("Message send error, dropping peer", "peer", p.ID(), "err", err)
@@ -129,6 +123,7 @@ func NewPeer(peer *protocols.Peer, streamer *Registry) *Peer {
go func() {
<-p.quit
+
cancel()
}()
return p
@@ -158,21 +153,15 @@ func (p *Peer) Deliver(ctx context.Context, chunk storage.Chunk, priority uint8,
spanName += ".retrieval"
}
- return p.SendPriority(ctx, msg, priority, spanName)
+ ctx = context.WithValue(ctx, "stream_send_tag", nil)
+ return p.SendPriority(ctx, msg, priority)
}
// SendPriority sends message to the peer using the outgoing priority queue
-func (p *Peer) SendPriority(ctx context.Context, msg interface{}, priority uint8, traceId string) error {
+func (p *Peer) SendPriority(ctx context.Context, msg interface{}, priority uint8) error {
defer metrics.GetOrRegisterResettingTimer(fmt.Sprintf("peer.sendpriority_t.%d", priority), nil).UpdateSince(time.Now())
+ tracing.StartSaveSpan(ctx)
metrics.GetOrRegisterCounter(fmt.Sprintf("peer.sendpriority.%d", priority), nil).Inc(1)
- if traceId != "" {
- var sp opentracing.Span
- ctx, sp = spancontext.StartSpan(
- ctx,
- traceId,
- )
- p.spans.Store(ctx, sp)
- }
wmsg := WrappedPriorityMsg{
Context: ctx,
Msg: msg,
@@ -190,7 +179,8 @@ func (p *Peer) SendOfferedHashes(s *server, f, t uint64) error {
var sp opentracing.Span
ctx, sp := spancontext.StartSpan(
context.TODO(),
- "send.offered.hashes")
+ "send.offered.hashes",
+ )
defer sp.Finish()
hashes, from, to, proof, err := s.setNextBatch(f, t)
@@ -215,7 +205,8 @@ func (p *Peer) SendOfferedHashes(s *server, f, t uint64) error {
Stream: s.stream,
}
log.Trace("Swarm syncer offer batch", "peer", p.ID(), "stream", s.stream, "len", len(hashes), "from", from, "to", to)
- return p.SendPriority(ctx, msg, s.priority, "send.offered.hashes")
+ ctx = context.WithValue(ctx, "stream_send_tag", "send.offered.hashes")
+ return p.SendPriority(ctx, msg, s.priority)
}
func (p *Peer) getServer(s Stream) (*server, error) {
diff --git a/swarm/network/stream/stream.go b/swarm/network/stream/stream.go
index 65bcce8b94..83a0b141f2 100644
--- a/swarm/network/stream/stream.go
+++ b/swarm/network/stream/stream.go
@@ -359,7 +359,7 @@ func (r *Registry) Subscribe(peerId enode.ID, s Stream, h *Range, priority uint8
}
log.Debug("Subscribe ", "peer", peerId, "stream", s, "history", h)
- return peer.SendPriority(context.TODO(), msg, priority, "")
+ return peer.SendPriority(context.TODO(), msg, priority)
}
func (r *Registry) Unsubscribe(peerId enode.ID, s Stream) error {
@@ -730,7 +730,7 @@ func (c *client) batchDone(p *Peer, req *OfferedHashesMsg, hashes []byte) error
return err
}
- if err := p.SendPriority(context.TODO(), tp, c.priority, ""); err != nil {
+ if err := p.SendPriority(context.TODO(), tp, c.priority); err != nil {
return err
}
if c.to > 0 && tp.Takeover.End >= c.to {
diff --git a/swarm/storage/netstore.go b/swarm/storage/netstore.go
index 202af2bf58..8a44f51a88 100644
--- a/swarm/storage/netstore.go
+++ b/swarm/storage/netstore.go
@@ -26,6 +26,9 @@ import (
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/swarm/log"
+ "github.com/ethereum/go-ethereum/swarm/spancontext"
+ "github.com/opentracing/opentracing-go"
+
lru "github.com/hashicorp/golang-lru"
)
@@ -208,7 +211,11 @@ func (n *NetStore) getOrCreateFetcher(ctx context.Context, ref Address) *fetcher
// the peers which requested the chunk should not be requested to deliver it.
peers := &sync.Map{}
- fetcher := newFetcher(ref, n.NewNetFetcherFunc(cctx, ref, peers), destroy, peers, n.closeC)
+ cctx, sp := spancontext.StartSpan(
+ cctx,
+ "netstore.fetcher",
+ )
+ fetcher := newFetcher(sp, ref, n.NewNetFetcherFunc(cctx, ref, peers), destroy, peers, n.closeC)
n.fetchers.Add(key, fetcher)
return fetcher
@@ -233,15 +240,16 @@ func (n *NetStore) RequestsCacheLen() int {
// One fetcher object is responsible to fetch one chunk for one address, and keep track of all the
// peers who have requested it and did not receive it yet.
type fetcher struct {
- addr Address // address of chunk
- chunk Chunk // fetcher can set the chunk on the fetcher
- deliveredC chan struct{} // chan signalling chunk delivery to requests
- cancelledC chan struct{} // chan signalling the fetcher has been cancelled (removed from fetchers in NetStore)
- netFetcher NetFetcher // remote fetch function to be called with a request source taken from the context
- cancel func() // cleanup function for the remote fetcher to call when all upstream contexts are called
- peers *sync.Map // the peers which asked for the chunk
- requestCnt int32 // number of requests on this chunk. If all the requests are done (delivered or context is done) the cancel function is called
- deliverOnce *sync.Once // guarantees that we only close deliveredC once
+ addr Address // address of chunk
+ chunk Chunk // fetcher can set the chunk on the fetcher
+ deliveredC chan struct{} // chan signalling chunk delivery to requests
+ cancelledC chan struct{} // chan signalling the fetcher has been cancelled (removed from fetchers in NetStore)
+ netFetcher NetFetcher // remote fetch function to be called with a request source taken from the context
+ cancel func() // cleanup function for the remote fetcher to call when all upstream contexts are called
+ peers *sync.Map // the peers which asked for the chunk
+ requestCnt int32 // number of requests on this chunk. If all the requests are done (delivered or context is done) the cancel function is called
+ deliverOnce *sync.Once // guarantees that we only close deliveredC once
+ span opentracing.Span // measure retrieve time per chunk
}
// newFetcher creates a new fetcher object for the given addr. fetch is the function which actually
@@ -250,7 +258,7 @@ type fetcher struct {
// 1. when the chunk has been fetched all peers have been either notified or their context has been done
// 2. the chunk has not been fetched but all context from all the requests has been done
// The peers map stores all the peers which have requested chunk.
-func newFetcher(addr Address, nf NetFetcher, cancel func(), peers *sync.Map, closeC chan struct{}) *fetcher {
+func newFetcher(span opentracing.Span, addr Address, nf NetFetcher, cancel func(), peers *sync.Map, closeC chan struct{}) *fetcher {
cancelOnce := &sync.Once{} // cancel should only be called once
return &fetcher{
addr: addr,
@@ -264,6 +272,7 @@ func newFetcher(addr Address, nf NetFetcher, cancel func(), peers *sync.Map, clo
})
},
peers: peers,
+ span: span,
}
}
@@ -276,6 +285,7 @@ func (f *fetcher) Fetch(rctx context.Context) (Chunk, error) {
if atomic.AddInt32(&f.requestCnt, -1) == 0 {
f.cancel()
}
+ f.span.Finish()
}()
// The peer asking for the chunk. Store in the shared peers map, but delete after the request
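
The fetcher's teardown combines an atomic refcount with sync.Once, as the field comments describe: the last Fetch to return triggers cancel, and the Once makes that safe from any path. A condensed standalone sketch of just that pattern (stub names; in NetStore the counter is incremented under the store's mutex, which this sketch omits):

    package main

    import (
        "fmt"
        "sync"
        "sync/atomic"
    )

    type fetcher struct {
        requestCnt int32
        cancel     func()
    }

    func newFetcher(cleanup func()) *fetcher {
        once := &sync.Once{} // cleanup should only run once
        return &fetcher{cancel: func() { once.Do(cleanup) }}
    }

    func (f *fetcher) fetch() {
        atomic.AddInt32(&f.requestCnt, 1)
        defer func() {
            if atomic.AddInt32(&f.requestCnt, -1) == 0 {
                f.cancel() // last outstanding request: tear down
            }
        }()
        // ... wait for delivery or for the request context to be done ...
    }

    func main() {
        f := newFetcher(func() { fmt.Println("cleanup ran exactly once") })
        var wg sync.WaitGroup
        for i := 0; i < 3; i++ {
            wg.Add(1)
            go func() {
                defer wg.Done()
                f.fetch()
            }()
        }
        wg.Wait()
    }
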
diff --git a/swarm/swarm.go b/swarm/swarm.go
index 3ab98b3ab5..651ad97c78 100644
--- a/swarm/swarm.go
+++ b/swarm/swarm.go
@@ -426,6 +426,7 @@ func (s *Swarm) Start(srv *p2p.Server) error {
func (s *Swarm) Stop() error {
if s.tracerClose != nil {
err := s.tracerClose.Close()
+ tracing.FinishSpans()
if err != nil {
return err
}
diff --git a/swarm/tracing/tracing.go b/swarm/tracing/tracing.go
index f95fa41b8f..55875464b9 100644
--- a/swarm/tracing/tracing.go
+++ b/swarm/tracing/tracing.go
@@ -1,21 +1,39 @@
package tracing
import (
+ "context"
"io"
"os"
"strings"
+ "sync"
"time"
"github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/swarm/spancontext"
+
+ opentracing "github.com/opentracing/opentracing-go"
jaeger "github.com/uber/jaeger-client-go"
jaegercfg "github.com/uber/jaeger-client-go/config"
cli "gopkg.in/urfave/cli.v1"
)
-var Enabled bool = false
+var (
+ // Enabled turns tracing on for the current swarm instance
+ Enabled bool = false
+ store = spanStore{}
+)
+
+const (
+ // TracingEnabledFlag is the CLI flag name to use to enable trace collections.
+ TracingEnabledFlag = "tracing"
+
+ // StoreLabelId is the context value key holding the name of the span to be saved
+ StoreLabelId = "span_save_id"
-// TracingEnabledFlag is the CLI flag name to use to enable trace collections.
-const TracingEnabledFlag = "tracing"
+ // StoreLabelMeta is the context value key that, together with StoreLabelId,
+ // constitutes the retrieval key for saved spans in the span store; see
+ // StartSaveSpan and ShiftSpanByKey.
+ StoreLabelMeta = "span_save_meta"
+)
var (
Closer io.Closer
@@ -100,3 +118,56 @@ func initTracer(endpoint, svc string) (closer io.Closer) {
return closer
}
+
+// spanStore holds saved spans
+type spanStore struct {
+ spans sync.Map
+}
+
+// StartSaveSpan stores the span specified in the passed context for later retrieval.
+// The span object must be a context value on the key StoreLabelId.
+// It is stored under the string key context.Value(StoreLabelId) + "." + context.Value(StoreLabelMeta).
+func StartSaveSpan(ctx context.Context) context.Context {
+ if !Enabled {
+ return ctx
+ }
+ traceId := ctx.Value(StoreLabelId)
+
+ if traceId != nil {
+ traceStr := traceId.(string)
+ var sp opentracing.Span
+ ctx, sp = spancontext.StartSpan(
+ ctx,
+ traceStr,
+ )
+ traceMeta := ctx.Value(StoreLabelMeta)
+ if traceMeta != nil {
+ traceStr = traceStr + "." + traceMeta.(string)
+ }
+ store.spans.Store(traceStr, sp)
+ }
+ return ctx
+}
+
+// ShiftSpanByKey retrieves the span stored under the given string key
+// and deletes it from the store.
+func ShiftSpanByKey(k string) opentracing.Span {
+ if !Enabled {
+ return nil
+ }
+ span, spanOk := store.spans.Load(k)
+ if !spanOk {
+ return nil
+ }
+ store.spans.Delete(k)
+ return span.(opentracing.Span)
+}
+
+// FinishSpans calls `Finish()` on all stored spans
+// It should be called on instance shutdown
+func FinishSpans() {
+ store.spans.Range(func(_, v interface{}) bool {
+ v.(opentracing.Span).Finish()
+ return true
+ })
+}
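
How the two helpers are meant to pair up, runnable against this patch. With tracing disabled (the default) StartSaveSpan is a pass-through and ShiftSpanByKey returns nil, which is why handleChunkDeliveryMsg nil-checks the shifted span:

    package main

    import (
        "context"
        "fmt"

        "github.com/ethereum/go-ethereum/swarm/tracing"
    )

    func main() {
        // Sender side: label the context, then StartSaveSpan (called from
        // SendPriority) files the span under "<id>.<meta>".
        ctx := context.WithValue(context.Background(), tracing.StoreLabelId, "stream.send.request")
        ctx = context.WithValue(ctx, tracing.StoreLabelMeta, "peer1.c0ffee")
        ctx = tracing.StartSaveSpan(ctx)

        // Receiver side: rebuild the key and take ownership of the span.
        span := tracing.ShiftSpanByKey("stream.send.request.peer1.c0ffee")
        fmt.Println(span == nil) // true unless tracing.Enabled was set
    }
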
diff --git a/trie/database.go b/trie/database.go
index aba5943f5a..958823eb8d 100644
--- a/trie/database.go
+++ b/trie/database.go
@@ -809,7 +809,7 @@ func (db *Database) verifyIntegrity() {
db.accumulate(child, reachable)
}
// Find any unreachable but cached nodes
- unreachable := []string{}
+ var unreachable []string
for hash, node := range db.dirties {
if _, ok := reachable[hash]; !ok {
unreachable = append(unreachable, fmt.Sprintf("%x: {Node: %v, Parents: %d, Prev: %x, Next: %x}",
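
The same change recurs in trie/proof.go, trie/sync.go, and trie/sync_test.go below. A nil slice declared with var behaves identically to an empty literal for len and append but skips the up-front allocation, which is why it is the preferred Go style when the slice may stay empty. A quick demonstration:

    package main

    import "fmt"

    func main() {
        var a []string  // nil slice: no allocation until first append
        b := []string{} // empty but non-nil slice

        fmt.Println(a == nil, b == nil) // true false
        fmt.Println(len(a), len(b))     // 0 0

        a = append(a, "x") // append treats nil and empty alike
        fmt.Println(a)     // [x]
    }
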
diff --git a/trie/proof.go b/trie/proof.go
index f90ecd7d88..1334bde970 100644
--- a/trie/proof.go
+++ b/trie/proof.go
@@ -37,7 +37,7 @@ import (
func (t *Trie) Prove(key []byte, fromLevel uint, proofDb ethdb.Putter) error {
// Collect all nodes on the path to key.
key = keybytesToHex(key)
- nodes := []node{}
+ var nodes []node
tn := t.root
for len(key) > 0 && tn != nil {
switch n := tn.(type) {
diff --git a/trie/sync.go b/trie/sync.go
index 67dff5a8b6..44f5087b9f 100644
--- a/trie/sync.go
+++ b/trie/sync.go
@@ -157,7 +157,7 @@ func (s *Sync) AddRawEntry(hash common.Hash, depth int, parent common.Hash) {
// Missing retrieves the known missing nodes from the trie for retrieval.
func (s *Sync) Missing(max int) []common.Hash {
- requests := []common.Hash{}
+ var requests []common.Hash
for !s.queue.Empty() && (max == 0 || len(requests) < max) {
requests = append(requests, s.queue.PopItem().(common.Hash))
}
@@ -254,7 +254,7 @@ func (s *Sync) children(req *request, object node) ([]*request, error) {
node node
depth int
}
- children := []child{}
+ var children []child
switch node := (object).(type) {
case *shortNode:
diff --git a/trie/sync_test.go b/trie/sync_test.go
index c76779e5c7..ff15baa52c 100644
--- a/trie/sync_test.go
+++ b/trie/sync_test.go
@@ -313,7 +313,7 @@ func TestIncompleteSync(t *testing.T) {
triedb := NewDatabase(diskdb)
sched := NewSync(srcTrie.Hash(), diskdb, nil)
- added := []common.Hash{}
+ var added []common.Hash
queue := append([]common.Hash{}, sched.Missing(1)...)
for len(queue) > 0 {
// Fetch a batch of trie nodes