From 714eb36de85e0c04f8d493c9d60dbccfa74a8750 Mon Sep 17 00:00:00 2001 From: Keenan Nemetz Date: Thu, 15 Feb 2024 16:55:09 -0800 Subject: [PATCH 1/6] start logging refactor --- datastore/blockstore.go | 6 +- datastore/store.go | 5 +- db/collection.go | 2 +- db/collection_delete.go | 2 +- db/collection_update.go | 2 +- db/db.go | 19 ++++--- db/subscriptions.go | 2 +- go.mod | 7 ++- http/logger.go | 17 +++--- merkle/clock/clock.go | 23 ++++---- merkle/clock/heads.go | 11 ++-- merkle/crdt/composite.go | 4 +- merkle/crdt/merklecrdt.go | 4 +- net/client.go | 17 +++--- net/dag.go | 12 ++-- net/dag_test.go | 2 +- net/net.go | 6 +- net/node.go | 21 ++++--- net/peer.go | 82 ++++++++++++++-------------- net/process.go | 27 +++++---- net/server.go | 55 +++++++++---------- node/node.go | 6 +- tests/bench/bench_util.go | 7 ++- tests/integration/explain.go | 3 +- tests/integration/net/order/utils.go | 34 ++++++------ tests/integration/p2p.go | 3 +- tests/integration/utils2.go | 30 +++++----- 27 files changed, 202 insertions(+), 207 deletions(-) diff --git a/datastore/blockstore.go b/datastore/blockstore.go index 8525f8410e..ba994ca30b 100644 --- a/datastore/blockstore.go +++ b/datastore/blockstore.go @@ -64,7 +64,7 @@ func (bs *bstore) HashOnRead(enabled bool) { // Get returns a block from the blockstore. 
func (bs *bstore) Get(ctx context.Context, k cid.Cid) (blocks.Block, error) { if !k.Defined() { - log.Error(ctx, "Undefined CID in blockstore") + log.ErrorContext(ctx, "Undefined CID in blockstore") return nil, ipld.ErrNotFound{Cid: k} } bdata, err := bs.store.Get(ctx, dshelp.MultihashToDsKey(k.Hash())) @@ -164,13 +164,13 @@ func (bs *bstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { return } if e.Error != nil { - log.ErrorE(ctx, "Blockstore.AllKeysChan errored", e.Error) + log.ErrorContextE(ctx, "Blockstore.AllKeysChan errored", e.Error) return } hash, err := dshelp.DsKeyToMultihash(ds.RawKey(e.Key)) if err != nil { - log.ErrorE(ctx, "Error parsing key from binary", err) + log.ErrorContextE(ctx, "Error parsing key from binary", err) continue } k := cid.NewCidV1(cid.Raw, hash) diff --git a/datastore/store.go b/datastore/store.go index 759eef01db..7f2764a65d 100644 --- a/datastore/store.go +++ b/datastore/store.go @@ -14,12 +14,13 @@ import ( blockstore "github.com/ipfs/boxo/blockstore" ds "github.com/ipfs/go-datastore" + "github.com/sourcenetwork/corelog" + "github.com/sourcenetwork/defradb/datastore/iterable" - "github.com/sourcenetwork/defradb/logging" ) var ( - log = logging.MustNewLogger("store") + log = corelog.NewLogger("store") ) // RootStore wraps Batching and TxnDatastore requiring datastore to support both batching and transactions. 
diff --git a/db/collection.go b/db/collection.go index 3925f99bc7..aaaea03932 100644 --- a/db/collection.go +++ b/db/collection.go @@ -841,7 +841,7 @@ func (c *collection) getAllDocIDsChan( go func() { defer func() { if err := q.Close(); err != nil { - log.ErrorE(ctx, errFailedtoCloseQueryReqAllIDs, err) + log.ErrorContextE(ctx, errFailedtoCloseQueryReqAllIDs, err) } close(resCh) c.discardImplicitTxn(ctx, txn) diff --git a/db/collection_delete.go b/db/collection_delete.go index 6c360d09c0..6639dc5e6e 100644 --- a/db/collection_delete.go +++ b/db/collection_delete.go @@ -180,7 +180,7 @@ func (c *collection) deleteWithFilter( // If the plan isn't properly closed at any exit point log the error. defer func() { if err := selectionPlan.Close(); err != nil { - log.ErrorE(ctx, "Failed to close the request plan, after filter delete", err) + log.ErrorContextE(ctx, "Failed to close the request plan, after filter delete", err) } }() diff --git a/db/collection_update.go b/db/collection_update.go index 58e6e9086f..90beae7aa6 100644 --- a/db/collection_update.go +++ b/db/collection_update.go @@ -240,7 +240,7 @@ func (c *collection) updateWithFilter( // If the plan isn't properly closed at any exit point log the error. 
defer func() { if err := selectionPlan.Close(); err != nil { - log.ErrorE(ctx, "Failed to close the selection plan, after filter update", err) + log.ErrorContextE(ctx, "Failed to close the selection plan, after filter update", err) } }() diff --git a/db/db.go b/db/db.go index 1046b2db54..3e1daf98a1 100644 --- a/db/db.go +++ b/db/db.go @@ -22,6 +22,7 @@ import ( blockstore "github.com/ipfs/boxo/blockstore" ds "github.com/ipfs/go-datastore" dsq "github.com/ipfs/go-datastore/query" + "github.com/sourcenetwork/corelog" "github.com/sourcenetwork/immutable" "github.com/sourcenetwork/defradb/client" @@ -35,7 +36,7 @@ import ( ) var ( - log = logging.MustNewLogger("db") + log = corelog.NewLogger("db") ) // make sure we match our client interface @@ -109,7 +110,7 @@ func NewDB(ctx context.Context, rootstore datastore.RootStore, options ...Option } func newDB(ctx context.Context, rootstore datastore.RootStore, options ...Option) (*implicitTxnDB, error) { - log.Debug(ctx, "Loading: internal datastores") + log.DebugContext(ctx, "Loading: internal datastores") multistore := datastore.MultiStoreFrom(rootstore) parser, err := graphql.NewParser() @@ -197,7 +198,7 @@ func (db *db) initialize(ctx context.Context) error { } defer txn.Discard(ctx) - log.Debug(ctx, "Checking if DB has already been initialized...") + log.DebugContext(ctx, "Checking if DB has already been initialized...") exists, err := txn.Systemstore().Has(ctx, ds.NewKey("init")) if err != nil && !errors.Is(err, ds.ErrNotFound) { return err @@ -205,7 +206,7 @@ func (db *db) initialize(ctx context.Context) error { // if we're loading an existing database, just load the schema // and migrations and finish initialization if exists { - log.Debug(ctx, "DB has already been initialized, continuing") + log.DebugContext(ctx, "DB has already been initialized, continuing") err = db.loadSchema(ctx, txn) if err != nil { return err @@ -222,7 +223,7 @@ func (db *db) initialize(ctx context.Context) error { return txn.Commit(ctx) } - 
log.Debug(ctx, "Opened a new DB, needs full initialization") + log.DebugContext(ctx, "Opened a new DB, needs full initialization") // init meta data // collection sequence @@ -261,16 +262,16 @@ func (db *db) PrintDump(ctx context.Context) error { // Close is called when we are shutting down the database. // This is the place for any last minute cleanup or releasing of resources (i.e.: Badger instance). func (db *db) Close() { - log.Info(context.Background(), "Closing DefraDB process...") + log.Info("Closing DefraDB process...") if db.events.Updates.HasValue() { db.events.Updates.Value().Close() } err := db.rootstore.Close() if err != nil { - log.ErrorE(context.Background(), "Failure closing running process", err) + log.ErrorE("Failure closing running process", err) } - log.Info(context.Background(), "Successfully closed running process") + log.Info("Successfully closed running process") } func printStore(ctx context.Context, store datastore.DSReaderWriter) error { @@ -286,7 +287,7 @@ func printStore(ctx context.Context, store datastore.DSReaderWriter) error { } for r := range results.Next() { - log.Info(ctx, "", logging.NewKV(r.Key, r.Value)) + log.InfoContext(ctx, "", logging.NewKV(r.Key, r.Value)) } return results.Close() diff --git a/db/subscriptions.go b/db/subscriptions.go index 2e7d2d4123..bc013ae587 100644 --- a/db/subscriptions.go +++ b/db/subscriptions.go @@ -55,7 +55,7 @@ func (db *db) handleSubscription( for evt := range pub.Event() { txn, err := db.NewTxn(ctx, false) if err != nil { - log.Error(ctx, err.Error()) + log.ErrorContext(ctx, err.Error()) continue } diff --git a/go.mod b/go.mod index a0523cf430..4e0f17012a 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/sourcenetwork/defradb -go 1.21 +go 1.21.3 require ( github.com/bits-and-blooms/bitset v1.13.0 @@ -32,6 +32,7 @@ require ( github.com/multiformats/go-multibase v0.2.0 github.com/multiformats/go-multihash v0.2.3 github.com/sourcenetwork/badger/v4 
v4.2.1-0.20231113215945-a63444ca5276 + github.com/sourcenetwork/corelog v1.0.0 github.com/sourcenetwork/go-libp2p-pubsub-rpc v0.0.13 github.com/sourcenetwork/graphql-go v0.7.10-0.20231113214537-a9560c1898dd github.com/sourcenetwork/immutable v0.3.0 @@ -46,7 +47,6 @@ require ( go.opentelemetry.io/otel/metric v1.22.0 go.opentelemetry.io/otel/sdk/metric v1.22.0 go.uber.org/zap v1.26.0 - golang.org/x/crypto v0.18.0 golang.org/x/exp v0.0.0-20240103183307-be819d1f06fc golang.org/x/net v0.20.0 google.golang.org/grpc v1.61.0 @@ -182,6 +182,7 @@ require ( go.uber.org/fx v1.20.1 // indirect go.uber.org/mock v0.4.0 // indirect go.uber.org/multierr v1.11.0 // indirect + golang.org/x/crypto v0.18.0 // indirect golang.org/x/mod v0.14.0 // indirect golang.org/x/sync v0.6.0 // indirect golang.org/x/sys v0.16.0 // indirect @@ -193,3 +194,5 @@ require ( gopkg.in/yaml.v3 v3.0.1 // indirect lukechampine.com/blake3 v1.2.1 // indirect ) + +replace github.com/sourcenetwork/corelog => ../corelog diff --git a/http/logger.go b/http/logger.go index d23f65e94a..21dba68e0f 100644 --- a/http/logger.go +++ b/http/logger.go @@ -15,11 +15,10 @@ import ( "time" "github.com/go-chi/chi/v5/middleware" - - "github.com/sourcenetwork/defradb/logging" + "github.com/sourcenetwork/corelog" ) -var log = logging.MustNewLogger("http") +var log = corelog.NewLogger("http") type logEntry struct { req *http.Request @@ -28,14 +27,14 @@ type logEntry struct { var _ middleware.LogEntry = (*logEntry)(nil) func (e *logEntry) Write(status, bytes int, header http.Header, elapsed time.Duration, extra any) { - log.Info( + log.InfoContext( e.req.Context(), "Request", - logging.NewKV("Method", e.req.Method), - logging.NewKV("Path", e.req.URL.Path), - logging.NewKV("Status", status), - logging.NewKV("LengthBytes", bytes), - logging.NewKV("ElapsedTime", elapsed.String()), + "Method", e.req.Method, + "Path", e.req.URL.Path, + "Status", status, + "LengthBytes", bytes, + "ElapsedTime", elapsed.String(), ) } diff --git 
a/merkle/clock/clock.go b/merkle/clock/clock.go index 2bdc9fda93..a6b310f7ea 100644 --- a/merkle/clock/clock.go +++ b/merkle/clock/clock.go @@ -19,13 +19,14 @@ import ( cid "github.com/ipfs/go-cid" ipld "github.com/ipfs/go-ipld-format" + "github.com/sourcenetwork/corelog" + "github.com/sourcenetwork/defradb/core" "github.com/sourcenetwork/defradb/datastore" - "github.com/sourcenetwork/defradb/logging" ) var ( - log = logging.MustNewLogger("merkleclock") + log = corelog.NewLogger("merkleclock") ) // MerkleClock is a MerkleCRDT clock that can be used to read/write events (deltas) to the clock. @@ -121,7 +122,7 @@ func (mc *MerkleClock) ProcessNode( nodeCid := node.Cid() priority := delta.GetPriority() - log.Debug(ctx, "Running ProcessNode", logging.NewKV("CID", nodeCid)) + log.DebugContext(ctx, "Running ProcessNode", "CID", nodeCid) err := mc.crdt.Merge(ctx, delta) if err != nil { return NewErrMergingDelta(nodeCid, err) @@ -130,16 +131,16 @@ func (mc *MerkleClock) ProcessNode( links := node.Links() // check if we have any HEAD links hasHeads := false - log.Debug(ctx, "Stepping through node links") + log.DebugContext(ctx, "Stepping through node links") for _, l := range links { - log.Debug(ctx, "Checking link", logging.NewKV("Name", l.Name), logging.NewKV("CID", l.Cid)) + log.DebugContext(ctx, "Checking link", "Name", l.Name, "CID", l.Cid) if l.Name == "_head" { hasHeads = true break } } if !hasHeads { // reached the bottom, at a leaf - log.Debug(ctx, "No heads found") + log.DebugContext(ctx, "No heads found") err := mc.headset.Write(ctx, nodeCid, priority) if err != nil { return NewErrAddingHead(nodeCid, err) @@ -148,14 +149,14 @@ func (mc *MerkleClock) ProcessNode( for _, l := range links { linkCid := l.Cid - log.Debug(ctx, "Scanning for replacement heads", logging.NewKV("Child", linkCid)) + log.DebugContext(ctx, "Scanning for replacement heads", "Child", linkCid) isHead, err := mc.headset.IsHead(ctx, linkCid) if err != nil { return NewErrCheckingHead(linkCid, err) 
} if isHead { - log.Debug(ctx, "Found head, replacing!") + log.DebugContext(ctx, "Found head, replacing!") // reached one of the current heads, replace it with the tip // of current branch err = mc.headset.Replace(ctx, linkCid, nodeCid, priority) @@ -173,14 +174,14 @@ func (mc *MerkleClock) ProcessNode( if known { // we reached a non-head node in the known tree. // This means our root block is a new head - log.Debug(ctx, "Adding head") + log.DebugContext(ctx, "Adding head") err := mc.headset.Write(ctx, nodeCid, priority) if err != nil { - log.ErrorE( + log.ErrorContextE( ctx, "Failure adding head (when root is a new head)", err, - logging.NewKV("Root", nodeCid), + "Root", nodeCid, ) // OR should this also return like below comment?? // return nil, errors.Wrap("error adding head (when root is new head): %s ", root, err) diff --git a/merkle/clock/heads.go b/merkle/clock/heads.go index cafc7cb6fa..5f43b90172 100644 --- a/merkle/clock/heads.go +++ b/merkle/clock/heads.go @@ -21,7 +21,6 @@ import ( "github.com/sourcenetwork/defradb/core" "github.com/sourcenetwork/defradb/datastore" - "github.com/sourcenetwork/defradb/logging" ) // heads manages the current Merkle-CRDT heads. @@ -55,12 +54,12 @@ func (hh *heads) IsHead(ctx context.Context, c cid.Cid) (bool, error) { // Replace replaces a head with a new CID. 
func (hh *heads) Replace(ctx context.Context, old cid.Cid, new cid.Cid, height uint64) error { - log.Info( + log.InfoContext( ctx, "Replacing DAG head", - logging.NewKV("Old", old), - logging.NewKV("CID", new), - logging.NewKV("Height", height)) + "Old", old, + "CID", new, + "Height", height) err := hh.store.Delete(ctx, hh.key(old).ToDS()) if err != nil { @@ -91,7 +90,7 @@ func (hh *heads) List(ctx context.Context) ([]cid.Cid, uint64, error) { defer func() { err := results.Close() if err != nil { - log.ErrorE(ctx, "Error closing results", err) + log.ErrorContextE(ctx, "Error closing results", err) } }() diff --git a/merkle/crdt/composite.go b/merkle/crdt/composite.go index ee43348bdc..a922a81e02 100644 --- a/merkle/crdt/composite.go +++ b/merkle/crdt/composite.go @@ -59,7 +59,7 @@ func (m *MerkleCompositeDAG) Delete( ) (ipld.Node, uint64, error) { // Set() call on underlying CompositeDAG CRDT // persist/publish delta - log.Debug(ctx, "Applying delta-mutator 'Delete' on CompositeDAG") + log.DebugContext(ctx, "Applying delta-mutator 'Delete' on CompositeDAG") delta := m.reg.Set(links) delta.Status = client.Deleted nd, err := m.clock.AddDAGNode(ctx, delta) @@ -78,7 +78,7 @@ func (m *MerkleCompositeDAG) Save(ctx context.Context, data any) (ipld.Node, uin } // Set() call on underlying CompositeDAG CRDT // persist/publish delta - log.Debug(ctx, "Applying delta-mutator 'Set' on CompositeDAG") + log.DebugContext(ctx, "Applying delta-mutator 'Set' on CompositeDAG") delta := m.reg.Set(value) nd, err := m.clock.AddDAGNode(ctx, delta) if err != nil { diff --git a/merkle/crdt/merklecrdt.go b/merkle/crdt/merklecrdt.go index b52fb7cf6d..8c52d3688e 100644 --- a/merkle/crdt/merklecrdt.go +++ b/merkle/crdt/merklecrdt.go @@ -17,15 +17,15 @@ import ( "context" ipld "github.com/ipfs/go-ipld-format" + "github.com/sourcenetwork/corelog" "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/core" "github.com/sourcenetwork/defradb/datastore" - 
"github.com/sourcenetwork/defradb/logging" ) var ( - log = logging.MustNewLogger("merklecrdt") + log = corelog.NewLogger("merklecrdt") ) type Stores interface { diff --git a/net/client.go b/net/client.go index 20c33e33fd..5757ddc422 100644 --- a/net/client.go +++ b/net/client.go @@ -20,7 +20,6 @@ import ( "github.com/sourcenetwork/defradb/errors" "github.com/sourcenetwork/defradb/events" - "github.com/sourcenetwork/defradb/logging" pb "github.com/sourcenetwork/defradb/net/pb" ) @@ -33,12 +32,12 @@ var ( // pushLog creates a pushLog request and sends it to another node // over libp2p grpc connection func (s *server) pushLog(ctx context.Context, evt events.Update, pid peer.ID) error { - log.Debug( + log.DebugContext( ctx, "Preparing pushLog request", - logging.NewKV("DocID", evt.DocID), - logging.NewKV("CID", evt.Cid), - logging.NewKV("SchemaRoot", evt.SchemaRoot)) + "DocID", evt.DocID, + "CID", evt.Cid, + "SchemaRoot", evt.SchemaRoot) body := &pb.PushLogRequest_Body{ DocID: []byte(evt.DocID), @@ -53,11 +52,11 @@ func (s *server) pushLog(ctx context.Context, evt events.Update, pid peer.ID) er Body: body, } - log.Debug( + log.DebugContext( ctx, "Pushing log", - logging.NewKV("DocID", evt.DocID), - logging.NewKV("CID", evt.Cid), - logging.NewKV("PeerID", pid), + "DocID", evt.DocID, + "CID", evt.Cid, + "PeerID", pid, ) client, err := s.dial(pid) // grpc dial over P2P stream diff --git a/net/dag.go b/net/dag.go index f083904915..b45c037feb 100644 --- a/net/dag.go +++ b/net/dag.go @@ -19,8 +19,6 @@ import ( "github.com/ipfs/go-cid" ipld "github.com/ipfs/go-ipld-format" - - "github.com/sourcenetwork/defradb/logging" ) var ( @@ -100,11 +98,11 @@ func (p *Peer) sendJobWorker() { // initialization in New(). 
func (p *Peer) dagWorker(jobs chan *dagJob) { for job := range jobs { - log.Debug( + log.DebugContext( p.ctx, "Starting new job from DAG queue", - logging.NewKV("Datastore Key", job.bp.dsKey), - logging.NewKV("CID", job.cid), + "Datastore Key", job.bp.dsKey, + "CID", job.cid, ) select { @@ -119,7 +117,7 @@ func (p *Peer) dagWorker(jobs chan *dagJob) { if j.bp.getter != nil && j.cid.Defined() { cNode, err := j.bp.getter.Get(p.ctx, j.cid) if err != nil { - log.ErrorE(p.ctx, "Failed to get node", err, logging.NewKV("CID", j.cid)) + log.ErrorContextE(p.ctx, "Failed to get node", err, "CID", j.cid) j.session.Done() return } @@ -130,7 +128,7 @@ func (p *Peer) dagWorker(jobs chan *dagJob) { j.isComposite, ) if err != nil { - log.ErrorE(p.ctx, "Failed to process remote block", err, logging.NewKV("CID", j.cid)) + log.ErrorContextE(p.ctx, "Failed to process remote block", err, "CID", j.cid) } } p.queuedChildren.Remove(j.cid) diff --git a/net/dag_test.go b/net/dag_test.go index 524847bfb8..6cc471f6c7 100644 --- a/net/dag_test.go +++ b/net/dag_test.go @@ -188,7 +188,7 @@ func TestSendJobWorker_WithPeer_NoError(t *testing.T) { var getter ipld.NodeGetter = n2.Peer.newDAGSyncerTxn(txn2) if sessionMaker, ok := getter.(SessionDAGSyncer); ok { - log.Debug(ctx, "Upgrading DAGSyncer with a session") + log.DebugContext(ctx, "Upgrading DAGSyncer with a session") getter = sessionMaker.Session(ctx) } diff --git a/net/net.go b/net/net.go index add509a709..c7dbaf1810 100644 --- a/net/net.go +++ b/net/net.go @@ -12,10 +12,8 @@ package net -import ( - "github.com/sourcenetwork/defradb/logging" -) +import "github.com/sourcenetwork/corelog" var ( - log = logging.MustNewLogger("net") + log = corelog.NewLogger("net") ) diff --git a/net/node.go b/net/node.go index 9245f78772..a0af242ef6 100644 --- a/net/node.go +++ b/net/node.go @@ -46,7 +46,6 @@ import ( "github.com/libp2p/go-libp2p/p2p/net/connmgr" "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/logging" ) var 
evtWaitTimeout = 10 * time.Second @@ -144,11 +143,11 @@ func NewNode( if err != nil { return nil, fin.Cleanup(err) } - log.Info( + log.InfoContext( ctx, "Created LibP2P host", - logging.NewKV("PeerId", h.ID()), - logging.NewKV("Address", options.ListenAddresses), + "PeerId", h.ID(), + "Address", options.ListenAddresses, ) var ps *pubsub.PubSub @@ -214,10 +213,10 @@ func (n *Node) Bootstrap(addrs []peer.AddrInfo) { defer wg.Done() err := n.host.Connect(n.ctx, pinfo) if err != nil { - log.Info(n.ctx, "Cannot connect to peer", logging.NewKV("Error", err)) + log.InfoContext(n.ctx, "Cannot connect to peer", "Error", err) return } - log.Info(n.ctx, "Connected", logging.NewKV("PeerID", pinfo.ID)) + log.InfoContext(n.ctx, "Connected", "PeerID", pinfo.ID) atomic.AddUint64(&connected, 1) }(pinfo) } @@ -225,12 +224,12 @@ func (n *Node) Bootstrap(addrs []peer.AddrInfo) { wg.Wait() if nPeers := len(addrs); int(connected) < nPeers/2 { - log.Info(n.ctx, fmt.Sprintf("Only connected to %d bootstrap peers out of %d", connected, nPeers)) + log.InfoContext(n.ctx, fmt.Sprintf("Only connected to %d bootstrap peers out of %d", connected, nPeers)) } err := n.dht.Bootstrap(n.ctx) if err != nil { - log.ErrorE(n.ctx, "Problem bootstraping using DHT", err) + log.ErrorContextE(n.ctx, "Problem bootstraping using DHT", err) return } } @@ -254,7 +253,7 @@ func (n *Node) PeerInfo() peer.AddrInfo { func (n *Node) subscribeToPeerConnectionEvents() { sub, err := n.host.EventBus().Subscribe(new(event.EvtPeerConnectednessChanged)) if err != nil { - log.Info( + log.InfoContext( n.ctx, fmt.Sprintf("failed to subscribe to peer connectedness changed event: %v", err), ) @@ -276,7 +275,7 @@ func (n *Node) subscribeToPeerConnectionEvents() { func (n *Node) subscribeToPubSubEvents() { sub, err := n.host.EventBus().Subscribe(new(EvtPubSub)) if err != nil { - log.Info( + log.InfoContext( n.ctx, fmt.Sprintf("failed to subscribe to pubsub event: %v", err), ) @@ -298,7 +297,7 @@ func (n *Node) 
subscribeToPubSubEvents() { func (n *Node) subscribeToPushLogEvents() { sub, err := n.host.EventBus().Subscribe(new(EvtReceivedPushLog)) if err != nil { - log.Info( + log.InfoContext( n.ctx, fmt.Sprintf("failed to subscribe to push log event: %v", err), ) diff --git a/net/peer.go b/net/peer.go index acdba2e9c8..4ca09c17eb 100644 --- a/net/peer.go +++ b/net/peer.go @@ -146,7 +146,7 @@ func (p *Peer) Start() error { addr := p.host.Peerstore().PeerInfo(id) err := p.host.Connect(p.ctx, addr) if err != nil { - log.Info( + log.InfoContext( p.ctx, "Failure while reconnecting to a known peer", logging.NewKV("peer", id), @@ -173,7 +173,7 @@ func (p *Peer) Start() error { } p.updateChannel = updateChannel - log.Info(p.ctx, "Starting internal broadcaster for pubsub network") + log.InfoContext(p.ctx, "Starting internal broadcaster for pubsub network") go p.handleBroadcastLoop() } @@ -182,7 +182,7 @@ func (p *Peer) Start() error { pb.RegisterServiceServer(p.p2pRPC, p.server) if err := p.p2pRPC.Serve(p2plistener); err != nil && !errors.Is(err, grpc.ErrServerStopped) { - log.FatalE(p.ctx, "Fatal P2P RPC server error", err) + log.FatalContextE(p.ctx, "Fatal P2P RPC server error", err) } }() @@ -196,13 +196,13 @@ func (p *Peer) Start() error { func (p *Peer) Close() { // close topics if err := p.server.removeAllPubsubTopics(); err != nil { - log.ErrorE(p.ctx, "Error closing pubsub topics", err) + log.ErrorContextE(p.ctx, "Error closing pubsub topics", err) } // stop gRPC server for _, c := range p.server.conns { if err := c.Close(); err != nil { - log.ErrorE(p.ctx, "Failed closing server RPC connections", err) + log.ErrorContextE(p.ctx, "Failed closing server RPC connections", err) } } stopGRPCServer(p.ctx, p.p2pRPC) @@ -211,12 +211,12 @@ func (p *Peer) Close() { // close event emitters if p.server.pubSubEmitter != nil { if err := p.server.pubSubEmitter.Close(); err != nil { - log.Info(p.ctx, "Could not close pubsub event emitter", logging.NewKV("Error", err.Error())) + 
log.InfoContext(p.ctx, "Could not close pubsub event emitter", "Error", err.Error()) } } if p.server.pushLogEmitter != nil { if err := p.server.pushLogEmitter.Close(); err != nil { - log.Info(p.ctx, "Could not close push log event emitter", logging.NewKV("Error", err.Error())) + log.InfoContext(p.ctx, "Could not close push log event emitter", "Error", err.Error()) } } @@ -225,11 +225,11 @@ func (p *Peer) Close() { } if err := p.bserv.Close(); err != nil { - log.ErrorE(p.ctx, "Error closing block service", err) + log.ErrorContextE(p.ctx, "Error closing block service", err) } if err := p.host.Close(); err != nil { - log.ErrorE(p.ctx, "Error closing host", err) + log.ErrorContextE(p.ctx, "Error closing host", err) } p.cancel() @@ -238,9 +238,9 @@ func (p *Peer) Close() { // handleBroadcast loop manages the transition of messages // from the internal broadcaster to the external pubsub network func (p *Peer) handleBroadcastLoop() { - log.Debug(p.ctx, "Waiting for messages on internal broadcaster") + log.DebugContext(p.ctx, "Waiting for messages on internal broadcaster") for { - log.Debug(p.ctx, "Handling internal broadcast bus message") + log.DebugContext(p.ctx, "Handling internal broadcast bus message") update, isOpen := <-p.updateChannel if !isOpen { return @@ -254,11 +254,11 @@ func (p *Peer) handleBroadcastLoop() { } else if update.Priority > 1 { err = p.handleDocUpdateLog(update) } else { - log.Info(p.ctx, "Skipping log with invalid priority of 0", logging.NewKV("CID", update.Cid)) + log.InfoContext(p.ctx, "Skipping log with invalid priority of 0", "CID", update.Cid) } if err != nil { - log.ErrorE(p.ctx, "Error while handling broadcast log", err) + log.ErrorContextE(p.ctx, "Error while handling broadcast log", err) } } } @@ -271,19 +271,19 @@ func (p *Peer) RegisterNewDocument( nd ipld.Node, schemaRoot string, ) error { - log.Debug( + log.DebugContext( p.ctx, "Registering a new document for our peer node", - logging.NewKV("DocID", docID.String()), + "DocID", 
docID.String(), ) // register topic if err := p.server.addPubSubTopic(docID.String(), !p.server.hasPubSubTopic(schemaRoot)); err != nil { - log.ErrorE( + log.ErrorContextE( p.ctx, "Failed to create new pubsub topic", err, - logging.NewKV("DocID", docID.String()), + "DocID", docID.String(), ) return err } @@ -314,7 +314,7 @@ func (p *Peer) pushToReplicator( ) { for docIDResult := range docIDsCh { if docIDResult.Err != nil { - log.ErrorE(ctx, "Key channel error", docIDResult.Err) + log.ErrorContextE(ctx, "Key channel error", docIDResult.Err) continue } docID := core.DataStoreKeyFromDocID(docIDResult.ID) @@ -324,30 +324,30 @@ func (p *Peer) pushToReplicator( ) cids, priority, err := headset.List(ctx) if err != nil { - log.ErrorE( + log.ErrorContextE( ctx, "Failed to get heads", err, - logging.NewKV("DocID", docIDResult.ID.String()), - logging.NewKV("PeerID", pid), - logging.NewKV("Collection", collection.Name())) + "DocID", docIDResult.ID.String(), + "PeerID", pid, + "Collection", collection.Name()) continue } // loop over heads, get block, make the required logs, and send for _, c := range cids { blk, err := txn.DAGstore().Get(ctx, c) if err != nil { - log.ErrorE(ctx, "Failed to get block", err, - logging.NewKV("CID", c), - logging.NewKV("PeerID", pid), - logging.NewKV("Collection", collection.Name())) + log.ErrorContextE(ctx, "Failed to get block", err, + "CID", c, + "PeerID", pid, + "Collection", collection.Name()) continue } // @todo: remove encode/decode loop for core.Log data nd, err := dag.DecodeProtobuf(blk.RawData()) if err != nil { - log.ErrorE(ctx, "Failed to decode protobuf", err, logging.NewKV("CID", c)) + log.ErrorContextE(ctx, "Failed to decode protobuf", err, "CID", c) continue } @@ -359,12 +359,12 @@ func (p *Peer) pushToReplicator( Priority: priority, } if err := p.server.pushLog(ctx, evt, pid); err != nil { - log.ErrorE( + log.ErrorContextE( ctx, "Failed to replicate log", err, - logging.NewKV("CID", c), - logging.NewKV("PeerID", pid), + "CID", c, + 
"PeerID", pid, ) } } @@ -396,7 +396,7 @@ func (p *Peer) loadReplicators(ctx context.Context) error { // This will be used during connection and stream creation by libp2p. p.host.Peerstore().AddAddrs(rep.Info.ID, rep.Info.Addrs, peerstore.PermanentAddrTTL) - log.Info(ctx, "loaded replicators from datastore", logging.NewKV("Replicator", rep)) + log.InfoContext(ctx, "loaded replicators from datastore", "Replicator", rep) } return nil @@ -442,12 +442,12 @@ func (p *Peer) handleDocUpdateLog(evt events.Update) error { if err != nil { return NewErrFailedToGetDocID(err) } - log.Debug( + log.DebugContext( p.ctx, "Preparing pubsub pushLog request from broadcast", - logging.NewKV("DocID", docID), - logging.NewKV("CID", evt.Cid), - logging.NewKV("SchemaRoot", evt.SchemaRoot)) + "DocID", docID, + "CID", evt.Cid, + "SchemaRoot", evt.SchemaRoot) body := &pb.PushLogRequest_Body{ DocID: []byte(docID.String()), @@ -499,13 +499,13 @@ func (p *Peer) pushLogToReplicators(ctx context.Context, lg events.Update) { } go func(peerID peer.ID) { if err := p.server.pushLog(p.ctx, lg, peerID); err != nil { - log.ErrorE( + log.ErrorContextE( p.ctx, "Failed pushing log", err, - logging.NewKV("DocID", lg.DocID), - logging.NewKV("CID", lg.Cid), - logging.NewKV("PeerID", peerID)) + "DocID", lg.DocID, + "CID", lg.Cid, + "PeerID", peerID) } }(pid) } @@ -531,7 +531,7 @@ func (p *Peer) newDAGSyncerTxn(txn datastore.Txn) ipld.DAGService { func (p *Peer) Session(ctx context.Context) ipld.NodeGetter { ng := dag.NewSession(ctx, p.DAGService) if ng == p.DAGService { - log.Info(ctx, "DAGService does not support sessions") + log.InfoContext(ctx, "DAGService does not support sessions") } return ng } @@ -546,7 +546,7 @@ func stopGRPCServer(ctx context.Context, server *grpc.Server) { select { case <-timer.C: server.Stop() - log.Info(ctx, "Peer gRPC server was shutdown ungracefully") + log.InfoContext(ctx, "Peer gRPC server was shutdown ungracefully") case <-stopped: timer.Stop() } diff --git a/net/process.go 
b/net/process.go index 4288f8ac14..bb68ab98cd 100644 --- a/net/process.go +++ b/net/process.go @@ -28,7 +28,6 @@ import ( "github.com/sourcenetwork/defradb/datastore" "github.com/sourcenetwork/defradb/db/base" "github.com/sourcenetwork/defradb/errors" - "github.com/sourcenetwork/defradb/logging" merklecrdt "github.com/sourcenetwork/defradb/merkle/crdt" ) @@ -65,12 +64,12 @@ func (bp *blockProcessor) mergeBlocks(ctx context.Context) { nd := e.Value.(ipld.Node) err := bp.processBlock(ctx, nd, "") if err != nil { - log.ErrorE( + log.ErrorContextE( ctx, "Failed to process block", err, - logging.NewKV("DocID", bp.dsKey.DocID), - logging.NewKV("CID", nd.Cid()), + "DocID", bp.dsKey.DocID, + "CID", nd.Cid(), ) } } @@ -107,12 +106,12 @@ func (bp *blockProcessor) processBlock(ctx context.Context, nd ipld.Node, field } if err := bp.processBlock(ctx, nd, link.Name); err != nil { - log.ErrorE( + log.ErrorContextE( ctx, "Failed to process block", err, - logging.NewKV("DocID", bp.dsKey.DocID), - logging.NewKV("CID", nd.Cid()), + "DocID", bp.dsKey.DocID, + "CID", nd.Cid(), ) } } @@ -140,7 +139,7 @@ func initCRDTForType( core.COMPOSITE_NAMESPACE, ) - log.Debug(ctx, "Got CRDT Type", logging.NewKV("CType", ctype), logging.NewKV("Field", field)) + log.DebugContext(ctx, "Got CRDT Type", "CType", ctype, "Field", field) return merklecrdt.NewMerkleCompositeDAG( txn, core.NewCollectionSchemaVersionKey(col.Schema().VersionID, col.ID()), @@ -157,7 +156,7 @@ func initCRDTForType( fieldID := fd.ID.String() key = base.MakeDataStoreKeyWithCollectionDescription(description).WithInstanceInfo(dsKey).WithFieldId(fieldID) - log.Debug(ctx, "Got CRDT Type", logging.NewKV("CType", ctype), logging.NewKV("Field", field)) + log.DebugContext(ctx, "Got CRDT Type", "CType", ctype, "Field", field) return merklecrdt.InstanceWithStore( txn, core.NewCollectionSchemaVersionKey(col.Schema().VersionID, col.ID()), @@ -183,7 +182,7 @@ func (bp *blockProcessor) processRemoteBlock( nd ipld.Node, isComposite bool, ) 
error { - log.Debug(ctx, "Running processLog") + log.DebugContext(ctx, "Running processLog") if err := bp.txn.DAGstore().Put(ctx, nd); err != nil { return err @@ -218,15 +217,15 @@ func (bp *blockProcessor) handleChildBlocks( exist, err := bp.txn.DAGstore().Has(ctx, link.Cid) if err != nil { - log.Error( + log.ErrorContext( ctx, "Failed to check for existing block", - logging.NewKV("CID", link.Cid), - logging.NewKV("ERROR", err), + "CID", link.Cid, + "ERROR", err, ) } if exist { - log.Debug(ctx, "Already have block locally, skipping.", logging.NewKV("CID", link.Cid)) + log.DebugContext(ctx, "Already have block locally, skipping.", "CID", link.Cid) continue } diff --git a/net/server.go b/net/server.go index 78781100b6..b44843c299 100644 --- a/net/server.go +++ b/net/server.go @@ -31,7 +31,6 @@ import ( "github.com/sourcenetwork/defradb/core" "github.com/sourcenetwork/defradb/datastore/badger/v4" "github.com/sourcenetwork/defradb/errors" - "github.com/sourcenetwork/defradb/logging" pb "github.com/sourcenetwork/defradb/net/pb" ) @@ -95,7 +94,7 @@ func newServer(p *Peer, db client.DB, opts ...grpc.DialOption) (*server, error) } // Get all DocIDs across all collections in the DB - log.Debug(p.ctx, "Getting all existing DocIDs...") + log.DebugContext(p.ctx, "Getting all existing DocIDs...") cols, err := s.db.GetAllCollections(s.peer.ctx, false) if err != nil { return nil, err @@ -113,10 +112,10 @@ func newServer(p *Peer, db client.DB, opts ...grpc.DialOption) (*server, error) } for docID := range docIDChan { - log.Debug( + log.DebugContext( p.ctx, "Registering existing DocID pubsub topic", - logging.NewKV("DocID", docID.ID.String()), + "DocID", docID.ID.String(), ) if err := s.addPubSubTopic(docID.ID.String(), true); err != nil { return nil, err @@ -124,17 +123,17 @@ func newServer(p *Peer, db client.DB, opts ...grpc.DialOption) (*server, error) i++ } } - log.Debug(p.ctx, "Finished registering all DocID pubsub topics", logging.NewKV("Count", i)) + log.DebugContext(p.ctx, 
"Finished registering all DocID pubsub topics", "Count", i) } var err error s.pubSubEmitter, err = s.peer.host.EventBus().Emitter(new(EvtPubSub)) if err != nil { - log.Info(s.peer.ctx, "could not create event emitter", logging.NewKV("Error", err.Error())) + log.InfoContext(s.peer.ctx, "could not create event emitter", "Error", err.Error()) } s.pushLogEmitter, err = s.peer.host.EventBus().Emitter(new(EvtReceivedPushLog)) if err != nil { - log.Info(s.peer.ctx, "could not create event emitter", logging.NewKV("Error", err.Error())) + log.InfoContext(s.peer.ctx, "could not create event emitter", "Error", err.Error()) } return s, nil @@ -199,7 +198,7 @@ func (s *server) PushLog(ctx context.Context, req *pb.PushLogRequest) (*pb.PushL if err != nil { return nil, err } - log.Debug(ctx, "Received a PushLog request", logging.NewKV("PeerID", pid)) + log.DebugContext(ctx, "Received a PushLog request", "PeerID", pid) cid, err := cid.Cast(req.Body.Cid) if err != nil { @@ -216,7 +215,7 @@ func (s *server) PushLog(ctx context.Context, req *pb.PushLogRequest) (*pb.PushL if s.pushLogEmitter != nil { byPeer, err := libpeer.Decode(req.Body.Creator) if err != nil { - log.Info(ctx, "could not decode the PeerID of the log creator", logging.NewKV("Error", err.Error())) + log.InfoContext(ctx, "could not decode the PeerID of the log creator", "Error", err.Error()) } err = s.pushLogEmitter.Emit(EvtReceivedPushLog{ FromPeer: pid, @@ -225,7 +224,7 @@ func (s *server) PushLog(ctx context.Context, req *pb.PushLogRequest) (*pb.PushL if err != nil { // logging instead of returning an error because the event bus should // not break the PushLog execution. 
- log.Info(ctx, "could not emit push log event", logging.NewKV("Error", err.Error())) + log.InfoContext(ctx, "could not emit push log event", "Error", err.Error()) } } }() @@ -242,7 +241,7 @@ func (s *server) PushLog(ctx context.Context, req *pb.PushLogRequest) (*pb.PushL return nil, errors.Wrap(fmt.Sprintf("failed to check for existing block %s", cid), err) } if exists { - log.Debug(ctx, fmt.Sprintf("Already have block %s locally, skipping.", cid)) + log.DebugContext(ctx, fmt.Sprintf("Already have block %s locally, skipping.", cid)) return &pb.PushLogReply{}, nil } @@ -280,7 +279,7 @@ func (s *server) PushLog(ctx context.Context, req *pb.PushLogRequest) (*pb.PushL // Create a new DAG service with the current transaction var getter format.NodeGetter = s.peer.newDAGSyncerTxn(txn) if sessionMaker, ok := getter.(SessionDAGSyncer); ok { - log.Debug(ctx, "Upgrading DAGSyncer with a session") + log.DebugContext(ctx, "Upgrading DAGSyncer with a session") getter = sessionMaker.Session(ctx) } @@ -294,12 +293,12 @@ func (s *server) PushLog(ctx context.Context, req *pb.PushLogRequest) (*pb.PushL bp := newBlockProcessor(s.peer, txn, col, dsKey, getter) err = bp.processRemoteBlock(ctx, &session, nd, true) if err != nil { - log.ErrorE( + log.ErrorContextE( ctx, "Failed to process remote block", err, - logging.NewKV("DocID", dsKey.DocID), - logging.NewKV("CID", cid), + "DocID", dsKey.DocID, + "CID", cid, ) } session.Wait() @@ -443,26 +442,26 @@ func (s *server) publishLog(ctx context.Context, topic string, req *pb.PushLogRe return err } - log.Debug( + log.DebugContext( ctx, "Published log", - logging.NewKV("CID", cid), - logging.NewKV("DocID", topic), + "CID", cid, + "DocID", topic, ) return nil } // pubSubMessageHandler handles incoming PushLog messages from the pubsub network. 
func (s *server) pubSubMessageHandler(from libpeer.ID, topic string, msg []byte) ([]byte, error) { - log.Debug( + log.DebugContext( s.peer.ctx, "Handling new pubsub message", - logging.NewKV("SenderID", from), - logging.NewKV("Topic", topic), + "SenderID", from, + "Topic", topic, ) req := new(pb.PushLogRequest) if err := proto.Unmarshal(msg, req); err != nil { - log.ErrorE(s.peer.ctx, "Failed to unmarshal pubsub message %s", err) + log.ErrorContextE(s.peer.ctx, "Failed to unmarshal pubsub message %s", err) return nil, err } @@ -470,7 +469,7 @@ func (s *server) pubSubMessageHandler(from libpeer.ID, topic string, msg []byte) Addr: addr{from}, }) if _, err := s.PushLog(ctx, req); err != nil { - log.ErrorE(ctx, "Failed pushing log for doc", err, logging.NewKV("Topic", topic)) + log.ErrorContextE(ctx, "Failed pushing log for doc", err, "Topic", topic) return nil, errors.Wrap(fmt.Sprintf("Failed pushing log for doc %s", topic), err) } return nil, nil @@ -478,12 +477,12 @@ func (s *server) pubSubMessageHandler(from libpeer.ID, topic string, msg []byte) // pubSubEventHandler logs events from the subscribed DocID topics. 
func (s *server) pubSubEventHandler(from libpeer.ID, topic string, msg []byte) { - log.Info( + log.InfoContext( s.peer.ctx, "Received new pubsub event", - logging.NewKV("SenderId", from), - logging.NewKV("Topic", topic), - logging.NewKV("Message", string(msg)), + "SenderId", from, + "Topic", topic, + "Message", string(msg), ) if s.pubSubEmitter != nil { @@ -491,7 +490,7 @@ func (s *server) pubSubEventHandler(from libpeer.ID, topic string, msg []byte) { Peer: from, }) if err != nil { - log.Info(s.peer.ctx, "could not emit pubsub event", logging.NewKV("Error", err.Error())) + log.InfoContext(s.peer.ctx, "could not emit pubsub event", "Error", err.Error()) } } } diff --git a/node/node.go b/node/node.go index f13fd0b1c2..b13e85059c 100644 --- a/node/node.go +++ b/node/node.go @@ -14,15 +14,15 @@ import ( "context" "github.com/libp2p/go-libp2p/core/peer" + "github.com/sourcenetwork/corelog" "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/db" "github.com/sourcenetwork/defradb/http" - "github.com/sourcenetwork/defradb/logging" "github.com/sourcenetwork/defradb/net" ) -var log = logging.MustNewLogger("node") +var log = corelog.NewLogger("node") // Options contains start configuration values. 
type Options struct { @@ -161,7 +161,7 @@ func (n *Node) Start(ctx context.Context) error { if n.Server != nil { go func() { if err := n.Server.ListenAndServe(); err != nil { - log.FeedbackErrorE(ctx, "HTTP server stopped", err) + log.ErrorContextE(ctx, "HTTP server stopped", err) } }() } diff --git a/tests/bench/bench_util.go b/tests/bench/bench_util.go index d7c00bd664..d09aa74b68 100644 --- a/tests/bench/bench_util.go +++ b/tests/bench/bench_util.go @@ -20,6 +20,7 @@ import ( ds "github.com/ipfs/go-datastore" "github.com/sourcenetwork/badger/v4" + "github.com/sourcenetwork/corelog" "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/errors" @@ -35,7 +36,7 @@ const ( var ( storage string = "memory" - log = logging.MustNewLogger("tests.bench") + log = corelog.NewLogger("tests.bench") ) func init() { @@ -174,10 +175,10 @@ func BackfillBenchmarkDB( for { if err := cols[j].Create(ctx, doc); err != nil && err.Error() == badger.ErrConflict.Error() { - log.Info( + log.InfoContext( ctx, "Failed to commit TX for doc %s, retrying...\n", - logging.NewKV("DocID", doc.ID()), + "DocID", doc.ID(), ) continue } else if err != nil { diff --git a/tests/integration/explain.go b/tests/integration/explain.go index a8de0e6441..8ad438fc4d 100644 --- a/tests/integration/explain.go +++ b/tests/integration/explain.go @@ -20,7 +20,6 @@ import ( "github.com/stretchr/testify/require" "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/logging" ) var ( @@ -150,7 +149,7 @@ func assertExplainRequestResults( // Note: if returned gql result is `nil` this panics (the panic seems useful while testing). 
resultantData := actualResult.Data.([]map[string]any) - log.Info(s.ctx, "", logging.NewKV("FullExplainGraphResult", actualResult.Data)) + log.InfoContext(s.ctx, "", "FullExplainGraphResult", actualResult.Data) // Check if the expected full explain graph (if provided) matches the actual full explain graph // that is returned, if doesn't match we would like to still see a diff comparison (handy while debugging). diff --git a/tests/integration/net/order/utils.go b/tests/integration/net/order/utils.go index 178272b155..cac2dc5d1c 100644 --- a/tests/integration/net/order/utils.go +++ b/tests/integration/net/order/utils.go @@ -16,6 +16,7 @@ import ( "strings" "testing" + "github.com/sourcenetwork/corelog" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -23,14 +24,13 @@ import ( "github.com/sourcenetwork/defradb/config" coreDB "github.com/sourcenetwork/defradb/db" "github.com/sourcenetwork/defradb/errors" - "github.com/sourcenetwork/defradb/logging" "github.com/sourcenetwork/defradb/net" netutils "github.com/sourcenetwork/defradb/net/utils" testutils "github.com/sourcenetwork/defradb/tests/integration" ) var ( - log = logging.MustNewLogger("test.net") + log = corelog.NewLogger("test.net") ) const ( @@ -72,7 +72,7 @@ type P2PTestCase struct { func setupDefraNode(t *testing.T, cfg *config.Config, seeds []string) (*net.Node, []client.DocID, error) { ctx := context.Background() - log.Info(ctx, "Building new memory store") + log.InfoContext(ctx, "Building new memory store") db, err := testutils.NewBadgerMemoryDB(ctx, coreDB.WithUpdateEvents()) if err != nil { return nil, nil, err @@ -92,7 +92,7 @@ func setupDefraNode(t *testing.T, cfg *config.Config, seeds []string) (*net.Node // init the P2P node var n *net.Node - log.Info(ctx, "Starting P2P node", logging.NewKV("P2P addresses", cfg.Net.P2PAddresses)) + log.InfoContext(ctx, "Starting P2P node", "P2P addresses", cfg.Net.P2PAddresses) n, err = net.NewNode( ctx, db, @@ -106,12 +106,12 @@ func 
setupDefraNode(t *testing.T, cfg *config.Config, seeds []string) (*net.Node // parse peers and bootstrap if len(cfg.Net.Peers) != 0 { - log.Info(ctx, "Parsing bootstrap peers", logging.NewKV("Peers", cfg.Net.Peers)) + log.InfoContext(ctx, "Parsing bootstrap peers", "Peers", cfg.Net.Peers) addrs, err := netutils.ParsePeers(strings.Split(cfg.Net.Peers, ",")) if err != nil { return nil, nil, errors.Wrap(fmt.Sprintf("failed to parse bootstrap peers %v", cfg.Net.Peers), err) } - log.Info(ctx, "Bootstrapping with peers", logging.NewKV("Addresses", addrs)) + log.InfoContext(ctx, "Bootstrapping with peers", "Addresses", addrs) n.Bootstrap(addrs) } @@ -200,13 +200,13 @@ func executeTestCase(t *testing.T, test P2PTestCase) { nodes := []*net.Node{} for i, cfg := range test.NodeConfig { - log.Info(ctx, fmt.Sprintf("Setting up node %d", i)) + log.InfoContext(ctx, fmt.Sprintf("Setting up node %d", i)) cfg.Datastore.Badger.Path = t.TempDir() if peers, ok := test.NodePeers[i]; ok { peerAddresses := []string{} for _, p := range peers { if p >= len(nodes) { - log.Info(ctx, "cannot set a peer that hasn't been started. Skipping to next peer") + log.InfoContext(ctx, "cannot set a peer that hasn't been started. Skipping to next peer") continue } peerInfo := nodes[p].PeerInfo() @@ -237,10 +237,10 @@ func executeTestCase(t *testing.T, test P2PTestCase) { if i == j { continue } - log.Info(ctx, fmt.Sprintf("Waiting for node %d to connect with peer %d", i, j)) + log.InfoContext(ctx, fmt.Sprintf("Waiting for node %d to connect with peer %d", i, j)) err := n.WaitForPubSubEvent(p.PeerID()) require.NoError(t, err) - log.Info(ctx, fmt.Sprintf("Node %d connected to peer %d", i, j)) + log.InfoContext(ctx, fmt.Sprintf("Node %d connected to peer %d", i, j)) } } } @@ -248,13 +248,13 @@ func executeTestCase(t *testing.T, test P2PTestCase) { // update and sync peers for n, updateMap := range test.Updates { if n >= len(nodes) { - log.Info(ctx, "cannot update a node that hasn't been started. 
Skipping to next node") + log.InfoContext(ctx, "cannot update a node that hasn't been started. Skipping to next node") continue } for d, updates := range updateMap { for _, update := range updates { - log.Info(ctx, fmt.Sprintf("Updating node %d with update %d", n, d)) + log.InfoContext(ctx, fmt.Sprintf("Updating node %d with update %d", n, d)) err := updateDocument(ctx, nodes[n].DB, docIDs[d], update) require.NoError(t, err) @@ -263,10 +263,10 @@ func executeTestCase(t *testing.T, test P2PTestCase) { if n2 == n { continue } - log.Info(ctx, fmt.Sprintf("Waiting for node %d to sync with peer %d", n2, n)) + log.InfoContext(ctx, fmt.Sprintf("Waiting for node %d to sync with peer %d", n2, n)) err := p.WaitForPushLogByPeerEvent(nodes[n].PeerInfo().ID) require.NoError(t, err) - log.Info(ctx, fmt.Sprintf("Node %d synced", n2)) + log.InfoContext(ctx, fmt.Sprintf("Node %d synced", n2)) } } } @@ -277,7 +277,7 @@ func executeTestCase(t *testing.T, test P2PTestCase) { continue } if n2 >= len(nodes) { - log.Info(ctx, "cannot check results of a node that hasn't been started. Skipping to next node") + log.InfoContext(ctx, "cannot check results of a node that hasn't been started. 
Skipping to next node") continue } @@ -319,10 +319,10 @@ func executeTestCase(t *testing.T, test P2PTestCase) { require.NoError(t, err) } for _, rep := range reps { - log.Info(ctx, fmt.Sprintf("Waiting for node %d to sync with peer %d", rep, n)) + log.InfoContext(ctx, fmt.Sprintf("Waiting for node %d to sync with peer %d", rep, n)) err := nodes[rep].WaitForPushLogByPeerEvent(nodes[n].PeerID()) require.NoError(t, err) - log.Info(ctx, fmt.Sprintf("Node %d synced", rep)) + log.InfoContext(ctx, fmt.Sprintf("Node %d synced", rep)) for docID, results := range test.ReplicatorResult[rep] { for field, result := range results { diff --git a/tests/integration/p2p.go b/tests/integration/p2p.go index 463d1bd653..5601e1793f 100644 --- a/tests/integration/p2p.go +++ b/tests/integration/p2p.go @@ -15,7 +15,6 @@ import ( "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/config" - "github.com/sourcenetwork/defradb/logging" "github.com/sourcenetwork/defradb/tests/clients" "github.com/libp2p/go-libp2p/core/peer" @@ -149,7 +148,7 @@ func connectPeers( targetNode := s.nodes[cfg.TargetNodeID] addrs := []peer.AddrInfo{targetNode.PeerInfo()} - log.Info(s.ctx, "Bootstrapping with peers", logging.NewKV("Addresses", addrs)) + log.InfoContext(s.ctx, "Bootstrapping with peers", "Addresses", addrs) sourceNode.Bootstrap(addrs) // Bootstrap triggers a bunch of async stuff for which we have no good way of waiting on. 
It must be diff --git a/tests/integration/utils2.go b/tests/integration/utils2.go index cfef7d870f..d5e6ae91fe 100644 --- a/tests/integration/utils2.go +++ b/tests/integration/utils2.go @@ -22,6 +22,7 @@ import ( "github.com/bxcodec/faker/support/slice" "github.com/fxamacker/cbor/v2" "github.com/libp2p/go-libp2p/core/crypto" + "github.com/sourcenetwork/corelog" "github.com/sourcenetwork/immutable" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -30,7 +31,6 @@ import ( "github.com/sourcenetwork/defradb/datastore" badgerds "github.com/sourcenetwork/defradb/datastore/badger/v4" "github.com/sourcenetwork/defradb/errors" - "github.com/sourcenetwork/defradb/logging" "github.com/sourcenetwork/defradb/net" "github.com/sourcenetwork/defradb/request/graphql" changeDetector "github.com/sourcenetwork/defradb/tests/change_detector" @@ -69,7 +69,7 @@ const ( ) var ( - log = logging.MustNewLogger("tests.integration") + log = corelog.NewLogger("tests.integration") mutationType MutationType ) @@ -174,18 +174,18 @@ func executeTestCase( dbt DatabaseType, clientType ClientType, ) { - log.Info( + log.InfoContext( ctx, testCase.Description, - logging.NewKV("database", dbt), - logging.NewKV("client", clientType), - logging.NewKV("mutationType", mutationType), - logging.NewKV("databaseDir", databaseDir), - logging.NewKV("changeDetector.Enabled", changeDetector.Enabled), - logging.NewKV("changeDetector.SetupOnly", changeDetector.SetupOnly), - logging.NewKV("changeDetector.SourceBranch", changeDetector.SourceBranch), - logging.NewKV("changeDetector.TargetBranch", changeDetector.TargetBranch), - logging.NewKV("changeDetector.Repository", changeDetector.Repository), + "database", dbt, + "client", clientType, + "mutationType", mutationType, + "databaseDir", databaseDir, + "changeDetector.Enabled", changeDetector.Enabled, + "changeDetector.SetupOnly", changeDetector.SetupOnly, + "changeDetector.SourceBranch", changeDetector.SourceBranch, + 
"changeDetector.TargetBranch", changeDetector.TargetBranch, + "changeDetector.Repository", changeDetector.Repository, ) startActionIndex, endActionIndex := getActionRange(t, testCase) @@ -779,7 +779,7 @@ func configureNode( require.NoError(s.t, err) var n *net.Node - log.Info(s.ctx, "Starting P2P node", logging.NewKV("P2P address", cfg.Net.P2PAddresses)) + log.InfoContext(s.ctx, "Starting P2P node", "P2P address", cfg.Net.P2PAddresses) n, err = net.NewNode( s.ctx, db, @@ -790,7 +790,7 @@ func configureNode( ) require.NoError(s.t, err) - log.Info(s.ctx, "Starting P2P node", logging.NewKV("P2P address", n.PeerInfo())) + log.InfoContext(s.ctx, "Starting P2P node", "P2P address", n.PeerInfo()) if err := n.Start(); err != nil { n.Close() require.NoError(s.t, err) @@ -1752,7 +1752,7 @@ func assertRequestResults( return true } - log.Info(s.ctx, "", logging.NewKV("RequestResults", result.Data)) + log.InfoContext(s.ctx, "", "RequestResults", result.Data) // compare results require.Equal(s.t, len(expectedResults), len(resultantData), From e4d13842f10578752cba3d3895145d0a421f4b0f Mon Sep 17 00:00:00 2001 From: Keenan Nemetz Date: Thu, 15 Feb 2024 18:31:03 -0800 Subject: [PATCH 2/6] remove logging package --- cli/cli.go | 4 +- cli/config.go | 58 +-- cli/config_test.go | 7 - cli/root.go | 34 +- cli/server_dump.go | 2 +- cli/start.go | 8 +- db/db.go | 3 +- go.mod | 4 +- go.sum | 4 +- logging/config.go | 264 ---------- logging/doc.go | 26 - logging/logger.go | 311 ----------- logging/logging.go | 77 --- logging/logging_test.go | 1011 ------------------------------------ logging/registry.go | 57 -- net/peer.go | 5 +- tests/bench/bench_util.go | 3 - tests/gen/cli/util_test.go | 8 +- 18 files changed, 28 insertions(+), 1858 deletions(-) delete mode 100644 logging/config.go delete mode 100644 logging/doc.go delete mode 100644 logging/logger.go delete mode 100644 logging/logging.go delete mode 100644 logging/logging_test.go delete mode 100644 logging/registry.go diff --git 
a/cli/cli.go b/cli/cli.go index 4cdb8c443b..b1059f1f2d 100644 --- a/cli/cli.go +++ b/cli/cli.go @@ -16,10 +16,10 @@ package cli import ( "github.com/spf13/cobra" - "github.com/sourcenetwork/defradb/logging" + "github.com/sourcenetwork/corelog" ) -var log = logging.MustNewLogger("cli") +var log = corelog.NewLogger("cli") // NewDefraCommand returns the root command instanciated with its tree of subcommands. func NewDefraCommand() *cobra.Command { diff --git a/cli/config.go b/cli/config.go index bb57a8cb3d..21a26b976c 100644 --- a/cli/config.go +++ b/cli/config.go @@ -17,8 +17,6 @@ import ( "github.com/spf13/pflag" "github.com/spf13/viper" - - "github.com/sourcenetwork/defradb/logging" ) const ( @@ -41,11 +39,12 @@ var configPaths = []string{ // configFlags is a mapping of config keys to cli flags to bind to. var configFlags = map[string]string{ - "log.level": "loglevel", - "log.output": "logoutput", - "log.format": "logformat", - "log.stacktrace": "logtrace", - "log.nocolor": "lognocolor", + "log.level": "log-level", + "log.output": "log-output", + "log.format": "log-format", + "log.stacktrace": "log-stacktrace", + "log.source": "log-source", + "log.overrides": "log-overrides", "api.address": "url", "datastore.maxtxnretries": "max-txn-retries", "datastore.store": "store", @@ -125,15 +124,6 @@ func loadConfig(rootdir string, flags *pflag.FlagSet) (*viper.Viper, error) { } } - logCfg := loggingConfig(cfg.Sub("log")) - logCfg.OverridesByLoggerName = make(map[string]logging.Config) - - // apply named logging overrides - for key := range cfg.GetStringMap("log.overrides") { - logCfg.OverridesByLoggerName[key] = loggingConfig(cfg.Sub("log.overrides." + key)) - } - logging.SetConfig(logCfg) - return cfg, nil } @@ -147,39 +137,3 @@ func bindConfigFlags(cfg *viper.Viper, flags *pflag.FlagSet) error { } return nil } - -// loggingConfig returns a new logging config from the given config. 
-func loggingConfig(cfg *viper.Viper) logging.Config { - var level int8 - switch value := cfg.GetString("level"); value { - case configLogLevelDebug: - level = logging.Debug - case configLogLevelInfo: - level = logging.Info - case configLogLevelError: - level = logging.Error - case configLogLevelFatal: - level = logging.Fatal - default: - level = logging.Info - } - - var format logging.EncoderFormat - switch value := cfg.GetString("format"); value { - case configLogFormatJSON: - format = logging.JSON - case configLogFormatCSV: - format = logging.CSV - default: - format = logging.CSV - } - - return logging.Config{ - Level: logging.NewLogLevelOption(level), - EnableStackTrace: logging.NewEnableStackTraceOption(cfg.GetBool("stacktrace")), - DisableColor: logging.NewDisableColorOption(cfg.GetBool("nocolor")), - EncoderFormat: logging.NewEncoderFormatOption(format), - OutputPaths: []string{cfg.GetString("output")}, - EnableCaller: logging.NewEnableCallerOption(cfg.GetBool("caller")), - } -} diff --git a/cli/config_test.go b/cli/config_test.go index 210743477c..fbb546ad39 100644 --- a/cli/config_test.go +++ b/cli/config_test.go @@ -51,11 +51,4 @@ func TestLoadConfigNotExist(t *testing.T) { assert.Equal(t, true, cfg.GetBool("net.pubsubenabled")) assert.Equal(t, false, cfg.GetBool("net.relay")) assert.Equal(t, []string{}, cfg.GetStringSlice("net.peers")) - - assert.Equal(t, "info", cfg.GetString("log.level")) - assert.Equal(t, false, cfg.GetBool("log.stacktrace")) - assert.Equal(t, "csv", cfg.GetString("log.format")) - assert.Equal(t, "stderr", cfg.GetString("log.output")) - assert.Equal(t, false, cfg.GetBool("log.nocolor")) - assert.Equal(t, false, cfg.GetBool("log.caller")) } diff --git a/cli/root.go b/cli/root.go index e4ba349f76..f91963ff06 100644 --- a/cli/root.go +++ b/cli/root.go @@ -11,6 +11,8 @@ package cli import ( + "github.com/sourcenetwork/corelog" + "github.com/spf13/cobra" ) @@ -37,36 +39,6 @@ Start a DefraDB node, interact with a local or remote node, and 
much more. "Directory for persistent data (default: $HOME/.defradb)", ) - cmd.PersistentFlags().String( - "loglevel", - "info", - "Log level to use. Options are debug, info, error, fatal", - ) - - cmd.PersistentFlags().String( - "logoutput", - "stderr", - "Log output path", - ) - - cmd.PersistentFlags().String( - "logformat", - "csv", - "Log format to use. Options are csv, json", - ) - - cmd.PersistentFlags().Bool( - "logtrace", - false, - "Include stacktrace in error and fatal logs", - ) - - cmd.PersistentFlags().Bool( - "lognocolor", - false, - "Disable colored log output", - ) - cmd.PersistentFlags().String( "url", "127.0.0.1:9181", @@ -127,5 +99,7 @@ Start a DefraDB node, interact with a local or remote node, and much more. "Path to the private key for tls", ) + cmd.PersistentFlags().AddFlagSet(corelog.FlagSet) + return cmd } diff --git a/cli/server_dump.go b/cli/server_dump.go index 6d2b2cefc6..f82ab44d97 100644 --- a/cli/server_dump.go +++ b/cli/server_dump.go @@ -24,7 +24,7 @@ func MakeServerDumpCmd() *cobra.Command { Short: "Dumps the state of the entire database", RunE: func(cmd *cobra.Command, _ []string) error { cfg := mustGetContextConfig(cmd) - log.FeedbackInfo(cmd.Context(), "Dumping DB state...") + log.InfoContext(cmd.Context(), "Dumping DB state...") if cfg.GetString("datastore.store") != configStoreBadger { return errors.New("server-side dump is only supported for the Badger datastore") diff --git a/cli/start.go b/cli/start.go index d4e789cbc6..90ca08d77a 100644 --- a/cli/start.go +++ b/cli/start.go @@ -108,11 +108,11 @@ func MakeStartCommand() *cobra.Command { defer func() { if err := n.Close(cmd.Context()); err != nil { - log.FeedbackErrorE(cmd.Context(), "Stopping DefraDB", err) + log.ErrorContextE(cmd.Context(), "Stopping DefraDB", err) } }() - log.FeedbackInfo(cmd.Context(), "Starting DefraDB") + log.InfoContext(cmd.Context(), "Starting DefraDB") if err := n.Start(cmd.Context()); err != nil { return err } @@ -122,9 +122,9 @@ func 
MakeStartCommand() *cobra.Command { select { case <-cmd.Context().Done(): - log.FeedbackInfo(cmd.Context(), "Received context cancellation; shutting down...") + log.InfoContext(cmd.Context(), "Received context cancellation; shutting down...") case <-signalCh: - log.FeedbackInfo(cmd.Context(), "Received interrupt; shutting down...") + log.InfoContext(cmd.Context(), "Received interrupt; shutting down...") } return nil diff --git a/db/db.go b/db/db.go index 3e1daf98a1..0bbf017175 100644 --- a/db/db.go +++ b/db/db.go @@ -31,7 +31,6 @@ import ( "github.com/sourcenetwork/defradb/errors" "github.com/sourcenetwork/defradb/events" "github.com/sourcenetwork/defradb/lens" - "github.com/sourcenetwork/defradb/logging" "github.com/sourcenetwork/defradb/request/graphql" ) @@ -287,7 +286,7 @@ func printStore(ctx context.Context, store datastore.DSReaderWriter) error { } for r := range results.Next() { - log.InfoContext(ctx, "", logging.NewKV(r.Key, r.Value)) + log.InfoContext(ctx, "", r.Key, r.Value) } return results.Close() diff --git a/go.mod b/go.mod index 5e26aca82a..05818e1075 100644 --- a/go.mod +++ b/go.mod @@ -18,7 +18,6 @@ require ( github.com/ipfs/go-cid v0.4.1 github.com/ipfs/go-datastore v0.6.0 github.com/ipfs/go-ipld-format v0.6.0 - github.com/ipfs/go-log v1.0.5 github.com/ipfs/go-log/v2 v2.5.1 github.com/jbenet/goprocess v0.1.4 github.com/lens-vm/lens/host-go v0.0.0-20231127204031-8d858ed2926c @@ -31,7 +30,7 @@ require ( github.com/multiformats/go-multibase v0.2.0 github.com/multiformats/go-multihash v0.2.3 github.com/sourcenetwork/badger/v4 v4.2.1-0.20231113215945-a63444ca5276 - github.com/sourcenetwork/corelog v0.0.1 + github.com/sourcenetwork/corelog v0.0.2 github.com/sourcenetwork/go-libp2p-pubsub-rpc v0.0.13 github.com/sourcenetwork/graphql-go v0.7.10-0.20231113214537-a9560c1898dd github.com/sourcenetwork/immutable v0.3.0 @@ -100,6 +99,7 @@ require ( github.com/ipfs/go-ipfs-pq v0.0.3 // indirect github.com/ipfs/go-ipfs-util v0.0.3 // indirect 
github.com/ipfs/go-ipld-legacy v0.2.1 // indirect + github.com/ipfs/go-log v1.0.5 // indirect github.com/ipfs/go-metrics-interface v0.0.1 // indirect github.com/ipfs/go-peertaskqueue v0.8.1 // indirect github.com/ipfs/kubo v0.25.0 // indirect diff --git a/go.sum b/go.sum index 08775f972e..917bcfc3a4 100644 --- a/go.sum +++ b/go.sum @@ -510,8 +510,8 @@ github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIK github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA= github.com/sourcenetwork/badger/v4 v4.2.1-0.20231113215945-a63444ca5276 h1:TpQDDPfucDgCNH0NVqVUk6SSq6T6G8p9HIocmwZh9Tg= github.com/sourcenetwork/badger/v4 v4.2.1-0.20231113215945-a63444ca5276/go.mod h1:lxiZTDBw0vheFMqSwX2OvB6RTDI1+/UtVCSU4rpThFM= -github.com/sourcenetwork/corelog v0.0.1 h1:hZPZxku0YCMOn3IINmGQWPUkyNQgIRDLbIPDiPY8A7I= -github.com/sourcenetwork/corelog v0.0.1/go.mod h1:mXsBA4ujUt0lAkDFoHoXuaIQjhdeXi+RfDNV7ZTiy5E= +github.com/sourcenetwork/corelog v0.0.2 h1:A6dYiyE/iT3QGk815C9ISn+LpBJI2olsUaJ4biM1Wk4= +github.com/sourcenetwork/corelog v0.0.2/go.mod h1:xq7Oa2K+ysY0lOpDGDau+LQsQlyD90Jb7LZ/9n2eRuA= github.com/sourcenetwork/go-libp2p-pubsub-rpc v0.0.13 h1:d/PeGZutd5NcDr6ltAv8ubN5PxsHMp1YUnhHY/QCWB4= github.com/sourcenetwork/go-libp2p-pubsub-rpc v0.0.13/go.mod h1:jUoQv592uUX1u7QBjAY4C+l24X9ArhPfifOqXpDHz4U= github.com/sourcenetwork/graphql-go v0.7.10-0.20231113214537-a9560c1898dd h1:lmpW39/8wPJ0khWRhOcj7Bj0HYKbSmQ8rXMJw1cMB8U= diff --git a/logging/config.go b/logging/config.go deleted file mode 100644 index 63cde2ceb5..0000000000 --- a/logging/config.go +++ /dev/null @@ -1,264 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. 
-// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package logging - -import ( - "context" - "io" - "os" -) - -type ( - // EncoderFormat is the format of the log output (JSON, CSV, ...). - EncoderFormat = int8 - EncoderFormatOption struct { - EncoderFormat EncoderFormat - HasValue bool - } -) - -// NewEncoderFormatOption creates a new EncoderFormatOption with the given value. -func NewEncoderFormatOption(v EncoderFormat) EncoderFormatOption { - return EncoderFormatOption{ - EncoderFormat: v, - HasValue: true, - } -} - -const ( - stderr = "stderr" - stdout = "stdout" - - JSON EncoderFormat = iota - CSV -) - -type ( - LogLevel = int8 - LogLevelOption struct { - LogLevel LogLevel - HasValue bool - } -) - -func NewLogLevelOption(v LogLevel) LogLevelOption { - return LogLevelOption{ - LogLevel: v, - HasValue: true, - } -} - -// Log levels. 
-const ( - Debug LogLevel = -1 - Info LogLevel = 0 - Warn LogLevel = 1 - Error LogLevel = 2 - Fatal LogLevel = 5 -) - -type EnableStackTraceOption struct { - EnableStackTrace bool - HasValue bool -} - -type EnableCallerOption struct { - EnableCaller bool - HasValue bool -} - -type DisableColorOption struct { - DisableColor bool - HasValue bool -} - -func NewEnableStackTraceOption(enable bool) EnableStackTraceOption { - return EnableStackTraceOption{ - EnableStackTrace: enable, - HasValue: true, - } -} - -func NewEnableCallerOption(enable bool) EnableCallerOption { - return EnableCallerOption{ - EnableCaller: enable, - HasValue: true, - } -} - -func NewDisableColorOption(disable bool) DisableColorOption { - return DisableColorOption{ - DisableColor: disable, - HasValue: true, - } -} - -type Config struct { - Level LogLevelOption - EncoderFormat EncoderFormatOption - EnableStackTrace EnableStackTraceOption - EnableCaller EnableCallerOption - DisableColor DisableColorOption - OutputPaths []string - OverridesByLoggerName map[string]Config - - Pipe io.Writer // this is used for testing purposes only -} - -func (c Config) forLogger(name string) Config { - loggerConfig := Config{ - Level: c.Level, - EnableStackTrace: c.EnableStackTrace, - DisableColor: c.DisableColor, - EnableCaller: c.EnableCaller, - EncoderFormat: c.EncoderFormat, - OutputPaths: c.OutputPaths, - Pipe: c.Pipe, - } - - if override, hasOverride := c.OverridesByLoggerName[name]; hasOverride { - if override.Level.HasValue { - loggerConfig.Level = override.Level - } - if override.EnableStackTrace.HasValue { - loggerConfig.EnableStackTrace = override.EnableStackTrace - } - if override.EnableCaller.HasValue { - loggerConfig.EnableCaller = override.EnableCaller - } - if override.DisableColor.HasValue { - loggerConfig.DisableColor = override.DisableColor - } - if override.EncoderFormat.HasValue { - loggerConfig.EncoderFormat = override.EncoderFormat - } - if len(override.OutputPaths) != 0 { - 
loggerConfig.OutputPaths = override.OutputPaths - } - if override.Pipe != nil { - loggerConfig.Pipe = override.Pipe - } - } - - return loggerConfig -} - -func (c Config) copy() Config { - overridesByLoggerName := make(map[string]Config, len(c.OverridesByLoggerName)) - for k, o := range c.OverridesByLoggerName { - overridesByLoggerName[k] = Config{ - Level: o.Level, - EnableStackTrace: o.EnableStackTrace, - EncoderFormat: o.EncoderFormat, - EnableCaller: o.EnableCaller, - DisableColor: o.DisableColor, - OutputPaths: o.OutputPaths, - Pipe: o.Pipe, - } - } - - return Config{ - Level: c.Level, - EnableStackTrace: c.EnableStackTrace, - EncoderFormat: c.EncoderFormat, - OutputPaths: c.OutputPaths, - EnableCaller: c.EnableCaller, - DisableColor: c.DisableColor, - OverridesByLoggerName: overridesByLoggerName, - Pipe: c.Pipe, - } -} - -// Create a new Config given new config options. Each updated Config field is handled. -func (oldConfig Config) with(newConfigOptions Config) Config { - newConfig := oldConfig.copy() - - if newConfigOptions.Level.HasValue { - newConfig.Level = newConfigOptions.Level - } - - if newConfigOptions.EnableStackTrace.HasValue { - newConfig.EnableStackTrace = newConfigOptions.EnableStackTrace - } - - if newConfigOptions.EnableCaller.HasValue { - newConfig.EnableCaller = newConfigOptions.EnableCaller - } - - if newConfigOptions.DisableColor.HasValue { - newConfig.DisableColor = newConfigOptions.DisableColor - } - - if newConfigOptions.EncoderFormat.HasValue { - newConfig.EncoderFormat = newConfigOptions.EncoderFormat - } - - if len(newConfigOptions.OutputPaths) != 0 { - newConfig.OutputPaths = validatePaths(newConfigOptions.OutputPaths) - } - - if newConfigOptions.Pipe != nil { - newConfig.Pipe = newConfigOptions.Pipe - } - - for k, o := range newConfigOptions.OverridesByLoggerName { - // We fully overwrite overrides to allow for ease of - // reset/removal (can provide empty to return to default) - newConfig.OverridesByLoggerName[k] = Config{ - Level: 
o.Level, - EnableStackTrace: o.EnableStackTrace, - EnableCaller: o.EnableCaller, - DisableColor: o.DisableColor, - EncoderFormat: o.EncoderFormat, - OutputPaths: validatePaths(o.OutputPaths), - Pipe: o.Pipe, - } - } - - return newConfig -} - -// validatePath ensure that all output paths are valid to avoid zap sync errors -// and also to ensure that the logs are not lost. -func validatePaths(paths []string) []string { - validatedPaths := make([]string, 0, len(paths)) - for _, p := range paths { - if p == stderr || p == stdout { - validatedPaths = append(validatedPaths, p) - continue - } - - if f, err := os.OpenFile(p, os.O_CREATE|os.O_APPEND, 0644); err != nil { - log.Info(context.Background(), "cannot use provided path", NewKV("err", err)) - } else { - err := f.Close() - if err != nil { - log.Info(context.Background(), "problem closing file", NewKV("err", err)) - } - - validatedPaths = append(validatedPaths, p) - } - } - - return validatedPaths -} - -func willOutputToStderrOrStdout(paths []string) bool { - if len(paths) == 0 { - return true - } - for _, p := range paths { - if p == stderr || p == stdout { - return true - } - } - return false -} diff --git a/logging/doc.go b/logging/doc.go deleted file mode 100644 index 2f6a0b8827..0000000000 --- a/logging/doc.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -/* -Package logging abstracts away any underlying logging library providing -a single contact-point for the dependency allowing relatively easy -swapping out should we want to. 
- -This package allows configuration to be loaded and globally applied -after logger instances have been created, utilising an internal thread-safe -registry of named logger instances to apply the config to. - -Configuration may be applied globally, or to logger instances of a specific -name, with the named-configuration being used over the global settings if -both are provided. - -All configuration options are optional. -*/ -package logging diff --git a/logging/logger.go b/logging/logger.go deleted file mode 100644 index f93e305fce..0000000000 --- a/logging/logger.go +++ /dev/null @@ -1,311 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package logging - -import ( - "context" - "fmt" - stdlog "log" - "os" - "sync" - - golog "github.com/ipfs/go-log" - gologV2 "github.com/ipfs/go-log/v2" - "go.uber.org/zap" - "go.uber.org/zap/zapcore" -) - -type logger struct { - name string - logger *zap.Logger - consoleLogger *stdlog.Logger - syncLock sync.RWMutex -} - -var _ Logger = (*logger)(nil) - -func mustNewLogger(name string) *logger { - l, err := buildZapLogger(name, Config{}) - if err != nil { - panic(err) - } - - return &logger{ - name: name, - logger: l, - } -} - -func (l *logger) Debug(ctx context.Context, message string, keyvals ...KV) { - l.syncLock.RLock() - defer l.syncLock.RUnlock() - - l.logger.Debug(message, toZapFields(keyvals)...) -} - -func (l *logger) Info(ctx context.Context, message string, keyvals ...KV) { - l.syncLock.RLock() - defer l.syncLock.RUnlock() - - l.logger.Info(message, toZapFields(keyvals)...) 
-} - -func (l *logger) Error(ctx context.Context, message string, keyvals ...KV) { - l.syncLock.RLock() - defer l.syncLock.RUnlock() - - l.logger.Error(message, toZapFields(keyvals)...) -} - -func (l *logger) ErrorE(ctx context.Context, message string, err error, keyvals ...KV) { - kvs := keyvals - kvs = append(kvs, NewKV("Error", err.Error())) - kvs = withStackTrace(err, kvs) - - l.syncLock.RLock() - defer l.syncLock.RUnlock() - - l.logger.Error(message, toZapFields(kvs)...) -} - -func (l *logger) Fatal(ctx context.Context, message string, keyvals ...KV) { - l.syncLock.RLock() - defer l.syncLock.RUnlock() - - l.logger.Fatal(message, toZapFields(keyvals)...) -} - -func (l *logger) FatalE(ctx context.Context, message string, err error, keyvals ...KV) { - kvs := keyvals - kvs = append(kvs, NewKV("Error", err.Error())) - kvs = withStackTrace(err, kvs) - - l.syncLock.RLock() - defer l.syncLock.RUnlock() - - l.logger.Fatal(message, toZapFields(kvs)...) -} - -func (l *logger) FeedbackInfo(ctx context.Context, message string, keyvals ...KV) { - l.Info(ctx, message, keyvals...) - l.syncLock.RLock() - defer l.syncLock.RUnlock() - if l.consoleLogger != nil { - l.consoleLogger.Println(message) - } -} - -func (l *logger) FeedbackError(ctx context.Context, message string, keyvals ...KV) { - l.Error(ctx, message, keyvals...) - l.syncLock.RLock() - defer l.syncLock.RUnlock() - if l.consoleLogger != nil { - l.consoleLogger.Println(message) - } -} - -func (l *logger) FeedbackErrorE(ctx context.Context, message string, err error, keyvals ...KV) { - l.ErrorE(ctx, message, err, keyvals...) - l.syncLock.RLock() - defer l.syncLock.RUnlock() - if l.consoleLogger != nil { - l.consoleLogger.Println(message) - if stack, hasStack := getStackTrace(err); hasStack { - l.consoleLogger.Println(stack) - } - } -} - -func (l *logger) FeedbackFatal(ctx context.Context, message string, keyvals ...KV) { - l.Fatal(ctx, message, keyvals...) 
- l.syncLock.RLock() - defer l.syncLock.RUnlock() - if l.consoleLogger != nil { - l.consoleLogger.Println(message) - } -} - -func (l *logger) FeedbackFatalE(ctx context.Context, message string, err error, keyvals ...KV) { - l.FatalE(ctx, message, err, keyvals...) - l.syncLock.RLock() - defer l.syncLock.RUnlock() - if l.consoleLogger != nil { - l.consoleLogger.Println(message) - if stack, hasStack := getStackTrace(err); hasStack { - l.consoleLogger.Println(stack) - } - } -} - -func (l *logger) Flush() error { - return l.logger.Sync() -} - -func toZapFields(keyvals []KV) []zap.Field { - result := make([]zap.Field, len(keyvals)) - for i, kv := range keyvals { - result[i] = zap.Any(kv.key, kv.value) - } - return result -} - -func (l *logger) ApplyConfig(config Config) { - newLogger, err := buildZapLogger(l.name, config) - if err != nil { - l.logger.Error("Error applying config to logger", zap.Error(err)) - return - } - - l.syncLock.Lock() - defer l.syncLock.Unlock() - - // We need sync the old log before swapping it out - _ = l.logger.Sync() - l.logger = newLogger - - if !willOutputToStderrOrStdout(config.OutputPaths) { - if config.Pipe != nil { // for testing purposes only - l.consoleLogger = stdlog.New(config.Pipe, "", 0) - } else { - l.consoleLogger = stdlog.New(os.Stderr, "", 0) - } - } else { - l.consoleLogger = nil - } -} - -func withStackTrace(err error, keyvals []KV) []KV { - if stack, hasStack := getStackTrace(err); hasStack { - return append(keyvals, NewKV("stacktrace", stack)) - } - - return keyvals -} - -func getStackTrace(err error) (string, bool) { - configMutex.RLock() - defer configMutex.RUnlock() - - if cachedConfig.EnableStackTrace.EnableStackTrace { - return fmt.Sprintf("%+v", err), true - } - - return "", false -} - -func buildZapLogger(name string, config Config) (*zap.Logger, error) { - const ( - encodingTypeConsole string = "console" - encodingTypeJSON string = "json" - ) - defaultConfig := zap.NewProductionConfig() - defaultConfig.Encoding = 
encodingTypeConsole - defaultConfig.EncoderConfig.ConsoleSeparator = ", " - defaultConfig.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder - defaultConfig.EncoderConfig.EncodeLevel = zapcore.CapitalColorLevelEncoder - defaultConfig.DisableStacktrace = true - defaultConfig.DisableCaller = true - - if config.Level.HasValue { - defaultConfig.Level = zap.NewAtomicLevelAt(zapcore.Level(config.Level.LogLevel)) - } - - if config.DisableColor.HasValue && config.DisableColor.DisableColor { - defaultConfig.EncoderConfig.EncodeLevel = zapcore.CapitalLevelEncoder - } - - if config.EnableCaller.HasValue { - defaultConfig.DisableCaller = !config.EnableCaller.EnableCaller - } - - if config.EncoderFormat.HasValue { - if config.EncoderFormat.EncoderFormat == JSON { - defaultConfig.Encoding = encodingTypeJSON - defaultConfig.EncoderConfig.EncodeLevel = zapcore.CapitalLevelEncoder - } else if config.EncoderFormat.EncoderFormat == CSV { - defaultConfig.Encoding = encodingTypeConsole - } - } - - if len(config.OutputPaths) != 0 { - defaultConfig.OutputPaths = config.OutputPaths[:] - } - - // We must skip the first caller, as this will always be our wrapper - newLogger, err := defaultConfig.Build(zap.AddCallerSkip(1)) - if err != nil { - return nil, err - } - - if willOutputToStderrOrStdout(defaultConfig.OutputPaths) && config.Pipe != nil { - newLogger = newLogger.WithOptions(zap.WrapCore(func(zapcore.Core) zapcore.Core { - cfg := zap.NewProductionEncoderConfig() - cfg.ConsoleSeparator = defaultConfig.EncoderConfig.ConsoleSeparator - cfg.EncodeTime = defaultConfig.EncoderConfig.EncodeTime - cfg.EncodeLevel = defaultConfig.EncoderConfig.EncodeLevel - return zapcore.NewCore( - zapcore.NewJSONEncoder(cfg), - zapcore.Lock(zapcore.AddSync(config.Pipe)), - zap.NewAtomicLevelAt(zapcore.Level(config.Level.LogLevel)), - ) - })) - } - - return newLogger.Named(name), nil -} - -/* - The following are wrappers for external packages loggers that are compatible with - our own logger (i.e. 
Zap based). They offer a way to access the internal logger stores - and apply our configuration. They should implement ApplyConfig. -*/ - -// goLogger is a wrapper for a go-log logger -// Used by github.com/ipfs/go-ipfs-provider -type goLogger struct { - *logger - *golog.ZapEventLogger -} - -func GetGoLogger(name string) *goLogger { - l := mustNewLogger(name) - gl := golog.Logger(name) - return &goLogger{ - logger: l, - ZapEventLogger: gl, - } -} - -func (l *goLogger) ApplyConfig(config Config) { - l.logger.ApplyConfig(config) - l.ZapEventLogger.SugaredLogger = *l.logger.logger.Sugar() -} - -// goLoggerV2 is a wrapper for a go-log V2 logger -// Used by github.com/sourcenetwork/defradb/datastore/badger/v4 -type goLoggerV2 struct { - *logger - *gologV2.ZapEventLogger -} - -func GetGoLoggerV2(name string) *goLoggerV2 { - l := mustNewLogger(name) - gl := gologV2.Logger(name) - return &goLoggerV2{ - logger: l, - ZapEventLogger: gl, - } -} - -func (l *goLoggerV2) ApplyConfig(config Config) { - l.logger.ApplyConfig(config) - l.ZapEventLogger.SugaredLogger = *l.logger.logger.Sugar() -} diff --git a/logging/logging.go b/logging/logging.go deleted file mode 100644 index 1f1883bedb..0000000000 --- a/logging/logging.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package logging - -import ( - "context" -) - -var log = MustNewLogger("logging") - -// KV is a key-value pair used to pass structured data to loggers. -type KV struct { - key string - value any -} - -// NewKV creates a new KV key-value pair. 
-func NewKV(key string, value any) KV { - return KV{ - key: key, - value: value, - } -} - -type Logger interface { - // Debug logs a message at debug log level. Key-value pairs can be added. - Debug(ctx context.Context, message string, keyvals ...KV) - // Info logs a message at info log level. Key-value pairs can be added. - Info(ctx context.Context, message string, keyvals ...KV) - // Error logs a message at error log level. Key-value pairs can be added. - Error(ctx context.Context, message string, keyvals ...KV) - // ErrorErr logs a message and an error at error log level. Key-value pairs can be added. - ErrorE(ctx context.Context, message string, err error, keyvals ...KV) - // Fatal logs a message at fatal log level. Key-value pairs can be added. - Fatal(ctx context.Context, message string, keyvals ...KV) - // FatalE logs a message and an error at fatal log level. Key-value pairs can be added. - FatalE(ctx context.Context, message string, err error, keyvals ...KV) - - // Feedback prefixed method ensure that messsages reach a user in case the logs are sent to a file. - - // FeedbackInfo calls Info and sends the message to stderr if logs are sent to a file. - FeedbackInfo(ctx context.Context, message string, keyvals ...KV) - // FeedbackError calls Error and sends the message to stderr if logs are sent to a file. - FeedbackError(ctx context.Context, message string, keyvals ...KV) - // FeedbackErrorE calls ErrorE and sends the message to stderr if logs are sent to a file. - FeedbackErrorE(ctx context.Context, message string, err error, keyvals ...KV) - // FeedbackFatal calls Fatal and sends the message to stderr if logs are sent to a file. - FeedbackFatal(ctx context.Context, message string, keyvals ...KV) - // FeedbackFatalE calls FatalE and sends the message to stderr if logs are sent to a file. - FeedbackFatalE(ctx context.Context, message string, err error, keyvals ...KV) - - // Flush flushes any buffered log entries. 
- Flush() error - // ApplyConfig updates the logger with a new config. - ApplyConfig(config Config) -} - -// MustNewLogger creates and registers a new logger with the given name, and panics if there is an error. -func MustNewLogger(name string) Logger { - logger := mustNewLogger(name) - register(name, logger) - return logger -} - -// SetConfig updates all registered loggers with the given config. -func SetConfig(newConfig Config) { - updatedConfig := setConfig(newConfig) - updateLoggers(updatedConfig) -} diff --git a/logging/logging_test.go b/logging/logging_test.go deleted file mode 100644 index 5a19cfb744..0000000000 --- a/logging/logging_test.go +++ /dev/null @@ -1,1011 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -// todo: The logger(s) appear to leak resources and do not close down promptly on windows, -// the log files have open readers when the Golang test runner attempts to delete them. -// See https://github.com/sourcenetwork/defradb/issues/2057 for more info. 
- -//go:build !windows - -package logging - -import ( - "bufio" - "bytes" - "context" - "encoding/json" - "io" - "os" - "os/exec" - "testing" - - "github.com/stretchr/testify/assert" - - "github.com/sourcenetwork/defradb/errors" -) - -func TestLogWritesFatalMessageToLogAndKillsProcess(t *testing.T) { - logMessage := "test log message" - - if os.Getenv("OS_EXIT") == "1" { - ctx := context.Background() - logPath := os.Getenv("LOG_PATH") - logger, logPath := getLogger(t, func(c *Config) { - c.OutputPaths = []string{logPath} - }) - - logger.Fatal(ctx, logMessage) - return - } - - dir := t.TempDir() - logPath := dir + "/log.txt" - cmd := exec.Command(os.Args[0], "-test.run=TestLogWritesFatalMessageToLogAndKillsProcess") - cmd.Env = append(os.Environ(), "OS_EXIT=1", "LOG_PATH="+logPath) - err := cmd.Run() - if e, ok := err.(*exec.ExitError); !ok || e.Success() { - t.Fatalf("Logger.Fatal failed to kill the process, error: %v", err) - } - - logLines, err := getLogLines(t, logPath) - if err != nil { - t.Fatal(err) - } - - if len(logLines) != 1 { - t.Fatalf("expecting exactly 1 log line but got %d lines", len(logLines)) - } - - assert.Equal(t, logMessage, logLines[0]["msg"]) - assert.Equal(t, "FATAL", logLines[0]["level"]) - assert.Equal(t, "TestLogName", logLines[0]["logger"]) - // caller is disabled by default - assert.NotContains(t, logLines[0], "logging_test.go") - // stacktrace is disabled by default - assert.NotContains(t, logLines[0], "stacktrace") -} - -func TestLogWritesFatalMessageWithStackTraceToLogAndKillsProcessGivenStackTraceEnabled(t *testing.T) { - logMessage := "test log message" - - if os.Getenv("OS_EXIT") == "1" { - ctx := context.Background() - logPath := os.Getenv("LOG_PATH") - logger, logPath := getLogger(t, func(c *Config) { - c.OutputPaths = []string{logPath} - c.EnableStackTrace = NewEnableStackTraceOption(true) - }) - - logger.Fatal(ctx, logMessage) - return - } - - dir := t.TempDir() - logPath := dir + "/log.txt" - cmd := exec.Command(os.Args[0], 
"-test.run=TestLogWritesFatalMessageWithStackTraceToLogAndKillsProcessGivenStackTraceEnabled") - cmd.Env = append(os.Environ(), "OS_EXIT=1", "LOG_PATH="+logPath) - err := cmd.Run() - if e, ok := err.(*exec.ExitError); !ok || e.Success() { - t.Fatalf("Logger.Fatal failed to kill the process, error: %v", err) - } - - logLines, err := getLogLines(t, logPath) - if err != nil { - t.Fatal(err) - } - - if len(logLines) != 1 { - t.Fatalf("expecting exactly 1 log line but got %d lines", len(logLines)) - } - - assert.Equal(t, logMessage, logLines[0]["msg"]) - assert.Equal(t, "FATAL", logLines[0]["level"]) - assert.Equal(t, "TestLogName", logLines[0]["logger"]) - // no stacktrace will be present since no error was sent to the logger. - assert.NotContains(t, logLines[0], "stacktrace") - // caller is disabled by default - assert.NotContains(t, logLines[0], "logging_test.go") -} - -func TestLogWritesFatalEMessageToLogAndKillsProcess(t *testing.T) { - logMessage := "test log message" - - if os.Getenv("OS_EXIT") == "1" { - ctx := context.Background() - logPath := os.Getenv("LOG_PATH") - logger, logPath := getLogger(t, func(c *Config) { - c.OutputPaths = []string{logPath} - }) - - logger.FatalE(ctx, logMessage, errors.New("dummy error")) - return - } - - dir := t.TempDir() - logPath := dir + "/log.txt" - cmd := exec.Command(os.Args[0], "-test.run=TestLogWritesFatalEMessageToLogAndKillsProcess") - cmd.Env = append(os.Environ(), "OS_EXIT=1", "LOG_PATH="+logPath) - err := cmd.Run() - if e, ok := err.(*exec.ExitError); !ok || e.Success() { - t.Fatalf("Logger.Fatal failed to kill the process, error: %v", err) - } - - logLines, err := getLogLines(t, logPath) - if err != nil { - t.Fatal(err) - } - - if len(logLines) != 1 { - t.Fatalf("expecting exactly 1 log line but got %d lines", len(logLines)) - } - - assert.Equal(t, logMessage, logLines[0]["msg"]) - assert.Equal(t, "FATAL", logLines[0]["level"]) - assert.Equal(t, "TestLogName", logLines[0]["logger"]) - // caller is disabled by default 
- assert.NotContains(t, logLines[0], "logging_test.go") - // stacktrace is disabled by default - assert.NotContains(t, logLines[0], "stacktrace") -} - -func TestLogWritesFatalEMessageWithStackTraceToLogAndKillsProcessGivenStackTraceEnabled(t *testing.T) { - logMessage := "test log message" - - if os.Getenv("OS_EXIT") == "1" { - ctx := context.Background() - logPath := os.Getenv("LOG_PATH") - logger, logPath := getLogger(t, func(c *Config) { - c.OutputPaths = []string{logPath} - c.EnableStackTrace = NewEnableStackTraceOption(true) - }) - - logger.FatalE(ctx, logMessage, errors.New("dummy error")) - return - } - - dir := t.TempDir() - logPath := dir + "/log.txt" - cmd := exec.Command(os.Args[0], "-test.run=TestLogWritesFatalEMessageWithStackTraceToLogAndKillsProcessGivenStackTraceEnabled") - cmd.Env = append(os.Environ(), "OS_EXIT=1", "LOG_PATH="+logPath) - err := cmd.Run() - if e, ok := err.(*exec.ExitError); !ok || e.Success() { - t.Fatalf("Logger.Fatal failed to kill the process, error: %v", err) - } - - logLines, err := getLogLines(t, logPath) - if err != nil { - t.Fatal(err) - } - - if len(logLines) != 1 { - t.Fatalf("expecting exactly 1 log line but got %d lines", len(logLines)) - } - - assert.Equal(t, logMessage, logLines[0]["msg"]) - assert.Equal(t, "FATAL", logLines[0]["level"]) - assert.Equal(t, "TestLogName", logLines[0]["logger"]) - assert.Contains(t, logLines[0], "stacktrace") - // caller is disabled by default - assert.NotContains(t, logLines[0], "logging_test.go") -} - -type LogLevelTestCase struct { - LogLevel LogLevel - LogFunc func(Logger, context.Context, string) - ExpectedLogLevel string - WithStackTrace bool - ExpectStackTrace bool - WithCaller bool -} - -func logDebug(l Logger, c context.Context, m string) { l.Debug(c, m) } -func logInfo(l Logger, c context.Context, m string) { l.Info(c, m) } -func logError(l Logger, c context.Context, m string) { l.Error(c, m) } -func logErrorE(l Logger, c context.Context, m string) { l.ErrorE(c, m, 
errors.New("test error")) } - -func getLogLevelTestCase() []LogLevelTestCase { - return []LogLevelTestCase{ - {Debug, logDebug, "DEBUG", false, false, true}, - {Debug, logDebug, "DEBUG", false, false, false}, - {Debug, logInfo, "INFO", false, false, false}, - {Debug, logError, "ERROR", false, false, false}, - {Debug, logError, "ERROR", true, false, false}, - {Debug, logErrorE, "ERROR", false, false, false}, - {Debug, logErrorE, "ERROR", true, true, false}, - {Info, logDebug, "", false, false, false}, - {Info, logInfo, "INFO", false, false, true}, - {Info, logInfo, "INFO", false, false, false}, - {Info, logError, "ERROR", false, false, false}, - {Info, logError, "ERROR", true, false, false}, - {Info, logErrorE, "ERROR", false, false, false}, - {Info, logErrorE, "ERROR", true, true, false}, - {Warn, logDebug, "", false, false, false}, - {Warn, logInfo, "", false, false, false}, - {Warn, logError, "ERROR", false, false, false}, - {Warn, logError, "ERROR", true, false, false}, - {Warn, logErrorE, "ERROR", false, false, false}, - {Warn, logErrorE, "ERROR", true, true, false}, - {Error, logDebug, "", false, false, false}, - {Error, logInfo, "", false, false, false}, - {Error, logError, "ERROR", false, false, true}, - {Error, logError, "ERROR", false, false, false}, - {Error, logError, "ERROR", true, false, false}, - {Error, logErrorE, "ERROR", false, false, false}, - {Error, logErrorE, "ERROR", true, true, false}, - {Fatal, logDebug, "", false, false, true}, - {Fatal, logDebug, "", false, false, false}, - {Fatal, logInfo, "", false, false, false}, - {Fatal, logError, "", false, false, false}, - {Fatal, logErrorE, "", false, false, false}, - } -} - -func TestLogWritesMessagesToLog(t *testing.T) { - defer clearConfig() - defer clearRegistry("TestLogName") - for _, tc := range getLogLevelTestCase() { - ctx := context.Background() - logger, logPath := getLogger(t, func(c *Config) { - c.Level = NewLogLevelOption(tc.LogLevel) - c.EnableStackTrace = 
NewEnableStackTraceOption(tc.WithStackTrace) - c.EnableCaller = NewEnableCallerOption(tc.WithCaller) - }) - logMessage := "test log message" - - tc.LogFunc(logger, ctx, logMessage) - logger.Flush() - - logLines, err := getLogLines(t, logPath) - if err != nil { - t.Fatal(err) - } - - if tc.ExpectedLogLevel == "" { - assert.Len(t, logLines, 0) - } else { - if len(logLines) != 1 { - t.Fatalf("expecting exactly 1 log line but got %d lines", len(logLines)) - } - - assert.Equal(t, logMessage, logLines[0]["msg"]) - assert.Equal(t, tc.ExpectedLogLevel, logLines[0]["level"]) - assert.Equal(t, "TestLogName", logLines[0]["logger"]) - _, hasStackTrace := logLines[0]["stacktrace"] - assert.Equal(t, tc.ExpectStackTrace, hasStackTrace) - _, hasCaller := logLines[0]["caller"] - assert.Equal(t, tc.WithCaller, hasCaller) - } - - clearRegistry("TestLogName") - } -} - -func TestLogWritesMessagesToLogGivenUpdatedLogLevel(t *testing.T) { - defer clearConfig() - defer clearRegistry("TestLogName") - for _, tc := range getLogLevelTestCase() { - ctx := context.Background() - logger, logPath := getLogger(t, func(c *Config) { - c.Level = NewLogLevelOption(Fatal) - }) - SetConfig(Config{ - Level: NewLogLevelOption(tc.LogLevel), - EnableStackTrace: NewEnableStackTraceOption(tc.WithStackTrace), - EnableCaller: NewEnableCallerOption(tc.WithCaller), - }) - logMessage := "test log message" - - tc.LogFunc(logger, ctx, logMessage) - logger.Flush() - - logLines, err := getLogLines(t, logPath) - if err != nil { - t.Fatal(err) - } - - if tc.ExpectedLogLevel == "" { - assert.Len(t, logLines, 0) - } else { - if len(logLines) != 1 { - t.Fatalf("expecting exactly 1 log line but got %d lines", len(logLines)) - } - - assert.Equal(t, logMessage, logLines[0]["msg"]) - assert.Equal(t, tc.ExpectedLogLevel, logLines[0]["level"]) - assert.Equal(t, "TestLogName", logLines[0]["logger"]) - _, hasStackTrace := logLines[0]["stacktrace"] - assert.Equal(t, tc.ExpectStackTrace, hasStackTrace) - _, hasCaller := 
logLines[0]["caller"] - assert.Equal(t, tc.WithCaller, hasCaller) - } - - clearRegistry("TestLogName") - } -} - -func TestLogWritesMessagesToLogGivenUpdatedContextLogLevel(t *testing.T) { - defer clearConfig() - defer clearRegistry("TestLogName") - for _, tc := range getLogLevelTestCase() { - ctx := context.Background() - logger, logPath := getLogger(t, func(c *Config) { - c.Level = NewLogLevelOption(Fatal) - }) - SetConfig(Config{ - Level: NewLogLevelOption(Error), - }) - SetConfig(Config{ - Level: NewLogLevelOption(tc.LogLevel), - EnableStackTrace: NewEnableStackTraceOption(tc.WithStackTrace), - EnableCaller: NewEnableCallerOption(tc.WithCaller), - }) - logMessage := "test log message" - - tc.LogFunc(logger, ctx, logMessage) - logger.Flush() - - logLines, err := getLogLines(t, logPath) - if err != nil { - t.Fatal(err) - } - - if tc.ExpectedLogLevel == "" { - assert.Len(t, logLines, 0) - } else { - if len(logLines) != 1 { - t.Fatalf("expecting exactly 1 log line but got %d lines", len(logLines)) - } - - assert.Equal(t, logMessage, logLines[0]["msg"]) - assert.Equal(t, tc.ExpectedLogLevel, logLines[0]["level"]) - assert.Equal(t, "TestLogName", logLines[0]["logger"]) - _, hasStackTrace := logLines[0]["stacktrace"] - assert.Equal(t, tc.ExpectStackTrace, hasStackTrace) - _, hasCaller := logLines[0]["caller"] - assert.Equal(t, tc.WithCaller, hasCaller) - } - - clearRegistry("TestLogName") - } -} - -func TestLogDoesntWriteMessagesToLogGivenNoLogPath(t *testing.T) { - defer clearConfig() - defer clearRegistry("TestLogName") - for _, tc := range getLogLevelTestCase() { - ctx := context.Background() - b := &bytes.Buffer{} - logger, _ := getLogger(t, func(c *Config) { - c.Level = NewLogLevelOption(tc.LogLevel) - c.OutputPaths = []string{} - c.Pipe = b - }) - - logMessage := "test log message" - - tc.LogFunc(logger, ctx, logMessage) - logger.Flush() - - logLines, err := parseLines(b) - if err != nil { - t.Fatal(err) - } - - if tc.ExpectedLogLevel == "" { - assert.Len(t, 
logLines, 0) - } else { - if len(logLines) != 1 { - t.Fatalf("expecting exactly 1 log line but got %d lines", len(logLines)) - } - assert.Equal(t, logMessage, logLines[0]["msg"]) - assert.Equal(t, tc.ExpectedLogLevel, logLines[0]["level"]) - assert.Equal(t, "TestLogName", logLines[0]["logger"]) - } - - clearRegistry("TestLogName") - } -} - -func TestLogDoesntWriteMessagesToLogGivenNotFoundLogPath(t *testing.T) { - defer clearConfig() - defer clearRegistry("TestLogName") - for _, tc := range getLogLevelTestCase() { - ctx := context.Background() - b := &bytes.Buffer{} - logger, _ := getLogger(t, func(c *Config) { - c.Level = NewLogLevelOption(tc.LogLevel) - c.OutputPaths = []string{"/path/not/found"} - c.Pipe = b - }) - - logMessage := "test log message" - - tc.LogFunc(logger, ctx, logMessage) - logger.Flush() - - logLines, err := parseLines(b) - if err != nil { - t.Fatal(err) - } - - if tc.ExpectedLogLevel == "" { - assert.Len(t, logLines, 0) - } else { - if len(logLines) != 1 { - t.Fatalf("expecting exactly 1 log line but got %d lines", len(logLines)) - } - assert.Equal(t, logMessage, logLines[0]["msg"]) - assert.Equal(t, tc.ExpectedLogLevel, logLines[0]["level"]) - assert.Equal(t, "TestLogName", logLines[0]["logger"]) - } - - clearRegistry("TestLogName") - } -} - -func TestLogDoesntWriteMessagesToLogGivenStderrLogPath(t *testing.T) { - defer clearConfig() - defer clearRegistry("TestLogName") - for _, tc := range getLogLevelTestCase() { - ctx := context.Background() - b := &bytes.Buffer{} - logger, _ := getLogger(t, func(c *Config) { - c.Level = NewLogLevelOption(tc.LogLevel) - c.OutputPaths = []string{stderr} - c.Pipe = b - }) - - logMessage := "test log message" - - tc.LogFunc(logger, ctx, logMessage) - logger.Flush() - - logLines, err := parseLines(b) - if err != nil { - t.Fatal(err) - } - - if tc.ExpectedLogLevel == "" { - assert.Len(t, logLines, 0) - } else { - if len(logLines) != 1 { - t.Fatalf("expecting exactly 1 log line but got %d lines", len(logLines)) - 
} - assert.Equal(t, logMessage, logLines[0]["msg"]) - assert.Equal(t, tc.ExpectedLogLevel, logLines[0]["level"]) - assert.Equal(t, "TestLogName", logLines[0]["logger"]) - } - - clearRegistry("TestLogName") - } -} - -func TestLogWritesMessagesToLogGivenUpdatedLogPath(t *testing.T) { - defer clearConfig() - defer clearRegistry("TestLogName") - for _, tc := range getLogLevelTestCase() { - ctx := context.Background() - logger, _ := getLogger(t, func(c *Config) { - c.Level = NewLogLevelOption(tc.LogLevel) - c.OutputPaths = []string{} - }) - - dir := t.TempDir() - logPath := dir + "/log.txt" - SetConfig(Config{ - OutputPaths: []string{logPath}, - }) - logMessage := "test log message" - - tc.LogFunc(logger, ctx, logMessage) - logger.Flush() - - logLines, err := getLogLines(t, logPath) - if err != nil { - t.Fatal(err) - } - - if tc.ExpectedLogLevel == "" { - assert.Len(t, logLines, 0) - } else { - if len(logLines) != 1 { - t.Fatalf("expecting exactly 1 log line but got %d lines", len(logLines)) - } - - assert.Equal(t, logMessage, logLines[0]["msg"]) - assert.Equal(t, tc.ExpectedLogLevel, logLines[0]["level"]) - assert.Equal(t, "TestLogName", logLines[0]["logger"]) - } - - clearRegistry("TestLogName") - } -} - -func logFeedbackInfo(l Logger, c context.Context, m string) { l.FeedbackInfo(c, m) } -func logFeedbackError(l Logger, c context.Context, m string) { l.FeedbackError(c, m) } -func logFeedbackErrorE(l Logger, c context.Context, m string) { - l.FeedbackErrorE(c, m, errors.New("test error")) -} - -func getFeedbackLogLevelTestCase() []LogLevelTestCase { - return []LogLevelTestCase{ - {Debug, logFeedbackInfo, "INFO", false, false, false}, - {Debug, logFeedbackError, "ERROR", false, false, false}, - {Debug, logFeedbackError, "ERROR", true, false, false}, - {Debug, logFeedbackErrorE, "ERROR", false, false, false}, - {Debug, logFeedbackErrorE, "ERROR", true, true, false}, - {Info, logFeedbackInfo, "INFO", false, false, true}, - {Info, logFeedbackInfo, "INFO", false, false, 
false}, - {Info, logFeedbackError, "ERROR", false, false, false}, - {Info, logFeedbackError, "ERROR", true, false, false}, - {Info, logFeedbackErrorE, "ERROR", false, false, false}, - {Info, logFeedbackErrorE, "ERROR", true, true, false}, - {Warn, logFeedbackInfo, "", false, false, false}, - {Warn, logFeedbackError, "ERROR", false, false, false}, - {Warn, logFeedbackError, "ERROR", true, false, false}, - {Warn, logFeedbackErrorE, "ERROR", false, false, false}, - {Warn, logFeedbackErrorE, "ERROR", true, true, false}, - {Error, logFeedbackInfo, "", false, false, false}, - {Error, logFeedbackError, "ERROR", false, false, true}, - {Error, logFeedbackError, "ERROR", false, false, false}, - {Error, logFeedbackError, "ERROR", true, false, false}, - {Error, logFeedbackErrorE, "ERROR", false, false, false}, - {Error, logFeedbackErrorE, "ERROR", true, true, false}, - {Fatal, logFeedbackInfo, "", false, false, false}, - {Fatal, logFeedbackError, "", false, false, false}, - {Fatal, logFeedbackErrorE, "", false, false, false}, - } -} - -func TestLogWritesMessagesToFeedbackLog(t *testing.T) { - defer clearConfig() - defer clearRegistry("TestLogName") - for i, tc := range getFeedbackLogLevelTestCase() { - ctx := context.Background() - b := &bytes.Buffer{} - logger, logPath := getLogger(t, func(c *Config) { - c.Level = NewLogLevelOption(tc.LogLevel) - c.EnableStackTrace = NewEnableStackTraceOption(tc.WithStackTrace) - c.EnableCaller = NewEnableCallerOption(tc.WithCaller) - c.Pipe = b - }) - logMessage := "test log message" - - tc.LogFunc(logger, ctx, logMessage) - logger.Flush() - - logLines, err := getLogLines(t, logPath) - if err != nil { - t.Fatal(err) - } - - if tc.ExpectedLogLevel == "" { - assert.Len(t, logLines, 0) - } else { - if len(logLines) != 1 { - t.Fatalf("expecting exactly 1 log line but got %d lines for tc %d", len(logLines), i) - } - - assert.Equal(t, logMessage, logLines[0]["msg"]) - assert.Equal(t, tc.ExpectedLogLevel, logLines[0]["level"]) - assert.Equal(t, 
"TestLogName", logLines[0]["logger"]) - _, hasStackTrace := logLines[0]["stacktrace"] - assert.Equal(t, tc.ExpectStackTrace, hasStackTrace) - _, hasCaller := logLines[0]["caller"] - assert.Equal(t, tc.WithCaller, hasCaller) - } - - if tc.ExpectStackTrace { - assert.Contains(t, b.String(), logMessage+"\ntest error. Stack:") - } else { - assert.Equal(t, logMessage+"\n", b.String()) - } - - clearRegistry("TestLogName") - } -} - -func TestLogWritesMessagesToLogGivenPipeWithValidPath(t *testing.T) { - defer clearConfig() - defer clearRegistry("TestLogName") - ctx := context.Background() - b := &bytes.Buffer{} - logger, logPath := getLogger(t, func(c *Config) { - c.Level = NewLogLevelOption(Info) - c.Pipe = b - }) - logMessage := "test log message" - - logger.Info(ctx, logMessage) - logger.Flush() - - logLines, err := getLogLines(t, logPath) - if err != nil { - t.Fatal(err) - } - - if len(logLines) != 1 { - t.Fatalf("expecting exactly 1 log line but got %d lines", len(logLines)) - } - - assert.Equal(t, logMessage, logLines[0]["msg"]) - assert.Equal(t, "INFO", logLines[0]["level"]) - assert.Equal(t, "TestLogName", logLines[0]["logger"]) - // caller is disabled by default - assert.NotContains(t, logLines[0], "logging_test.go") -} - -func TestLogDoesNotWriteMessagesToLogGivenOverrideForAnotherLoggerReducingLogLevel(t *testing.T) { - defer clearConfig() - defer clearRegistry("TestLogName") - ctx := context.Background() - logger, logPath := getLogger(t, func(c *Config) { - c.Level = NewLogLevelOption(Fatal) - c.OverridesByLoggerName = map[string]Config{ - "not this logger": {Level: NewLogLevelOption(Info)}, - } - }) - logMessage := "test log message" - - logger.Info(ctx, logMessage) - logger.Flush() - - logLines, err := getLogLines(t, logPath) - if err != nil { - t.Fatal(err) - } - - assert.Len(t, logLines, 0) -} - -func TestLogWritesMessagesToLogGivenOverrideForLoggerReducingLogLevel(t *testing.T) { - defer clearConfig() - defer clearRegistry("TestLogName") - ctx := 
context.Background() - logger, logPath := getLogger(t, func(c *Config) { - c.Level = NewLogLevelOption(Fatal) - c.OverridesByLoggerName = map[string]Config{ - "TestLogName": {Level: NewLogLevelOption(Info)}, - } - }) - logMessage := "test log message" - - logger.Info(ctx, logMessage) - logger.Flush() - - logLines, err := getLogLines(t, logPath) - if err != nil { - t.Fatal(err) - } - - if len(logLines) != 1 { - t.Fatalf("expecting exactly 1 log line but got %d lines", len(logLines)) - } - - assert.Equal(t, logMessage, logLines[0]["msg"]) - assert.Equal(t, "INFO", logLines[0]["level"]) - assert.Equal(t, "TestLogName", logLines[0]["logger"]) - // caller is disabled by default - assert.NotContains(t, logLines[0], "logging_test.go") -} - -func TestLogWritesMessagesToLogGivenOverrideForLoggerRaisingLogLevel(t *testing.T) { - defer clearConfig() - defer clearRegistry("TestLogName") - ctx := context.Background() - logger, logPath := getLogger(t, func(c *Config) { - c.Level = NewLogLevelOption(Info) - c.OverridesByLoggerName = map[string]Config{ - "not this logger": {Level: NewLogLevelOption(Fatal)}, - } - }) - logMessage := "test log message" - - logger.Info(ctx, logMessage) - logger.Flush() - - logLines, err := getLogLines(t, logPath) - if err != nil { - t.Fatal(err) - } - - if len(logLines) != 1 { - t.Fatalf("expecting exactly 1 log line but got %d lines", len(logLines)) - } - - assert.Equal(t, logMessage, logLines[0]["msg"]) - assert.Equal(t, "INFO", logLines[0]["level"]) - assert.Equal(t, "TestLogName", logLines[0]["logger"]) - // caller is disabled by default - assert.NotContains(t, logLines[0], "logging_test.go") -} - -func TestLogDoesNotWriteMessagesToLogGivenOverrideForLoggerRaisingLogLevel(t *testing.T) { - defer clearConfig() - defer clearRegistry("TestLogName") - ctx := context.Background() - logger, logPath := getLogger(t, func(c *Config) { - c.Level = NewLogLevelOption(Info) - c.OverridesByLoggerName = map[string]Config{ - "TestLogName": {Level: 
NewLogLevelOption(Fatal)}, - } - }) - logMessage := "test log message" - - logger.Info(ctx, logMessage) - logger.Flush() - - logLines, err := getLogLines(t, logPath) - if err != nil { - t.Fatal(err) - } - - assert.Len(t, logLines, 0) -} - -func TestLogDoesNotWriteMessagesToLogGivenOverrideUpdatedForAnotherLoggerReducingLogLevel(t *testing.T) { - defer clearConfig() - defer clearRegistry("TestLogName") - ctx := context.Background() - logger, logPath := getLogger(t, func(c *Config) { - c.Level = NewLogLevelOption(Fatal) - }) - SetConfig(Config{ - OverridesByLoggerName: map[string]Config{ - "not this logger": {Level: NewLogLevelOption(Info)}, - }, - }) - logMessage := "test log message" - - logger.Info(ctx, logMessage) - logger.Flush() - - logLines, err := getLogLines(t, logPath) - if err != nil { - t.Fatal(err) - } - - assert.Len(t, logLines, 0) -} - -func TestLogWritesMessagesToLogGivenOverrideUpdatedForLoggerReducingLogLevel(t *testing.T) { - defer clearConfig() - defer clearRegistry("TestLogName") - ctx := context.Background() - logger, logPath := getLogger(t, func(c *Config) { - c.Level = NewLogLevelOption(Fatal) - }) - SetConfig(Config{ - OverridesByLoggerName: map[string]Config{ - "TestLogName": {Level: NewLogLevelOption(Info)}, - }, - }) - logMessage := "test log message" - - logger.Info(ctx, logMessage) - logger.Flush() - - logLines, err := getLogLines(t, logPath) - if err != nil { - t.Fatal(err) - } - - if len(logLines) != 1 { - t.Fatalf("expecting exactly 1 log line but got %d lines", len(logLines)) - } - - assert.Equal(t, logMessage, logLines[0]["msg"]) - assert.Equal(t, "INFO", logLines[0]["level"]) - assert.Equal(t, "TestLogName", logLines[0]["logger"]) - // caller is disabled by default - assert.NotContains(t, logLines[0], "logging_test.go") -} - -func TestLogWritesMessagesToLogGivenOverrideUpdatedForAnotherLoggerRaisingLogLevel(t *testing.T) { - defer clearConfig() - defer clearRegistry("TestLogName") - ctx := context.Background() - logger, logPath := 
getLogger(t, func(c *Config) { - c.Level = NewLogLevelOption(Info) - }) - SetConfig(Config{ - OverridesByLoggerName: map[string]Config{ - "not this logger": {Level: NewLogLevelOption(Fatal)}, - }, - }) - logMessage := "test log message" - - logger.Info(ctx, logMessage) - logger.Flush() - - logLines, err := getLogLines(t, logPath) - if err != nil { - t.Fatal(err) - } - - if len(logLines) != 1 { - t.Fatalf("expecting exactly 1 log line but got %d lines", len(logLines)) - } - - assert.Equal(t, logMessage, logLines[0]["msg"]) - assert.Equal(t, "INFO", logLines[0]["level"]) - assert.Equal(t, "TestLogName", logLines[0]["logger"]) - // caller is disabled by default - assert.NotContains(t, logLines[0], "logging_test.go") -} - -func TestLogDoesNotWriteMessagesToLogGivenOverrideUpdatedForLoggerRaisingLogLevel(t *testing.T) { - defer clearConfig() - defer clearRegistry("TestLogName") - ctx := context.Background() - logger, logPath := getLogger(t, func(c *Config) { - c.Level = NewLogLevelOption(Info) - }) - SetConfig(Config{ - OverridesByLoggerName: map[string]Config{ - "TestLogName": {Level: NewLogLevelOption(Fatal)}, - }, - }) - logMessage := "test log message" - - logger.Info(ctx, logMessage) - logger.Flush() - - logLines, err := getLogLines(t, logPath) - if err != nil { - t.Fatal(err) - } - - assert.Len(t, logLines, 0) -} - -func TestGetGoLogger(t *testing.T) { - l := GetGoLogger("TestLogName") - assert.NotNil(t, l.ZapEventLogger) - assert.NotNil(t, l.logger) -} - -func TestGetGoLoggerAndApplyConfig(t *testing.T) { - l := GetGoLogger("TestLogName") - assert.NotNil(t, l.ZapEventLogger) - assert.NotNil(t, l.logger) - - b := &bytes.Buffer{} - l.ApplyConfig(Config{ - EncoderFormat: NewEncoderFormatOption(JSON), - Pipe: b, - }) - - l.ZapEventLogger.Info("some info") - - logLines, err := parseLines(b) - if err != nil { - t.Fatal(err) - } - - if len(logLines) != 1 { - t.Fatalf("expecting exactly 1 log line but got %d lines", len(logLines)) - } - assert.Equal(t, "some info", 
logLines[0]["msg"]) - assert.Equal(t, "INFO", logLines[0]["level"]) - assert.Equal(t, "TestLogName", logLines[0]["logger"]) -} - -func TestGetGoLoggerV2(t *testing.T) { - l := GetGoLoggerV2("TestLogName") - assert.NotNil(t, l.ZapEventLogger) - assert.NotNil(t, l.logger) -} - -func TestGetGoLoggerV2AndApplyConfig(t *testing.T) { - l := GetGoLoggerV2("TestLogName") - assert.NotNil(t, l.ZapEventLogger) - assert.NotNil(t, l.logger) - - b := &bytes.Buffer{} - l.ApplyConfig(Config{ - EncoderFormat: NewEncoderFormatOption(JSON), - Pipe: b, - }) - - l.ZapEventLogger.Info("some info") - - logLines, err := parseLines(b) - if err != nil { - t.Fatal(err) - } - - if len(logLines) != 1 { - t.Fatalf("expecting exactly 1 log line but got %d lines", len(logLines)) - } - assert.Equal(t, "some info", logLines[0]["msg"]) - assert.Equal(t, "INFO", logLines[0]["level"]) - assert.Equal(t, "TestLogName", logLines[0]["logger"]) -} - -type Option = func(*Config) - -func getLogger(t *testing.T, options ...Option) (Logger, string) { - dir := t.TempDir() - logPath := dir + "/log.txt" - name := "TestLogName" - logConfig := Config{ - EncoderFormat: NewEncoderFormatOption(JSON), - OutputPaths: []string{logPath}, - } - - for _, o := range options { - o(&logConfig) - } - - logger := MustNewLogger(name) - SetConfig(logConfig) - return logger, getFirstOutputPath(logConfig.OutputPaths) -} - -func getFirstOutputPath(outputPaths []string) string { - if len(outputPaths) == 0 { - return stderr - } - return outputPaths[0] -} - -var errloggingToConsole = errors.New("no file to open. 
Logging to console") - -func getLogLines(t *testing.T, logPath string) ([]map[string]any, error) { - if logPath == stderr { - return nil, errloggingToConsole - } - - file, err := os.Open(logPath) - if err != nil { - return nil, err - } - defer func() { - err := file.Close() - if err != nil { - t.Error(err) - } - }() - - return parseLines(file) -} - -func parseLines(r io.Reader) ([]map[string]any, error) { - fileScanner := bufio.NewScanner(r) - - fileScanner.Split(bufio.ScanLines) - - logLines := []map[string]any{} - for fileScanner.Scan() { - loggedLine := make(map[string]any) - err := json.Unmarshal(fileScanner.Bytes(), &loggedLine) - if err != nil { - return nil, err - } - logLines = append(logLines, loggedLine) - } - - return logLines, nil -} - -func clearRegistry(name string) { - for _, logger := range registry[name] { - logger.Flush() - } - registry[name] = []Logger{} -} - -func clearConfig() { - configMutex.Lock() - defer configMutex.Unlock() - - cachedConfig = Config{} -} diff --git a/logging/registry.go b/logging/registry.go deleted file mode 100644 index 9410498a72..0000000000 --- a/logging/registry.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. 
- -package logging - -import ( - "sync" -) - -var configMutex sync.RWMutex -var cachedConfig Config - -var registryMutex sync.Mutex -var registry = map[string][]Logger{ - "reprovider.simple": {GetGoLogger("reprovider.simple")}, - "badger": {GetGoLoggerV2("badger")}, -} - -func register(name string, logger Logger) { - registryMutex.Lock() - defer registryMutex.Unlock() - - loggers, exists := registry[name] - if !exists { - loggers = []Logger{} - } - loggers = append(loggers, logger) - registry[name] = loggers -} - -func setConfig(newConfig Config) Config { - configMutex.Lock() - defer configMutex.Unlock() - - cachedConfig = cachedConfig.with(newConfig) - return cachedConfig -} - -func updateLoggers(config Config) { - registryMutex.Lock() - defer registryMutex.Unlock() - - for loggerName, loggers := range registry { - newLoggerConfig := config.forLogger(loggerName) - - for _, logger := range loggers { - logger.ApplyConfig(newLoggerConfig) - } - } -} diff --git a/net/peer.go b/net/peer.go index 4ca09c17eb..022d7b4099 100644 --- a/net/peer.go +++ b/net/peer.go @@ -39,7 +39,6 @@ import ( "github.com/sourcenetwork/defradb/datastore" "github.com/sourcenetwork/defradb/errors" "github.com/sourcenetwork/defradb/events" - "github.com/sourcenetwork/defradb/logging" "github.com/sourcenetwork/defradb/merkle/clock" pb "github.com/sourcenetwork/defradb/net/pb" ) @@ -149,8 +148,8 @@ func (p *Peer) Start() error { log.InfoContext( p.ctx, "Failure while reconnecting to a known peer", - logging.NewKV("peer", id), - logging.NewKV("error", err), + "peer", id, + "error", err, ) } }(id) diff --git a/tests/bench/bench_util.go b/tests/bench/bench_util.go index d09aa74b68..a7bc5d3e21 100644 --- a/tests/bench/bench_util.go +++ b/tests/bench/bench_util.go @@ -24,7 +24,6 @@ import ( "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/errors" - "github.com/sourcenetwork/defradb/logging" "github.com/sourcenetwork/defradb/tests/bench/fixtures" testutils 
"github.com/sourcenetwork/defradb/tests/integration" ) @@ -40,8 +39,6 @@ var ( ) func init() { - logging.SetConfig(logging.Config{Level: logging.NewLogLevelOption(logging.Error)}) - // assign if not empty if s := os.Getenv(storageEnvName); s != "" { storage = s diff --git a/tests/gen/cli/util_test.go b/tests/gen/cli/util_test.go index 10bd98ca99..7f58fbe0dd 100644 --- a/tests/gen/cli/util_test.go +++ b/tests/gen/cli/util_test.go @@ -16,6 +16,7 @@ import ( "testing" badger "github.com/sourcenetwork/badger/v4" + "github.com/sourcenetwork/corelog" "github.com/stretchr/testify/require" "github.com/sourcenetwork/defradb/client" @@ -23,10 +24,9 @@ import ( "github.com/sourcenetwork/defradb/db" "github.com/sourcenetwork/defradb/errors" httpapi "github.com/sourcenetwork/defradb/http" - "github.com/sourcenetwork/defradb/logging" ) -var log = logging.MustNewLogger("cli") +var log = corelog.NewLogger("cli") type defraInstance struct { db client.DB @@ -39,9 +39,9 @@ func (di *defraInstance) close(ctx context.Context) { } func start(ctx context.Context) (*defraInstance, error) { - log.FeedbackInfo(ctx, "Starting DefraDB service...") + log.InfoContext(ctx, "Starting DefraDB service...") - log.FeedbackInfo(ctx, "Building new memory store") + log.InfoContext(ctx, "Building new memory store") opts := badgerds.Options{Options: badger.DefaultOptions("").WithInMemory(true)} rootstore, err := badgerds.NewDatastore("", &opts) From 2a1486aa3fc2a3a4e453747ae3cbda703e983b5b Mon Sep 17 00:00:00 2001 From: Keenan Nemetz Date: Wed, 13 Mar 2024 12:07:27 -0700 Subject: [PATCH 3/6] add logging config docs --- docs/config.md | 26 +++++++++++++++++++++++++- 1 file changed, 25 insertions(+), 1 deletion(-) diff --git a/docs/config.md b/docs/config.md index 5f8985f71c..0d66fdd564 100644 --- a/docs/config.md +++ b/docs/config.md @@ -63,4 +63,28 @@ https://docs.libp2p.io/concepts/addressing/ Enable libp2p's Circuit relay transport protocol. Defaults to `false`. 
-https://docs.libp2p.io/concepts/circuit-relay/ \ No newline at end of file +https://docs.libp2p.io/concepts/circuit-relay/ + +## `log.level` + +Log level to use. Options are `debug`, `info`, `error`, `fatal`. Defaults to `info`. + +## `log.output` + +Log output path. Options are `stderr` or `stdout`. Defaults to `stderr`. + +## `log.format` + +Log format to use. Options are `text` or `json`. Defaults to `text`. + +## `log.stacktrace` + +Include stacktrace in error and fatal logs. Defaults to `false`. + +## `log.source` + +Include source location in logs. Defaults to `false`. + +## `log.overrides` + +Logger config overrides. Format `,=,...;,...`. From e7edde7b4105edd0d52e230fb44b04c884ae9032 Mon Sep 17 00:00:00 2001 From: Keenan Nemetz Date: Fri, 15 Mar 2024 10:40:00 -0700 Subject: [PATCH 4/6] update to corelog v0.0.5 --- datastore/blockstore.go | 1 - db/db.go | 7 +-- go.mod | 2 +- go.sum | 4 +- http/logger.go | 10 ++--- merkle/clock/clock.go | 9 +--- merkle/clock/heads.go | 7 +-- merkle/crdt/composite.go | 2 - merkle/crdt/merklecrdt.go | 5 --- net/client.go | 14 ------ net/dag.go | 20 +++++---- net/dag_test.go | 1 - net/node.go | 9 ++-- net/peer.go | 66 ++++++++++++---------------- net/process.go | 22 ++++------ net/server.go | 51 +++++---------------- tests/bench/bench_util.go | 2 +- tests/integration/explain.go | 3 +- tests/integration/net/order/utils.go | 6 +-- tests/integration/p2p.go | 3 +- tests/integration/utils2.go | 22 +++++----- 21 files changed, 96 insertions(+), 170 deletions(-) diff --git a/datastore/blockstore.go b/datastore/blockstore.go index ba994ca30b..be25894a3d 100644 --- a/datastore/blockstore.go +++ b/datastore/blockstore.go @@ -64,7 +64,6 @@ func (bs *bstore) HashOnRead(enabled bool) { // Get returns a block from the blockstore. 
func (bs *bstore) Get(ctx context.Context, k cid.Cid) (blocks.Block, error) { if !k.Defined() { - log.ErrorContext(ctx, "Undefined CID in blockstore") return nil, ipld.ErrNotFound{Cid: k} } bdata, err := bs.store.Get(ctx, dshelp.MultihashToDsKey(k.Hash())) diff --git a/db/db.go b/db/db.go index 13b83f39fa..30036fc55d 100644 --- a/db/db.go +++ b/db/db.go @@ -109,7 +109,6 @@ func NewDB(ctx context.Context, rootstore datastore.RootStore, options ...Option } func newDB(ctx context.Context, rootstore datastore.RootStore, options ...Option) (*implicitTxnDB, error) { - log.DebugContext(ctx, "Loading: internal datastores") multistore := datastore.MultiStoreFrom(rootstore) parser, err := graphql.NewParser() @@ -197,7 +196,6 @@ func (db *db) initialize(ctx context.Context) error { } defer txn.Discard(ctx) - log.DebugContext(ctx, "Checking if DB has already been initialized...") exists, err := txn.Systemstore().Has(ctx, ds.NewKey("init")) if err != nil && !errors.Is(err, ds.ErrNotFound) { return err @@ -205,7 +203,6 @@ func (db *db) initialize(ctx context.Context) error { // if we're loading an existing database, just load the schema // and migrations and finish initialization if exists { - log.DebugContext(ctx, "DB has already been initialized, continuing") err = db.loadSchema(ctx, txn) if err != nil { return err @@ -222,8 +219,6 @@ func (db *db) initialize(ctx context.Context) error { return txn.Commit(ctx) } - log.DebugContext(ctx, "Opened a new DB, needs full initialization") - // init meta data // collection sequence _, err = db.getSequence(ctx, txn, core.CollectionIDSequenceKey{}) @@ -286,7 +281,7 @@ func printStore(ctx context.Context, store datastore.DSReaderWriter) error { } for r := range results.Next() { - log.InfoContext(ctx, "", r.Key, r.Value) + log.InfoContext(ctx, "", corelog.Any(r.Key, r.Value)) } return results.Close() diff --git a/go.mod b/go.mod index 9c0b483acc..c28d9a46d4 100644 --- a/go.mod +++ b/go.mod @@ -30,7 +30,7 @@ require ( 
github.com/multiformats/go-multibase v0.2.0 github.com/multiformats/go-multihash v0.2.3 github.com/sourcenetwork/badger/v4 v4.2.1-0.20231113215945-a63444ca5276 - github.com/sourcenetwork/corelog v0.0.4 + github.com/sourcenetwork/corelog v0.0.5 github.com/sourcenetwork/go-libp2p-pubsub-rpc v0.0.13 github.com/sourcenetwork/graphql-go v0.7.10-0.20231113214537-a9560c1898dd github.com/sourcenetwork/immutable v0.3.0 diff --git a/go.sum b/go.sum index beab7af6ef..1aba7a05e4 100644 --- a/go.sum +++ b/go.sum @@ -510,8 +510,8 @@ github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIK github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA= github.com/sourcenetwork/badger/v4 v4.2.1-0.20231113215945-a63444ca5276 h1:TpQDDPfucDgCNH0NVqVUk6SSq6T6G8p9HIocmwZh9Tg= github.com/sourcenetwork/badger/v4 v4.2.1-0.20231113215945-a63444ca5276/go.mod h1:lxiZTDBw0vheFMqSwX2OvB6RTDI1+/UtVCSU4rpThFM= -github.com/sourcenetwork/corelog v0.0.4 h1:DGAzVeY+XODrgwmetPAER3LCCoNyAf2cldlqejCnp4Y= -github.com/sourcenetwork/corelog v0.0.4/go.mod h1:mXsBA4ujUt0lAkDFoHoXuaIQjhdeXi+RfDNV7ZTiy5E= +github.com/sourcenetwork/corelog v0.0.5 h1:7DynoceolaivI/Pd10KJpdYN7Vm6boJSGHGZss7Febg= +github.com/sourcenetwork/corelog v0.0.5/go.mod h1:mXsBA4ujUt0lAkDFoHoXuaIQjhdeXi+RfDNV7ZTiy5E= github.com/sourcenetwork/go-libp2p-pubsub-rpc v0.0.13 h1:d/PeGZutd5NcDr6ltAv8ubN5PxsHMp1YUnhHY/QCWB4= github.com/sourcenetwork/go-libp2p-pubsub-rpc v0.0.13/go.mod h1:jUoQv592uUX1u7QBjAY4C+l24X9ArhPfifOqXpDHz4U= github.com/sourcenetwork/graphql-go v0.7.10-0.20231113214537-a9560c1898dd h1:lmpW39/8wPJ0khWRhOcj7Bj0HYKbSmQ8rXMJw1cMB8U= diff --git a/http/logger.go b/http/logger.go index 21dba68e0f..c4e715f695 100644 --- a/http/logger.go +++ b/http/logger.go @@ -30,11 +30,11 @@ func (e *logEntry) Write(status, bytes int, header http.Header, elapsed time.Dur log.InfoContext( e.req.Context(), "Request", - "Method", e.req.Method, - "Path", 
e.req.URL.Path, - "Status", status, - "LengthBytes", bytes, - "ElapsedTime", elapsed.String(), + corelog.String("Method", e.req.Method), + corelog.String("Path", e.req.URL.Path), + corelog.Int("Status", status), + corelog.Int("LengthBytes", bytes), + corelog.Duration("ElapsedTime", elapsed), ) } diff --git a/merkle/clock/clock.go b/merkle/clock/clock.go index a6b310f7ea..3f1ae47cf6 100644 --- a/merkle/clock/clock.go +++ b/merkle/clock/clock.go @@ -122,7 +122,6 @@ func (mc *MerkleClock) ProcessNode( nodeCid := node.Cid() priority := delta.GetPriority() - log.DebugContext(ctx, "Running ProcessNode", "CID", nodeCid) err := mc.crdt.Merge(ctx, delta) if err != nil { return NewErrMergingDelta(nodeCid, err) @@ -131,16 +130,13 @@ func (mc *MerkleClock) ProcessNode( links := node.Links() // check if we have any HEAD links hasHeads := false - log.DebugContext(ctx, "Stepping through node links") for _, l := range links { - log.DebugContext(ctx, "Checking link", "Name", l.Name, "CID", l.Cid) if l.Name == "_head" { hasHeads = true break } } if !hasHeads { // reached the bottom, at a leaf - log.DebugContext(ctx, "No heads found") err := mc.headset.Write(ctx, nodeCid, priority) if err != nil { return NewErrAddingHead(nodeCid, err) @@ -149,14 +145,12 @@ func (mc *MerkleClock) ProcessNode( for _, l := range links { linkCid := l.Cid - log.DebugContext(ctx, "Scanning for replacement heads", "Child", linkCid) isHead, err := mc.headset.IsHead(ctx, linkCid) if err != nil { return NewErrCheckingHead(linkCid, err) } if isHead { - log.DebugContext(ctx, "Found head, replacing!") // reached one of the current heads, replace it with the tip // of current branch err = mc.headset.Replace(ctx, linkCid, nodeCid, priority) @@ -174,14 +168,13 @@ func (mc *MerkleClock) ProcessNode( if known { // we reached a non-head node in the known tree. 
// This means our root block is a new head - log.DebugContext(ctx, "Adding head") err := mc.headset.Write(ctx, nodeCid, priority) if err != nil { log.ErrorContextE( ctx, "Failure adding head (when root is a new head)", err, - "Root", nodeCid, + corelog.Any("Root", nodeCid), ) // OR should this also return like below comment?? // return nil, errors.Wrap("error adding head (when root is new head): %s ", root, err) diff --git a/merkle/clock/heads.go b/merkle/clock/heads.go index 5f43b90172..2bbb04d2d9 100644 --- a/merkle/clock/heads.go +++ b/merkle/clock/heads.go @@ -18,6 +18,7 @@ import ( cid "github.com/ipfs/go-cid" "github.com/ipfs/go-datastore/query" + "github.com/sourcenetwork/corelog" "github.com/sourcenetwork/defradb/core" "github.com/sourcenetwork/defradb/datastore" @@ -57,9 +58,9 @@ func (hh *heads) Replace(ctx context.Context, old cid.Cid, new cid.Cid, height u log.InfoContext( ctx, "Replacing DAG head", - "Old", old, - "CID", new, - "Height", height) + corelog.Any("Old", old), + corelog.Any("CID", new), + corelog.Uint64("Height", height)) err := hh.store.Delete(ctx, hh.key(old).ToDS()) if err != nil { diff --git a/merkle/crdt/composite.go b/merkle/crdt/composite.go index a922a81e02..f58813235a 100644 --- a/merkle/crdt/composite.go +++ b/merkle/crdt/composite.go @@ -59,7 +59,6 @@ func (m *MerkleCompositeDAG) Delete( ) (ipld.Node, uint64, error) { // Set() call on underlying CompositeDAG CRDT // persist/publish delta - log.DebugContext(ctx, "Applying delta-mutator 'Delete' on CompositeDAG") delta := m.reg.Set(links) delta.Status = client.Deleted nd, err := m.clock.AddDAGNode(ctx, delta) @@ -78,7 +77,6 @@ func (m *MerkleCompositeDAG) Save(ctx context.Context, data any) (ipld.Node, uin } // Set() call on underlying CompositeDAG CRDT // persist/publish delta - log.DebugContext(ctx, "Applying delta-mutator 'Set' on CompositeDAG") delta := m.reg.Set(value) nd, err := m.clock.AddDAGNode(ctx, delta) if err != nil { diff --git a/merkle/crdt/merklecrdt.go 
b/merkle/crdt/merklecrdt.go index 8c52d3688e..c96791d07c 100644 --- a/merkle/crdt/merklecrdt.go +++ b/merkle/crdt/merklecrdt.go @@ -17,17 +17,12 @@ import ( "context" ipld "github.com/ipfs/go-ipld-format" - "github.com/sourcenetwork/corelog" "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/core" "github.com/sourcenetwork/defradb/datastore" ) -var ( - log = corelog.NewLogger("merklecrdt") -) - type Stores interface { Datastore() datastore.DSReaderWriter DAGstore() datastore.DAGStore diff --git a/net/client.go b/net/client.go index 5757ddc422..414ee62e47 100644 --- a/net/client.go +++ b/net/client.go @@ -32,13 +32,6 @@ var ( // pushLog creates a pushLog request and sends it to another node // over libp2p grpc connection func (s *server) pushLog(ctx context.Context, evt events.Update, pid peer.ID) error { - log.DebugContext( - ctx, - "Preparing pushLog request", - "DocID", evt.DocID, - "CID", evt.Cid, - "SchemaRoot", evt.SchemaRoot) - body := &pb.PushLogRequest_Body{ DocID: []byte(evt.DocID), Cid: evt.Cid.Bytes(), @@ -52,13 +45,6 @@ func (s *server) pushLog(ctx context.Context, evt events.Update, pid peer.ID) er Body: body, } - log.DebugContext( - ctx, "Pushing log", - "DocID", evt.DocID, - "CID", evt.Cid, - "PeerID", pid, - ) - client, err := s.dial(pid) // grpc dial over P2P stream if err != nil { return NewErrPushLog(err) diff --git a/net/dag.go b/net/dag.go index b45c037feb..cc20629c0f 100644 --- a/net/dag.go +++ b/net/dag.go @@ -19,6 +19,7 @@ import ( "github.com/ipfs/go-cid" ipld "github.com/ipfs/go-ipld-format" + "github.com/sourcenetwork/corelog" ) var ( @@ -98,13 +99,6 @@ func (p *Peer) sendJobWorker() { // initialization in New(). 
func (p *Peer) dagWorker(jobs chan *dagJob) { for job := range jobs { - log.DebugContext( - p.ctx, - "Starting new job from DAG queue", - "Datastore Key", job.bp.dsKey, - "CID", job.cid, - ) - select { case <-p.ctx.Done(): // drain jobs from queue when we are done @@ -117,7 +111,11 @@ func (p *Peer) dagWorker(jobs chan *dagJob) { if j.bp.getter != nil && j.cid.Defined() { cNode, err := j.bp.getter.Get(p.ctx, j.cid) if err != nil { - log.ErrorContextE(p.ctx, "Failed to get node", err, "CID", j.cid) + log.ErrorContextE( + p.ctx, + "Failed to get node", + err, + corelog.Any("CID", j.cid)) j.session.Done() return } @@ -128,7 +126,11 @@ func (p *Peer) dagWorker(jobs chan *dagJob) { j.isComposite, ) if err != nil { - log.ErrorContextE(p.ctx, "Failed to process remote block", err, "CID", j.cid) + log.ErrorContextE( + p.ctx, + "Failed to process remote block", + err, + corelog.Any("CID", j.cid)) } } p.queuedChildren.Remove(j.cid) diff --git a/net/dag_test.go b/net/dag_test.go index 6cc471f6c7..ddd9e9aab3 100644 --- a/net/dag_test.go +++ b/net/dag_test.go @@ -188,7 +188,6 @@ func TestSendJobWorker_WithPeer_NoError(t *testing.T) { var getter ipld.NodeGetter = n2.Peer.newDAGSyncerTxn(txn2) if sessionMaker, ok := getter.(SessionDAGSyncer); ok { - log.DebugContext(ctx, "Upgrading DAGSyncer with a session") getter = sessionMaker.Session(ctx) } diff --git a/net/node.go b/net/node.go index a0af242ef6..a52e296712 100644 --- a/net/node.go +++ b/net/node.go @@ -38,6 +38,7 @@ import ( "github.com/libp2p/go-libp2p/core/routing" "github.com/multiformats/go-multiaddr" + "github.com/sourcenetwork/corelog" "github.com/sourcenetwork/go-libp2p-pubsub-rpc/finalizer" // @TODO: https://github.com/sourcenetwork/defradb/issues/1902 @@ -146,8 +147,8 @@ func NewNode( log.InfoContext( ctx, "Created LibP2P host", - "PeerId", h.ID(), - "Address", options.ListenAddresses, + corelog.Any("PeerId", h.ID()), + corelog.Any("Address", options.ListenAddresses), ) var ps *pubsub.PubSub @@ -213,10 +214,10 @@ 
func (n *Node) Bootstrap(addrs []peer.AddrInfo) { defer wg.Done() err := n.host.Connect(n.ctx, pinfo) if err != nil { - log.InfoContext(n.ctx, "Cannot connect to peer", "Error", err) + log.InfoContext(n.ctx, "Cannot connect to peer", corelog.Any("Error", err)) return } - log.InfoContext(n.ctx, "Connected", "PeerID", pinfo.ID) + log.InfoContext(n.ctx, "Connected", corelog.Any("PeerID", pinfo.ID)) atomic.AddUint64(&connected, 1) }(pinfo) } diff --git a/net/peer.go b/net/peer.go index b084e77805..61711b3918 100644 --- a/net/peer.go +++ b/net/peer.go @@ -31,6 +31,7 @@ import ( "github.com/libp2p/go-libp2p/core/peer" peerstore "github.com/libp2p/go-libp2p/core/peerstore" "github.com/libp2p/go-libp2p/core/routing" + "github.com/sourcenetwork/corelog" "google.golang.org/grpc" "github.com/sourcenetwork/defradb/client" @@ -148,8 +149,8 @@ func (p *Peer) Start() error { log.InfoContext( p.ctx, "Failure while reconnecting to a known peer", - "peer", id, - "error", err, + corelog.Any("peer", id), + corelog.Any("error", err), ) } }(id) @@ -176,13 +177,16 @@ func (p *Peer) Start() error { go p.handleBroadcastLoop() } - log.InfoContext(p.ctx, "Starting P2P node", "P2P addresses", p.host.Addrs()) + log.InfoContext( + p.ctx, + "Starting P2P node", + corelog.Any("P2P addresses", p.host.Addrs())) // register the P2P gRPC server go func() { pb.RegisterServiceServer(p.p2pRPC, p.server) if err := p.p2pRPC.Serve(p2plistener); err != nil && !errors.Is(err, grpc.ErrServerStopped) { - log.FatalContextE(p.ctx, "Fatal P2P RPC server error", err) + log.ErrorContextE(p.ctx, "Fatal P2P RPC server error", err) } }() @@ -211,12 +215,12 @@ func (p *Peer) Close() { // close event emitters if p.server.pubSubEmitter != nil { if err := p.server.pubSubEmitter.Close(); err != nil { - log.InfoContext(p.ctx, "Could not close pubsub event emitter", "Error", err.Error()) + log.InfoContext(p.ctx, "Could not close pubsub event emitter", corelog.Any("Error", err.Error())) } } if p.server.pushLogEmitter != nil { 
if err := p.server.pushLogEmitter.Close(); err != nil { - log.InfoContext(p.ctx, "Could not close push log event emitter", "Error", err.Error()) + log.InfoContext(p.ctx, "Could not close push log event emitter", corelog.Any("Error", err.Error())) } } @@ -238,9 +242,7 @@ func (p *Peer) Close() { // handleBroadcast loop manages the transition of messages // from the internal broadcaster to the external pubsub network func (p *Peer) handleBroadcastLoop() { - log.DebugContext(p.ctx, "Waiting for messages on internal broadcaster") for { - log.DebugContext(p.ctx, "Handling internal broadcast bus message") update, isOpen := <-p.updateChannel if !isOpen { return @@ -254,7 +256,7 @@ func (p *Peer) handleBroadcastLoop() { } else if update.Priority > 1 { err = p.handleDocUpdateLog(update) } else { - log.InfoContext(p.ctx, "Skipping log with invalid priority of 0", "CID", update.Cid) + log.InfoContext(p.ctx, "Skipping log with invalid priority of 0", corelog.Any("CID", update.Cid)) } if err != nil { @@ -271,19 +273,13 @@ func (p *Peer) RegisterNewDocument( nd ipld.Node, schemaRoot string, ) error { - log.DebugContext( - p.ctx, - "Registering a new document for our peer node", - "DocID", docID.String(), - ) - // register topic if err := p.server.addPubSubTopic(docID.String(), !p.server.hasPubSubTopic(schemaRoot)); err != nil { log.ErrorContextE( p.ctx, "Failed to create new pubsub topic", err, - "DocID", docID.String(), + corelog.String("DocID", docID.String()), ) return err } @@ -328,9 +324,9 @@ func (p *Peer) pushToReplicator( ctx, "Failed to get heads", err, - "DocID", docIDResult.ID.String(), - "PeerID", pid, - "Collection", collection.Name()) + corelog.String("DocID", docIDResult.ID.String()), + corelog.Any("PeerID", pid), + corelog.Any("Collection", collection.Name())) continue } // loop over heads, get block, make the required logs, and send @@ -338,16 +334,16 @@ func (p *Peer) pushToReplicator( blk, err := txn.DAGstore().Get(ctx, c) if err != nil { 
log.ErrorContextE(ctx, "Failed to get block", err, - "CID", c, - "PeerID", pid, - "Collection", collection.Name()) + corelog.Any("CID", c), + corelog.Any("PeerID", pid), + corelog.Any("Collection", collection.Name())) continue } // @todo: remove encode/decode loop for core.Log data nd, err := dag.DecodeProtobuf(blk.RawData()) if err != nil { - log.ErrorContextE(ctx, "Failed to decode protobuf", err, "CID", c) + log.ErrorContextE(ctx, "Failed to decode protobuf", err, corelog.Any("CID", c)) continue } @@ -363,8 +359,8 @@ func (p *Peer) pushToReplicator( ctx, "Failed to replicate log", err, - "CID", c, - "PeerID", pid, + corelog.Any("CID", c), + corelog.Any("PeerID", pid), ) } } @@ -396,7 +392,7 @@ func (p *Peer) loadReplicators(ctx context.Context) error { // This will be used during connection and stream creation by libp2p. p.host.Peerstore().AddAddrs(rep.Info.ID, rep.Info.Addrs, peerstore.PermanentAddrTTL) - log.InfoContext(ctx, "loaded replicators from datastore", "Replicator", rep) + log.InfoContext(ctx, "loaded replicators from datastore", corelog.Any("Replicator", rep)) } return nil @@ -432,7 +428,7 @@ func (p *Peer) handleDocCreateLog(evt events.Update) error { return err } // push to each peer (replicator) - p.pushLogToReplicators(p.ctx, evt) + p.pushLogToReplicators(evt) return nil } @@ -442,12 +438,6 @@ func (p *Peer) handleDocUpdateLog(evt events.Update) error { if err != nil { return NewErrFailedToGetDocID(err) } - log.DebugContext( - p.ctx, - "Preparing pubsub pushLog request from broadcast", - "DocID", docID, - "CID", evt.Cid, - "SchemaRoot", evt.SchemaRoot) body := &pb.PushLogRequest_Body{ DocID: []byte(docID.String()), @@ -463,7 +453,7 @@ func (p *Peer) handleDocUpdateLog(evt events.Update) error { } // push to each peer (replicator) - p.pushLogToReplicators(p.ctx, evt) + p.pushLogToReplicators(evt) if err := p.server.publishLog(p.ctx, evt.DocID, req); err != nil { return NewErrPublishingToDocIDTopic(err, evt.Cid.String(), evt.DocID) @@ -476,7 +466,7 
@@ func (p *Peer) handleDocUpdateLog(evt events.Update) error { return nil } -func (p *Peer) pushLogToReplicators(ctx context.Context, lg events.Update) { +func (p *Peer) pushLogToReplicators(lg events.Update) { // push to each peer (replicator) peers := make(map[string]struct{}) for _, peer := range p.ps.ListPeers(lg.DocID) { @@ -503,9 +493,9 @@ func (p *Peer) pushLogToReplicators(ctx context.Context, lg events.Update) { p.ctx, "Failed pushing log", err, - "DocID", lg.DocID, - "CID", lg.Cid, - "PeerID", peerID) + corelog.String("DocID", lg.DocID), + corelog.Any("CID", lg.Cid), + corelog.Any("PeerID", peerID)) } }(pid) } diff --git a/net/process.go b/net/process.go index 02a41432b1..6779ada29f 100644 --- a/net/process.go +++ b/net/process.go @@ -22,6 +22,7 @@ import ( blocks "github.com/ipfs/go-block-format" "github.com/ipfs/go-cid" ipld "github.com/ipfs/go-ipld-format" + "github.com/sourcenetwork/corelog" "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/core" @@ -68,8 +69,8 @@ func (bp *blockProcessor) mergeBlocks(ctx context.Context) { ctx, "Failed to process block", err, - "DocID", bp.dsKey.DocID, - "CID", nd.Cid(), + corelog.String("DocID", bp.dsKey.DocID), + corelog.Any("CID", nd.Cid()), ) } } @@ -77,7 +78,7 @@ func (bp *blockProcessor) mergeBlocks(ctx context.Context) { // processBlock merges the block and its children to the datastore and sets the head accordingly. 
func (bp *blockProcessor) processBlock(ctx context.Context, nd ipld.Node, field string) error { - crdt, err := initCRDTForType(ctx, bp.txn, bp.col, bp.dsKey, field) + crdt, err := initCRDTForType(bp.txn, bp.col, bp.dsKey, field) if err != nil { return err } @@ -110,8 +111,8 @@ func (bp *blockProcessor) processBlock(ctx context.Context, nd ipld.Node, field ctx, "Failed to process block", err, - "DocID", bp.dsKey.DocID, - "CID", nd.Cid(), + corelog.String("DocID", bp.dsKey.DocID), + corelog.Any("CID", nd.Cid()), ) } } @@ -120,7 +121,6 @@ func (bp *blockProcessor) processBlock(ctx context.Context, nd ipld.Node, field } func initCRDTForType( - ctx context.Context, txn datastore.Txn, col client.Collection, dsKey core.DataStoreKey, @@ -130,7 +130,6 @@ func initCRDTForType( var ctype client.CType description := col.Description() if field == "" { // empty field name implies composite type - ctype = client.COMPOSITE key = base.MakeDataStoreKeyWithCollectionDescription( description, ).WithInstanceInfo( @@ -139,7 +138,6 @@ func initCRDTForType( core.COMPOSITE_NAMESPACE, ) - log.DebugContext(ctx, "Got CRDT Type", "CType", ctype, "Field", field) return merklecrdt.NewMerkleCompositeDAG( txn, core.NewCollectionSchemaVersionKey(col.Schema().VersionID, col.ID()), @@ -156,7 +154,6 @@ func initCRDTForType( fieldID := fd.ID.String() key = base.MakeDataStoreKeyWithCollectionDescription(description).WithInstanceInfo(dsKey).WithFieldId(fieldID) - log.DebugContext(ctx, "Got CRDT Type", "CType", ctype, "Field", field) return merklecrdt.InstanceWithStore( txn, core.NewCollectionSchemaVersionKey(col.Schema().VersionID, col.ID()), @@ -182,8 +179,6 @@ func (bp *blockProcessor) processRemoteBlock( nd ipld.Node, isComposite bool, ) error { - log.DebugContext(ctx, "Running processLog") - if err := bp.txn.DAGstore().Put(ctx, nd); err != nil { return err } @@ -220,12 +215,11 @@ func (bp *blockProcessor) handleChildBlocks( log.ErrorContext( ctx, "Failed to check for existing block", - "CID", 
link.Cid, - "ERROR", err, + corelog.Any("CID", link.Cid), + corelog.Any("ERROR", err), ) } if exist { - log.DebugContext(ctx, "Already have block locally, skipping.", "CID", link.Cid) continue } diff --git a/net/server.go b/net/server.go index 7fd2c534c8..41cfd3625b 100644 --- a/net/server.go +++ b/net/server.go @@ -21,6 +21,7 @@ import ( format "github.com/ipfs/go-ipld-format" "github.com/libp2p/go-libp2p/core/event" libpeer "github.com/libp2p/go-libp2p/core/peer" + "github.com/sourcenetwork/corelog" rpc "github.com/sourcenetwork/go-libp2p-pubsub-rpc" "github.com/sourcenetwork/immutable" "google.golang.org/grpc" @@ -95,7 +96,6 @@ func newServer(p *Peer, db client.DB, opts ...grpc.DialOption) (*server, error) } // Get all DocIDs across all collections in the DB - log.DebugContext(p.ctx, "Getting all existing DocIDs...") cols, err := s.db.GetCollections(s.peer.ctx, client.CollectionFetchOptions{}) if err != nil { return nil, err @@ -113,28 +113,22 @@ func newServer(p *Peer, db client.DB, opts ...grpc.DialOption) (*server, error) } for docID := range docIDChan { - log.DebugContext( - p.ctx, - "Registering existing DocID pubsub topic", - "DocID", docID.ID.String(), - ) if err := s.addPubSubTopic(docID.ID.String(), true); err != nil { return nil, err } i++ } } - log.DebugContext(p.ctx, "Finished registering all DocID pubsub topics", "Count", i) } var err error s.pubSubEmitter, err = s.peer.host.EventBus().Emitter(new(EvtPubSub)) if err != nil { - log.InfoContext(s.peer.ctx, "could not create event emitter", "Error", err.Error()) + log.InfoContext(s.peer.ctx, "could not create event emitter", corelog.String("Error", err.Error())) } s.pushLogEmitter, err = s.peer.host.EventBus().Emitter(new(EvtReceivedPushLog)) if err != nil { - log.InfoContext(s.peer.ctx, "could not create event emitter", "Error", err.Error()) + log.InfoContext(s.peer.ctx, "could not create event emitter", corelog.String("Error", err.Error())) } return s, nil @@ -199,8 +193,6 @@ func (s *server) 
PushLog(ctx context.Context, req *pb.PushLogRequest) (*pb.PushL if err != nil { return nil, err } - log.DebugContext(ctx, "Received a PushLog request", "PeerID", pid) - cid, err := cid.Cast(req.Body.Cid) if err != nil { return nil, err @@ -216,7 +208,7 @@ func (s *server) PushLog(ctx context.Context, req *pb.PushLogRequest) (*pb.PushL if s.pushLogEmitter != nil { byPeer, err := libpeer.Decode(req.Body.Creator) if err != nil { - log.InfoContext(ctx, "could not decode the PeerID of the log creator", "Error", err.Error()) + log.InfoContext(ctx, "could not decode the PeerID of the log creator", corelog.String("Error", err.Error())) } err = s.pushLogEmitter.Emit(EvtReceivedPushLog{ FromPeer: pid, @@ -225,7 +217,7 @@ func (s *server) PushLog(ctx context.Context, req *pb.PushLogRequest) (*pb.PushL if err != nil { // logging instead of returning an error because the event bus should // not break the PushLog execution. - log.InfoContext(ctx, "could not emit push log event", "Error", err.Error()) + log.InfoContext(ctx, "could not emit push log event", corelog.String("Error", err.Error())) } } }() @@ -242,7 +234,6 @@ func (s *server) PushLog(ctx context.Context, req *pb.PushLogRequest) (*pb.PushL return nil, errors.Wrap(fmt.Sprintf("failed to check for existing block %s", cid), err) } if exists { - log.DebugContext(ctx, fmt.Sprintf("Already have block %s locally, skipping.", cid)) return &pb.PushLogReply{}, nil } @@ -269,7 +260,6 @@ func (s *server) PushLog(ctx context.Context, req *pb.PushLogRequest) (*pb.PushL // Create a new DAG service with the current transaction var getter format.NodeGetter = s.peer.newDAGSyncerTxn(txn) if sessionMaker, ok := getter.(SessionDAGSyncer); ok { - log.DebugContext(ctx, "Upgrading DAGSyncer with a session") getter = sessionMaker.Session(ctx) } @@ -287,8 +277,8 @@ func (s *server) PushLog(ctx context.Context, req *pb.PushLogRequest) (*pb.PushL ctx, "Failed to process remote block", err, - "DocID", dsKey.DocID, - "CID", cid, + 
corelog.String("DocID", dsKey.DocID), + corelog.Any("CID", cid), ) } session.Wait() @@ -489,29 +479,11 @@ func (s *server) publishLog(ctx context.Context, topic string, req *pb.PushLogRe if _, err := t.Publish(ctx, data, rpc.WithIgnoreResponse(true)); err != nil { return errors.Wrap(fmt.Sprintf("failed publishing to thread %s", topic), err) } - - cid, err := cid.Cast(req.Body.Cid) - if err != nil { - return err - } - - log.DebugContext( - ctx, - "Published log", - "CID", cid, - "DocID", topic, - ) return nil } // pubSubMessageHandler handles incoming PushLog messages from the pubsub network. func (s *server) pubSubMessageHandler(from libpeer.ID, topic string, msg []byte) ([]byte, error) { - log.DebugContext( - s.peer.ctx, - "Handling new pubsub message", - "SenderID", from, - "Topic", topic, - ) req := new(pb.PushLogRequest) if err := proto.Unmarshal(msg, req); err != nil { - log.ErrorContextE(s.peer.ctx, "Failed to unmarshal pubsub message %s", err) + log.ErrorContextE(s.peer.ctx, "Failed to unmarshal pubsub message", err) @@ -522,7 +494,6 @@ func (s *server) pubSubMessageHandler(from libpeer.ID, topic string, msg []byte) Addr: addr{from}, }) if _, err := s.PushLog(ctx, req); err != nil { - log.ErrorContextE(ctx, "Failed pushing log for doc", err, "Topic", topic) return nil, errors.Wrap(fmt.Sprintf("Failed pushing log for doc %s", topic), err) } return nil, nil @@ -533,9 +504,9 @@ func (s *server) pubSubEventHandler(from libpeer.ID, topic string, msg []byte) { log.InfoContext( s.peer.ctx, "Received new pubsub event", - "SenderId", from, - "Topic", topic, - "Message", string(msg), + corelog.Any("SenderId", from), + corelog.String("Topic", topic), + corelog.String("Message", string(msg)), ) if s.pubSubEmitter != nil { @@ -543,7 +514,7 @@ func (s *server) pubSubEventHandler(from libpeer.ID, topic string, msg []byte) { Peer: from, }) if err != nil { - log.InfoContext(s.peer.ctx, "could not emit pubsub event", "Error", err.Error()) + log.InfoContext(s.peer.ctx, "could not emit pubsub event", corelog.String("Error", err.Error())) } } } diff --git 
a/tests/bench/bench_util.go b/tests/bench/bench_util.go index a7bc5d3e21..dac81d0ce2 100644 --- a/tests/bench/bench_util.go +++ b/tests/bench/bench_util.go @@ -175,7 +175,7 @@ func BackfillBenchmarkDB( log.InfoContext( ctx, - "Failed to commit TX for doc %s, retrying...\n", - "DocID", doc.ID(), + "Failed to commit TX for doc, retrying...", + corelog.Any("DocID", doc.ID()), ) continue } else if err != nil { diff --git a/tests/integration/explain.go b/tests/integration/explain.go index 72969586dc..eb44744e57 100644 --- a/tests/integration/explain.go +++ b/tests/integration/explain.go @@ -15,6 +15,7 @@ import ( "sort" "testing" + "github.com/sourcenetwork/corelog" "github.com/sourcenetwork/immutable" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -150,7 +151,7 @@ func assertExplainRequestResults( // Note: if returned gql result is `nil` this panics (the panic seems useful while testing). resultantData := actualResult.Data.([]map[string]any) - log.InfoContext(s.ctx, "", "FullExplainGraphResult", actualResult.Data) + log.InfoContext(s.ctx, "", corelog.Any("FullExplainGraphResult", actualResult.Data)) // Check if the expected full explain graph (if provided) matches the actual full explain graph // that is returned, if doesn't match we would like to still see a diff comparison (handy while debugging). 
diff --git a/tests/integration/net/order/utils.go b/tests/integration/net/order/utils.go index 59ad4aa754..58857e5b94 100644 --- a/tests/integration/net/order/utils.go +++ b/tests/integration/net/order/utils.go @@ -102,16 +102,16 @@ func setupDefraNode( // parse peers and bootstrap if len(peers) != 0 { - log.InfoContext(ctx, "Parsing bootstrap peers", "Peers", peers) + log.InfoContext(ctx, "Parsing bootstrap peers", corelog.Any("Peers", peers)) addrs, err := netutils.ParsePeers(peers) if err != nil { return nil, nil, errors.Wrap(fmt.Sprintf("failed to parse bootstrap peers %v", peers), err) } - log.InfoContext(ctx, "Bootstrapping with peers", "Addresses", addrs) + log.InfoContext(ctx, "Bootstrapping with peers", corelog.Any("Addresses", addrs)) n.Bootstrap(addrs) } - log.InfoContext(ctx, "Starting P2P node", "P2P addresses", n.PeerInfo().Addrs) + log.InfoContext(ctx, "Starting P2P node", corelog.Any("P2P addresses", n.PeerInfo().Addrs)) if err := n.Start(); err != nil { n.Close() return nil, nil, errors.Wrap("unable to start P2P listeners", err) diff --git a/tests/integration/p2p.go b/tests/integration/p2p.go index 6f8e84d20b..7c6a919373 100644 --- a/tests/integration/p2p.go +++ b/tests/integration/p2p.go @@ -18,6 +18,7 @@ import ( "github.com/sourcenetwork/defradb/tests/clients" "github.com/libp2p/go-libp2p/core/peer" + "github.com/sourcenetwork/corelog" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -148,7 +149,7 @@ func connectPeers( targetNode := s.nodes[cfg.TargetNodeID] addrs := []peer.AddrInfo{targetNode.PeerInfo()} - log.InfoContext(s.ctx, "Bootstrapping with peers", "Addresses", addrs) + log.InfoContext(s.ctx, "Bootstrapping with peers", corelog.Any("Addresses", addrs)) sourceNode.Bootstrap(addrs) // Bootstrap triggers a bunch of async stuff for which we have no good way of waiting on. 
It must be diff --git a/tests/integration/utils2.go b/tests/integration/utils2.go index 90fa807477..e1f8b0ee60 100644 --- a/tests/integration/utils2.go +++ b/tests/integration/utils2.go @@ -177,15 +177,15 @@ func executeTestCase( log.InfoContext( ctx, testCase.Description, - "database", dbt, - "client", clientType, - "mutationType", mutationType, - "databaseDir", databaseDir, - "changeDetector.Enabled", changeDetector.Enabled, - "changeDetector.SetupOnly", changeDetector.SetupOnly, - "changeDetector.SourceBranch", changeDetector.SourceBranch, - "changeDetector.TargetBranch", changeDetector.TargetBranch, - "changeDetector.Repository", changeDetector.Repository, + corelog.Any("database", dbt), + corelog.Any("client", clientType), + corelog.Any("mutationType", mutationType), + corelog.String("databaseDir", databaseDir), + corelog.Bool("changeDetector.Enabled", changeDetector.Enabled), + corelog.Bool("changeDetector.SetupOnly", changeDetector.SetupOnly), + corelog.String("changeDetector.SourceBranch", changeDetector.SourceBranch), + corelog.String("changeDetector.TargetBranch", changeDetector.TargetBranch), + corelog.String("changeDetector.Repository", changeDetector.Repository), ) startActionIndex, endActionIndex := getActionRange(t, testCase) @@ -777,7 +777,7 @@ func configureNode( n, err = net.NewNode(s.ctx, db, nodeOpts...) 
require.NoError(s.t, err) - log.InfoContext(s.ctx, "Starting P2P node", "P2P address", n.PeerInfo()) + log.InfoContext(s.ctx, "Starting P2P node", corelog.Any("P2P address", n.PeerInfo())) if err := n.Start(); err != nil { n.Close() require.NoError(s.t, err) @@ -1736,7 +1736,7 @@ func assertRequestResults( return true } - log.InfoContext(s.ctx, "", "RequestResults", result.Data) + log.InfoContext(s.ctx, "", corelog.Any("RequestResults", result.Data)) // compare results require.Equal(s.t, len(expectedResults), len(resultantData), From 4d69fc3ae33e647988a150712e6bbe0dd059cc99 Mon Sep 17 00:00:00 2001 From: Keenan Nemetz Date: Fri, 15 Mar 2024 10:47:46 -0700 Subject: [PATCH 5/6] update log config docs. update config tests. --- cli/config_test.go | 7 +++++++ docs/config.md | 2 +- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/cli/config_test.go b/cli/config_test.go index fbb546ad39..492774398c 100644 --- a/cli/config_test.go +++ b/cli/config_test.go @@ -51,4 +51,11 @@ func TestLoadConfigNotExist(t *testing.T) { assert.Equal(t, true, cfg.GetBool("net.pubsubenabled")) assert.Equal(t, false, cfg.GetBool("net.relay")) assert.Equal(t, []string{}, cfg.GetStringSlice("net.peers")) + + assert.Equal(t, "info", cfg.GetString("log.level")) + assert.Equal(t, "stderr", cfg.GetString("log.output")) + assert.Equal(t, "text", cfg.GetString("log.format")) + assert.Equal(t, false, cfg.GetBool("log.stacktrace")) + assert.Equal(t, false, cfg.GetBool("log.source")) + assert.Equal(t, "", cfg.GetString("log.overrides")) } diff --git a/docs/config.md b/docs/config.md index 0d66fdd564..80c6d437ec 100644 --- a/docs/config.md +++ b/docs/config.md @@ -67,7 +67,7 @@ https://docs.libp2p.io/concepts/circuit-relay/ ## `log.level` -Log level to use. Options are `debug`, `info`, `error`, `fatal`. Defaults to `info`. +Log level to use. Options are `info` or `error`. Defaults to `info`. 
## `log.output` From ee68bc92ae6f83925378b7a0927d79405a20cc53 Mon Sep 17 00:00:00 2001 From: Keenan Nemetz Date: Mon, 18 Mar 2024 10:39:34 -0700 Subject: [PATCH 6/6] update to corelog v0.0.6 --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index c28d9a46d4..eff296f3e3 100644 --- a/go.mod +++ b/go.mod @@ -30,7 +30,7 @@ require ( github.com/multiformats/go-multibase v0.2.0 github.com/multiformats/go-multihash v0.2.3 github.com/sourcenetwork/badger/v4 v4.2.1-0.20231113215945-a63444ca5276 - github.com/sourcenetwork/corelog v0.0.5 + github.com/sourcenetwork/corelog v0.0.6 github.com/sourcenetwork/go-libp2p-pubsub-rpc v0.0.13 github.com/sourcenetwork/graphql-go v0.7.10-0.20231113214537-a9560c1898dd github.com/sourcenetwork/immutable v0.3.0 diff --git a/go.sum b/go.sum index 1aba7a05e4..22c2ef750c 100644 --- a/go.sum +++ b/go.sum @@ -510,8 +510,8 @@ github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIK github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA= github.com/sourcenetwork/badger/v4 v4.2.1-0.20231113215945-a63444ca5276 h1:TpQDDPfucDgCNH0NVqVUk6SSq6T6G8p9HIocmwZh9Tg= github.com/sourcenetwork/badger/v4 v4.2.1-0.20231113215945-a63444ca5276/go.mod h1:lxiZTDBw0vheFMqSwX2OvB6RTDI1+/UtVCSU4rpThFM= -github.com/sourcenetwork/corelog v0.0.5 h1:7DynoceolaivI/Pd10KJpdYN7Vm6boJSGHGZss7Febg= -github.com/sourcenetwork/corelog v0.0.5/go.mod h1:mXsBA4ujUt0lAkDFoHoXuaIQjhdeXi+RfDNV7ZTiy5E= +github.com/sourcenetwork/corelog v0.0.6 h1:3q3Kh1G0C4iHimkPrOpAZOKIKESIv4zZ51vKuY8pANA= +github.com/sourcenetwork/corelog v0.0.6/go.mod h1:mXsBA4ujUt0lAkDFoHoXuaIQjhdeXi+RfDNV7ZTiy5E= github.com/sourcenetwork/go-libp2p-pubsub-rpc v0.0.13 h1:d/PeGZutd5NcDr6ltAv8ubN5PxsHMp1YUnhHY/QCWB4= github.com/sourcenetwork/go-libp2p-pubsub-rpc v0.0.13/go.mod h1:jUoQv592uUX1u7QBjAY4C+l24X9ArhPfifOqXpDHz4U= github.com/sourcenetwork/graphql-go 
v0.7.10-0.20231113214537-a9560c1898dd h1:lmpW39/8wPJ0khWRhOcj7Bj0HYKbSmQ8rXMJw1cMB8U=